/**************************************************************************** * Copyright (C) 2012-2015 Woboq GmbH * Olivier Goffart * https://woboq.com/codebrowser.html * * This file is part of the Woboq Code Browser. * * Commercial License Usage: * Licensees holding valid commercial licenses provided by Woboq may use * this file in accordance with the terms contained in a written agreement * between the licensee and Woboq. * For further information see https://woboq.com/codebrowser.html * * Alternatively, this work may be used under a Creative Commons * Attribution-NonCommercial-ShareAlike 3.0 (CC-BY-NC-SA 3.0) License. * http://creativecommons.org/licenses/by-nc-sa/3.0/deed.en_US * This license does not allow you to use the code browser to assist the * development of your commercial software. If you intent to do so, consider * purchasing a commercial licence. ****************************************************************************/ #pragma once #include #include #include struct EmbeddedFile { const char *filename; const char *content; size_t size; template constexpr EmbeddedFile(const char *filename, const char (&data)[N]) : filename(filename) , content(data), size(N-1) {} constexpr EmbeddedFile () : filename(nullptr) , content(nullptr), size(0) {} }; static constexpr EmbeddedFile EmbeddedFiles[] = { { "/builtins/__clang_cuda_builtin_vars.h" , "/*===---- cuda_builtin_vars.h - CUDA built-in variables ---------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __CUDA_BUILTIN_VARS_H\n" "#define __CUDA_BUILTIN_VARS_H\n" "\n" "// Forward declares from vector_types.h.\n" "struct uint3;\n" "struct dim3;\n" "\n" "// The file implements built-in CUDA variables using 
__declspec(property).\n" "// https://msdn.microsoft.com/en-us/library/yhfk0thd.aspx\n" "// All read accesses of built-in variable fields get converted into calls to a\n" "// getter function which in turn calls the appropriate builtin to fetch the\n" "// value.\n" "//\n" "// Example:\n" "// int x = threadIdx.x;\n" "// IR output:\n" "// %0 = call i32 @llvm.nvvm.read.ptx.sreg.tid.x() #3\n" "// PTX output:\n" "// mov.u32 %r2, %tid.x;\n" "\n" "#define __CUDA_DEVICE_BUILTIN(FIELD, INTRINSIC) \\\n" " __declspec(property(get = __fetch_builtin_##FIELD)) unsigned int FIELD; \\\n" " static inline __attribute__((always_inline)) \\\n" " __attribute__((device)) unsigned int __fetch_builtin_##FIELD(void) { \\\n" " return INTRINSIC; \\\n" " }\n" "\n" "#if __cplusplus >= 201103L\n" "#define __DELETE =delete\n" "#else\n" "#define __DELETE\n" "#endif\n" "\n" "// Make sure nobody can create instances of the special variable types. nvcc\n" "// also disallows taking address of special variables, so we disable address-of\n" "// operator as well.\n" "#define __CUDA_DISALLOW_BUILTINVAR_ACCESS(TypeName) \\\n" " __attribute__((device)) TypeName() __DELETE; \\\n" " __attribute__((device)) TypeName(const TypeName &) __DELETE; \\\n" " __attribute__((device)) void operator=(const TypeName &) const __DELETE; \\\n" " __attribute__((device)) TypeName *operator&() const __DELETE\n" "\n" "struct __cuda_builtin_threadIdx_t {\n" " __CUDA_DEVICE_BUILTIN(x,__nvvm_read_ptx_sreg_tid_x());\n" " __CUDA_DEVICE_BUILTIN(y,__nvvm_read_ptx_sreg_tid_y());\n" " __CUDA_DEVICE_BUILTIN(z,__nvvm_read_ptx_sreg_tid_z());\n" " // threadIdx should be convertible to uint3 (in fact in nvcc, it *is* a\n" " // uint3). 
This function is defined after we pull in vector_types.h.\n" " __attribute__((device)) operator dim3() const;\n" " __attribute__((device)) operator uint3() const;\n" "\n" "private:\n" " __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_threadIdx_t);\n" "};\n" "\n" "struct __cuda_builtin_blockIdx_t {\n" " __CUDA_DEVICE_BUILTIN(x,__nvvm_read_ptx_sreg_ctaid_x());\n" " __CUDA_DEVICE_BUILTIN(y,__nvvm_read_ptx_sreg_ctaid_y());\n" " __CUDA_DEVICE_BUILTIN(z,__nvvm_read_ptx_sreg_ctaid_z());\n" " // blockIdx should be convertible to uint3 (in fact in nvcc, it *is* a\n" " // uint3). This function is defined after we pull in vector_types.h.\n" " __attribute__((device)) operator dim3() const;\n" " __attribute__((device)) operator uint3() const;\n" "\n" "private:\n" " __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_blockIdx_t);\n" "};\n" "\n" "struct __cuda_builtin_blockDim_t {\n" " __CUDA_DEVICE_BUILTIN(x,__nvvm_read_ptx_sreg_ntid_x());\n" " __CUDA_DEVICE_BUILTIN(y,__nvvm_read_ptx_sreg_ntid_y());\n" " __CUDA_DEVICE_BUILTIN(z,__nvvm_read_ptx_sreg_ntid_z());\n" " // blockDim should be convertible to dim3 (in fact in nvcc, it *is* a\n" " // dim3). This function is defined after we pull in vector_types.h.\n" " __attribute__((device)) operator dim3() const;\n" " __attribute__((device)) operator uint3() const;\n" "\n" "private:\n" " __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_blockDim_t);\n" "};\n" "\n" "struct __cuda_builtin_gridDim_t {\n" " __CUDA_DEVICE_BUILTIN(x,__nvvm_read_ptx_sreg_nctaid_x());\n" " __CUDA_DEVICE_BUILTIN(y,__nvvm_read_ptx_sreg_nctaid_y());\n" " __CUDA_DEVICE_BUILTIN(z,__nvvm_read_ptx_sreg_nctaid_z());\n" " // gridDim should be convertible to dim3 (in fact in nvcc, it *is* a\n" " // dim3). 
This function is defined after we pull in vector_types.h.\n" " __attribute__((device)) operator dim3() const;\n" " __attribute__((device)) operator uint3() const;\n" "\n" "private:\n" " __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_gridDim_t);\n" "};\n" "\n" "#define __CUDA_BUILTIN_VAR \\\n" " extern const __attribute__((device)) __attribute__((weak))\n" "__CUDA_BUILTIN_VAR __cuda_builtin_threadIdx_t threadIdx;\n" "__CUDA_BUILTIN_VAR __cuda_builtin_blockIdx_t blockIdx;\n" "__CUDA_BUILTIN_VAR __cuda_builtin_blockDim_t blockDim;\n" "__CUDA_BUILTIN_VAR __cuda_builtin_gridDim_t gridDim;\n" "\n" "// warpSize should translate to read of %WARP_SZ but there's currently no\n" "// builtin to do so. According to PTX v4.2 docs 'to date, all target\n" "// architectures have a WARP_SZ value of 32'.\n" "__attribute__((device)) const int warpSize = 32;\n" "\n" "#undef __CUDA_DEVICE_BUILTIN\n" "#undef __CUDA_BUILTIN_VAR\n" "#undef __CUDA_DISALLOW_BUILTINVAR_ACCESS\n" "#undef __DELETE\n" "\n" "#endif /* __CUDA_BUILTIN_VARS_H */\n" "" } , { "/builtins/__clang_cuda_cmath.h" , "/*===---- __clang_cuda_cmath.h - Device-side CUDA cmath support ------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "#ifndef __CLANG_CUDA_CMATH_H__\n" "#define __CLANG_CUDA_CMATH_H__\n" "#ifndef __CUDA__\n" "#error \"This file is for CUDA compilation only.\"\n" "#endif\n" "\n" "#ifndef __OPENMP_NVPTX__\n" "#include \n" "#endif\n" "\n" "// CUDA lets us use various std math functions on the device side. 
This file\n" "// works in concert with __clang_cuda_math_forward_declares.h to make this work.\n" "//\n" "// Specifically, the forward-declares header declares __device__ overloads for\n" "// these functions in the global namespace, then pulls them into namespace std\n" "// with 'using' statements. Then this file implements those functions, after\n" "// their implementations have been pulled in.\n" "//\n" "// It's important that we declare the functions in the global namespace and pull\n" "// them into namespace std with using statements, as opposed to simply declaring\n" "// these functions in namespace std, because our device functions need to\n" "// overload the standard library functions, which may be declared in the global\n" "// namespace or in std, depending on the degree of conformance of the stdlib\n" "// implementation. Declaring in the global namespace and pulling into namespace\n" "// std covers all of the known knowns.\n" "\n" "#ifdef __OPENMP_NVPTX__\n" "#define __DEVICE__ static constexpr __attribute__((always_inline, nothrow))\n" "#else\n" "#define __DEVICE__ static __device__ __inline__ __attribute__((always_inline))\n" "#endif\n" "\n" "__DEVICE__ long long abs(long long __n) { return ::llabs(__n); }\n" "__DEVICE__ long abs(long __n) { return ::labs(__n); }\n" "__DEVICE__ float abs(float __x) { return ::fabsf(__x); }\n" "__DEVICE__ double abs(double __x) { return ::fabs(__x); }\n" "__DEVICE__ float acos(float __x) { return ::acosf(__x); }\n" "__DEVICE__ float asin(float __x) { return ::asinf(__x); }\n" "__DEVICE__ float atan(float __x) { return ::atanf(__x); }\n" "__DEVICE__ float atan2(float __x, float __y) { return ::atan2f(__x, __y); }\n" "__DEVICE__ float ceil(float __x) { return ::ceilf(__x); }\n" "__DEVICE__ float cos(float __x) { return ::cosf(__x); }\n" "__DEVICE__ float cosh(float __x) { return ::coshf(__x); }\n" "__DEVICE__ float exp(float __x) { return ::expf(__x); }\n" "__DEVICE__ float fabs(float __x) { return ::fabsf(__x); }\n" 
"__DEVICE__ float floor(float __x) { return ::floorf(__x); }\n" "__DEVICE__ float fmod(float __x, float __y) { return ::fmodf(__x, __y); }\n" "__DEVICE__ int fpclassify(float __x) {\n" " return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,\n" " FP_ZERO, __x);\n" "}\n" "__DEVICE__ int fpclassify(double __x) {\n" " return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,\n" " FP_ZERO, __x);\n" "}\n" "__DEVICE__ float frexp(float __arg, int *__exp) {\n" " return ::frexpf(__arg, __exp);\n" "}\n" "\n" "// For inscrutable reasons, the CUDA headers define these functions for us on\n" "// Windows.\n" "#if !defined(_MSC_VER) || defined(__OPENMP_NVPTX__)\n" "\n" "// For OpenMP we work around some old system headers that have non-conforming\n" "// `isinf(float)` and `isnan(float)` implementations that return an `int`. We do\n" "// this by providing two versions of these functions, differing only in the\n" "// return type. To avoid conflicting definitions we disable implicit base\n" "// function generation. That means we will end up with two specializations, one\n" "// per type, but only one has a base function defined by the system header.\n" "#if defined(__OPENMP_NVPTX__)\n" "#pragma omp begin declare variant match( \\\n" " implementation = {extension(disable_implicit_base)})\n" "\n" "// FIXME: We lack an extension to customize the mangling of the variants, e.g.,\n" "// add a suffix. This means we would clash with the names of the variants\n" "// (note that we do not create implicit base functions here). To avoid\n" "// this clash we add a new trait to some of them that is always true\n" "// (this is LLVM after all ;)). 
It will only influence the mangled name\n" "// of the variants inside the inner region and avoid the clash.\n" "#pragma omp begin declare variant match(implementation = {vendor(llvm)})\n" "\n" "__DEVICE__ int isinf(float __x) { return ::__isinff(__x); }\n" "__DEVICE__ int isinf(double __x) { return ::__isinf(__x); }\n" "__DEVICE__ int isfinite(float __x) { return ::__finitef(__x); }\n" "__DEVICE__ int isfinite(double __x) { return ::__isfinited(__x); }\n" "__DEVICE__ int isnan(float __x) { return ::__isnanf(__x); }\n" "__DEVICE__ int isnan(double __x) { return ::__isnan(__x); }\n" "\n" "#pragma omp end declare variant\n" "\n" "#endif\n" "\n" "__DEVICE__ bool isinf(float __x) { return ::__isinff(__x); }\n" "__DEVICE__ bool isinf(double __x) { return ::__isinf(__x); }\n" "__DEVICE__ bool isfinite(float __x) { return ::__finitef(__x); }\n" "// For inscrutable reasons, __finite(), the double-precision version of\n" "// __finitef, does not exist when compiling for MacOS. __isfinited is available\n" "// everywhere and is just as good.\n" "__DEVICE__ bool isfinite(double __x) { return ::__isfinited(__x); }\n" "__DEVICE__ bool isnan(float __x) { return ::__isnanf(__x); }\n" "__DEVICE__ bool isnan(double __x) { return ::__isnan(__x); }\n" "\n" "#if defined(__OPENMP_NVPTX__)\n" "#pragma omp end declare variant\n" "#endif\n" "\n" "#endif\n" "\n" "__DEVICE__ bool isgreater(float __x, float __y) {\n" " return __builtin_isgreater(__x, __y);\n" "}\n" "__DEVICE__ bool isgreater(double __x, double __y) {\n" " return __builtin_isgreater(__x, __y);\n" "}\n" "__DEVICE__ bool isgreaterequal(float __x, float __y) {\n" " return __builtin_isgreaterequal(__x, __y);\n" "}\n" "__DEVICE__ bool isgreaterequal(double __x, double __y) {\n" " return __builtin_isgreaterequal(__x, __y);\n" "}\n" "__DEVICE__ bool isless(float __x, float __y) {\n" " return __builtin_isless(__x, __y);\n" "}\n" "__DEVICE__ bool isless(double __x, double __y) {\n" " return __builtin_isless(__x, __y);\n" "}\n" 
"__DEVICE__ bool islessequal(float __x, float __y) {\n" " return __builtin_islessequal(__x, __y);\n" "}\n" "__DEVICE__ bool islessequal(double __x, double __y) {\n" " return __builtin_islessequal(__x, __y);\n" "}\n" "__DEVICE__ bool islessgreater(float __x, float __y) {\n" " return __builtin_islessgreater(__x, __y);\n" "}\n" "__DEVICE__ bool islessgreater(double __x, double __y) {\n" " return __builtin_islessgreater(__x, __y);\n" "}\n" "__DEVICE__ bool isnormal(float __x) { return __builtin_isnormal(__x); }\n" "__DEVICE__ bool isnormal(double __x) { return __builtin_isnormal(__x); }\n" "__DEVICE__ bool isunordered(float __x, float __y) {\n" " return __builtin_isunordered(__x, __y);\n" "}\n" "__DEVICE__ bool isunordered(double __x, double __y) {\n" " return __builtin_isunordered(__x, __y);\n" "}\n" "__DEVICE__ float ldexp(float __arg, int __exp) {\n" " return ::ldexpf(__arg, __exp);\n" "}\n" "__DEVICE__ float log(float __x) { return ::logf(__x); }\n" "__DEVICE__ float log10(float __x) { return ::log10f(__x); }\n" "__DEVICE__ float modf(float __x, float *__iptr) { return ::modff(__x, __iptr); }\n" "__DEVICE__ float pow(float __base, float __exp) {\n" " return ::powf(__base, __exp);\n" "}\n" "__DEVICE__ float pow(float __base, int __iexp) {\n" " return ::powif(__base, __iexp);\n" "}\n" "__DEVICE__ double pow(double __base, int __iexp) {\n" " return ::powi(__base, __iexp);\n" "}\n" "__DEVICE__ bool signbit(float __x) { return ::__signbitf(__x); }\n" "__DEVICE__ bool signbit(double __x) { return ::__signbitd(__x); }\n" "__DEVICE__ float sin(float __x) { return ::sinf(__x); }\n" "__DEVICE__ float sinh(float __x) { return ::sinhf(__x); }\n" "__DEVICE__ float sqrt(float __x) { return ::sqrtf(__x); }\n" "__DEVICE__ float tan(float __x) { return ::tanf(__x); }\n" "__DEVICE__ float tanh(float __x) { return ::tanhf(__x); }\n" "\n" "// There was a redefinition error for this this overload in CUDA mode.\n" "// We restrict it to OpenMP mode for now, that is where it is actually 
needed\n" "// anyway.\n" "#ifdef __OPENMP_NVPTX__\n" "__DEVICE__ float remquo(float __n, float __d, int *__q) {\n" " return ::remquof(__n, __d, __q);\n" "}\n" "#endif\n" "\n" "// Notably missing above is nexttoward. We omit it because\n" "// libdevice doesn't provide an implementation, and we don't want to be in the\n" "// business of implementing tricky libm functions in this header.\n" "\n" "#ifndef __OPENMP_NVPTX__\n" "\n" "// Now we've defined everything we promised we'd define in\n" "// __clang_cuda_math_forward_declares.h. We need to do two additional things to\n" "// fix up our math functions.\n" "//\n" "// 1) Define __device__ overloads for e.g. sin(int). The CUDA headers define\n" "// only sin(float) and sin(double), which means that e.g. sin(0) is\n" "// ambiguous.\n" "//\n" "// 2) Pull the __device__ overloads of \"foobarf\" math functions into namespace\n" "// std. These are defined in the CUDA headers in the global namespace,\n" "// independent of everything else we've done here.\n" "\n" "// We can't use std::enable_if, because we want to be pre-C++11 compatible. 
But\n" "// we go ahead and unconditionally define functions that are only available when\n" "// compiling for C++11 to match the behavior of the CUDA headers.\n" "template\n" "struct __clang_cuda_enable_if {};\n" "\n" "template struct __clang_cuda_enable_if {\n" " typedef __T type;\n" "};\n" "\n" "// Defines an overload of __fn that accepts one integral argument, calls\n" "// __fn((double)x), and returns __retty.\n" "#define __CUDA_CLANG_FN_INTEGER_OVERLOAD_1(__retty, __fn) \\\n" " template \\\n" " __DEVICE__ \\\n" " typename __clang_cuda_enable_if::is_integer, \\\n" " __retty>::type \\\n" " __fn(__T __x) { \\\n" " return ::__fn((double)__x); \\\n" " }\n" "\n" "// Defines an overload of __fn that accepts one two arithmetic arguments, calls\n" "// __fn((double)x, (double)y), and returns a double.\n" "//\n" "// Note this is different from OVERLOAD_1, which generates an overload that\n" "// accepts only *integral* arguments.\n" "#define __CUDA_CLANG_FN_INTEGER_OVERLOAD_2(__retty, __fn) \\\n" " template \\\n" " __DEVICE__ typename __clang_cuda_enable_if< \\\n" " std::numeric_limits<__T1>::is_specialized && \\\n" " std::numeric_limits<__T2>::is_specialized, \\\n" " __retty>::type \\\n" " __fn(__T1 __x, __T2 __y) { \\\n" " return __fn((double)__x, (double)__y); \\\n" " }\n" "\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, acos)\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, acosh)\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, asin)\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, asinh)\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, atan)\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, atan2);\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, atanh)\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, cbrt)\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, ceil)\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, copysign);\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, cos)\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, cosh)\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, erf)\n" 
"__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, erfc)\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, exp)\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, exp2)\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, expm1)\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, fabs)\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, fdim);\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, floor)\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, fmax);\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, fmin);\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, fmod);\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(int, fpclassify)\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, hypot);\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(int, ilogb)\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(bool, isfinite)\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, isgreater);\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, isgreaterequal);\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(bool, isinf);\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, isless);\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, islessequal);\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, islessgreater);\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(bool, isnan);\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(bool, isnormal)\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, isunordered);\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, lgamma)\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, log)\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, log10)\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, log1p)\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, log2)\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, logb)\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(long long, llrint)\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(long long, llround)\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(long, lrint)\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(long, lround)\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, nearbyint);\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, nextafter);\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, pow);\n" 
"__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, remainder);\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, rint);\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, round);\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(bool, signbit)\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, sin)\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, sinh)\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, sqrt)\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, tan)\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, tanh)\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, tgamma)\n" "__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, trunc);\n" "\n" "#undef __CUDA_CLANG_FN_INTEGER_OVERLOAD_1\n" "#undef __CUDA_CLANG_FN_INTEGER_OVERLOAD_2\n" "\n" "// Overloads for functions that don't match the patterns expected by\n" "// __CUDA_CLANG_FN_INTEGER_OVERLOAD_{1,2}.\n" "template \n" "__DEVICE__ typename __clang_cuda_enable_if<\n" " std::numeric_limits<__T1>::is_specialized &&\n" " std::numeric_limits<__T2>::is_specialized &&\n" " std::numeric_limits<__T3>::is_specialized,\n" " double>::type\n" "fma(__T1 __x, __T2 __y, __T3 __z) {\n" " return std::fma((double)__x, (double)__y, (double)__z);\n" "}\n" "\n" "template \n" "__DEVICE__ typename __clang_cuda_enable_if::is_integer,\n" " double>::type\n" "frexp(__T __x, int *__exp) {\n" " return std::frexp((double)__x, __exp);\n" "}\n" "\n" "template \n" "__DEVICE__ typename __clang_cuda_enable_if::is_integer,\n" " double>::type\n" "ldexp(__T __x, int __exp) {\n" " return std::ldexp((double)__x, __exp);\n" "}\n" "\n" "template \n" "__DEVICE__ typename __clang_cuda_enable_if<\n" " std::numeric_limits<__T1>::is_specialized &&\n" " std::numeric_limits<__T2>::is_specialized,\n" " double>::type\n" "remquo(__T1 __x, __T2 __y, int *__quo) {\n" " return std::remquo((double)__x, (double)__y, __quo);\n" "}\n" "\n" "template \n" "__DEVICE__ typename __clang_cuda_enable_if::is_integer,\n" " double>::type\n" "scalbln(__T __x, long __exp) {\n" " return std::scalbln((double)__x, __exp);\n" "}\n" "\n" 
"template \n" "__DEVICE__ typename __clang_cuda_enable_if::is_integer,\n" " double>::type\n" "scalbn(__T __x, int __exp) {\n" " return std::scalbn((double)__x, __exp);\n" "}\n" "\n" "// We need to define these overloads in exactly the namespace our standard\n" "// library uses (including the right inline namespace), otherwise they won't be\n" "// picked up by other functions in the standard library (e.g. functions in\n" "// ). Thus the ugliness below.\n" "#ifdef _LIBCPP_BEGIN_NAMESPACE_STD\n" "_LIBCPP_BEGIN_NAMESPACE_STD\n" "#else\n" "namespace std {\n" "#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION\n" "_GLIBCXX_BEGIN_NAMESPACE_VERSION\n" "#endif\n" "#endif\n" "\n" "// Pull the new overloads we defined above into namespace std.\n" "using ::acos;\n" "using ::acosh;\n" "using ::asin;\n" "using ::asinh;\n" "using ::atan;\n" "using ::atan2;\n" "using ::atanh;\n" "using ::cbrt;\n" "using ::ceil;\n" "using ::copysign;\n" "using ::cos;\n" "using ::cosh;\n" "using ::erf;\n" "using ::erfc;\n" "using ::exp;\n" "using ::exp2;\n" "using ::expm1;\n" "using ::fabs;\n" "using ::fdim;\n" "using ::floor;\n" "using ::fma;\n" "using ::fmax;\n" "using ::fmin;\n" "using ::fmod;\n" "using ::fpclassify;\n" "using ::frexp;\n" "using ::hypot;\n" "using ::ilogb;\n" "using ::isfinite;\n" "using ::isgreater;\n" "using ::isgreaterequal;\n" "using ::isless;\n" "using ::islessequal;\n" "using ::islessgreater;\n" "using ::isnormal;\n" "using ::isunordered;\n" "using ::ldexp;\n" "using ::lgamma;\n" "using ::llrint;\n" "using ::llround;\n" "using ::log;\n" "using ::log10;\n" "using ::log1p;\n" "using ::log2;\n" "using ::logb;\n" "using ::lrint;\n" "using ::lround;\n" "using ::nearbyint;\n" "using ::nextafter;\n" "using ::pow;\n" "using ::remainder;\n" "using ::remquo;\n" "using ::rint;\n" "using ::round;\n" "using ::scalbln;\n" "using ::scalbn;\n" "using ::signbit;\n" "using ::sin;\n" "using ::sinh;\n" "using ::sqrt;\n" "using ::tan;\n" "using ::tanh;\n" "using ::tgamma;\n" "using ::trunc;\n" "\n" "// 
Well this is fun: We need to pull these symbols in for libc++, but we can't\n" "// pull them in with libstdc++, because its ::isinf and ::isnan are different\n" "// than its std::isinf and std::isnan.\n" "#ifndef __GLIBCXX__\n" "using ::isinf;\n" "using ::isnan;\n" "#endif\n" "\n" "// Finally, pull the \"foobarf\" functions that CUDA defines in its headers into\n" "// namespace std.\n" "using ::acosf;\n" "using ::acoshf;\n" "using ::asinf;\n" "using ::asinhf;\n" "using ::atan2f;\n" "using ::atanf;\n" "using ::atanhf;\n" "using ::cbrtf;\n" "using ::ceilf;\n" "using ::copysignf;\n" "using ::cosf;\n" "using ::coshf;\n" "using ::erfcf;\n" "using ::erff;\n" "using ::exp2f;\n" "using ::expf;\n" "using ::expm1f;\n" "using ::fabsf;\n" "using ::fdimf;\n" "using ::floorf;\n" "using ::fmaf;\n" "using ::fmaxf;\n" "using ::fminf;\n" "using ::fmodf;\n" "using ::frexpf;\n" "using ::hypotf;\n" "using ::ilogbf;\n" "using ::ldexpf;\n" "using ::lgammaf;\n" "using ::llrintf;\n" "using ::llroundf;\n" "using ::log10f;\n" "using ::log1pf;\n" "using ::log2f;\n" "using ::logbf;\n" "using ::logf;\n" "using ::lrintf;\n" "using ::lroundf;\n" "using ::modff;\n" "using ::nearbyintf;\n" "using ::nextafterf;\n" "using ::powf;\n" "using ::remainderf;\n" "using ::remquof;\n" "using ::rintf;\n" "using ::roundf;\n" "using ::scalblnf;\n" "using ::scalbnf;\n" "using ::sinf;\n" "using ::sinhf;\n" "using ::sqrtf;\n" "using ::tanf;\n" "using ::tanhf;\n" "using ::tgammaf;\n" "using ::truncf;\n" "\n" "#ifdef _LIBCPP_END_NAMESPACE_STD\n" "_LIBCPP_END_NAMESPACE_STD\n" "#else\n" "#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION\n" "_GLIBCXX_END_NAMESPACE_VERSION\n" "#endif\n" "} // namespace std\n" "#endif\n" "\n" "#endif // __OPENMP_NVPTX__\n" "\n" "#undef __DEVICE__\n" "\n" "#endif\n" "" } , { "/builtins/__clang_cuda_complex_builtins.h" , "/*===-- __clang_cuda_complex_builtins - CUDA impls of runtime complex fns ---===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * 
See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __CLANG_CUDA_COMPLEX_BUILTINS\n" "#define __CLANG_CUDA_COMPLEX_BUILTINS\n" "\n" "// This header defines __muldc3, __mulsc3, __divdc3, and __divsc3. These are\n" "// libgcc functions that clang assumes are available when compiling c99 complex\n" "// operations. (These implementations come from libc++, and have been modified\n" "// to work with CUDA and OpenMP target offloading [in C and C++ mode].)\n" "\n" "#pragma push_macro(\"__DEVICE__\")\n" "#if defined(__OPENMP_NVPTX__) || defined(__OPENMP_AMDGCN__)\n" "#pragma omp declare target\n" "#define __DEVICE__ __attribute__((noinline, nothrow, cold, weak))\n" "#else\n" "#define __DEVICE__ __device__ inline\n" "#endif\n" "\n" "// To make the algorithms available for C and C++ in CUDA and OpenMP we select\n" "// different but equivalent function versions. 
TODO: For OpenMP we currently\n" "// select the native builtins as the overload support for templates is lacking.\n" "#if !defined(__OPENMP_NVPTX__) && !defined(__OPENMP_AMDGCN__)\n" "#define _ISNANd std::isnan\n" "#define _ISNANf std::isnan\n" "#define _ISINFd std::isinf\n" "#define _ISINFf std::isinf\n" "#define _ISFINITEd std::isfinite\n" "#define _ISFINITEf std::isfinite\n" "#define _COPYSIGNd std::copysign\n" "#define _COPYSIGNf std::copysign\n" "#define _SCALBNd std::scalbn\n" "#define _SCALBNf std::scalbn\n" "#define _ABSd std::abs\n" "#define _ABSf std::abs\n" "#define _LOGBd std::logb\n" "#define _LOGBf std::logb\n" "// Rather than pulling in std::max from algorithm everytime, use available ::max.\n" "#define _fmaxd max\n" "#define _fmaxf max\n" "#else\n" "#ifdef __AMDGCN__\n" "#define _ISNANd __ocml_isnan_f64\n" "#define _ISNANf __ocml_isnan_f32\n" "#define _ISINFd __ocml_isinf_f64\n" "#define _ISINFf __ocml_isinf_f32\n" "#define _ISFINITEd __ocml_isfinite_f64\n" "#define _ISFINITEf __ocml_isfinite_f32\n" "#define _COPYSIGNd __ocml_copysign_f64\n" "#define _COPYSIGNf __ocml_copysign_f32\n" "#define _SCALBNd __ocml_scalbn_f64\n" "#define _SCALBNf __ocml_scalbn_f32\n" "#define _ABSd __ocml_fabs_f64\n" "#define _ABSf __ocml_fabs_f32\n" "#define _LOGBd __ocml_logb_f64\n" "#define _LOGBf __ocml_logb_f32\n" "#define _fmaxd __ocml_fmax_f64\n" "#define _fmaxf __ocml_fmax_f32\n" "#else\n" "#define _ISNANd __nv_isnand\n" "#define _ISNANf __nv_isnanf\n" "#define _ISINFd __nv_isinfd\n" "#define _ISINFf __nv_isinff\n" "#define _ISFINITEd __nv_isfinited\n" "#define _ISFINITEf __nv_finitef\n" "#define _COPYSIGNd __nv_copysign\n" "#define _COPYSIGNf __nv_copysignf\n" "#define _SCALBNd __nv_scalbn\n" "#define _SCALBNf __nv_scalbnf\n" "#define _ABSd __nv_fabs\n" "#define _ABSf __nv_fabsf\n" "#define _LOGBd __nv_logb\n" "#define _LOGBf __nv_logbf\n" "#define _fmaxd __nv_fmax\n" "#define _fmaxf __nv_fmaxf\n" "#endif\n" "#endif\n" "\n" "#if defined(__cplusplus)\n" "extern 
\"C\" {\n" "#endif\n" "\n" "__DEVICE__ double _Complex __muldc3(double __a, double __b, double __c,\n" " double __d) {\n" " double __ac = __a * __c;\n" " double __bd = __b * __d;\n" " double __ad = __a * __d;\n" " double __bc = __b * __c;\n" " double _Complex z;\n" " __real__(z) = __ac - __bd;\n" " __imag__(z) = __ad + __bc;\n" " if (_ISNANd(__real__(z)) && _ISNANd(__imag__(z))) {\n" " int __recalc = 0;\n" " if (_ISINFd(__a) || _ISINFd(__b)) {\n" " __a = _COPYSIGNd(_ISINFd(__a) ? 1 : 0, __a);\n" " __b = _COPYSIGNd(_ISINFd(__b) ? 1 : 0, __b);\n" " if (_ISNANd(__c))\n" " __c = _COPYSIGNd(0, __c);\n" " if (_ISNANd(__d))\n" " __d = _COPYSIGNd(0, __d);\n" " __recalc = 1;\n" " }\n" " if (_ISINFd(__c) || _ISINFd(__d)) {\n" " __c = _COPYSIGNd(_ISINFd(__c) ? 1 : 0, __c);\n" " __d = _COPYSIGNd(_ISINFd(__d) ? 1 : 0, __d);\n" " if (_ISNANd(__a))\n" " __a = _COPYSIGNd(0, __a);\n" " if (_ISNANd(__b))\n" " __b = _COPYSIGNd(0, __b);\n" " __recalc = 1;\n" " }\n" " if (!__recalc &&\n" " (_ISINFd(__ac) || _ISINFd(__bd) || _ISINFd(__ad) || _ISINFd(__bc))) {\n" " if (_ISNANd(__a))\n" " __a = _COPYSIGNd(0, __a);\n" " if (_ISNANd(__b))\n" " __b = _COPYSIGNd(0, __b);\n" " if (_ISNANd(__c))\n" " __c = _COPYSIGNd(0, __c);\n" " if (_ISNANd(__d))\n" " __d = _COPYSIGNd(0, __d);\n" " __recalc = 1;\n" " }\n" " if (__recalc) {\n" " // Can't use std::numeric_limits::infinity() -- that doesn't have\n" " // a device overload (and isn't constexpr before C++11, naturally).\n" " __real__(z) = __builtin_huge_val() * (__a * __c - __b * __d);\n" " __imag__(z) = __builtin_huge_val() * (__a * __d + __b * __c);\n" " }\n" " }\n" " return z;\n" "}\n" "\n" "__DEVICE__ float _Complex __mulsc3(float __a, float __b, float __c, float __d) {\n" " float __ac = __a * __c;\n" " float __bd = __b * __d;\n" " float __ad = __a * __d;\n" " float __bc = __b * __c;\n" " float _Complex z;\n" " __real__(z) = __ac - __bd;\n" " __imag__(z) = __ad + __bc;\n" " if (_ISNANf(__real__(z)) && _ISNANf(__imag__(z))) {\n" " int __recalc = 
0;\n" " if (_ISINFf(__a) || _ISINFf(__b)) {\n" " __a = _COPYSIGNf(_ISINFf(__a) ? 1 : 0, __a);\n" " __b = _COPYSIGNf(_ISINFf(__b) ? 1 : 0, __b);\n" " if (_ISNANf(__c))\n" " __c = _COPYSIGNf(0, __c);\n" " if (_ISNANf(__d))\n" " __d = _COPYSIGNf(0, __d);\n" " __recalc = 1;\n" " }\n" " if (_ISINFf(__c) || _ISINFf(__d)) {\n" " __c = _COPYSIGNf(_ISINFf(__c) ? 1 : 0, __c);\n" " __d = _COPYSIGNf(_ISINFf(__d) ? 1 : 0, __d);\n" " if (_ISNANf(__a))\n" " __a = _COPYSIGNf(0, __a);\n" " if (_ISNANf(__b))\n" " __b = _COPYSIGNf(0, __b);\n" " __recalc = 1;\n" " }\n" " if (!__recalc &&\n" " (_ISINFf(__ac) || _ISINFf(__bd) || _ISINFf(__ad) || _ISINFf(__bc))) {\n" " if (_ISNANf(__a))\n" " __a = _COPYSIGNf(0, __a);\n" " if (_ISNANf(__b))\n" " __b = _COPYSIGNf(0, __b);\n" " if (_ISNANf(__c))\n" " __c = _COPYSIGNf(0, __c);\n" " if (_ISNANf(__d))\n" " __d = _COPYSIGNf(0, __d);\n" " __recalc = 1;\n" " }\n" " if (__recalc) {\n" " __real__(z) = __builtin_huge_valf() * (__a * __c - __b * __d);\n" " __imag__(z) = __builtin_huge_valf() * (__a * __d + __b * __c);\n" " }\n" " }\n" " return z;\n" "}\n" "\n" "__DEVICE__ double _Complex __divdc3(double __a, double __b, double __c,\n" " double __d) {\n" " int __ilogbw = 0;\n" " // Can't use std::max, because that's defined in , and we don't\n" " // want to pull that in for every compile. 
The CUDA headers define\n" " // ::max(float, float) and ::max(double, double), which is sufficient for us.\n" " double __logbw = _LOGBd(_fmaxd(_ABSd(__c), _ABSd(__d)));\n" " if (_ISFINITEd(__logbw)) {\n" " __ilogbw = (int)__logbw;\n" " __c = _SCALBNd(__c, -__ilogbw);\n" " __d = _SCALBNd(__d, -__ilogbw);\n" " }\n" " double __denom = __c * __c + __d * __d;\n" " double _Complex z;\n" " __real__(z) = _SCALBNd((__a * __c + __b * __d) / __denom, -__ilogbw);\n" " __imag__(z) = _SCALBNd((__b * __c - __a * __d) / __denom, -__ilogbw);\n" " if (_ISNANd(__real__(z)) && _ISNANd(__imag__(z))) {\n" " if ((__denom == 0.0) && (!_ISNANd(__a) || !_ISNANd(__b))) {\n" " __real__(z) = _COPYSIGNd(__builtin_huge_val(), __c) * __a;\n" " __imag__(z) = _COPYSIGNd(__builtin_huge_val(), __c) * __b;\n" " } else if ((_ISINFd(__a) || _ISINFd(__b)) && _ISFINITEd(__c) &&\n" " _ISFINITEd(__d)) {\n" " __a = _COPYSIGNd(_ISINFd(__a) ? 1.0 : 0.0, __a);\n" " __b = _COPYSIGNd(_ISINFd(__b) ? 1.0 : 0.0, __b);\n" " __real__(z) = __builtin_huge_val() * (__a * __c + __b * __d);\n" " __imag__(z) = __builtin_huge_val() * (__b * __c - __a * __d);\n" " } else if (_ISINFd(__logbw) && __logbw > 0.0 && _ISFINITEd(__a) &&\n" " _ISFINITEd(__b)) {\n" " __c = _COPYSIGNd(_ISINFd(__c) ? 1.0 : 0.0, __c);\n" " __d = _COPYSIGNd(_ISINFd(__d) ? 
1.0 : 0.0, __d);\n" " __real__(z) = 0.0 * (__a * __c + __b * __d);\n" " __imag__(z) = 0.0 * (__b * __c - __a * __d);\n" " }\n" " }\n" " return z;\n" "}\n" "\n" "__DEVICE__ float _Complex __divsc3(float __a, float __b, float __c, float __d) {\n" " int __ilogbw = 0;\n" " float __logbw = _LOGBf(_fmaxf(_ABSf(__c), _ABSf(__d)));\n" " if (_ISFINITEf(__logbw)) {\n" " __ilogbw = (int)__logbw;\n" " __c = _SCALBNf(__c, -__ilogbw);\n" " __d = _SCALBNf(__d, -__ilogbw);\n" " }\n" " float __denom = __c * __c + __d * __d;\n" " float _Complex z;\n" " __real__(z) = _SCALBNf((__a * __c + __b * __d) / __denom, -__ilogbw);\n" " __imag__(z) = _SCALBNf((__b * __c - __a * __d) / __denom, -__ilogbw);\n" " if (_ISNANf(__real__(z)) && _ISNANf(__imag__(z))) {\n" " if ((__denom == 0) && (!_ISNANf(__a) || !_ISNANf(__b))) {\n" " __real__(z) = _COPYSIGNf(__builtin_huge_valf(), __c) * __a;\n" " __imag__(z) = _COPYSIGNf(__builtin_huge_valf(), __c) * __b;\n" " } else if ((_ISINFf(__a) || _ISINFf(__b)) && _ISFINITEf(__c) &&\n" " _ISFINITEf(__d)) {\n" " __a = _COPYSIGNf(_ISINFf(__a) ? 1 : 0, __a);\n" " __b = _COPYSIGNf(_ISINFf(__b) ? 1 : 0, __b);\n" " __real__(z) = __builtin_huge_valf() * (__a * __c + __b * __d);\n" " __imag__(z) = __builtin_huge_valf() * (__b * __c - __a * __d);\n" " } else if (_ISINFf(__logbw) && __logbw > 0 && _ISFINITEf(__a) &&\n" " _ISFINITEf(__b)) {\n" " __c = _COPYSIGNf(_ISINFf(__c) ? 1 : 0, __c);\n" " __d = _COPYSIGNf(_ISINFf(__d) ? 
1 : 0, __d);\n" " __real__(z) = 0 * (__a * __c + __b * __d);\n" " __imag__(z) = 0 * (__b * __c - __a * __d);\n" " }\n" " }\n" " return z;\n" "}\n" "\n" "#if defined(__cplusplus)\n" "} // extern \"C\"\n" "#endif\n" "\n" "#undef _ISNANd\n" "#undef _ISNANf\n" "#undef _ISINFd\n" "#undef _ISINFf\n" "#undef _COPYSIGNd\n" "#undef _COPYSIGNf\n" "#undef _ISFINITEd\n" "#undef _ISFINITEf\n" "#undef _SCALBNd\n" "#undef _SCALBNf\n" "#undef _ABSd\n" "#undef _ABSf\n" "#undef _LOGBd\n" "#undef _LOGBf\n" "#undef _fmaxd\n" "#undef _fmaxf\n" "\n" "#if defined(__OPENMP_NVPTX__) || defined(__OPENMP_AMDGCN__)\n" "#pragma omp end declare target\n" "#endif\n" "\n" "#pragma pop_macro(\"__DEVICE__\")\n" "\n" "#endif // __CLANG_CUDA_COMPLEX_BUILTINS\n" "" } , { "/builtins/__clang_cuda_device_functions.h" , "/*===---- __clang_cuda_device_functions.h - CUDA runtime support -----------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __CLANG_CUDA_DEVICE_FUNCTIONS_H__\n" "#define __CLANG_CUDA_DEVICE_FUNCTIONS_H__\n" "\n" "#ifndef __OPENMP_NVPTX__\n" "#if CUDA_VERSION < 9000\n" "#error This file is intended to be used with CUDA-9+ only.\n" "#endif\n" "#endif\n" "\n" "// __DEVICE__ is a helper macro with common set of attributes for the wrappers\n" "// we implement in this file. 
We need static in order to avoid emitting unused\n" "// functions and __forceinline__ helps inlining these wrappers at -O1.\n" "#pragma push_macro(\"__DEVICE__\")\n" "#ifdef __OPENMP_NVPTX__\n" "#define __DEVICE__ static __attribute__((always_inline, nothrow))\n" "#else\n" "#define __DEVICE__ static __device__ __forceinline__\n" "#endif\n" "\n" "__DEVICE__ int __all(int __a) { return __nvvm_vote_all(__a); }\n" "__DEVICE__ int __any(int __a) { return __nvvm_vote_any(__a); }\n" "__DEVICE__ unsigned int __ballot(int __a) { return __nvvm_vote_ballot(__a); }\n" "__DEVICE__ unsigned int __brev(unsigned int __a) { return __nv_brev(__a); }\n" "__DEVICE__ unsigned long long __brevll(unsigned long long __a) {\n" " return __nv_brevll(__a);\n" "}\n" "#if defined(__cplusplus)\n" "__DEVICE__ void __brkpt() { __asm__ __volatile__(\"brkpt;\"); }\n" "__DEVICE__ void __brkpt(int __a) { __brkpt(); }\n" "#else\n" "__DEVICE__ void __attribute__((overloadable)) __brkpt(void) {\n" " __asm__ __volatile__(\"brkpt;\");\n" "}\n" "__DEVICE__ void __attribute__((overloadable)) __brkpt(int __a) { __brkpt(); }\n" "#endif\n" "__DEVICE__ unsigned int __byte_perm(unsigned int __a, unsigned int __b,\n" " unsigned int __c) {\n" " return __nv_byte_perm(__a, __b, __c);\n" "}\n" "__DEVICE__ int __clz(int __a) { return __nv_clz(__a); }\n" "__DEVICE__ int __clzll(long long __a) { return __nv_clzll(__a); }\n" "__DEVICE__ float __cosf(float __a) { return __nv_fast_cosf(__a); }\n" "__DEVICE__ double __dAtomicAdd(double *__p, double __v) {\n" " return __nvvm_atom_add_gen_d(__p, __v);\n" "}\n" "__DEVICE__ double __dAtomicAdd_block(double *__p, double __v) {\n" " return __nvvm_atom_cta_add_gen_d(__p, __v);\n" "}\n" "__DEVICE__ double __dAtomicAdd_system(double *__p, double __v) {\n" " return __nvvm_atom_sys_add_gen_d(__p, __v);\n" "}\n" "__DEVICE__ double __dadd_rd(double __a, double __b) {\n" " return __nv_dadd_rd(__a, __b);\n" "}\n" "__DEVICE__ double __dadd_rn(double __a, double __b) {\n" " return 
__nv_dadd_rn(__a, __b);\n" "}\n" "__DEVICE__ double __dadd_ru(double __a, double __b) {\n" " return __nv_dadd_ru(__a, __b);\n" "}\n" "__DEVICE__ double __dadd_rz(double __a, double __b) {\n" " return __nv_dadd_rz(__a, __b);\n" "}\n" "__DEVICE__ double __ddiv_rd(double __a, double __b) {\n" " return __nv_ddiv_rd(__a, __b);\n" "}\n" "__DEVICE__ double __ddiv_rn(double __a, double __b) {\n" " return __nv_ddiv_rn(__a, __b);\n" "}\n" "__DEVICE__ double __ddiv_ru(double __a, double __b) {\n" " return __nv_ddiv_ru(__a, __b);\n" "}\n" "__DEVICE__ double __ddiv_rz(double __a, double __b) {\n" " return __nv_ddiv_rz(__a, __b);\n" "}\n" "__DEVICE__ double __dmul_rd(double __a, double __b) {\n" " return __nv_dmul_rd(__a, __b);\n" "}\n" "__DEVICE__ double __dmul_rn(double __a, double __b) {\n" " return __nv_dmul_rn(__a, __b);\n" "}\n" "__DEVICE__ double __dmul_ru(double __a, double __b) {\n" " return __nv_dmul_ru(__a, __b);\n" "}\n" "__DEVICE__ double __dmul_rz(double __a, double __b) {\n" " return __nv_dmul_rz(__a, __b);\n" "}\n" "__DEVICE__ float __double2float_rd(double __a) {\n" " return __nv_double2float_rd(__a);\n" "}\n" "__DEVICE__ float __double2float_rn(double __a) {\n" " return __nv_double2float_rn(__a);\n" "}\n" "__DEVICE__ float __double2float_ru(double __a) {\n" " return __nv_double2float_ru(__a);\n" "}\n" "__DEVICE__ float __double2float_rz(double __a) {\n" " return __nv_double2float_rz(__a);\n" "}\n" "__DEVICE__ int __double2hiint(double __a) { return __nv_double2hiint(__a); }\n" "__DEVICE__ int __double2int_rd(double __a) { return __nv_double2int_rd(__a); }\n" "__DEVICE__ int __double2int_rn(double __a) { return __nv_double2int_rn(__a); }\n" "__DEVICE__ int __double2int_ru(double __a) { return __nv_double2int_ru(__a); }\n" "__DEVICE__ int __double2int_rz(double __a) { return __nv_double2int_rz(__a); }\n" "__DEVICE__ long long __double2ll_rd(double __a) {\n" " return __nv_double2ll_rd(__a);\n" "}\n" "__DEVICE__ long long __double2ll_rn(double __a) {\n" " return 
__nv_double2ll_rn(__a);\n" "}\n" "__DEVICE__ long long __double2ll_ru(double __a) {\n" " return __nv_double2ll_ru(__a);\n" "}\n" "__DEVICE__ long long __double2ll_rz(double __a) {\n" " return __nv_double2ll_rz(__a);\n" "}\n" "__DEVICE__ int __double2loint(double __a) { return __nv_double2loint(__a); }\n" "__DEVICE__ unsigned int __double2uint_rd(double __a) {\n" " return __nv_double2uint_rd(__a);\n" "}\n" "__DEVICE__ unsigned int __double2uint_rn(double __a) {\n" " return __nv_double2uint_rn(__a);\n" "}\n" "__DEVICE__ unsigned int __double2uint_ru(double __a) {\n" " return __nv_double2uint_ru(__a);\n" "}\n" "__DEVICE__ unsigned int __double2uint_rz(double __a) {\n" " return __nv_double2uint_rz(__a);\n" "}\n" "__DEVICE__ unsigned long long __double2ull_rd(double __a) {\n" " return __nv_double2ull_rd(__a);\n" "}\n" "__DEVICE__ unsigned long long __double2ull_rn(double __a) {\n" " return __nv_double2ull_rn(__a);\n" "}\n" "__DEVICE__ unsigned long long __double2ull_ru(double __a) {\n" " return __nv_double2ull_ru(__a);\n" "}\n" "__DEVICE__ unsigned long long __double2ull_rz(double __a) {\n" " return __nv_double2ull_rz(__a);\n" "}\n" "__DEVICE__ long long __double_as_longlong(double __a) {\n" " return __nv_double_as_longlong(__a);\n" "}\n" "__DEVICE__ double __drcp_rd(double __a) { return __nv_drcp_rd(__a); }\n" "__DEVICE__ double __drcp_rn(double __a) { return __nv_drcp_rn(__a); }\n" "__DEVICE__ double __drcp_ru(double __a) { return __nv_drcp_ru(__a); }\n" "__DEVICE__ double __drcp_rz(double __a) { return __nv_drcp_rz(__a); }\n" "__DEVICE__ double __dsqrt_rd(double __a) { return __nv_dsqrt_rd(__a); }\n" "__DEVICE__ double __dsqrt_rn(double __a) { return __nv_dsqrt_rn(__a); }\n" "__DEVICE__ double __dsqrt_ru(double __a) { return __nv_dsqrt_ru(__a); }\n" "__DEVICE__ double __dsqrt_rz(double __a) { return __nv_dsqrt_rz(__a); }\n" "__DEVICE__ double __dsub_rd(double __a, double __b) {\n" " return __nv_dsub_rd(__a, __b);\n" "}\n" "__DEVICE__ double __dsub_rn(double __a, 
double __b) {\n" " return __nv_dsub_rn(__a, __b);\n" "}\n" "__DEVICE__ double __dsub_ru(double __a, double __b) {\n" " return __nv_dsub_ru(__a, __b);\n" "}\n" "__DEVICE__ double __dsub_rz(double __a, double __b) {\n" " return __nv_dsub_rz(__a, __b);\n" "}\n" "__DEVICE__ float __exp10f(float __a) { return __nv_fast_exp10f(__a); }\n" "__DEVICE__ float __expf(float __a) { return __nv_fast_expf(__a); }\n" "__DEVICE__ float __fAtomicAdd(float *__p, float __v) {\n" " return __nvvm_atom_add_gen_f(__p, __v);\n" "}\n" "__DEVICE__ float __fAtomicAdd_block(float *__p, float __v) {\n" " return __nvvm_atom_cta_add_gen_f(__p, __v);\n" "}\n" "__DEVICE__ float __fAtomicAdd_system(float *__p, float __v) {\n" " return __nvvm_atom_sys_add_gen_f(__p, __v);\n" "}\n" "__DEVICE__ float __fAtomicExch(float *__p, float __v) {\n" " return __nv_int_as_float(\n" " __nvvm_atom_xchg_gen_i((int *)__p, __nv_float_as_int(__v)));\n" "}\n" "__DEVICE__ float __fAtomicExch_block(float *__p, float __v) {\n" " return __nv_int_as_float(\n" " __nvvm_atom_cta_xchg_gen_i((int *)__p, __nv_float_as_int(__v)));\n" "}\n" "__DEVICE__ float __fAtomicExch_system(float *__p, float __v) {\n" " return __nv_int_as_float(\n" " __nvvm_atom_sys_xchg_gen_i((int *)__p, __nv_float_as_int(__v)));\n" "}\n" "__DEVICE__ float __fadd_rd(float __a, float __b) {\n" " return __nv_fadd_rd(__a, __b);\n" "}\n" "__DEVICE__ float __fadd_rn(float __a, float __b) {\n" " return __nv_fadd_rn(__a, __b);\n" "}\n" "__DEVICE__ float __fadd_ru(float __a, float __b) {\n" " return __nv_fadd_ru(__a, __b);\n" "}\n" "__DEVICE__ float __fadd_rz(float __a, float __b) {\n" " return __nv_fadd_rz(__a, __b);\n" "}\n" "__DEVICE__ float __fdiv_rd(float __a, float __b) {\n" " return __nv_fdiv_rd(__a, __b);\n" "}\n" "__DEVICE__ float __fdiv_rn(float __a, float __b) {\n" " return __nv_fdiv_rn(__a, __b);\n" "}\n" "__DEVICE__ float __fdiv_ru(float __a, float __b) {\n" " return __nv_fdiv_ru(__a, __b);\n" "}\n" "__DEVICE__ float __fdiv_rz(float __a, float __b) {\n" 
" return __nv_fdiv_rz(__a, __b);\n" "}\n" "__DEVICE__ float __fdividef(float __a, float __b) {\n" " return __nv_fast_fdividef(__a, __b);\n" "}\n" "__DEVICE__ int __ffs(int __a) { return __nv_ffs(__a); }\n" "__DEVICE__ int __ffsll(long long __a) { return __nv_ffsll(__a); }\n" "__DEVICE__ int __finite(double __a) { return __nv_isfinited(__a); }\n" "__DEVICE__ int __finitef(float __a) { return __nv_finitef(__a); }\n" "#ifdef _MSC_VER\n" "__DEVICE__ int __finitel(long double __a);\n" "#endif\n" "__DEVICE__ int __float2int_rd(float __a) { return __nv_float2int_rd(__a); }\n" "__DEVICE__ int __float2int_rn(float __a) { return __nv_float2int_rn(__a); }\n" "__DEVICE__ int __float2int_ru(float __a) { return __nv_float2int_ru(__a); }\n" "__DEVICE__ int __float2int_rz(float __a) { return __nv_float2int_rz(__a); }\n" "__DEVICE__ long long __float2ll_rd(float __a) { return __nv_float2ll_rd(__a); }\n" "__DEVICE__ long long __float2ll_rn(float __a) { return __nv_float2ll_rn(__a); }\n" "__DEVICE__ long long __float2ll_ru(float __a) { return __nv_float2ll_ru(__a); }\n" "__DEVICE__ long long __float2ll_rz(float __a) { return __nv_float2ll_rz(__a); }\n" "__DEVICE__ unsigned int __float2uint_rd(float __a) {\n" " return __nv_float2uint_rd(__a);\n" "}\n" "__DEVICE__ unsigned int __float2uint_rn(float __a) {\n" " return __nv_float2uint_rn(__a);\n" "}\n" "__DEVICE__ unsigned int __float2uint_ru(float __a) {\n" " return __nv_float2uint_ru(__a);\n" "}\n" "__DEVICE__ unsigned int __float2uint_rz(float __a) {\n" " return __nv_float2uint_rz(__a);\n" "}\n" "__DEVICE__ unsigned long long __float2ull_rd(float __a) {\n" " return __nv_float2ull_rd(__a);\n" "}\n" "__DEVICE__ unsigned long long __float2ull_rn(float __a) {\n" " return __nv_float2ull_rn(__a);\n" "}\n" "__DEVICE__ unsigned long long __float2ull_ru(float __a) {\n" " return __nv_float2ull_ru(__a);\n" "}\n" "__DEVICE__ unsigned long long __float2ull_rz(float __a) {\n" " return __nv_float2ull_rz(__a);\n" "}\n" "__DEVICE__ int 
__float_as_int(float __a) { return __nv_float_as_int(__a); }\n" "__DEVICE__ unsigned int __float_as_uint(float __a) {\n" " return __nv_float_as_uint(__a);\n" "}\n" "__DEVICE__ double __fma_rd(double __a, double __b, double __c) {\n" " return __nv_fma_rd(__a, __b, __c);\n" "}\n" "__DEVICE__ double __fma_rn(double __a, double __b, double __c) {\n" " return __nv_fma_rn(__a, __b, __c);\n" "}\n" "__DEVICE__ double __fma_ru(double __a, double __b, double __c) {\n" " return __nv_fma_ru(__a, __b, __c);\n" "}\n" "__DEVICE__ double __fma_rz(double __a, double __b, double __c) {\n" " return __nv_fma_rz(__a, __b, __c);\n" "}\n" "__DEVICE__ float __fmaf_ieee_rd(float __a, float __b, float __c) {\n" " return __nv_fmaf_ieee_rd(__a, __b, __c);\n" "}\n" "__DEVICE__ float __fmaf_ieee_rn(float __a, float __b, float __c) {\n" " return __nv_fmaf_ieee_rn(__a, __b, __c);\n" "}\n" "__DEVICE__ float __fmaf_ieee_ru(float __a, float __b, float __c) {\n" " return __nv_fmaf_ieee_ru(__a, __b, __c);\n" "}\n" "__DEVICE__ float __fmaf_ieee_rz(float __a, float __b, float __c) {\n" " return __nv_fmaf_ieee_rz(__a, __b, __c);\n" "}\n" "__DEVICE__ float __fmaf_rd(float __a, float __b, float __c) {\n" " return __nv_fmaf_rd(__a, __b, __c);\n" "}\n" "__DEVICE__ float __fmaf_rn(float __a, float __b, float __c) {\n" " return __nv_fmaf_rn(__a, __b, __c);\n" "}\n" "__DEVICE__ float __fmaf_ru(float __a, float __b, float __c) {\n" " return __nv_fmaf_ru(__a, __b, __c);\n" "}\n" "__DEVICE__ float __fmaf_rz(float __a, float __b, float __c) {\n" " return __nv_fmaf_rz(__a, __b, __c);\n" "}\n" "__DEVICE__ float __fmul_rd(float __a, float __b) {\n" " return __nv_fmul_rd(__a, __b);\n" "}\n" "__DEVICE__ float __fmul_rn(float __a, float __b) {\n" " return __nv_fmul_rn(__a, __b);\n" "}\n" "__DEVICE__ float __fmul_ru(float __a, float __b) {\n" " return __nv_fmul_ru(__a, __b);\n" "}\n" "__DEVICE__ float __fmul_rz(float __a, float __b) {\n" " return __nv_fmul_rz(__a, __b);\n" "}\n" "__DEVICE__ float __frcp_rd(float __a) { 
return __nv_frcp_rd(__a); }\n" "__DEVICE__ float __frcp_rn(float __a) { return __nv_frcp_rn(__a); }\n" "__DEVICE__ float __frcp_ru(float __a) { return __nv_frcp_ru(__a); }\n" "__DEVICE__ float __frcp_rz(float __a) { return __nv_frcp_rz(__a); }\n" "__DEVICE__ float __frsqrt_rn(float __a) { return __nv_frsqrt_rn(__a); }\n" "__DEVICE__ float __fsqrt_rd(float __a) { return __nv_fsqrt_rd(__a); }\n" "__DEVICE__ float __fsqrt_rn(float __a) { return __nv_fsqrt_rn(__a); }\n" "__DEVICE__ float __fsqrt_ru(float __a) { return __nv_fsqrt_ru(__a); }\n" "__DEVICE__ float __fsqrt_rz(float __a) { return __nv_fsqrt_rz(__a); }\n" "__DEVICE__ float __fsub_rd(float __a, float __b) {\n" " return __nv_fsub_rd(__a, __b);\n" "}\n" "__DEVICE__ float __fsub_rn(float __a, float __b) {\n" " return __nv_fsub_rn(__a, __b);\n" "}\n" "__DEVICE__ float __fsub_ru(float __a, float __b) {\n" " return __nv_fsub_ru(__a, __b);\n" "}\n" "__DEVICE__ float __fsub_rz(float __a, float __b) {\n" " return __nv_fsub_rz(__a, __b);\n" "}\n" "__DEVICE__ int __hadd(int __a, int __b) { return __nv_hadd(__a, __b); }\n" "__DEVICE__ double __hiloint2double(int __a, int __b) {\n" " return __nv_hiloint2double(__a, __b);\n" "}\n" "__DEVICE__ int __iAtomicAdd(int *__p, int __v) {\n" " return __nvvm_atom_add_gen_i(__p, __v);\n" "}\n" "__DEVICE__ int __iAtomicAdd_block(int *__p, int __v) {\n" " return __nvvm_atom_cta_add_gen_i(__p, __v);\n" "}\n" "__DEVICE__ int __iAtomicAdd_system(int *__p, int __v) {\n" " return __nvvm_atom_sys_add_gen_i(__p, __v);\n" "}\n" "__DEVICE__ int __iAtomicAnd(int *__p, int __v) {\n" " return __nvvm_atom_and_gen_i(__p, __v);\n" "}\n" "__DEVICE__ int __iAtomicAnd_block(int *__p, int __v) {\n" " return __nvvm_atom_cta_and_gen_i(__p, __v);\n" "}\n" "__DEVICE__ int __iAtomicAnd_system(int *__p, int __v) {\n" " return __nvvm_atom_sys_and_gen_i(__p, __v);\n" "}\n" "__DEVICE__ int __iAtomicCAS(int *__p, int __cmp, int __v) {\n" " return __nvvm_atom_cas_gen_i(__p, __cmp, __v);\n" "}\n" "__DEVICE__ int 
__iAtomicCAS_block(int *__p, int __cmp, int __v) {\n" " return __nvvm_atom_cta_cas_gen_i(__p, __cmp, __v);\n" "}\n" "__DEVICE__ int __iAtomicCAS_system(int *__p, int __cmp, int __v) {\n" " return __nvvm_atom_sys_cas_gen_i(__p, __cmp, __v);\n" "}\n" "__DEVICE__ int __iAtomicExch(int *__p, int __v) {\n" " return __nvvm_atom_xchg_gen_i(__p, __v);\n" "}\n" "__DEVICE__ int __iAtomicExch_block(int *__p, int __v) {\n" " return __nvvm_atom_cta_xchg_gen_i(__p, __v);\n" "}\n" "__DEVICE__ int __iAtomicExch_system(int *__p, int __v) {\n" " return __nvvm_atom_sys_xchg_gen_i(__p, __v);\n" "}\n" "__DEVICE__ int __iAtomicMax(int *__p, int __v) {\n" " return __nvvm_atom_max_gen_i(__p, __v);\n" "}\n" "__DEVICE__ int __iAtomicMax_block(int *__p, int __v) {\n" " return __nvvm_atom_cta_max_gen_i(__p, __v);\n" "}\n" "__DEVICE__ int __iAtomicMax_system(int *__p, int __v) {\n" " return __nvvm_atom_sys_max_gen_i(__p, __v);\n" "}\n" "__DEVICE__ int __iAtomicMin(int *__p, int __v) {\n" " return __nvvm_atom_min_gen_i(__p, __v);\n" "}\n" "__DEVICE__ int __iAtomicMin_block(int *__p, int __v) {\n" " return __nvvm_atom_cta_min_gen_i(__p, __v);\n" "}\n" "__DEVICE__ int __iAtomicMin_system(int *__p, int __v) {\n" " return __nvvm_atom_sys_min_gen_i(__p, __v);\n" "}\n" "__DEVICE__ int __iAtomicOr(int *__p, int __v) {\n" " return __nvvm_atom_or_gen_i(__p, __v);\n" "}\n" "__DEVICE__ int __iAtomicOr_block(int *__p, int __v) {\n" " return __nvvm_atom_cta_or_gen_i(__p, __v);\n" "}\n" "__DEVICE__ int __iAtomicOr_system(int *__p, int __v) {\n" " return __nvvm_atom_sys_or_gen_i(__p, __v);\n" "}\n" "__DEVICE__ int __iAtomicXor(int *__p, int __v) {\n" " return __nvvm_atom_xor_gen_i(__p, __v);\n" "}\n" "__DEVICE__ int __iAtomicXor_block(int *__p, int __v) {\n" " return __nvvm_atom_cta_xor_gen_i(__p, __v);\n" "}\n" "__DEVICE__ int __iAtomicXor_system(int *__p, int __v) {\n" " return __nvvm_atom_sys_xor_gen_i(__p, __v);\n" "}\n" "__DEVICE__ long long __illAtomicMax(long long *__p, long long __v) {\n" " return 
__nvvm_atom_max_gen_ll(__p, __v);\n" "}\n" "__DEVICE__ long long __illAtomicMax_block(long long *__p, long long __v) {\n" " return __nvvm_atom_cta_max_gen_ll(__p, __v);\n" "}\n" "__DEVICE__ long long __illAtomicMax_system(long long *__p, long long __v) {\n" " return __nvvm_atom_sys_max_gen_ll(__p, __v);\n" "}\n" "__DEVICE__ long long __illAtomicMin(long long *__p, long long __v) {\n" " return __nvvm_atom_min_gen_ll(__p, __v);\n" "}\n" "__DEVICE__ long long __illAtomicMin_block(long long *__p, long long __v) {\n" " return __nvvm_atom_cta_min_gen_ll(__p, __v);\n" "}\n" "__DEVICE__ long long __illAtomicMin_system(long long *__p, long long __v) {\n" " return __nvvm_atom_sys_min_gen_ll(__p, __v);\n" "}\n" "__DEVICE__ double __int2double_rn(int __a) { return __nv_int2double_rn(__a); }\n" "__DEVICE__ float __int2float_rd(int __a) { return __nv_int2float_rd(__a); }\n" "__DEVICE__ float __int2float_rn(int __a) { return __nv_int2float_rn(__a); }\n" "__DEVICE__ float __int2float_ru(int __a) { return __nv_int2float_ru(__a); }\n" "__DEVICE__ float __int2float_rz(int __a) { return __nv_int2float_rz(__a); }\n" "__DEVICE__ float __int_as_float(int __a) { return __nv_int_as_float(__a); }\n" "__DEVICE__ int __isfinited(double __a) { return __nv_isfinited(__a); }\n" "__DEVICE__ int __isinf(double __a) { return __nv_isinfd(__a); }\n" "__DEVICE__ int __isinff(float __a) { return __nv_isinff(__a); }\n" "#ifdef _MSC_VER\n" "__DEVICE__ int __isinfl(long double __a);\n" "#endif\n" "__DEVICE__ int __isnan(double __a) { return __nv_isnand(__a); }\n" "__DEVICE__ int __isnanf(float __a) { return __nv_isnanf(__a); }\n" "#ifdef _MSC_VER\n" "__DEVICE__ int __isnanl(long double __a);\n" "#endif\n" "__DEVICE__ double __ll2double_rd(long long __a) {\n" " return __nv_ll2double_rd(__a);\n" "}\n" "__DEVICE__ double __ll2double_rn(long long __a) {\n" " return __nv_ll2double_rn(__a);\n" "}\n" "__DEVICE__ double __ll2double_ru(long long __a) {\n" " return __nv_ll2double_ru(__a);\n" "}\n" "__DEVICE__ 
double __ll2double_rz(long long __a) {\n" " return __nv_ll2double_rz(__a);\n" "}\n" "__DEVICE__ float __ll2float_rd(long long __a) { return __nv_ll2float_rd(__a); }\n" "__DEVICE__ float __ll2float_rn(long long __a) { return __nv_ll2float_rn(__a); }\n" "__DEVICE__ float __ll2float_ru(long long __a) { return __nv_ll2float_ru(__a); }\n" "__DEVICE__ float __ll2float_rz(long long __a) { return __nv_ll2float_rz(__a); }\n" "__DEVICE__ long long __llAtomicAnd(long long *__p, long long __v) {\n" " return __nvvm_atom_and_gen_ll(__p, __v);\n" "}\n" "__DEVICE__ long long __llAtomicAnd_block(long long *__p, long long __v) {\n" " return __nvvm_atom_cta_and_gen_ll(__p, __v);\n" "}\n" "__DEVICE__ long long __llAtomicAnd_system(long long *__p, long long __v) {\n" " return __nvvm_atom_sys_and_gen_ll(__p, __v);\n" "}\n" "__DEVICE__ long long __llAtomicOr(long long *__p, long long __v) {\n" " return __nvvm_atom_or_gen_ll(__p, __v);\n" "}\n" "__DEVICE__ long long __llAtomicOr_block(long long *__p, long long __v) {\n" " return __nvvm_atom_cta_or_gen_ll(__p, __v);\n" "}\n" "__DEVICE__ long long __llAtomicOr_system(long long *__p, long long __v) {\n" " return __nvvm_atom_sys_or_gen_ll(__p, __v);\n" "}\n" "__DEVICE__ long long __llAtomicXor(long long *__p, long long __v) {\n" " return __nvvm_atom_xor_gen_ll(__p, __v);\n" "}\n" "__DEVICE__ long long __llAtomicXor_block(long long *__p, long long __v) {\n" " return __nvvm_atom_cta_xor_gen_ll(__p, __v);\n" "}\n" "__DEVICE__ long long __llAtomicXor_system(long long *__p, long long __v) {\n" " return __nvvm_atom_sys_xor_gen_ll(__p, __v);\n" "}\n" "__DEVICE__ float __log10f(float __a) { return __nv_fast_log10f(__a); }\n" "__DEVICE__ float __log2f(float __a) { return __nv_fast_log2f(__a); }\n" "__DEVICE__ float __logf(float __a) { return __nv_fast_logf(__a); }\n" "__DEVICE__ double __longlong_as_double(long long __a) {\n" " return __nv_longlong_as_double(__a);\n" "}\n" "__DEVICE__ int __mul24(int __a, int __b) { return __nv_mul24(__a, __b); }\n" 
"__DEVICE__ long long __mul64hi(long long __a, long long __b) {\n" " return __nv_mul64hi(__a, __b);\n" "}\n" "__DEVICE__ int __mulhi(int __a, int __b) { return __nv_mulhi(__a, __b); }\n" "__DEVICE__ unsigned int __pm0(void) { return __nvvm_read_ptx_sreg_pm0(); }\n" "__DEVICE__ unsigned int __pm1(void) { return __nvvm_read_ptx_sreg_pm1(); }\n" "__DEVICE__ unsigned int __pm2(void) { return __nvvm_read_ptx_sreg_pm2(); }\n" "__DEVICE__ unsigned int __pm3(void) { return __nvvm_read_ptx_sreg_pm3(); }\n" "__DEVICE__ int __popc(int __a) { return __nv_popc(__a); }\n" "__DEVICE__ int __popcll(long long __a) { return __nv_popcll(__a); }\n" "__DEVICE__ float __powf(float __a, float __b) {\n" " return __nv_fast_powf(__a, __b);\n" "}\n" "\n" "// Parameter must have a known integer value.\n" "#define __prof_trigger(__a) __asm__ __volatile__(\"pmevent \\t%0;\" ::\"i\"(__a))\n" "__DEVICE__ int __rhadd(int __a, int __b) { return __nv_rhadd(__a, __b); }\n" "__DEVICE__ unsigned int __sad(int __a, int __b, unsigned int __c) {\n" " return __nv_sad(__a, __b, __c);\n" "}\n" "__DEVICE__ float __saturatef(float __a) { return __nv_saturatef(__a); }\n" "__DEVICE__ int __signbitd(double __a) { return __nv_signbitd(__a); }\n" "__DEVICE__ int __signbitf(float __a) { return __nv_signbitf(__a); }\n" "__DEVICE__ void __sincosf(float __a, float *__s, float *__c) {\n" " return __nv_fast_sincosf(__a, __s, __c);\n" "}\n" "__DEVICE__ float __sinf(float __a) { return __nv_fast_sinf(__a); }\n" "__DEVICE__ int __syncthreads_and(int __a) { return __nvvm_bar0_and(__a); }\n" "__DEVICE__ int __syncthreads_count(int __a) { return __nvvm_bar0_popc(__a); }\n" "__DEVICE__ int __syncthreads_or(int __a) { return __nvvm_bar0_or(__a); }\n" "__DEVICE__ float __tanf(float __a) { return __nv_fast_tanf(__a); }\n" "__DEVICE__ void __threadfence(void) { __nvvm_membar_gl(); }\n" "__DEVICE__ void __threadfence_block(void) { __nvvm_membar_cta(); };\n" "__DEVICE__ void __threadfence_system(void) { __nvvm_membar_sys(); };\n" 
"__DEVICE__ void __trap(void) { __asm__ __volatile__(\"trap;\"); }\n" "__DEVICE__ unsigned int __uAtomicAdd(unsigned int *__p, unsigned int __v) {\n" " return __nvvm_atom_add_gen_i((int *)__p, __v);\n" "}\n" "__DEVICE__ unsigned int __uAtomicAdd_block(unsigned int *__p,\n" " unsigned int __v) {\n" " return __nvvm_atom_cta_add_gen_i((int *)__p, __v);\n" "}\n" "__DEVICE__ unsigned int __uAtomicAdd_system(unsigned int *__p,\n" " unsigned int __v) {\n" " return __nvvm_atom_sys_add_gen_i((int *)__p, __v);\n" "}\n" "__DEVICE__ unsigned int __uAtomicAnd(unsigned int *__p, unsigned int __v) {\n" " return __nvvm_atom_and_gen_i((int *)__p, __v);\n" "}\n" "__DEVICE__ unsigned int __uAtomicAnd_block(unsigned int *__p,\n" " unsigned int __v) {\n" " return __nvvm_atom_cta_and_gen_i((int *)__p, __v);\n" "}\n" "__DEVICE__ unsigned int __uAtomicAnd_system(unsigned int *__p,\n" " unsigned int __v) {\n" " return __nvvm_atom_sys_and_gen_i((int *)__p, __v);\n" "}\n" "__DEVICE__ unsigned int __uAtomicCAS(unsigned int *__p, unsigned int __cmp,\n" " unsigned int __v) {\n" " return __nvvm_atom_cas_gen_i((int *)__p, __cmp, __v);\n" "}\n" "__DEVICE__ unsigned int\n" "__uAtomicCAS_block(unsigned int *__p, unsigned int __cmp, unsigned int __v) {\n" " return __nvvm_atom_cta_cas_gen_i((int *)__p, __cmp, __v);\n" "}\n" "__DEVICE__ unsigned int\n" "__uAtomicCAS_system(unsigned int *__p, unsigned int __cmp, unsigned int __v) {\n" " return __nvvm_atom_sys_cas_gen_i((int *)__p, __cmp, __v);\n" "}\n" "__DEVICE__ unsigned int __uAtomicDec(unsigned int *__p, unsigned int __v) {\n" " return __nvvm_atom_dec_gen_ui(__p, __v);\n" "}\n" "__DEVICE__ unsigned int __uAtomicDec_block(unsigned int *__p,\n" " unsigned int __v) {\n" " return __nvvm_atom_cta_dec_gen_ui(__p, __v);\n" "}\n" "__DEVICE__ unsigned int __uAtomicDec_system(unsigned int *__p,\n" " unsigned int __v) {\n" " return __nvvm_atom_sys_dec_gen_ui(__p, __v);\n" "}\n" "__DEVICE__ unsigned int __uAtomicExch(unsigned int *__p, unsigned int __v) {\n" " 
return __nvvm_atom_xchg_gen_i((int *)__p, __v);\n" "}\n" "__DEVICE__ unsigned int __uAtomicExch_block(unsigned int *__p,\n" " unsigned int __v) {\n" " return __nvvm_atom_cta_xchg_gen_i((int *)__p, __v);\n" "}\n" "__DEVICE__ unsigned int __uAtomicExch_system(unsigned int *__p,\n" " unsigned int __v) {\n" " return __nvvm_atom_sys_xchg_gen_i((int *)__p, __v);\n" "}\n" "__DEVICE__ unsigned int __uAtomicInc(unsigned int *__p, unsigned int __v) {\n" " return __nvvm_atom_inc_gen_ui(__p, __v);\n" "}\n" "__DEVICE__ unsigned int __uAtomicInc_block(unsigned int *__p,\n" " unsigned int __v) {\n" " return __nvvm_atom_cta_inc_gen_ui(__p, __v);\n" "}\n" "__DEVICE__ unsigned int __uAtomicInc_system(unsigned int *__p,\n" " unsigned int __v) {\n" " return __nvvm_atom_sys_inc_gen_ui(__p, __v);\n" "}\n" "__DEVICE__ unsigned int __uAtomicMax(unsigned int *__p, unsigned int __v) {\n" " return __nvvm_atom_max_gen_ui(__p, __v);\n" "}\n" "__DEVICE__ unsigned int __uAtomicMax_block(unsigned int *__p,\n" " unsigned int __v) {\n" " return __nvvm_atom_cta_max_gen_ui(__p, __v);\n" "}\n" "__DEVICE__ unsigned int __uAtomicMax_system(unsigned int *__p,\n" " unsigned int __v) {\n" " return __nvvm_atom_sys_max_gen_ui(__p, __v);\n" "}\n" "__DEVICE__ unsigned int __uAtomicMin(unsigned int *__p, unsigned int __v) {\n" " return __nvvm_atom_min_gen_ui(__p, __v);\n" "}\n" "__DEVICE__ unsigned int __uAtomicMin_block(unsigned int *__p,\n" " unsigned int __v) {\n" " return __nvvm_atom_cta_min_gen_ui(__p, __v);\n" "}\n" "__DEVICE__ unsigned int __uAtomicMin_system(unsigned int *__p,\n" " unsigned int __v) {\n" " return __nvvm_atom_sys_min_gen_ui(__p, __v);\n" "}\n" "__DEVICE__ unsigned int __uAtomicOr(unsigned int *__p, unsigned int __v) {\n" " return __nvvm_atom_or_gen_i((int *)__p, __v);\n" "}\n" "__DEVICE__ unsigned int __uAtomicOr_block(unsigned int *__p, unsigned int __v) {\n" " return __nvvm_atom_cta_or_gen_i((int *)__p, __v);\n" "}\n" "__DEVICE__ unsigned int __uAtomicOr_system(unsigned int *__p,\n" " 
unsigned int __v) {\n" " return __nvvm_atom_sys_or_gen_i((int *)__p, __v);\n" "}\n" "__DEVICE__ unsigned int __uAtomicXor(unsigned int *__p, unsigned int __v) {\n" " return __nvvm_atom_xor_gen_i((int *)__p, __v);\n" "}\n" "__DEVICE__ unsigned int __uAtomicXor_block(unsigned int *__p,\n" " unsigned int __v) {\n" " return __nvvm_atom_cta_xor_gen_i((int *)__p, __v);\n" "}\n" "__DEVICE__ unsigned int __uAtomicXor_system(unsigned int *__p,\n" " unsigned int __v) {\n" " return __nvvm_atom_sys_xor_gen_i((int *)__p, __v);\n" "}\n" "__DEVICE__ unsigned int __uhadd(unsigned int __a, unsigned int __b) {\n" " return __nv_uhadd(__a, __b);\n" "}\n" "__DEVICE__ double __uint2double_rn(unsigned int __a) {\n" " return __nv_uint2double_rn(__a);\n" "}\n" "__DEVICE__ float __uint2float_rd(unsigned int __a) {\n" " return __nv_uint2float_rd(__a);\n" "}\n" "__DEVICE__ float __uint2float_rn(unsigned int __a) {\n" " return __nv_uint2float_rn(__a);\n" "}\n" "__DEVICE__ float __uint2float_ru(unsigned int __a) {\n" " return __nv_uint2float_ru(__a);\n" "}\n" "__DEVICE__ float __uint2float_rz(unsigned int __a) {\n" " return __nv_uint2float_rz(__a);\n" "}\n" "__DEVICE__ float __uint_as_float(unsigned int __a) {\n" " return __nv_uint_as_float(__a);\n" "} //\n" "__DEVICE__ double __ull2double_rd(unsigned long long __a) {\n" " return __nv_ull2double_rd(__a);\n" "}\n" "__DEVICE__ double __ull2double_rn(unsigned long long __a) {\n" " return __nv_ull2double_rn(__a);\n" "}\n" "__DEVICE__ double __ull2double_ru(unsigned long long __a) {\n" " return __nv_ull2double_ru(__a);\n" "}\n" "__DEVICE__ double __ull2double_rz(unsigned long long __a) {\n" " return __nv_ull2double_rz(__a);\n" "}\n" "__DEVICE__ float __ull2float_rd(unsigned long long __a) {\n" " return __nv_ull2float_rd(__a);\n" "}\n" "__DEVICE__ float __ull2float_rn(unsigned long long __a) {\n" " return __nv_ull2float_rn(__a);\n" "}\n" "__DEVICE__ float __ull2float_ru(unsigned long long __a) {\n" " return __nv_ull2float_ru(__a);\n" "}\n" 
"__DEVICE__ float __ull2float_rz(unsigned long long __a) {\n" " return __nv_ull2float_rz(__a);\n" "}\n" "__DEVICE__ unsigned long long __ullAtomicAdd(unsigned long long *__p,\n" " unsigned long long __v) {\n" " return __nvvm_atom_add_gen_ll((long long *)__p, __v);\n" "}\n" "__DEVICE__ unsigned long long __ullAtomicAdd_block(unsigned long long *__p,\n" " unsigned long long __v) {\n" " return __nvvm_atom_cta_add_gen_ll((long long *)__p, __v);\n" "}\n" "__DEVICE__ unsigned long long __ullAtomicAdd_system(unsigned long long *__p,\n" " unsigned long long __v) {\n" " return __nvvm_atom_sys_add_gen_ll((long long *)__p, __v);\n" "}\n" "__DEVICE__ unsigned long long __ullAtomicAnd(unsigned long long *__p,\n" " unsigned long long __v) {\n" " return __nvvm_atom_and_gen_ll((long long *)__p, __v);\n" "}\n" "__DEVICE__ unsigned long long __ullAtomicAnd_block(unsigned long long *__p,\n" " unsigned long long __v) {\n" " return __nvvm_atom_cta_and_gen_ll((long long *)__p, __v);\n" "}\n" "__DEVICE__ unsigned long long __ullAtomicAnd_system(unsigned long long *__p,\n" " unsigned long long __v) {\n" " return __nvvm_atom_sys_and_gen_ll((long long *)__p, __v);\n" "}\n" "__DEVICE__ unsigned long long __ullAtomicCAS(unsigned long long *__p,\n" " unsigned long long __cmp,\n" " unsigned long long __v) {\n" " return __nvvm_atom_cas_gen_ll((long long *)__p, __cmp, __v);\n" "}\n" "__DEVICE__ unsigned long long __ullAtomicCAS_block(unsigned long long *__p,\n" " unsigned long long __cmp,\n" " unsigned long long __v) {\n" " return __nvvm_atom_cta_cas_gen_ll((long long *)__p, __cmp, __v);\n" "}\n" "__DEVICE__ unsigned long long __ullAtomicCAS_system(unsigned long long *__p,\n" " unsigned long long __cmp,\n" " unsigned long long __v) {\n" " return __nvvm_atom_sys_cas_gen_ll((long long *)__p, __cmp, __v);\n" "}\n" "__DEVICE__ unsigned long long __ullAtomicExch(unsigned long long *__p,\n" " unsigned long long __v) {\n" " return __nvvm_atom_xchg_gen_ll((long long *)__p, __v);\n" "}\n" "__DEVICE__ 
unsigned long long __ullAtomicExch_block(unsigned long long *__p,\n" " unsigned long long __v) {\n" " return __nvvm_atom_cta_xchg_gen_ll((long long *)__p, __v);\n" "}\n" "__DEVICE__ unsigned long long __ullAtomicExch_system(unsigned long long *__p,\n" " unsigned long long __v) {\n" " return __nvvm_atom_sys_xchg_gen_ll((long long *)__p, __v);\n" "}\n" "__DEVICE__ unsigned long long __ullAtomicMax(unsigned long long *__p,\n" " unsigned long long __v) {\n" " return __nvvm_atom_max_gen_ull(__p, __v);\n" "}\n" "__DEVICE__ unsigned long long __ullAtomicMax_block(unsigned long long *__p,\n" " unsigned long long __v) {\n" " return __nvvm_atom_cta_max_gen_ull(__p, __v);\n" "}\n" "__DEVICE__ unsigned long long __ullAtomicMax_system(unsigned long long *__p,\n" " unsigned long long __v) {\n" " return __nvvm_atom_sys_max_gen_ull(__p, __v);\n" "}\n" "__DEVICE__ unsigned long long __ullAtomicMin(unsigned long long *__p,\n" " unsigned long long __v) {\n" " return __nvvm_atom_min_gen_ull(__p, __v);\n" "}\n" "__DEVICE__ unsigned long long __ullAtomicMin_block(unsigned long long *__p,\n" " unsigned long long __v) {\n" " return __nvvm_atom_cta_min_gen_ull(__p, __v);\n" "}\n" "__DEVICE__ unsigned long long __ullAtomicMin_system(unsigned long long *__p,\n" " unsigned long long __v) {\n" " return __nvvm_atom_sys_min_gen_ull(__p, __v);\n" "}\n" "__DEVICE__ unsigned long long __ullAtomicOr(unsigned long long *__p,\n" " unsigned long long __v) {\n" " return __nvvm_atom_or_gen_ll((long long *)__p, __v);\n" "}\n" "__DEVICE__ unsigned long long __ullAtomicOr_block(unsigned long long *__p,\n" " unsigned long long __v) {\n" " return __nvvm_atom_cta_or_gen_ll((long long *)__p, __v);\n" "}\n" "__DEVICE__ unsigned long long __ullAtomicOr_system(unsigned long long *__p,\n" " unsigned long long __v) {\n" " return __nvvm_atom_sys_or_gen_ll((long long *)__p, __v);\n" "}\n" "__DEVICE__ unsigned long long __ullAtomicXor(unsigned long long *__p,\n" " unsigned long long __v) {\n" " return 
__nvvm_atom_xor_gen_ll((long long *)__p, __v);\n" "}\n" "__DEVICE__ unsigned long long __ullAtomicXor_block(unsigned long long *__p,\n" " unsigned long long __v) {\n" " return __nvvm_atom_cta_xor_gen_ll((long long *)__p, __v);\n" "}\n" "__DEVICE__ unsigned long long __ullAtomicXor_system(unsigned long long *__p,\n" " unsigned long long __v) {\n" " return __nvvm_atom_sys_xor_gen_ll((long long *)__p, __v);\n" "}\n" "__DEVICE__ unsigned int __umul24(unsigned int __a, unsigned int __b) {\n" " return __nv_umul24(__a, __b);\n" "}\n" "__DEVICE__ unsigned long long __umul64hi(unsigned long long __a,\n" " unsigned long long __b) {\n" " return __nv_umul64hi(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __umulhi(unsigned int __a, unsigned int __b) {\n" " return __nv_umulhi(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __urhadd(unsigned int __a, unsigned int __b) {\n" " return __nv_urhadd(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __usad(unsigned int __a, unsigned int __b,\n" " unsigned int __c) {\n" " return __nv_usad(__a, __b, __c);\n" "}\n" "\n" "#if CUDA_VERSION >= 9000 && CUDA_VERSION < 9020\n" "__DEVICE__ unsigned int __vabs2(unsigned int __a) { return __nv_vabs2(__a); }\n" "__DEVICE__ unsigned int __vabs4(unsigned int __a) { return __nv_vabs4(__a); }\n" "__DEVICE__ unsigned int __vabsdiffs2(unsigned int __a, unsigned int __b) {\n" " return __nv_vabsdiffs2(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vabsdiffs4(unsigned int __a, unsigned int __b) {\n" " return __nv_vabsdiffs4(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vabsdiffu2(unsigned int __a, unsigned int __b) {\n" " return __nv_vabsdiffu2(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vabsdiffu4(unsigned int __a, unsigned int __b) {\n" " return __nv_vabsdiffu4(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vabsss2(unsigned int __a) {\n" " return __nv_vabsss2(__a);\n" "}\n" "__DEVICE__ unsigned int __vabsss4(unsigned int __a) {\n" " return __nv_vabsss4(__a);\n" "}\n" "__DEVICE__ unsigned int __vadd2(unsigned 
int __a, unsigned int __b) {\n" " return __nv_vadd2(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vadd4(unsigned int __a, unsigned int __b) {\n" " return __nv_vadd4(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vaddss2(unsigned int __a, unsigned int __b) {\n" " return __nv_vaddss2(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vaddss4(unsigned int __a, unsigned int __b) {\n" " return __nv_vaddss4(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vaddus2(unsigned int __a, unsigned int __b) {\n" " return __nv_vaddus2(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vaddus4(unsigned int __a, unsigned int __b) {\n" " return __nv_vaddus4(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vavgs2(unsigned int __a, unsigned int __b) {\n" " return __nv_vavgs2(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vavgs4(unsigned int __a, unsigned int __b) {\n" " return __nv_vavgs4(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vavgu2(unsigned int __a, unsigned int __b) {\n" " return __nv_vavgu2(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vavgu4(unsigned int __a, unsigned int __b) {\n" " return __nv_vavgu4(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vcmpeq2(unsigned int __a, unsigned int __b) {\n" " return __nv_vcmpeq2(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vcmpeq4(unsigned int __a, unsigned int __b) {\n" " return __nv_vcmpeq4(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vcmpges2(unsigned int __a, unsigned int __b) {\n" " return __nv_vcmpges2(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vcmpges4(unsigned int __a, unsigned int __b) {\n" " return __nv_vcmpges4(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vcmpgeu2(unsigned int __a, unsigned int __b) {\n" " return __nv_vcmpgeu2(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vcmpgeu4(unsigned int __a, unsigned int __b) {\n" " return __nv_vcmpgeu4(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vcmpgts2(unsigned int __a, unsigned int __b) {\n" " return __nv_vcmpgts2(__a, __b);\n" "}\n" "__DEVICE__ unsigned int 
__vcmpgts4(unsigned int __a, unsigned int __b) {\n" " return __nv_vcmpgts4(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vcmpgtu2(unsigned int __a, unsigned int __b) {\n" " return __nv_vcmpgtu2(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vcmpgtu4(unsigned int __a, unsigned int __b) {\n" " return __nv_vcmpgtu4(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vcmples2(unsigned int __a, unsigned int __b) {\n" " return __nv_vcmples2(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vcmples4(unsigned int __a, unsigned int __b) {\n" " return __nv_vcmples4(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vcmpleu2(unsigned int __a, unsigned int __b) {\n" " return __nv_vcmpleu2(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vcmpleu4(unsigned int __a, unsigned int __b) {\n" " return __nv_vcmpleu4(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vcmplts2(unsigned int __a, unsigned int __b) {\n" " return __nv_vcmplts2(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vcmplts4(unsigned int __a, unsigned int __b) {\n" " return __nv_vcmplts4(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vcmpltu2(unsigned int __a, unsigned int __b) {\n" " return __nv_vcmpltu2(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vcmpltu4(unsigned int __a, unsigned int __b) {\n" " return __nv_vcmpltu4(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vcmpne2(unsigned int __a, unsigned int __b) {\n" " return __nv_vcmpne2(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vcmpne4(unsigned int __a, unsigned int __b) {\n" " return __nv_vcmpne4(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vhaddu2(unsigned int __a, unsigned int __b) {\n" " return __nv_vhaddu2(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vhaddu4(unsigned int __a, unsigned int __b) {\n" " return __nv_vhaddu4(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vmaxs2(unsigned int __a, unsigned int __b) {\n" " return __nv_vmaxs2(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vmaxs4(unsigned int __a, unsigned int __b) {\n" " return __nv_vmaxs4(__a, __b);\n" "}\n" 
"__DEVICE__ unsigned int __vmaxu2(unsigned int __a, unsigned int __b) {\n" " return __nv_vmaxu2(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vmaxu4(unsigned int __a, unsigned int __b) {\n" " return __nv_vmaxu4(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vmins2(unsigned int __a, unsigned int __b) {\n" " return __nv_vmins2(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vmins4(unsigned int __a, unsigned int __b) {\n" " return __nv_vmins4(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vminu2(unsigned int __a, unsigned int __b) {\n" " return __nv_vminu2(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vminu4(unsigned int __a, unsigned int __b) {\n" " return __nv_vminu4(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vneg2(unsigned int __a) { return __nv_vneg2(__a); }\n" "__DEVICE__ unsigned int __vneg4(unsigned int __a) { return __nv_vneg4(__a); }\n" "__DEVICE__ unsigned int __vnegss2(unsigned int __a) {\n" " return __nv_vnegss2(__a);\n" "}\n" "__DEVICE__ unsigned int __vnegss4(unsigned int __a) {\n" " return __nv_vnegss4(__a);\n" "}\n" "__DEVICE__ unsigned int __vsads2(unsigned int __a, unsigned int __b) {\n" " return __nv_vsads2(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vsads4(unsigned int __a, unsigned int __b) {\n" " return __nv_vsads4(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vsadu2(unsigned int __a, unsigned int __b) {\n" " return __nv_vsadu2(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vsadu4(unsigned int __a, unsigned int __b) {\n" " return __nv_vsadu4(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vseteq2(unsigned int __a, unsigned int __b) {\n" " return __nv_vseteq2(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vseteq4(unsigned int __a, unsigned int __b) {\n" " return __nv_vseteq4(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vsetges2(unsigned int __a, unsigned int __b) {\n" " return __nv_vsetges2(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vsetges4(unsigned int __a, unsigned int __b) {\n" " return __nv_vsetges4(__a, __b);\n" "}\n" 
"__DEVICE__ unsigned int __vsetgeu2(unsigned int __a, unsigned int __b) {\n" " return __nv_vsetgeu2(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vsetgeu4(unsigned int __a, unsigned int __b) {\n" " return __nv_vsetgeu4(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vsetgts2(unsigned int __a, unsigned int __b) {\n" " return __nv_vsetgts2(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vsetgts4(unsigned int __a, unsigned int __b) {\n" " return __nv_vsetgts4(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vsetgtu2(unsigned int __a, unsigned int __b) {\n" " return __nv_vsetgtu2(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vsetgtu4(unsigned int __a, unsigned int __b) {\n" " return __nv_vsetgtu4(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vsetles2(unsigned int __a, unsigned int __b) {\n" " return __nv_vsetles2(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vsetles4(unsigned int __a, unsigned int __b) {\n" " return __nv_vsetles4(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vsetleu2(unsigned int __a, unsigned int __b) {\n" " return __nv_vsetleu2(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vsetleu4(unsigned int __a, unsigned int __b) {\n" " return __nv_vsetleu4(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vsetlts2(unsigned int __a, unsigned int __b) {\n" " return __nv_vsetlts2(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vsetlts4(unsigned int __a, unsigned int __b) {\n" " return __nv_vsetlts4(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vsetltu2(unsigned int __a, unsigned int __b) {\n" " return __nv_vsetltu2(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vsetltu4(unsigned int __a, unsigned int __b) {\n" " return __nv_vsetltu4(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vsetne2(unsigned int __a, unsigned int __b) {\n" " return __nv_vsetne2(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vsetne4(unsigned int __a, unsigned int __b) {\n" " return __nv_vsetne4(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vsub2(unsigned int __a, unsigned int __b) {\n" " return 
__nv_vsub2(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vsub4(unsigned int __a, unsigned int __b) {\n" " return __nv_vsub4(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vsubss2(unsigned int __a, unsigned int __b) {\n" " return __nv_vsubss2(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vsubss4(unsigned int __a, unsigned int __b) {\n" " return __nv_vsubss4(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vsubus2(unsigned int __a, unsigned int __b) {\n" " return __nv_vsubus2(__a, __b);\n" "}\n" "__DEVICE__ unsigned int __vsubus4(unsigned int __a, unsigned int __b) {\n" " return __nv_vsubus4(__a, __b);\n" "}\n" "#else // CUDA_VERSION >= 9020\n" "// CUDA no longer provides inline assembly (or bitcode) implementation of these\n" "// functions, so we have to reimplment them. The implementation is naive and is\n" "// not optimized for performance.\n" "\n" "// Helper function to convert N-bit boolean subfields into all-0 or all-1.\n" "// E.g. __bool2mask(0x01000100,8) -> 0xff00ff00\n" "// __bool2mask(0x00010000,16) -> 0xffff0000\n" "__DEVICE__ unsigned int __bool2mask(unsigned int __a, int shift) {\n" " return (__a << shift) - __a;\n" "}\n" "__DEVICE__ unsigned int __vabs2(unsigned int __a) {\n" " unsigned int r;\n" " __asm__(\"vabsdiff2.s32.s32.s32 %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(0), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vabs4(unsigned int __a) {\n" " unsigned int r;\n" " __asm__(\"vabsdiff4.s32.s32.s32 %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(0), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vabsdiffs2(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vabsdiff2.s32.s32.s32 %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "\n" "__DEVICE__ unsigned int __vabsdiffs4(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vabsdiff4.s32.s32.s32 %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), 
\"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vabsdiffu2(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vabsdiff2.u32.u32.u32 %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vabsdiffu4(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vabsdiff4.u32.u32.u32 %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vabsss2(unsigned int __a) {\n" " unsigned int r;\n" " __asm__(\"vabsdiff2.s32.s32.s32.sat %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(0), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vabsss4(unsigned int __a) {\n" " unsigned int r;\n" " __asm__(\"vabsdiff4.s32.s32.s32.sat %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(0), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vadd2(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vadd2.u32.u32.u32 %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vadd4(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vadd4.u32.u32.u32 %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vaddss2(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vadd2.s32.s32.s32.sat %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vaddss4(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vadd4.s32.s32.s32.sat %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vaddus2(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vadd2.u32.u32.u32.sat %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : 
\"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vaddus4(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vadd4.u32.u32.u32.sat %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vavgs2(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vavrg2.s32.s32.s32 %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vavgs4(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vavrg4.s32.s32.s32 %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vavgu2(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vavrg2.u32.u32.u32 %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vavgu4(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vavrg4.u32.u32.u32 %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vseteq2(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vset2.u32.u32.eq %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vcmpeq2(unsigned int __a, unsigned int __b) {\n" " return __bool2mask(__vseteq2(__a, __b), 16);\n" "}\n" "__DEVICE__ unsigned int __vseteq4(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vset4.u32.u32.eq %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vcmpeq4(unsigned int __a, unsigned int __b) {\n" " return __bool2mask(__vseteq4(__a, __b), 8);\n" "}\n" "__DEVICE__ unsigned int __vsetges2(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " 
__asm__(\"vset2.s32.s32.ge %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vcmpges2(unsigned int __a, unsigned int __b) {\n" " return __bool2mask(__vsetges2(__a, __b), 16);\n" "}\n" "__DEVICE__ unsigned int __vsetges4(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vset4.s32.s32.ge %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vcmpges4(unsigned int __a, unsigned int __b) {\n" " return __bool2mask(__vsetges4(__a, __b), 8);\n" "}\n" "__DEVICE__ unsigned int __vsetgeu2(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vset2.u32.u32.ge %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vcmpgeu2(unsigned int __a, unsigned int __b) {\n" " return __bool2mask(__vsetgeu2(__a, __b), 16);\n" "}\n" "__DEVICE__ unsigned int __vsetgeu4(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vset4.u32.u32.ge %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vcmpgeu4(unsigned int __a, unsigned int __b) {\n" " return __bool2mask(__vsetgeu4(__a, __b), 8);\n" "}\n" "__DEVICE__ unsigned int __vsetgts2(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vset2.s32.s32.gt %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vcmpgts2(unsigned int __a, unsigned int __b) {\n" " return __bool2mask(__vsetgts2(__a, __b), 16);\n" "}\n" "__DEVICE__ unsigned int __vsetgts4(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vset4.s32.s32.gt %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vcmpgts4(unsigned int __a, unsigned int __b) {\n" " 
return __bool2mask(__vsetgts4(__a, __b), 8);\n" "}\n" "__DEVICE__ unsigned int __vsetgtu2(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vset2.u32.u32.gt %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vcmpgtu2(unsigned int __a, unsigned int __b) {\n" " return __bool2mask(__vsetgtu2(__a, __b), 16);\n" "}\n" "__DEVICE__ unsigned int __vsetgtu4(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vset4.u32.u32.gt %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vcmpgtu4(unsigned int __a, unsigned int __b) {\n" " return __bool2mask(__vsetgtu4(__a, __b), 8);\n" "}\n" "__DEVICE__ unsigned int __vsetles2(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vset2.s32.s32.le %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vcmples2(unsigned int __a, unsigned int __b) {\n" " return __bool2mask(__vsetles2(__a, __b), 16);\n" "}\n" "__DEVICE__ unsigned int __vsetles4(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vset4.s32.s32.le %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vcmples4(unsigned int __a, unsigned int __b) {\n" " return __bool2mask(__vsetles4(__a, __b), 8);\n" "}\n" "__DEVICE__ unsigned int __vsetleu2(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vset2.u32.u32.le %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vcmpleu2(unsigned int __a, unsigned int __b) {\n" " return __bool2mask(__vsetleu2(__a, __b), 16);\n" "}\n" "__DEVICE__ unsigned int __vsetleu4(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vset4.u32.u32.le %0,%1,%2,%3;\"\n" " : 
\"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vcmpleu4(unsigned int __a, unsigned int __b) {\n" " return __bool2mask(__vsetleu4(__a, __b), 8);\n" "}\n" "__DEVICE__ unsigned int __vsetlts2(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vset2.s32.s32.lt %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vcmplts2(unsigned int __a, unsigned int __b) {\n" " return __bool2mask(__vsetlts2(__a, __b), 16);\n" "}\n" "__DEVICE__ unsigned int __vsetlts4(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vset4.s32.s32.lt %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vcmplts4(unsigned int __a, unsigned int __b) {\n" " return __bool2mask(__vsetlts4(__a, __b), 8);\n" "}\n" "__DEVICE__ unsigned int __vsetltu2(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vset2.u32.u32.lt %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vcmpltu2(unsigned int __a, unsigned int __b) {\n" " return __bool2mask(__vsetltu2(__a, __b), 16);\n" "}\n" "__DEVICE__ unsigned int __vsetltu4(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vset4.u32.u32.lt %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vcmpltu4(unsigned int __a, unsigned int __b) {\n" " return __bool2mask(__vsetltu4(__a, __b), 8);\n" "}\n" "__DEVICE__ unsigned int __vsetne2(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vset2.u32.u32.ne %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vcmpne2(unsigned int __a, unsigned int __b) {\n" " return __bool2mask(__vsetne2(__a, __b), 16);\n" "}\n" 
"__DEVICE__ unsigned int __vsetne4(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vset4.u32.u32.ne %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vcmpne4(unsigned int __a, unsigned int __b) {\n" " return __bool2mask(__vsetne4(__a, __b), 8);\n" "}\n" "\n" "// Based on ITEM 23 in AIM-239: http://dspace.mit.edu/handle/1721.1/6086\n" "// (a & b) + (a | b) = a + b = (a ^ b) + 2 * (a & b) =>\n" "// (a + b) / 2 = ((a ^ b) >> 1) + (a & b)\n" "// To operate on multiple sub-elements we need to make sure to mask out bits\n" "// that crossed over into adjacent elements during the shift.\n" "__DEVICE__ unsigned int __vhaddu2(unsigned int __a, unsigned int __b) {\n" " return (((__a ^ __b) >> 1) & ~0x80008000u) + (__a & __b);\n" "}\n" "__DEVICE__ unsigned int __vhaddu4(unsigned int __a, unsigned int __b) {\n" " return (((__a ^ __b) >> 1) & ~0x80808080u) + (__a & __b);\n" "}\n" "\n" "__DEVICE__ unsigned int __vmaxs2(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " if ((__a & 0x8000) && (__b & 0x8000)) {\n" " // Work around a bug in ptxas which produces invalid result if low element\n" " // is negative.\n" " unsigned mask = __vcmpgts2(__a, __b);\n" " r = (__a & mask) | (__b & ~mask);\n" " } else {\n" " __asm__(\"vmax2.s32.s32.s32 %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " }\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vmaxs4(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vmax4.s32.s32.s32 %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vmaxu2(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vmax2.u32.u32.u32 %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vmaxu4(unsigned int __a, unsigned int __b) {\n" " unsigned int 
r;\n" " __asm__(\"vmax4.u32.u32.u32 %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vmins2(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vmin2.s32.s32.s32 %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vmins4(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vmin4.s32.s32.s32 %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vminu2(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vmin2.u32.u32.u32 %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vminu4(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vmin4.u32.u32.u32 %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vsads2(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vabsdiff2.s32.s32.s32.add %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vsads4(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vabsdiff4.s32.s32.s32.add %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vsadu2(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vabsdiff2.u32.u32.u32.add %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vsadu4(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vabsdiff4.u32.u32.u32.add %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "\n" "__DEVICE__ unsigned int 
__vsub2(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vsub2.u32.u32.u32 %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vneg2(unsigned int __a) { return __vsub2(0, __a); }\n" "\n" "__DEVICE__ unsigned int __vsub4(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vsub4.u32.u32.u32 %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vneg4(unsigned int __a) { return __vsub4(0, __a); }\n" "__DEVICE__ unsigned int __vsubss2(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vsub2.s32.s32.s32.sat %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vnegss2(unsigned int __a) {\n" " return __vsubss2(0, __a);\n" "}\n" "__DEVICE__ unsigned int __vsubss4(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vsub4.s32.s32.s32.sat %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vnegss4(unsigned int __a) {\n" " return __vsubss4(0, __a);\n" "}\n" "__DEVICE__ unsigned int __vsubus2(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vsub2.u32.u32.u32.sat %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "__DEVICE__ unsigned int __vsubus4(unsigned int __a, unsigned int __b) {\n" " unsigned int r;\n" " __asm__(\"vsub4.u32.u32.u32.sat %0,%1,%2,%3;\"\n" " : \"=r\"(r)\n" " : \"r\"(__a), \"r\"(__b), \"r\"(0));\n" " return r;\n" "}\n" "#endif // CUDA_VERSION >= 9020\n" "\n" "// For OpenMP we require the user to include as we need to know what\n" "// clock_t is on the system.\n" "#ifndef __OPENMP_NVPTX__\n" "__DEVICE__ /* clock_t= */ int clock() { return __nvvm_read_ptx_sreg_clock(); }\n" "#endif\n" "__DEVICE__ long long clock64() { 
return __nvvm_read_ptx_sreg_clock64(); }\n" "\n" "// These functions shouldn't be declared when including this header\n" "// for math function resolution purposes.\n" "#ifndef __OPENMP_NVPTX__\n" "__DEVICE__ void *memcpy(void *__a, const void *__b, size_t __c) {\n" " return __builtin_memcpy(__a, __b, __c);\n" "}\n" "__DEVICE__ void *memset(void *__a, int __b, size_t __c) {\n" " return __builtin_memset(__a, __b, __c);\n" "}\n" "#endif\n" "\n" "#pragma pop_macro(\"__DEVICE__\")\n" "#endif // __CLANG_CUDA_DEVICE_FUNCTIONS_H__\n" "" } , { "/builtins/__clang_cuda_intrinsics.h" , "/*===--- __clang_cuda_intrinsics.h - Device-side CUDA intrinsic wrappers ---===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "#ifndef __CLANG_CUDA_INTRINSICS_H__\n" "#define __CLANG_CUDA_INTRINSICS_H__\n" "#ifndef __CUDA__\n" "#error \"This file is for CUDA compilation only.\"\n" "#endif\n" "\n" "// sm_30 intrinsics: __shfl_{up,down,xor}.\n" "\n" "#define __SM_30_INTRINSICS_H__\n" "#define __SM_30_INTRINSICS_HPP__\n" "\n" "#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300\n" "\n" "#pragma push_macro(\"__MAKE_SHUFFLES\")\n" "#define __MAKE_SHUFFLES(__FnName, __IntIntrinsic, __FloatIntrinsic, __Mask, \\\n" " __Type) \\\n" " inline __device__ int __FnName(int __val, __Type __offset, \\\n" " int __width = warpSize) { \\\n" " return __IntIntrinsic(__val, __offset, \\\n" " ((warpSize - __width) << 8) | (__Mask)); \\\n" " } \\\n" " inline __device__ float __FnName(float __val, __Type __offset, \\\n" " int __width = warpSize) { \\\n" " return __FloatIntrinsic(__val, __offset, \\\n" " ((warpSize - __width) << 8) | (__Mask)); \\\n" " } \\\n" " inline __device__ unsigned int __FnName(unsigned int __val, __Type __offset, \\\n" " int 
__width = warpSize) { \\\n" " return static_cast( \\\n" " ::__FnName(static_cast(__val), __offset, __width)); \\\n" " } \\\n" " inline __device__ long long __FnName(long long __val, __Type __offset, \\\n" " int __width = warpSize) { \\\n" " struct __Bits { \\\n" " int __a, __b; \\\n" " }; \\\n" " _Static_assert(sizeof(__val) == sizeof(__Bits)); \\\n" " _Static_assert(sizeof(__Bits) == 2 * sizeof(int)); \\\n" " __Bits __tmp; \\\n" " memcpy(&__tmp, &__val, sizeof(__val)); \\\n" " __tmp.__a = ::__FnName(__tmp.__a, __offset, __width); \\\n" " __tmp.__b = ::__FnName(__tmp.__b, __offset, __width); \\\n" " long long __ret; \\\n" " memcpy(&__ret, &__tmp, sizeof(__tmp)); \\\n" " return __ret; \\\n" " } \\\n" " inline __device__ long __FnName(long __val, __Type __offset, \\\n" " int __width = warpSize) { \\\n" " _Static_assert(sizeof(long) == sizeof(long long) || \\\n" " sizeof(long) == sizeof(int)); \\\n" " if (sizeof(long) == sizeof(long long)) { \\\n" " return static_cast( \\\n" " ::__FnName(static_cast(__val), __offset, __width)); \\\n" " } else if (sizeof(long) == sizeof(int)) { \\\n" " return static_cast( \\\n" " ::__FnName(static_cast(__val), __offset, __width)); \\\n" " } \\\n" " } \\\n" " inline __device__ unsigned long __FnName( \\\n" " unsigned long __val, __Type __offset, int __width = warpSize) { \\\n" " return static_cast( \\\n" " ::__FnName(static_cast(__val), __offset, __width)); \\\n" " } \\\n" " inline __device__ unsigned long long __FnName( \\\n" " unsigned long long __val, __Type __offset, int __width = warpSize) { \\\n" " return static_cast( \\\n" " ::__FnName(static_cast(__val), __offset, __width)); \\\n" " } \\\n" " inline __device__ double __FnName(double __val, __Type __offset, \\\n" " int __width = warpSize) { \\\n" " long long __tmp; \\\n" " _Static_assert(sizeof(__tmp) == sizeof(__val)); \\\n" " memcpy(&__tmp, &__val, sizeof(__val)); \\\n" " __tmp = ::__FnName(__tmp, __offset, __width); \\\n" " double __ret; \\\n" " memcpy(&__ret, &__tmp, 
sizeof(__ret)); \\\n" " return __ret; \\\n" " }\n" "\n" "__MAKE_SHUFFLES(__shfl, __nvvm_shfl_idx_i32, __nvvm_shfl_idx_f32, 0x1f, int);\n" "// We use 0 rather than 31 as our mask, because shfl.up applies to lanes >=\n" "// maxLane.\n" "__MAKE_SHUFFLES(__shfl_up, __nvvm_shfl_up_i32, __nvvm_shfl_up_f32, 0,\n" " unsigned int);\n" "__MAKE_SHUFFLES(__shfl_down, __nvvm_shfl_down_i32, __nvvm_shfl_down_f32, 0x1f,\n" " unsigned int);\n" "__MAKE_SHUFFLES(__shfl_xor, __nvvm_shfl_bfly_i32, __nvvm_shfl_bfly_f32, 0x1f,\n" " int);\n" "#pragma pop_macro(\"__MAKE_SHUFFLES\")\n" "\n" "#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300\n" "\n" "#if CUDA_VERSION >= 9000\n" "#if (!defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300)\n" "// __shfl_sync_* variants available in CUDA-9\n" "#pragma push_macro(\"__MAKE_SYNC_SHUFFLES\")\n" "#define __MAKE_SYNC_SHUFFLES(__FnName, __IntIntrinsic, __FloatIntrinsic, \\\n" " __Mask, __Type) \\\n" " inline __device__ int __FnName(unsigned int __mask, int __val, \\\n" " __Type __offset, int __width = warpSize) { \\\n" " return __IntIntrinsic(__mask, __val, __offset, \\\n" " ((warpSize - __width) << 8) | (__Mask)); \\\n" " } \\\n" " inline __device__ float __FnName(unsigned int __mask, float __val, \\\n" " __Type __offset, int __width = warpSize) { \\\n" " return __FloatIntrinsic(__mask, __val, __offset, \\\n" " ((warpSize - __width) << 8) | (__Mask)); \\\n" " } \\\n" " inline __device__ unsigned int __FnName(unsigned int __mask, \\\n" " unsigned int __val, __Type __offset, \\\n" " int __width = warpSize) { \\\n" " return static_cast( \\\n" " ::__FnName(__mask, static_cast(__val), __offset, __width)); \\\n" " } \\\n" " inline __device__ long long __FnName(unsigned int __mask, long long __val, \\\n" " __Type __offset, \\\n" " int __width = warpSize) { \\\n" " struct __Bits { \\\n" " int __a, __b; \\\n" " }; \\\n" " _Static_assert(sizeof(__val) == sizeof(__Bits)); \\\n" " _Static_assert(sizeof(__Bits) == 2 * sizeof(int)); \\\n" " __Bits __tmp; \\\n" " 
memcpy(&__tmp, &__val, sizeof(__val)); \\\n" " __tmp.__a = ::__FnName(__mask, __tmp.__a, __offset, __width); \\\n" " __tmp.__b = ::__FnName(__mask, __tmp.__b, __offset, __width); \\\n" " long long __ret; \\\n" " memcpy(&__ret, &__tmp, sizeof(__tmp)); \\\n" " return __ret; \\\n" " } \\\n" " inline __device__ unsigned long long __FnName( \\\n" " unsigned int __mask, unsigned long long __val, __Type __offset, \\\n" " int __width = warpSize) { \\\n" " return static_cast( \\\n" " ::__FnName(__mask, static_cast(__val), __offset, __width)); \\\n" " } \\\n" " inline __device__ long __FnName(unsigned int __mask, long __val, \\\n" " __Type __offset, int __width = warpSize) { \\\n" " _Static_assert(sizeof(long) == sizeof(long long) || \\\n" " sizeof(long) == sizeof(int)); \\\n" " if (sizeof(long) == sizeof(long long)) { \\\n" " return static_cast(::__FnName( \\\n" " __mask, static_cast(__val), __offset, __width)); \\\n" " } else if (sizeof(long) == sizeof(int)) { \\\n" " return static_cast( \\\n" " ::__FnName(__mask, static_cast(__val), __offset, __width)); \\\n" " } \\\n" " } \\\n" " inline __device__ unsigned long __FnName( \\\n" " unsigned int __mask, unsigned long __val, __Type __offset, \\\n" " int __width = warpSize) { \\\n" " return static_cast( \\\n" " ::__FnName(__mask, static_cast(__val), __offset, __width)); \\\n" " } \\\n" " inline __device__ double __FnName(unsigned int __mask, double __val, \\\n" " __Type __offset, int __width = warpSize) { \\\n" " long long __tmp; \\\n" " _Static_assert(sizeof(__tmp) == sizeof(__val)); \\\n" " memcpy(&__tmp, &__val, sizeof(__val)); \\\n" " __tmp = ::__FnName(__mask, __tmp, __offset, __width); \\\n" " double __ret; \\\n" " memcpy(&__ret, &__tmp, sizeof(__ret)); \\\n" " return __ret; \\\n" " }\n" "__MAKE_SYNC_SHUFFLES(__shfl_sync, __nvvm_shfl_sync_idx_i32,\n" " __nvvm_shfl_sync_idx_f32, 0x1f, int);\n" "// We use 0 rather than 31 as our mask, because shfl.up applies to lanes >=\n" "// maxLane.\n" 
"__MAKE_SYNC_SHUFFLES(__shfl_up_sync, __nvvm_shfl_sync_up_i32,\n" " __nvvm_shfl_sync_up_f32, 0, unsigned int);\n" "__MAKE_SYNC_SHUFFLES(__shfl_down_sync, __nvvm_shfl_sync_down_i32,\n" " __nvvm_shfl_sync_down_f32, 0x1f, unsigned int);\n" "__MAKE_SYNC_SHUFFLES(__shfl_xor_sync, __nvvm_shfl_sync_bfly_i32,\n" " __nvvm_shfl_sync_bfly_f32, 0x1f, int);\n" "#pragma pop_macro(\"__MAKE_SYNC_SHUFFLES\")\n" "\n" "inline __device__ void __syncwarp(unsigned int mask = 0xffffffff) {\n" " return __nvvm_bar_warp_sync(mask);\n" "}\n" "\n" "inline __device__ void __barrier_sync(unsigned int id) {\n" " __nvvm_barrier_sync(id);\n" "}\n" "\n" "inline __device__ void __barrier_sync_count(unsigned int id,\n" " unsigned int count) {\n" " __nvvm_barrier_sync_cnt(id, count);\n" "}\n" "\n" "inline __device__ int __all_sync(unsigned int mask, int pred) {\n" " return __nvvm_vote_all_sync(mask, pred);\n" "}\n" "\n" "inline __device__ int __any_sync(unsigned int mask, int pred) {\n" " return __nvvm_vote_any_sync(mask, pred);\n" "}\n" "\n" "inline __device__ int __uni_sync(unsigned int mask, int pred) {\n" " return __nvvm_vote_uni_sync(mask, pred);\n" "}\n" "\n" "inline __device__ unsigned int __ballot_sync(unsigned int mask, int pred) {\n" " return __nvvm_vote_ballot_sync(mask, pred);\n" "}\n" "\n" "inline __device__ unsigned int __activemask() {\n" "#if CUDA_VERSION < 9020\n" " return __nvvm_vote_ballot(1);\n" "#else\n" " unsigned int mask;\n" " asm volatile(\"activemask.b32 %0;\" : \"=r\"(mask));\n" " return mask;\n" "#endif\n" "}\n" "\n" "inline __device__ unsigned int __fns(unsigned mask, unsigned base, int offset) {\n" " return __nvvm_fns(mask, base, offset);\n" "}\n" "\n" "#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300\n" "\n" "// Define __match* builtins CUDA-9 headers expect to see.\n" "#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700\n" "inline __device__ unsigned int __match32_any_sync(unsigned int mask,\n" " unsigned int value) {\n" " return __nvvm_match_any_sync_i32(mask, 
value);\n" "}\n" "\n" "inline __device__ unsigned int\n" "__match64_any_sync(unsigned int mask, unsigned long long value) {\n" " return __nvvm_match_any_sync_i64(mask, value);\n" "}\n" "\n" "inline __device__ unsigned int\n" "__match32_all_sync(unsigned int mask, unsigned int value, int *pred) {\n" " return __nvvm_match_all_sync_i32p(mask, value, pred);\n" "}\n" "\n" "inline __device__ unsigned int\n" "__match64_all_sync(unsigned int mask, unsigned long long value, int *pred) {\n" " return __nvvm_match_all_sync_i64p(mask, value, pred);\n" "}\n" "#include \"crt/sm_70_rt.hpp\"\n" "\n" "#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700\n" "#endif // __CUDA_VERSION >= 9000\n" "\n" "// sm_32 intrinsics: __ldg and __funnelshift_{l,lc,r,rc}.\n" "\n" "// Prevent the vanilla sm_32 intrinsics header from being included.\n" "#define __SM_32_INTRINSICS_H__\n" "#define __SM_32_INTRINSICS_HPP__\n" "\n" "#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 320\n" "\n" "inline __device__ char __ldg(const char *ptr) { return __nvvm_ldg_c(ptr); }\n" "inline __device__ short __ldg(const short *ptr) { return __nvvm_ldg_s(ptr); }\n" "inline __device__ int __ldg(const int *ptr) { return __nvvm_ldg_i(ptr); }\n" "inline __device__ long __ldg(const long *ptr) { return __nvvm_ldg_l(ptr); }\n" "inline __device__ long long __ldg(const long long *ptr) {\n" " return __nvvm_ldg_ll(ptr);\n" "}\n" "inline __device__ unsigned char __ldg(const unsigned char *ptr) {\n" " return __nvvm_ldg_uc(ptr);\n" "}\n" "inline __device__ signed char __ldg(const signed char *ptr) {\n" " return __nvvm_ldg_uc((const unsigned char *)ptr);\n" "}\n" "inline __device__ unsigned short __ldg(const unsigned short *ptr) {\n" " return __nvvm_ldg_us(ptr);\n" "}\n" "inline __device__ unsigned int __ldg(const unsigned int *ptr) {\n" " return __nvvm_ldg_ui(ptr);\n" "}\n" "inline __device__ unsigned long __ldg(const unsigned long *ptr) {\n" " return __nvvm_ldg_ul(ptr);\n" "}\n" "inline __device__ unsigned long long __ldg(const 
unsigned long long *ptr) {\n" " return __nvvm_ldg_ull(ptr);\n" "}\n" "inline __device__ float __ldg(const float *ptr) { return __nvvm_ldg_f(ptr); }\n" "inline __device__ double __ldg(const double *ptr) { return __nvvm_ldg_d(ptr); }\n" "\n" "inline __device__ char2 __ldg(const char2 *ptr) {\n" " typedef char c2 __attribute__((ext_vector_type(2)));\n" " // We can assume that ptr is aligned at least to char2's alignment, but the\n" " // load will assume that ptr is aligned to char2's alignment. This is only\n" " // safe if alignof(c2) <= alignof(char2).\n" " c2 rv = __nvvm_ldg_c2(reinterpret_cast(ptr));\n" " char2 ret;\n" " ret.x = rv[0];\n" " ret.y = rv[1];\n" " return ret;\n" "}\n" "inline __device__ char4 __ldg(const char4 *ptr) {\n" " typedef char c4 __attribute__((ext_vector_type(4)));\n" " c4 rv = __nvvm_ldg_c4(reinterpret_cast(ptr));\n" " char4 ret;\n" " ret.x = rv[0];\n" " ret.y = rv[1];\n" " ret.z = rv[2];\n" " ret.w = rv[3];\n" " return ret;\n" "}\n" "inline __device__ short2 __ldg(const short2 *ptr) {\n" " typedef short s2 __attribute__((ext_vector_type(2)));\n" " s2 rv = __nvvm_ldg_s2(reinterpret_cast(ptr));\n" " short2 ret;\n" " ret.x = rv[0];\n" " ret.y = rv[1];\n" " return ret;\n" "}\n" "inline __device__ short4 __ldg(const short4 *ptr) {\n" " typedef short s4 __attribute__((ext_vector_type(4)));\n" " s4 rv = __nvvm_ldg_s4(reinterpret_cast(ptr));\n" " short4 ret;\n" " ret.x = rv[0];\n" " ret.y = rv[1];\n" " ret.z = rv[2];\n" " ret.w = rv[3];\n" " return ret;\n" "}\n" "inline __device__ int2 __ldg(const int2 *ptr) {\n" " typedef int i2 __attribute__((ext_vector_type(2)));\n" " i2 rv = __nvvm_ldg_i2(reinterpret_cast(ptr));\n" " int2 ret;\n" " ret.x = rv[0];\n" " ret.y = rv[1];\n" " return ret;\n" "}\n" "inline __device__ int4 __ldg(const int4 *ptr) {\n" " typedef int i4 __attribute__((ext_vector_type(4)));\n" " i4 rv = __nvvm_ldg_i4(reinterpret_cast(ptr));\n" " int4 ret;\n" " ret.x = rv[0];\n" " ret.y = rv[1];\n" " ret.z = rv[2];\n" " ret.w = rv[3];\n" " 
return ret;\n" "}\n" "inline __device__ longlong2 __ldg(const longlong2 *ptr) {\n" " typedef long long ll2 __attribute__((ext_vector_type(2)));\n" " ll2 rv = __nvvm_ldg_ll2(reinterpret_cast(ptr));\n" " longlong2 ret;\n" " ret.x = rv[0];\n" " ret.y = rv[1];\n" " return ret;\n" "}\n" "\n" "inline __device__ uchar2 __ldg(const uchar2 *ptr) {\n" " typedef unsigned char uc2 __attribute__((ext_vector_type(2)));\n" " uc2 rv = __nvvm_ldg_uc2(reinterpret_cast(ptr));\n" " uchar2 ret;\n" " ret.x = rv[0];\n" " ret.y = rv[1];\n" " return ret;\n" "}\n" "inline __device__ uchar4 __ldg(const uchar4 *ptr) {\n" " typedef unsigned char uc4 __attribute__((ext_vector_type(4)));\n" " uc4 rv = __nvvm_ldg_uc4(reinterpret_cast(ptr));\n" " uchar4 ret;\n" " ret.x = rv[0];\n" " ret.y = rv[1];\n" " ret.z = rv[2];\n" " ret.w = rv[3];\n" " return ret;\n" "}\n" "inline __device__ ushort2 __ldg(const ushort2 *ptr) {\n" " typedef unsigned short us2 __attribute__((ext_vector_type(2)));\n" " us2 rv = __nvvm_ldg_us2(reinterpret_cast(ptr));\n" " ushort2 ret;\n" " ret.x = rv[0];\n" " ret.y = rv[1];\n" " return ret;\n" "}\n" "inline __device__ ushort4 __ldg(const ushort4 *ptr) {\n" " typedef unsigned short us4 __attribute__((ext_vector_type(4)));\n" " us4 rv = __nvvm_ldg_us4(reinterpret_cast(ptr));\n" " ushort4 ret;\n" " ret.x = rv[0];\n" " ret.y = rv[1];\n" " ret.z = rv[2];\n" " ret.w = rv[3];\n" " return ret;\n" "}\n" "inline __device__ uint2 __ldg(const uint2 *ptr) {\n" " typedef unsigned int ui2 __attribute__((ext_vector_type(2)));\n" " ui2 rv = __nvvm_ldg_ui2(reinterpret_cast(ptr));\n" " uint2 ret;\n" " ret.x = rv[0];\n" " ret.y = rv[1];\n" " return ret;\n" "}\n" "inline __device__ uint4 __ldg(const uint4 *ptr) {\n" " typedef unsigned int ui4 __attribute__((ext_vector_type(4)));\n" " ui4 rv = __nvvm_ldg_ui4(reinterpret_cast(ptr));\n" " uint4 ret;\n" " ret.x = rv[0];\n" " ret.y = rv[1];\n" " ret.z = rv[2];\n" " ret.w = rv[3];\n" " return ret;\n" "}\n" "inline __device__ ulonglong2 __ldg(const 
ulonglong2 *ptr) {\n" " typedef unsigned long long ull2 __attribute__((ext_vector_type(2)));\n" " ull2 rv = __nvvm_ldg_ull2(reinterpret_cast(ptr));\n" " ulonglong2 ret;\n" " ret.x = rv[0];\n" " ret.y = rv[1];\n" " return ret;\n" "}\n" "\n" "inline __device__ float2 __ldg(const float2 *ptr) {\n" " typedef float f2 __attribute__((ext_vector_type(2)));\n" " f2 rv = __nvvm_ldg_f2(reinterpret_cast(ptr));\n" " float2 ret;\n" " ret.x = rv[0];\n" " ret.y = rv[1];\n" " return ret;\n" "}\n" "inline __device__ float4 __ldg(const float4 *ptr) {\n" " typedef float f4 __attribute__((ext_vector_type(4)));\n" " f4 rv = __nvvm_ldg_f4(reinterpret_cast(ptr));\n" " float4 ret;\n" " ret.x = rv[0];\n" " ret.y = rv[1];\n" " ret.z = rv[2];\n" " ret.w = rv[3];\n" " return ret;\n" "}\n" "inline __device__ double2 __ldg(const double2 *ptr) {\n" " typedef double d2 __attribute__((ext_vector_type(2)));\n" " d2 rv = __nvvm_ldg_d2(reinterpret_cast(ptr));\n" " double2 ret;\n" " ret.x = rv[0];\n" " ret.y = rv[1];\n" " return ret;\n" "}\n" "\n" "// TODO: Implement these as intrinsics, so the backend can work its magic on\n" "// these. 
Alternatively, we could implement these as plain C and try to get\n" "// llvm to recognize the relevant patterns.\n" "inline __device__ unsigned __funnelshift_l(unsigned low32, unsigned high32,\n" " unsigned shiftWidth) {\n" " unsigned result;\n" " asm(\"shf.l.wrap.b32 %0, %1, %2, %3;\"\n" " : \"=r\"(result)\n" " : \"r\"(low32), \"r\"(high32), \"r\"(shiftWidth));\n" " return result;\n" "}\n" "inline __device__ unsigned __funnelshift_lc(unsigned low32, unsigned high32,\n" " unsigned shiftWidth) {\n" " unsigned result;\n" " asm(\"shf.l.clamp.b32 %0, %1, %2, %3;\"\n" " : \"=r\"(result)\n" " : \"r\"(low32), \"r\"(high32), \"r\"(shiftWidth));\n" " return result;\n" "}\n" "inline __device__ unsigned __funnelshift_r(unsigned low32, unsigned high32,\n" " unsigned shiftWidth) {\n" " unsigned result;\n" " asm(\"shf.r.wrap.b32 %0, %1, %2, %3;\"\n" " : \"=r\"(result)\n" " : \"r\"(low32), \"r\"(high32), \"r\"(shiftWidth));\n" " return result;\n" "}\n" "inline __device__ unsigned __funnelshift_rc(unsigned low32, unsigned high32,\n" " unsigned shiftWidth) {\n" " unsigned ret;\n" " asm(\"shf.r.clamp.b32 %0, %1, %2, %3;\"\n" " : \"=r\"(ret)\n" " : \"r\"(low32), \"r\"(high32), \"r\"(shiftWidth));\n" " return ret;\n" "}\n" "\n" "#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 320\n" "\n" "#if CUDA_VERSION >= 11000\n" "extern \"C\" {\n" "__device__ inline size_t __nv_cvta_generic_to_global_impl(const void *__ptr) {\n" " return (size_t)(void __attribute__((address_space(1))) *)__ptr;\n" "}\n" "__device__ inline size_t __nv_cvta_generic_to_shared_impl(const void *__ptr) {\n" " return (size_t)(void __attribute__((address_space(3))) *)__ptr;\n" "}\n" "__device__ inline size_t __nv_cvta_generic_to_constant_impl(const void *__ptr) {\n" " return (size_t)(void __attribute__((address_space(4))) *)__ptr;\n" "}\n" "__device__ inline size_t __nv_cvta_generic_to_local_impl(const void *__ptr) {\n" " return (size_t)(void __attribute__((address_space(5))) *)__ptr;\n" "}\n" "__device__ inline 
void *__nv_cvta_global_to_generic_impl(size_t __ptr) {\n" " return (void *)(void __attribute__((address_space(1))) *)__ptr;\n" "}\n" "__device__ inline void *__nv_cvta_shared_to_generic_impl(size_t __ptr) {\n" " return (void *)(void __attribute__((address_space(3))) *)__ptr;\n" "}\n" "__device__ inline void *__nv_cvta_constant_to_generic_impl(size_t __ptr) {\n" " return (void *)(void __attribute__((address_space(4))) *)__ptr;\n" "}\n" "__device__ inline void *__nv_cvta_local_to_generic_impl(size_t __ptr) {\n" " return (void *)(void __attribute__((address_space(5))) *)__ptr;\n" "}\n" "__device__ inline cuuint32_t __nvvm_get_smem_pointer(void *__ptr) {\n" " return __nv_cvta_generic_to_shared_impl(__ptr);\n" "}\n" "} // extern \"C\"\n" "\n" "#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800\n" "__device__ inline unsigned __reduce_add_sync(unsigned __mask,\n" " unsigned __value) {\n" " return __nvvm_redux_sync_add(__mask, __value);\n" "}\n" "__device__ inline unsigned __reduce_min_sync(unsigned __mask,\n" " unsigned __value) {\n" " return __nvvm_redux_sync_umin(__mask, __value);\n" "}\n" "__device__ inline unsigned __reduce_max_sync(unsigned __mask,\n" " unsigned __value) {\n" " return __nvvm_redux_sync_umax(__mask, __value);\n" "}\n" "__device__ inline int __reduce_min_sync(unsigned __mask, int __value) {\n" " return __nvvm_redux_sync_min(__mask, __value);\n" "}\n" "__device__ inline int __reduce_max_sync(unsigned __mask, int __value) {\n" " return __nvvm_redux_sync_max(__mask, __value);\n" "}\n" "__device__ inline unsigned __reduce_or_sync(unsigned __mask, unsigned __value) {\n" " return __nvvm_redux_sync_or(__mask, __value);\n" "}\n" "__device__ inline unsigned __reduce_and_sync(unsigned __mask,\n" " unsigned __value) {\n" " return __nvvm_redux_sync_and(__mask, __value);\n" "}\n" "__device__ inline unsigned __reduce_xor_sync(unsigned __mask,\n" " unsigned __value) {\n" " return __nvvm_redux_sync_xor(__mask, __value);\n" "}\n" "\n" "__device__ inline void 
__nv_memcpy_async_shared_global_4(void *__dst,\n" " const void *__src,\n" " unsigned __src_size) {\n" " __nvvm_cp_async_ca_shared_global_4(\n" " (void __attribute__((address_space(3))) *)__dst,\n" " (const void __attribute__((address_space(1))) *)__src, __src_size);\n" "}\n" "__device__ inline void __nv_memcpy_async_shared_global_8(void *__dst,\n" " const void *__src,\n" " unsigned __src_size) {\n" " __nvvm_cp_async_ca_shared_global_8(\n" " (void __attribute__((address_space(3))) *)__dst,\n" " (const void __attribute__((address_space(1))) *)__src, __src_size);\n" "}\n" "__device__ inline void __nv_memcpy_async_shared_global_16(void *__dst,\n" " const void *__src,\n" " unsigned __src_size) {\n" " __nvvm_cp_async_ca_shared_global_16(\n" " (void __attribute__((address_space(3))) *)__dst,\n" " (const void __attribute__((address_space(1))) *)__src, __src_size);\n" "}\n" "\n" "__device__ inline void *\n" "__nv_associate_access_property(const void *__ptr, unsigned long long __prop) {\n" " // TODO: it appears to provide compiler with some sort of a hint. We do not\n" " // know what exactly it is supposed to do. However, CUDA headers suggest that\n" " // just passing through __ptr should not affect correctness. 
They do so on\n" " // pre-sm80 GPUs where this builtin is not available.\n" " return (void*)__ptr;\n" "}\n" "#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800\n" "\n" "#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 900\n" "__device__ inline unsigned __isCtaShared(const void *ptr) {\n" " return __isShared(ptr);\n" "}\n" "\n" "__device__ inline unsigned __isClusterShared(const void *__ptr) {\n" " return __nvvm_isspacep_shared_cluster(__ptr);\n" "}\n" "\n" "__device__ inline void *__cluster_map_shared_rank(const void *__ptr,\n" " unsigned __rank) {\n" " return __nvvm_mapa((void *)__ptr, __rank);\n" "}\n" "\n" "__device__ inline unsigned __cluster_query_shared_rank(const void *__ptr) {\n" " return __nvvm_getctarank((void *)__ptr);\n" "}\n" "\n" "__device__ inline uint2\n" "__cluster_map_shared_multicast(const void *__ptr,\n" " unsigned int __cluster_cta_mask) {\n" " return make_uint2((unsigned)__cvta_generic_to_shared(__ptr),\n" " __cluster_cta_mask);\n" "}\n" "\n" "__device__ inline unsigned __clusterDimIsSpecified() {\n" " return __nvvm_is_explicit_cluster();\n" "}\n" "\n" "__device__ inline dim3 __clusterDim() {\n" " return dim3(__nvvm_read_ptx_sreg_cluster_nctaid_x(),\n" " __nvvm_read_ptx_sreg_cluster_nctaid_y(),\n" " __nvvm_read_ptx_sreg_cluster_nctaid_z());\n" "}\n" "\n" "__device__ inline dim3 __clusterRelativeBlockIdx() {\n" " return dim3(__nvvm_read_ptx_sreg_cluster_ctaid_x(),\n" " __nvvm_read_ptx_sreg_cluster_ctaid_y(),\n" " __nvvm_read_ptx_sreg_cluster_ctaid_z());\n" "}\n" "\n" "__device__ inline dim3 __clusterGridDimInClusters() {\n" " return dim3(__nvvm_read_ptx_sreg_nclusterid_x(),\n" " __nvvm_read_ptx_sreg_nclusterid_y(),\n" " __nvvm_read_ptx_sreg_nclusterid_z());\n" "}\n" "\n" "__device__ inline dim3 __clusterIdx() {\n" " return dim3(__nvvm_read_ptx_sreg_clusterid_x(),\n" " __nvvm_read_ptx_sreg_clusterid_y(),\n" " __nvvm_read_ptx_sreg_clusterid_z());\n" "}\n" "\n" "__device__ inline unsigned __clusterRelativeBlockRank() {\n" " return 
__nvvm_read_ptx_sreg_cluster_ctarank();\n" "}\n" "\n" "__device__ inline unsigned __clusterSizeInBlocks() {\n" " return __nvvm_read_ptx_sreg_cluster_nctarank();\n" "}\n" "\n" "__device__ inline void __cluster_barrier_arrive() {\n" " __nvvm_barrier_cluster_arrive();\n" "}\n" "\n" "__device__ inline void __cluster_barrier_arrive_relaxed() {\n" " __nvvm_barrier_cluster_arrive_relaxed();\n" "}\n" "\n" "__device__ inline void __cluster_barrier_wait() {\n" " __nvvm_barrier_cluster_wait();\n" "}\n" "\n" "__device__ inline void __threadfence_cluster() { __nvvm_fence_sc_cluster(); }\n" "\n" "__device__ inline float2 atomicAdd(float2 *__ptr, float2 __val) {\n" " float2 __ret;\n" " __asm__(\"atom.add.v2.f32 {%0, %1}, [%2], {%3, %4};\"\n" " : \"=f\"(__ret.x), \"=f\"(__ret.y)\n" " : \"l\"(__ptr), \"f\"(__val.x), \"f\"(__val.y));\n" " return __ret;\n" "}\n" "\n" "__device__ inline float2 atomicAdd_block(float2 *__ptr, float2 __val) {\n" " float2 __ret;\n" " __asm__(\"atom.cta.add.v2.f32 {%0, %1}, [%2], {%3, %4};\"\n" " : \"=f\"(__ret.x), \"=f\"(__ret.y)\n" " : \"l\"(__ptr), \"f\"(__val.x), \"f\"(__val.y));\n" " return __ret;\n" "}\n" "\n" "__device__ inline float2 atomicAdd_system(float2 *__ptr, float2 __val) {\n" " float2 __ret;\n" " __asm__(\"atom.sys.add.v2.f32 {%0, %1}, [%2], {%3, %4};\"\n" " : \"=f\"(__ret.x), \"=f\"(__ret.y)\n" " : \"l\"(__ptr), \"f\"(__val.x), \"f\"(__val.y));\n" " return __ret;\n" "}\n" "\n" "__device__ inline float4 atomicAdd(float4 *__ptr, float4 __val) {\n" " float4 __ret;\n" " __asm__(\"atom.add.v4.f32 {%0, %1, %2, %3}, [%4], {%5, %6, %7, %8};\"\n" " : \"=f\"(__ret.x), \"=f\"(__ret.y), \"=f\"(__ret.z), \"=f\"(__ret.w)\n" " : \"l\"(__ptr), \"f\"(__val.x), \"f\"(__val.y), \"f\"(__val.z), \"f\"(__val.w));\n" " return __ret;\n" "}\n" "\n" "__device__ inline float4 atomicAdd_block(float4 *__ptr, float4 __val) {\n" " float4 __ret;\n" " __asm__(\n" " \"atom.cta.add.v4.f32 {%0, %1, %2, %3}, [%4], {%5, %6, %7, %8};\"\n" " : \"=f\"(__ret.x), \"=f\"(__ret.y), 
\"=f\"(__ret.z), \"=f\"(__ret.w)\n" " : \"l\"(__ptr), \"f\"(__val.x), \"f\"(__val.y), \"f\"(__val.z), \"f\"(__val.w));\n" " return __ret;\n" "}\n" "\n" "__device__ inline float4 atomicAdd_system(float4 *__ptr, float4 __val) {\n" " float4 __ret;\n" " __asm__(\n" " \"atom.sys.add.v4.f32 {%0, %1, %2, %3}, [%4], {%5, %6, %7, %8};\"\n" " : \"=f\"(__ret.x), \"=f\"(__ret.y), \"=f\"(__ret.z), \"=f\"(__ret.w)\n" " : \"l\"(__ptr), \"f\"(__val.x), \"f\"(__val.y), \"f\"(__val.z), \"f\"(__val.w)\n" " :);\n" " return __ret;\n" "}\n" "\n" "#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 900\n" "#endif // CUDA_VERSION >= 11000\n" "\n" "#endif // defined(__CLANG_CUDA_INTRINSICS_H__)\n" "" } , { "/builtins/__clang_cuda_libdevice_declares.h" , "/*===-- __clang_cuda_libdevice_declares.h - decls for libdevice functions --===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __CLANG_CUDA_LIBDEVICE_DECLARES_H__\n" "#define __CLANG_CUDA_LIBDEVICE_DECLARES_H__\n" "\n" "#if defined(__cplusplus)\n" "extern \"C\" {\n" "#endif\n" "\n" "#if defined(__OPENMP_NVPTX__)\n" "#define __DEVICE__\n" "#pragma omp begin assumes ext_spmd_amenable no_openmp\n" "#elif defined(__CUDA__)\n" "#define __DEVICE__ __device__\n" "#endif\n" "\n" "__DEVICE__ int __nv_abs(int __a);\n" "__DEVICE__ double __nv_acos(double __a);\n" "__DEVICE__ float __nv_acosf(float __a);\n" "__DEVICE__ double __nv_acosh(double __a);\n" "__DEVICE__ float __nv_acoshf(float __a);\n" "__DEVICE__ double __nv_asin(double __a);\n" "__DEVICE__ float __nv_asinf(float __a);\n" "__DEVICE__ double __nv_asinh(double __a);\n" "__DEVICE__ float __nv_asinhf(float __a);\n" "__DEVICE__ double __nv_atan2(double __a, double __b);\n" "__DEVICE__ float __nv_atan2f(float 
__a, float __b);\n" "__DEVICE__ double __nv_atan(double __a);\n" "__DEVICE__ float __nv_atanf(float __a);\n" "__DEVICE__ double __nv_atanh(double __a);\n" "__DEVICE__ float __nv_atanhf(float __a);\n" "__DEVICE__ int __nv_brev(int __a);\n" "__DEVICE__ long long __nv_brevll(long long __a);\n" "__DEVICE__ int __nv_byte_perm(int __a, int __b, int __c);\n" "__DEVICE__ double __nv_cbrt(double __a);\n" "__DEVICE__ float __nv_cbrtf(float __a);\n" "__DEVICE__ double __nv_ceil(double __a);\n" "__DEVICE__ float __nv_ceilf(float __a);\n" "__DEVICE__ int __nv_clz(int __a);\n" "__DEVICE__ int __nv_clzll(long long __a);\n" "__DEVICE__ double __nv_copysign(double __a, double __b);\n" "__DEVICE__ float __nv_copysignf(float __a, float __b);\n" "__DEVICE__ double __nv_cos(double __a);\n" "__DEVICE__ float __nv_cosf(float __a);\n" "__DEVICE__ double __nv_cosh(double __a);\n" "__DEVICE__ float __nv_coshf(float __a);\n" "__DEVICE__ double __nv_cospi(double __a);\n" "__DEVICE__ float __nv_cospif(float __a);\n" "__DEVICE__ double __nv_cyl_bessel_i0(double __a);\n" "__DEVICE__ float __nv_cyl_bessel_i0f(float __a);\n" "__DEVICE__ double __nv_cyl_bessel_i1(double __a);\n" "__DEVICE__ float __nv_cyl_bessel_i1f(float __a);\n" "__DEVICE__ double __nv_dadd_rd(double __a, double __b);\n" "__DEVICE__ double __nv_dadd_rn(double __a, double __b);\n" "__DEVICE__ double __nv_dadd_ru(double __a, double __b);\n" "__DEVICE__ double __nv_dadd_rz(double __a, double __b);\n" "__DEVICE__ double __nv_ddiv_rd(double __a, double __b);\n" "__DEVICE__ double __nv_ddiv_rn(double __a, double __b);\n" "__DEVICE__ double __nv_ddiv_ru(double __a, double __b);\n" "__DEVICE__ double __nv_ddiv_rz(double __a, double __b);\n" "__DEVICE__ double __nv_dmul_rd(double __a, double __b);\n" "__DEVICE__ double __nv_dmul_rn(double __a, double __b);\n" "__DEVICE__ double __nv_dmul_ru(double __a, double __b);\n" "__DEVICE__ double __nv_dmul_rz(double __a, double __b);\n" "__DEVICE__ float __nv_double2float_rd(double __a);\n" 
"__DEVICE__ float __nv_double2float_rn(double __a);\n" "__DEVICE__ float __nv_double2float_ru(double __a);\n" "__DEVICE__ float __nv_double2float_rz(double __a);\n" "__DEVICE__ int __nv_double2hiint(double __a);\n" "__DEVICE__ int __nv_double2int_rd(double __a);\n" "__DEVICE__ int __nv_double2int_rn(double __a);\n" "__DEVICE__ int __nv_double2int_ru(double __a);\n" "__DEVICE__ int __nv_double2int_rz(double __a);\n" "__DEVICE__ long long __nv_double2ll_rd(double __a);\n" "__DEVICE__ long long __nv_double2ll_rn(double __a);\n" "__DEVICE__ long long __nv_double2ll_ru(double __a);\n" "__DEVICE__ long long __nv_double2ll_rz(double __a);\n" "__DEVICE__ int __nv_double2loint(double __a);\n" "__DEVICE__ unsigned int __nv_double2uint_rd(double __a);\n" "__DEVICE__ unsigned int __nv_double2uint_rn(double __a);\n" "__DEVICE__ unsigned int __nv_double2uint_ru(double __a);\n" "__DEVICE__ unsigned int __nv_double2uint_rz(double __a);\n" "__DEVICE__ unsigned long long __nv_double2ull_rd(double __a);\n" "__DEVICE__ unsigned long long __nv_double2ull_rn(double __a);\n" "__DEVICE__ unsigned long long __nv_double2ull_ru(double __a);\n" "__DEVICE__ unsigned long long __nv_double2ull_rz(double __a);\n" "__DEVICE__ unsigned long long __nv_double_as_longlong(double __a);\n" "__DEVICE__ double __nv_drcp_rd(double __a);\n" "__DEVICE__ double __nv_drcp_rn(double __a);\n" "__DEVICE__ double __nv_drcp_ru(double __a);\n" "__DEVICE__ double __nv_drcp_rz(double __a);\n" "__DEVICE__ double __nv_dsqrt_rd(double __a);\n" "__DEVICE__ double __nv_dsqrt_rn(double __a);\n" "__DEVICE__ double __nv_dsqrt_ru(double __a);\n" "__DEVICE__ double __nv_dsqrt_rz(double __a);\n" "__DEVICE__ double __nv_dsub_rd(double __a, double __b);\n" "__DEVICE__ double __nv_dsub_rn(double __a, double __b);\n" "__DEVICE__ double __nv_dsub_ru(double __a, double __b);\n" "__DEVICE__ double __nv_dsub_rz(double __a, double __b);\n" "__DEVICE__ double __nv_erfc(double __a);\n" "__DEVICE__ float __nv_erfcf(float __a);\n" 
"__DEVICE__ double __nv_erfcinv(double __a);\n" "__DEVICE__ float __nv_erfcinvf(float __a);\n" "__DEVICE__ double __nv_erfcx(double __a);\n" "__DEVICE__ float __nv_erfcxf(float __a);\n" "__DEVICE__ double __nv_erf(double __a);\n" "__DEVICE__ float __nv_erff(float __a);\n" "__DEVICE__ double __nv_erfinv(double __a);\n" "__DEVICE__ float __nv_erfinvf(float __a);\n" "__DEVICE__ double __nv_exp10(double __a);\n" "__DEVICE__ float __nv_exp10f(float __a);\n" "__DEVICE__ double __nv_exp2(double __a);\n" "__DEVICE__ float __nv_exp2f(float __a);\n" "__DEVICE__ double __nv_exp(double __a);\n" "__DEVICE__ float __nv_expf(float __a);\n" "__DEVICE__ double __nv_expm1(double __a);\n" "__DEVICE__ float __nv_expm1f(float __a);\n" "__DEVICE__ double __nv_fabs(double __a);\n" "__DEVICE__ float __nv_fabsf(float __a);\n" "__DEVICE__ float __nv_fadd_rd(float __a, float __b);\n" "__DEVICE__ float __nv_fadd_rn(float __a, float __b);\n" "__DEVICE__ float __nv_fadd_ru(float __a, float __b);\n" "__DEVICE__ float __nv_fadd_rz(float __a, float __b);\n" "__DEVICE__ float __nv_fast_cosf(float __a);\n" "__DEVICE__ float __nv_fast_exp10f(float __a);\n" "__DEVICE__ float __nv_fast_expf(float __a);\n" "__DEVICE__ float __nv_fast_fdividef(float __a, float __b);\n" "__DEVICE__ float __nv_fast_log10f(float __a);\n" "__DEVICE__ float __nv_fast_log2f(float __a);\n" "__DEVICE__ float __nv_fast_logf(float __a);\n" "__DEVICE__ float __nv_fast_powf(float __a, float __b);\n" "__DEVICE__ void __nv_fast_sincosf(float __a, float *__s, float *__c);\n" "__DEVICE__ float __nv_fast_sinf(float __a);\n" "__DEVICE__ float __nv_fast_tanf(float __a);\n" "__DEVICE__ double __nv_fdim(double __a, double __b);\n" "__DEVICE__ float __nv_fdimf(float __a, float __b);\n" "__DEVICE__ float __nv_fdiv_rd(float __a, float __b);\n" "__DEVICE__ float __nv_fdiv_rn(float __a, float __b);\n" "__DEVICE__ float __nv_fdiv_ru(float __a, float __b);\n" "__DEVICE__ float __nv_fdiv_rz(float __a, float __b);\n" "__DEVICE__ int __nv_ffs(int 
__a);\n" "__DEVICE__ int __nv_ffsll(long long __a);\n" "__DEVICE__ int __nv_finitef(float __a);\n" "__DEVICE__ unsigned short __nv_float2half_rn(float __a);\n" "__DEVICE__ int __nv_float2int_rd(float __a);\n" "__DEVICE__ int __nv_float2int_rn(float __a);\n" "__DEVICE__ int __nv_float2int_ru(float __a);\n" "__DEVICE__ int __nv_float2int_rz(float __a);\n" "__DEVICE__ long long __nv_float2ll_rd(float __a);\n" "__DEVICE__ long long __nv_float2ll_rn(float __a);\n" "__DEVICE__ long long __nv_float2ll_ru(float __a);\n" "__DEVICE__ long long __nv_float2ll_rz(float __a);\n" "__DEVICE__ unsigned int __nv_float2uint_rd(float __a);\n" "__DEVICE__ unsigned int __nv_float2uint_rn(float __a);\n" "__DEVICE__ unsigned int __nv_float2uint_ru(float __a);\n" "__DEVICE__ unsigned int __nv_float2uint_rz(float __a);\n" "__DEVICE__ unsigned long long __nv_float2ull_rd(float __a);\n" "__DEVICE__ unsigned long long __nv_float2ull_rn(float __a);\n" "__DEVICE__ unsigned long long __nv_float2ull_ru(float __a);\n" "__DEVICE__ unsigned long long __nv_float2ull_rz(float __a);\n" "__DEVICE__ int __nv_float_as_int(float __a);\n" "__DEVICE__ unsigned int __nv_float_as_uint(float __a);\n" "__DEVICE__ double __nv_floor(double __a);\n" "__DEVICE__ float __nv_floorf(float __a);\n" "__DEVICE__ double __nv_fma(double __a, double __b, double __c);\n" "__DEVICE__ float __nv_fmaf(float __a, float __b, float __c);\n" "__DEVICE__ float __nv_fmaf_ieee_rd(float __a, float __b, float __c);\n" "__DEVICE__ float __nv_fmaf_ieee_rn(float __a, float __b, float __c);\n" "__DEVICE__ float __nv_fmaf_ieee_ru(float __a, float __b, float __c);\n" "__DEVICE__ float __nv_fmaf_ieee_rz(float __a, float __b, float __c);\n" "__DEVICE__ float __nv_fmaf_rd(float __a, float __b, float __c);\n" "__DEVICE__ float __nv_fmaf_rn(float __a, float __b, float __c);\n" "__DEVICE__ float __nv_fmaf_ru(float __a, float __b, float __c);\n" "__DEVICE__ float __nv_fmaf_rz(float __a, float __b, float __c);\n" "__DEVICE__ double __nv_fma_rd(double 
__a, double __b, double __c);\n" "__DEVICE__ double __nv_fma_rn(double __a, double __b, double __c);\n" "__DEVICE__ double __nv_fma_ru(double __a, double __b, double __c);\n" "__DEVICE__ double __nv_fma_rz(double __a, double __b, double __c);\n" "__DEVICE__ double __nv_fmax(double __a, double __b);\n" "__DEVICE__ float __nv_fmaxf(float __a, float __b);\n" "__DEVICE__ double __nv_fmin(double __a, double __b);\n" "__DEVICE__ float __nv_fminf(float __a, float __b);\n" "__DEVICE__ double __nv_fmod(double __a, double __b);\n" "__DEVICE__ float __nv_fmodf(float __a, float __b);\n" "__DEVICE__ float __nv_fmul_rd(float __a, float __b);\n" "__DEVICE__ float __nv_fmul_rn(float __a, float __b);\n" "__DEVICE__ float __nv_fmul_ru(float __a, float __b);\n" "__DEVICE__ float __nv_fmul_rz(float __a, float __b);\n" "__DEVICE__ float __nv_frcp_rd(float __a);\n" "__DEVICE__ float __nv_frcp_rn(float __a);\n" "__DEVICE__ float __nv_frcp_ru(float __a);\n" "__DEVICE__ float __nv_frcp_rz(float __a);\n" "__DEVICE__ double __nv_frexp(double __a, int *__b);\n" "__DEVICE__ float __nv_frexpf(float __a, int *__b);\n" "__DEVICE__ float __nv_frsqrt_rn(float __a);\n" "__DEVICE__ float __nv_fsqrt_rd(float __a);\n" "__DEVICE__ float __nv_fsqrt_rn(float __a);\n" "__DEVICE__ float __nv_fsqrt_ru(float __a);\n" "__DEVICE__ float __nv_fsqrt_rz(float __a);\n" "__DEVICE__ float __nv_fsub_rd(float __a, float __b);\n" "__DEVICE__ float __nv_fsub_rn(float __a, float __b);\n" "__DEVICE__ float __nv_fsub_ru(float __a, float __b);\n" "__DEVICE__ float __nv_fsub_rz(float __a, float __b);\n" "__DEVICE__ int __nv_hadd(int __a, int __b);\n" "__DEVICE__ float __nv_half2float(unsigned short __h);\n" "__DEVICE__ double __nv_hiloint2double(int __a, int __b);\n" "__DEVICE__ double __nv_hypot(double __a, double __b);\n" "__DEVICE__ float __nv_hypotf(float __a, float __b);\n" "__DEVICE__ int __nv_ilogb(double __a);\n" "__DEVICE__ int __nv_ilogbf(float __a);\n" "__DEVICE__ double __nv_int2double_rn(int __a);\n" "__DEVICE__ 
float __nv_int2float_rd(int __a);\n" "__DEVICE__ float __nv_int2float_rn(int __a);\n" "__DEVICE__ float __nv_int2float_ru(int __a);\n" "__DEVICE__ float __nv_int2float_rz(int __a);\n" "__DEVICE__ float __nv_int_as_float(int __a);\n" "__DEVICE__ int __nv_isfinited(double __a);\n" "__DEVICE__ int __nv_isinfd(double __a);\n" "__DEVICE__ int __nv_isinff(float __a);\n" "__DEVICE__ int __nv_isnand(double __a);\n" "__DEVICE__ int __nv_isnanf(float __a);\n" "__DEVICE__ double __nv_j0(double __a);\n" "__DEVICE__ float __nv_j0f(float __a);\n" "__DEVICE__ double __nv_j1(double __a);\n" "__DEVICE__ float __nv_j1f(float __a);\n" "__DEVICE__ float __nv_jnf(int __a, float __b);\n" "__DEVICE__ double __nv_jn(int __a, double __b);\n" "__DEVICE__ double __nv_ldexp(double __a, int __b);\n" "__DEVICE__ float __nv_ldexpf(float __a, int __b);\n" "__DEVICE__ double __nv_lgamma(double __a);\n" "__DEVICE__ float __nv_lgammaf(float __a);\n" "__DEVICE__ double __nv_ll2double_rd(long long __a);\n" "__DEVICE__ double __nv_ll2double_rn(long long __a);\n" "__DEVICE__ double __nv_ll2double_ru(long long __a);\n" "__DEVICE__ double __nv_ll2double_rz(long long __a);\n" "__DEVICE__ float __nv_ll2float_rd(long long __a);\n" "__DEVICE__ float __nv_ll2float_rn(long long __a);\n" "__DEVICE__ float __nv_ll2float_ru(long long __a);\n" "__DEVICE__ float __nv_ll2float_rz(long long __a);\n" "__DEVICE__ long long __nv_llabs(long long __a);\n" "__DEVICE__ long long __nv_llmax(long long __a, long long __b);\n" "__DEVICE__ long long __nv_llmin(long long __a, long long __b);\n" "__DEVICE__ long long __nv_llrint(double __a);\n" "__DEVICE__ long long __nv_llrintf(float __a);\n" "__DEVICE__ long long __nv_llround(double __a);\n" "__DEVICE__ long long __nv_llroundf(float __a);\n" "__DEVICE__ double __nv_log10(double __a);\n" "__DEVICE__ float __nv_log10f(float __a);\n" "__DEVICE__ double __nv_log1p(double __a);\n" "__DEVICE__ float __nv_log1pf(float __a);\n" "__DEVICE__ double __nv_log2(double __a);\n" "__DEVICE__ 
float __nv_log2f(float __a);\n" "__DEVICE__ double __nv_logb(double __a);\n" "__DEVICE__ float __nv_logbf(float __a);\n" "__DEVICE__ double __nv_log(double __a);\n" "__DEVICE__ float __nv_logf(float __a);\n" "__DEVICE__ double __nv_longlong_as_double(long long __a);\n" "__DEVICE__ int __nv_max(int __a, int __b);\n" "__DEVICE__ int __nv_min(int __a, int __b);\n" "__DEVICE__ double __nv_modf(double __a, double *__b);\n" "__DEVICE__ float __nv_modff(float __a, float *__b);\n" "__DEVICE__ int __nv_mul24(int __a, int __b);\n" "__DEVICE__ long long __nv_mul64hi(long long __a, long long __b);\n" "__DEVICE__ int __nv_mulhi(int __a, int __b);\n" "__DEVICE__ double __nv_nan(const signed char *__a);\n" "__DEVICE__ float __nv_nanf(const signed char *__a);\n" "__DEVICE__ double __nv_nearbyint(double __a);\n" "__DEVICE__ float __nv_nearbyintf(float __a);\n" "__DEVICE__ double __nv_nextafter(double __a, double __b);\n" "__DEVICE__ float __nv_nextafterf(float __a, float __b);\n" "__DEVICE__ double __nv_norm3d(double __a, double __b, double __c);\n" "__DEVICE__ float __nv_norm3df(float __a, float __b, float __c);\n" "__DEVICE__ double __nv_norm4d(double __a, double __b, double __c, double __d);\n" "__DEVICE__ float __nv_norm4df(float __a, float __b, float __c, float __d);\n" "__DEVICE__ double __nv_normcdf(double __a);\n" "__DEVICE__ float __nv_normcdff(float __a);\n" "__DEVICE__ double __nv_normcdfinv(double __a);\n" "__DEVICE__ float __nv_normcdfinvf(float __a);\n" "__DEVICE__ float __nv_normf(int __a, const float *__b);\n" "__DEVICE__ double __nv_norm(int __a, const double *__b);\n" "__DEVICE__ int __nv_popc(int __a);\n" "__DEVICE__ int __nv_popcll(long long __a);\n" "__DEVICE__ double __nv_pow(double __a, double __b);\n" "__DEVICE__ float __nv_powf(float __a, float __b);\n" "__DEVICE__ double __nv_powi(double __a, int __b);\n" "__DEVICE__ float __nv_powif(float __a, int __b);\n" "__DEVICE__ double __nv_rcbrt(double __a);\n" "__DEVICE__ float __nv_rcbrtf(float __a);\n" 
"__DEVICE__ double __nv_rcp64h(double __a);\n" "__DEVICE__ double __nv_remainder(double __a, double __b);\n" "__DEVICE__ float __nv_remainderf(float __a, float __b);\n" "__DEVICE__ double __nv_remquo(double __a, double __b, int *__c);\n" "__DEVICE__ float __nv_remquof(float __a, float __b, int *__c);\n" "__DEVICE__ int __nv_rhadd(int __a, int __b);\n" "__DEVICE__ double __nv_rhypot(double __a, double __b);\n" "__DEVICE__ float __nv_rhypotf(float __a, float __b);\n" "__DEVICE__ double __nv_rint(double __a);\n" "__DEVICE__ float __nv_rintf(float __a);\n" "__DEVICE__ double __nv_rnorm3d(double __a, double __b, double __c);\n" "__DEVICE__ float __nv_rnorm3df(float __a, float __b, float __c);\n" "__DEVICE__ double __nv_rnorm4d(double __a, double __b, double __c, double __d);\n" "__DEVICE__ float __nv_rnorm4df(float __a, float __b, float __c, float __d);\n" "__DEVICE__ float __nv_rnormf(int __a, const float *__b);\n" "__DEVICE__ double __nv_rnorm(int __a, const double *__b);\n" "__DEVICE__ double __nv_round(double __a);\n" "__DEVICE__ float __nv_roundf(float __a);\n" "__DEVICE__ double __nv_rsqrt(double __a);\n" "__DEVICE__ float __nv_rsqrtf(float __a);\n" "__DEVICE__ int __nv_sad(int __a, int __b, int __c);\n" "__DEVICE__ float __nv_saturatef(float __a);\n" "__DEVICE__ double __nv_scalbn(double __a, int __b);\n" "__DEVICE__ float __nv_scalbnf(float __a, int __b);\n" "__DEVICE__ int __nv_signbitd(double __a);\n" "__DEVICE__ int __nv_signbitf(float __a);\n" "__DEVICE__ void __nv_sincos(double __a, double *__b, double *__c);\n" "__DEVICE__ void __nv_sincosf(float __a, float *__b, float *__c);\n" "__DEVICE__ void __nv_sincospi(double __a, double *__b, double *__c);\n" "__DEVICE__ void __nv_sincospif(float __a, float *__b, float *__c);\n" "__DEVICE__ double __nv_sin(double __a);\n" "__DEVICE__ float __nv_sinf(float __a);\n" "__DEVICE__ double __nv_sinh(double __a);\n" "__DEVICE__ float __nv_sinhf(float __a);\n" "__DEVICE__ double __nv_sinpi(double __a);\n" "__DEVICE__ float 
__nv_sinpif(float __a);\n" "__DEVICE__ double __nv_sqrt(double __a);\n" "__DEVICE__ float __nv_sqrtf(float __a);\n" "__DEVICE__ double __nv_tan(double __a);\n" "__DEVICE__ float __nv_tanf(float __a);\n" "__DEVICE__ double __nv_tanh(double __a);\n" "__DEVICE__ float __nv_tanhf(float __a);\n" "__DEVICE__ double __nv_tgamma(double __a);\n" "__DEVICE__ float __nv_tgammaf(float __a);\n" "__DEVICE__ double __nv_trunc(double __a);\n" "__DEVICE__ float __nv_truncf(float __a);\n" "__DEVICE__ int __nv_uhadd(unsigned int __a, unsigned int __b);\n" "__DEVICE__ double __nv_uint2double_rn(unsigned int __i);\n" "__DEVICE__ float __nv_uint2float_rd(unsigned int __a);\n" "__DEVICE__ float __nv_uint2float_rn(unsigned int __a);\n" "__DEVICE__ float __nv_uint2float_ru(unsigned int __a);\n" "__DEVICE__ float __nv_uint2float_rz(unsigned int __a);\n" "__DEVICE__ float __nv_uint_as_float(unsigned int __a);\n" "__DEVICE__ double __nv_ull2double_rd(unsigned long long __a);\n" "__DEVICE__ double __nv_ull2double_rn(unsigned long long __a);\n" "__DEVICE__ double __nv_ull2double_ru(unsigned long long __a);\n" "__DEVICE__ double __nv_ull2double_rz(unsigned long long __a);\n" "__DEVICE__ float __nv_ull2float_rd(unsigned long long __a);\n" "__DEVICE__ float __nv_ull2float_rn(unsigned long long __a);\n" "__DEVICE__ float __nv_ull2float_ru(unsigned long long __a);\n" "__DEVICE__ float __nv_ull2float_rz(unsigned long long __a);\n" "__DEVICE__ unsigned long long __nv_ullmax(unsigned long long __a,\n" " unsigned long long __b);\n" "__DEVICE__ unsigned long long __nv_ullmin(unsigned long long __a,\n" " unsigned long long __b);\n" "__DEVICE__ unsigned int __nv_umax(unsigned int __a, unsigned int __b);\n" "__DEVICE__ unsigned int __nv_umin(unsigned int __a, unsigned int __b);\n" "__DEVICE__ unsigned int __nv_umul24(unsigned int __a, unsigned int __b);\n" "__DEVICE__ unsigned long long __nv_umul64hi(unsigned long long __a,\n" " unsigned long long __b);\n" "__DEVICE__ unsigned int __nv_umulhi(unsigned int 
__a, unsigned int __b);\n" "__DEVICE__ unsigned int __nv_urhadd(unsigned int __a, unsigned int __b);\n" "__DEVICE__ unsigned int __nv_usad(unsigned int __a, unsigned int __b,\n" " unsigned int __c);\n" "#if CUDA_VERSION >= 9000 && CUDA_VERSION < 9020\n" "__DEVICE__ int __nv_vabs2(int __a);\n" "__DEVICE__ int __nv_vabs4(int __a);\n" "__DEVICE__ int __nv_vabsdiffs2(int __a, int __b);\n" "__DEVICE__ int __nv_vabsdiffs4(int __a, int __b);\n" "__DEVICE__ int __nv_vabsdiffu2(int __a, int __b);\n" "__DEVICE__ int __nv_vabsdiffu4(int __a, int __b);\n" "__DEVICE__ int __nv_vabsss2(int __a);\n" "__DEVICE__ int __nv_vabsss4(int __a);\n" "__DEVICE__ int __nv_vadd2(int __a, int __b);\n" "__DEVICE__ int __nv_vadd4(int __a, int __b);\n" "__DEVICE__ int __nv_vaddss2(int __a, int __b);\n" "__DEVICE__ int __nv_vaddss4(int __a, int __b);\n" "__DEVICE__ int __nv_vaddus2(int __a, int __b);\n" "__DEVICE__ int __nv_vaddus4(int __a, int __b);\n" "__DEVICE__ int __nv_vavgs2(int __a, int __b);\n" "__DEVICE__ int __nv_vavgs4(int __a, int __b);\n" "__DEVICE__ int __nv_vavgu2(int __a, int __b);\n" "__DEVICE__ int __nv_vavgu4(int __a, int __b);\n" "__DEVICE__ int __nv_vcmpeq2(int __a, int __b);\n" "__DEVICE__ int __nv_vcmpeq4(int __a, int __b);\n" "__DEVICE__ int __nv_vcmpges2(int __a, int __b);\n" "__DEVICE__ int __nv_vcmpges4(int __a, int __b);\n" "__DEVICE__ int __nv_vcmpgeu2(int __a, int __b);\n" "__DEVICE__ int __nv_vcmpgeu4(int __a, int __b);\n" "__DEVICE__ int __nv_vcmpgts2(int __a, int __b);\n" "__DEVICE__ int __nv_vcmpgts4(int __a, int __b);\n" "__DEVICE__ int __nv_vcmpgtu2(int __a, int __b);\n" "__DEVICE__ int __nv_vcmpgtu4(int __a, int __b);\n" "__DEVICE__ int __nv_vcmples2(int __a, int __b);\n" "__DEVICE__ int __nv_vcmples4(int __a, int __b);\n" "__DEVICE__ int __nv_vcmpleu2(int __a, int __b);\n" "__DEVICE__ int __nv_vcmpleu4(int __a, int __b);\n" "__DEVICE__ int __nv_vcmplts2(int __a, int __b);\n" "__DEVICE__ int __nv_vcmplts4(int __a, int __b);\n" "__DEVICE__ int __nv_vcmpltu2(int 
__a, int __b);\n" "__DEVICE__ int __nv_vcmpltu4(int __a, int __b);\n" "__DEVICE__ int __nv_vcmpne2(int __a, int __b);\n" "__DEVICE__ int __nv_vcmpne4(int __a, int __b);\n" "__DEVICE__ int __nv_vhaddu2(int __a, int __b);\n" "__DEVICE__ int __nv_vhaddu4(int __a, int __b);\n" "__DEVICE__ int __nv_vmaxs2(int __a, int __b);\n" "__DEVICE__ int __nv_vmaxs4(int __a, int __b);\n" "__DEVICE__ int __nv_vmaxu2(int __a, int __b);\n" "__DEVICE__ int __nv_vmaxu4(int __a, int __b);\n" "__DEVICE__ int __nv_vmins2(int __a, int __b);\n" "__DEVICE__ int __nv_vmins4(int __a, int __b);\n" "__DEVICE__ int __nv_vminu2(int __a, int __b);\n" "__DEVICE__ int __nv_vminu4(int __a, int __b);\n" "__DEVICE__ int __nv_vneg2(int __a);\n" "__DEVICE__ int __nv_vneg4(int __a);\n" "__DEVICE__ int __nv_vnegss2(int __a);\n" "__DEVICE__ int __nv_vnegss4(int __a);\n" "__DEVICE__ int __nv_vsads2(int __a, int __b);\n" "__DEVICE__ int __nv_vsads4(int __a, int __b);\n" "__DEVICE__ int __nv_vsadu2(int __a, int __b);\n" "__DEVICE__ int __nv_vsadu4(int __a, int __b);\n" "__DEVICE__ int __nv_vseteq2(int __a, int __b);\n" "__DEVICE__ int __nv_vseteq4(int __a, int __b);\n" "__DEVICE__ int __nv_vsetges2(int __a, int __b);\n" "__DEVICE__ int __nv_vsetges4(int __a, int __b);\n" "__DEVICE__ int __nv_vsetgeu2(int __a, int __b);\n" "__DEVICE__ int __nv_vsetgeu4(int __a, int __b);\n" "__DEVICE__ int __nv_vsetgts2(int __a, int __b);\n" "__DEVICE__ int __nv_vsetgts4(int __a, int __b);\n" "__DEVICE__ int __nv_vsetgtu2(int __a, int __b);\n" "__DEVICE__ int __nv_vsetgtu4(int __a, int __b);\n" "__DEVICE__ int __nv_vsetles2(int __a, int __b);\n" "__DEVICE__ int __nv_vsetles4(int __a, int __b);\n" "__DEVICE__ int __nv_vsetleu2(int __a, int __b);\n" "__DEVICE__ int __nv_vsetleu4(int __a, int __b);\n" "__DEVICE__ int __nv_vsetlts2(int __a, int __b);\n" "__DEVICE__ int __nv_vsetlts4(int __a, int __b);\n" "__DEVICE__ int __nv_vsetltu2(int __a, int __b);\n" "__DEVICE__ int __nv_vsetltu4(int __a, int __b);\n" "__DEVICE__ int 
__nv_vsetne2(int __a, int __b);\n" "__DEVICE__ int __nv_vsetne4(int __a, int __b);\n" "__DEVICE__ int __nv_vsub2(int __a, int __b);\n" "__DEVICE__ int __nv_vsub4(int __a, int __b);\n" "__DEVICE__ int __nv_vsubss2(int __a, int __b);\n" "__DEVICE__ int __nv_vsubss4(int __a, int __b);\n" "__DEVICE__ int __nv_vsubus2(int __a, int __b);\n" "__DEVICE__ int __nv_vsubus4(int __a, int __b);\n" "#endif // CUDA_VERSION\n" "__DEVICE__ double __nv_y0(double __a);\n" "__DEVICE__ float __nv_y0f(float __a);\n" "__DEVICE__ double __nv_y1(double __a);\n" "__DEVICE__ float __nv_y1f(float __a);\n" "__DEVICE__ float __nv_ynf(int __a, float __b);\n" "__DEVICE__ double __nv_yn(int __a, double __b);\n" "\n" "#if defined(__OPENMP_NVPTX__)\n" "#pragma omp end assumes ext_spmd_amenable no_openmp\n" "#endif\n" "\n" "#if defined(__cplusplus)\n" "} // extern \"C\"\n" "#endif\n" "#endif // __CLANG_CUDA_LIBDEVICE_DECLARES_H__\n" "" } , { "/builtins/__clang_cuda_math.h" , "/*===---- __clang_cuda_math.h - Device-side CUDA math support --------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "#ifndef __CLANG_CUDA_MATH_H__\n" "#define __CLANG_CUDA_MATH_H__\n" "#ifndef __CUDA__\n" "#error \"This file is for CUDA compilation only.\"\n" "#endif\n" "\n" "#ifndef __OPENMP_NVPTX__\n" "#if CUDA_VERSION < 9000\n" "#error This file is intended to be used with CUDA-9+ only.\n" "#endif\n" "#endif\n" "\n" "// __DEVICE__ is a helper macro with common set of attributes for the wrappers\n" "// we implement in this file. 
We need static in order to avoid emitting unused\n" "// functions and __forceinline__ helps inlining these wrappers at -O1.\n" "#pragma push_macro(\"__DEVICE__\")\n" "#ifdef __OPENMP_NVPTX__\n" "#if defined(__cplusplus)\n" "#define __DEVICE__ static constexpr __attribute__((always_inline, nothrow))\n" "#else\n" "#define __DEVICE__ static __attribute__((always_inline, nothrow))\n" "#endif\n" "#else\n" "#define __DEVICE__ static __device__ __forceinline__\n" "#endif\n" "\n" "// Specialized version of __DEVICE__ for functions with void return type. Needed\n" "// because the OpenMP overlay requires constexpr functions here but prior to\n" "// c++14 void return functions could not be constexpr.\n" "#pragma push_macro(\"__DEVICE_VOID__\")\n" "#if defined(__OPENMP_NVPTX__) && defined(__cplusplus) && __cplusplus < 201402L\n" "#define __DEVICE_VOID__ static __attribute__((always_inline, nothrow))\n" "#else\n" "#define __DEVICE_VOID__ __DEVICE__\n" "#endif\n" "\n" "// libdevice provides fast low precision and slow full-recision implementations\n" "// for some functions. 
Which one gets selected depends on\n" "// __CLANG_CUDA_APPROX_TRANSCENDENTALS__ which gets defined by clang if\n" "// -ffast-math or -fcuda-approx-transcendentals are in effect.\n" "#pragma push_macro(\"__FAST_OR_SLOW\")\n" "#if defined(__CLANG_CUDA_APPROX_TRANSCENDENTALS__)\n" "#define __FAST_OR_SLOW(fast, slow) fast\n" "#else\n" "#define __FAST_OR_SLOW(fast, slow) slow\n" "#endif\n" "\n" "__DEVICE__ int abs(int __a) { return __nv_abs(__a); }\n" "__DEVICE__ double fabs(double __a) { return __nv_fabs(__a); }\n" "__DEVICE__ double acos(double __a) { return __nv_acos(__a); }\n" "__DEVICE__ float acosf(float __a) { return __nv_acosf(__a); }\n" "__DEVICE__ double acosh(double __a) { return __nv_acosh(__a); }\n" "__DEVICE__ float acoshf(float __a) { return __nv_acoshf(__a); }\n" "__DEVICE__ double asin(double __a) { return __nv_asin(__a); }\n" "__DEVICE__ float asinf(float __a) { return __nv_asinf(__a); }\n" "__DEVICE__ double asinh(double __a) { return __nv_asinh(__a); }\n" "__DEVICE__ float asinhf(float __a) { return __nv_asinhf(__a); }\n" "__DEVICE__ double atan(double __a) { return __nv_atan(__a); }\n" "__DEVICE__ double atan2(double __a, double __b) { return __nv_atan2(__a, __b); }\n" "__DEVICE__ float atan2f(float __a, float __b) { return __nv_atan2f(__a, __b); }\n" "__DEVICE__ float atanf(float __a) { return __nv_atanf(__a); }\n" "__DEVICE__ double atanh(double __a) { return __nv_atanh(__a); }\n" "__DEVICE__ float atanhf(float __a) { return __nv_atanhf(__a); }\n" "__DEVICE__ double cbrt(double __a) { return __nv_cbrt(__a); }\n" "__DEVICE__ float cbrtf(float __a) { return __nv_cbrtf(__a); }\n" "__DEVICE__ double ceil(double __a) { return __nv_ceil(__a); }\n" "__DEVICE__ float ceilf(float __a) { return __nv_ceilf(__a); }\n" "__DEVICE__ double copysign(double __a, double __b) {\n" " return __nv_copysign(__a, __b);\n" "}\n" "__DEVICE__ float copysignf(float __a, float __b) {\n" " return __nv_copysignf(__a, __b);\n" "}\n" "__DEVICE__ double cos(double __a) { return 
__nv_cos(__a); }\n" "__DEVICE__ float cosf(float __a) {\n" " return __FAST_OR_SLOW(__nv_fast_cosf, __nv_cosf)(__a);\n" "}\n" "__DEVICE__ double cosh(double __a) { return __nv_cosh(__a); }\n" "__DEVICE__ float coshf(float __a) { return __nv_coshf(__a); }\n" "__DEVICE__ double cospi(double __a) { return __nv_cospi(__a); }\n" "__DEVICE__ float cospif(float __a) { return __nv_cospif(__a); }\n" "__DEVICE__ double cyl_bessel_i0(double __a) { return __nv_cyl_bessel_i0(__a); }\n" "__DEVICE__ float cyl_bessel_i0f(float __a) { return __nv_cyl_bessel_i0f(__a); }\n" "__DEVICE__ double cyl_bessel_i1(double __a) { return __nv_cyl_bessel_i1(__a); }\n" "__DEVICE__ float cyl_bessel_i1f(float __a) { return __nv_cyl_bessel_i1f(__a); }\n" "__DEVICE__ double erf(double __a) { return __nv_erf(__a); }\n" "__DEVICE__ double erfc(double __a) { return __nv_erfc(__a); }\n" "__DEVICE__ float erfcf(float __a) { return __nv_erfcf(__a); }\n" "__DEVICE__ double erfcinv(double __a) { return __nv_erfcinv(__a); }\n" "__DEVICE__ float erfcinvf(float __a) { return __nv_erfcinvf(__a); }\n" "__DEVICE__ double erfcx(double __a) { return __nv_erfcx(__a); }\n" "__DEVICE__ float erfcxf(float __a) { return __nv_erfcxf(__a); }\n" "__DEVICE__ float erff(float __a) { return __nv_erff(__a); }\n" "__DEVICE__ double erfinv(double __a) { return __nv_erfinv(__a); }\n" "__DEVICE__ float erfinvf(float __a) { return __nv_erfinvf(__a); }\n" "__DEVICE__ double exp(double __a) { return __nv_exp(__a); }\n" "__DEVICE__ double exp10(double __a) { return __nv_exp10(__a); }\n" "__DEVICE__ float exp10f(float __a) { return __nv_exp10f(__a); }\n" "__DEVICE__ double exp2(double __a) { return __nv_exp2(__a); }\n" "__DEVICE__ float exp2f(float __a) { return __nv_exp2f(__a); }\n" "__DEVICE__ float expf(float __a) { return __nv_expf(__a); }\n" "__DEVICE__ double expm1(double __a) { return __nv_expm1(__a); }\n" "__DEVICE__ float expm1f(float __a) { return __nv_expm1f(__a); }\n" "__DEVICE__ float fabsf(float __a) { return 
__nv_fabsf(__a); }\n" "__DEVICE__ double fdim(double __a, double __b) { return __nv_fdim(__a, __b); }\n" "__DEVICE__ float fdimf(float __a, float __b) { return __nv_fdimf(__a, __b); }\n" "__DEVICE__ double fdivide(double __a, double __b) { return __a / __b; }\n" "__DEVICE__ float fdividef(float __a, float __b) {\n" "#if __FAST_MATH__ && !__CUDA_PREC_DIV\n" " return __nv_fast_fdividef(__a, __b);\n" "#else\n" " return __a / __b;\n" "#endif\n" "}\n" "__DEVICE__ double floor(double __f) { return __nv_floor(__f); }\n" "__DEVICE__ float floorf(float __f) { return __nv_floorf(__f); }\n" "__DEVICE__ double fma(double __a, double __b, double __c) {\n" " return __nv_fma(__a, __b, __c);\n" "}\n" "__DEVICE__ float fmaf(float __a, float __b, float __c) {\n" " return __nv_fmaf(__a, __b, __c);\n" "}\n" "__DEVICE__ double fmax(double __a, double __b) { return __nv_fmax(__a, __b); }\n" "__DEVICE__ float fmaxf(float __a, float __b) { return __nv_fmaxf(__a, __b); }\n" "__DEVICE__ double fmin(double __a, double __b) { return __nv_fmin(__a, __b); }\n" "__DEVICE__ float fminf(float __a, float __b) { return __nv_fminf(__a, __b); }\n" "__DEVICE__ double fmod(double __a, double __b) { return __nv_fmod(__a, __b); }\n" "__DEVICE__ float fmodf(float __a, float __b) { return __nv_fmodf(__a, __b); }\n" "__DEVICE__ double frexp(double __a, int *__b) { return __nv_frexp(__a, __b); }\n" "__DEVICE__ float frexpf(float __a, int *__b) { return __nv_frexpf(__a, __b); }\n" "__DEVICE__ double hypot(double __a, double __b) { return __nv_hypot(__a, __b); }\n" "__DEVICE__ float hypotf(float __a, float __b) { return __nv_hypotf(__a, __b); }\n" "__DEVICE__ int ilogb(double __a) { return __nv_ilogb(__a); }\n" "__DEVICE__ int ilogbf(float __a) { return __nv_ilogbf(__a); }\n" "__DEVICE__ double j0(double __a) { return __nv_j0(__a); }\n" "__DEVICE__ float j0f(float __a) { return __nv_j0f(__a); }\n" "__DEVICE__ double j1(double __a) { return __nv_j1(__a); }\n" "__DEVICE__ float j1f(float __a) { return 
__nv_j1f(__a); }\n" "__DEVICE__ double jn(int __n, double __a) { return __nv_jn(__n, __a); }\n" "__DEVICE__ float jnf(int __n, float __a) { return __nv_jnf(__n, __a); }\n" "#if defined(__LP64__) || defined(_WIN64)\n" "__DEVICE__ long labs(long __a) { return __nv_llabs(__a); };\n" "#else\n" "__DEVICE__ long labs(long __a) { return __nv_abs(__a); };\n" "#endif\n" "__DEVICE__ double ldexp(double __a, int __b) { return __nv_ldexp(__a, __b); }\n" "__DEVICE__ float ldexpf(float __a, int __b) { return __nv_ldexpf(__a, __b); }\n" "__DEVICE__ double lgamma(double __a) { return __nv_lgamma(__a); }\n" "__DEVICE__ float lgammaf(float __a) { return __nv_lgammaf(__a); }\n" "__DEVICE__ long long llabs(long long __a) { return __nv_llabs(__a); }\n" "__DEVICE__ long long llmax(long long __a, long long __b) {\n" " return __nv_llmax(__a, __b);\n" "}\n" "__DEVICE__ long long llmin(long long __a, long long __b) {\n" " return __nv_llmin(__a, __b);\n" "}\n" "__DEVICE__ long long llrint(double __a) { return __nv_llrint(__a); }\n" "__DEVICE__ long long llrintf(float __a) { return __nv_llrintf(__a); }\n" "__DEVICE__ long long llround(double __a) { return __nv_llround(__a); }\n" "__DEVICE__ long long llroundf(float __a) { return __nv_llroundf(__a); }\n" "__DEVICE__ double round(double __a) { return __nv_round(__a); }\n" "__DEVICE__ float roundf(float __a) { return __nv_roundf(__a); }\n" "__DEVICE__ double log(double __a) { return __nv_log(__a); }\n" "__DEVICE__ double log10(double __a) { return __nv_log10(__a); }\n" "__DEVICE__ float log10f(float __a) { return __nv_log10f(__a); }\n" "__DEVICE__ double log1p(double __a) { return __nv_log1p(__a); }\n" "__DEVICE__ float log1pf(float __a) { return __nv_log1pf(__a); }\n" "__DEVICE__ double log2(double __a) { return __nv_log2(__a); }\n" "__DEVICE__ float log2f(float __a) {\n" " return __FAST_OR_SLOW(__nv_fast_log2f, __nv_log2f)(__a);\n" "}\n" "__DEVICE__ double logb(double __a) { return __nv_logb(__a); }\n" "__DEVICE__ float logbf(float __a) { 
return __nv_logbf(__a); }\n" "__DEVICE__ float logf(float __a) {\n" " return __FAST_OR_SLOW(__nv_fast_logf, __nv_logf)(__a);\n" "}\n" "#if defined(__LP64__) || defined(_WIN64)\n" "__DEVICE__ long lrint(double __a) { return llrint(__a); }\n" "__DEVICE__ long lrintf(float __a) { return __float2ll_rn(__a); }\n" "__DEVICE__ long lround(double __a) { return llround(__a); }\n" "__DEVICE__ long lroundf(float __a) { return llroundf(__a); }\n" "#else\n" "__DEVICE__ long lrint(double __a) { return (long)rint(__a); }\n" "__DEVICE__ long lrintf(float __a) { return __float2int_rn(__a); }\n" "__DEVICE__ long lround(double __a) { return round(__a); }\n" "__DEVICE__ long lroundf(float __a) { return roundf(__a); }\n" "#endif\n" "__DEVICE__ int max(int __a, int __b) { return __nv_max(__a, __b); }\n" "__DEVICE__ int min(int __a, int __b) { return __nv_min(__a, __b); }\n" "__DEVICE__ double modf(double __a, double *__b) { return __nv_modf(__a, __b); }\n" "__DEVICE__ float modff(float __a, float *__b) { return __nv_modff(__a, __b); }\n" "__DEVICE__ double nearbyint(double __a) { return __builtin_nearbyint(__a); }\n" "__DEVICE__ float nearbyintf(float __a) { return __builtin_nearbyintf(__a); }\n" "__DEVICE__ double nextafter(double __a, double __b) {\n" " return __nv_nextafter(__a, __b);\n" "}\n" "__DEVICE__ float nextafterf(float __a, float __b) {\n" " return __nv_nextafterf(__a, __b);\n" "}\n" "__DEVICE__ double norm(int __dim, const double *__t) {\n" " return __nv_norm(__dim, __t);\n" "}\n" "__DEVICE__ double norm3d(double __a, double __b, double __c) {\n" " return __nv_norm3d(__a, __b, __c);\n" "}\n" "__DEVICE__ float norm3df(float __a, float __b, float __c) {\n" " return __nv_norm3df(__a, __b, __c);\n" "}\n" "__DEVICE__ double norm4d(double __a, double __b, double __c, double __d) {\n" " return __nv_norm4d(__a, __b, __c, __d);\n" "}\n" "__DEVICE__ float norm4df(float __a, float __b, float __c, float __d) {\n" " return __nv_norm4df(__a, __b, __c, __d);\n" "}\n" "__DEVICE__ double 
normcdf(double __a) { return __nv_normcdf(__a); }\n" "__DEVICE__ float normcdff(float __a) { return __nv_normcdff(__a); }\n" "__DEVICE__ double normcdfinv(double __a) { return __nv_normcdfinv(__a); }\n" "__DEVICE__ float normcdfinvf(float __a) { return __nv_normcdfinvf(__a); }\n" "__DEVICE__ float normf(int __dim, const float *__t) {\n" " return __nv_normf(__dim, __t);\n" "}\n" "__DEVICE__ double pow(double __a, double __b) { return __nv_pow(__a, __b); }\n" "__DEVICE__ float powf(float __a, float __b) { return __nv_powf(__a, __b); }\n" "__DEVICE__ double powi(double __a, int __b) { return __nv_powi(__a, __b); }\n" "__DEVICE__ float powif(float __a, int __b) { return __nv_powif(__a, __b); }\n" "__DEVICE__ double rcbrt(double __a) { return __nv_rcbrt(__a); }\n" "__DEVICE__ float rcbrtf(float __a) { return __nv_rcbrtf(__a); }\n" "__DEVICE__ double remainder(double __a, double __b) {\n" " return __nv_remainder(__a, __b);\n" "}\n" "__DEVICE__ float remainderf(float __a, float __b) {\n" " return __nv_remainderf(__a, __b);\n" "}\n" "__DEVICE__ double remquo(double __a, double __b, int *__c) {\n" " return __nv_remquo(__a, __b, __c);\n" "}\n" "__DEVICE__ float remquof(float __a, float __b, int *__c) {\n" " return __nv_remquof(__a, __b, __c);\n" "}\n" "__DEVICE__ double rhypot(double __a, double __b) {\n" " return __nv_rhypot(__a, __b);\n" "}\n" "__DEVICE__ float rhypotf(float __a, float __b) {\n" " return __nv_rhypotf(__a, __b);\n" "}\n" "// __nv_rint* in libdevice is buggy and produces incorrect results.\n" "__DEVICE__ double rint(double __a) { return __builtin_rint(__a); }\n" "__DEVICE__ float rintf(float __a) { return __builtin_rintf(__a); }\n" "__DEVICE__ double rnorm(int __a, const double *__b) {\n" " return __nv_rnorm(__a, __b);\n" "}\n" "__DEVICE__ double rnorm3d(double __a, double __b, double __c) {\n" " return __nv_rnorm3d(__a, __b, __c);\n" "}\n" "__DEVICE__ float rnorm3df(float __a, float __b, float __c) {\n" " return __nv_rnorm3df(__a, __b, __c);\n" "}\n" 
"__DEVICE__ double rnorm4d(double __a, double __b, double __c, double __d) {\n" " return __nv_rnorm4d(__a, __b, __c, __d);\n" "}\n" "__DEVICE__ float rnorm4df(float __a, float __b, float __c, float __d) {\n" " return __nv_rnorm4df(__a, __b, __c, __d);\n" "}\n" "__DEVICE__ float rnormf(int __dim, const float *__t) {\n" " return __nv_rnormf(__dim, __t);\n" "}\n" "__DEVICE__ double rsqrt(double __a) { return __nv_rsqrt(__a); }\n" "__DEVICE__ float rsqrtf(float __a) { return __nv_rsqrtf(__a); }\n" "__DEVICE__ double scalbn(double __a, int __b) { return __nv_scalbn(__a, __b); }\n" "__DEVICE__ float scalbnf(float __a, int __b) { return __nv_scalbnf(__a, __b); }\n" "__DEVICE__ double scalbln(double __a, long __b) {\n" " if (__b > INT_MAX)\n" " return __a > 0 ? HUGE_VAL : -HUGE_VAL;\n" " if (__b < INT_MIN)\n" " return __a > 0 ? 0.0 : -0.0;\n" " return scalbn(__a, (int)__b);\n" "}\n" "__DEVICE__ float scalblnf(float __a, long __b) {\n" " if (__b > INT_MAX)\n" " return __a > 0 ? HUGE_VALF : -HUGE_VALF;\n" " if (__b < INT_MIN)\n" " return __a > 0 ? 
0.f : -0.f;\n" " return scalbnf(__a, (int)__b);\n" "}\n" "__DEVICE__ double sin(double __a) { return __nv_sin(__a); }\n" "__DEVICE_VOID__ void sincos(double __a, double *__s, double *__c) {\n" " return __nv_sincos(__a, __s, __c);\n" "}\n" "__DEVICE_VOID__ void sincosf(float __a, float *__s, float *__c) {\n" " return __FAST_OR_SLOW(__nv_fast_sincosf, __nv_sincosf)(__a, __s, __c);\n" "}\n" "__DEVICE_VOID__ void sincospi(double __a, double *__s, double *__c) {\n" " return __nv_sincospi(__a, __s, __c);\n" "}\n" "__DEVICE_VOID__ void sincospif(float __a, float *__s, float *__c) {\n" " return __nv_sincospif(__a, __s, __c);\n" "}\n" "__DEVICE__ float sinf(float __a) {\n" " return __FAST_OR_SLOW(__nv_fast_sinf, __nv_sinf)(__a);\n" "}\n" "__DEVICE__ double sinh(double __a) { return __nv_sinh(__a); }\n" "__DEVICE__ float sinhf(float __a) { return __nv_sinhf(__a); }\n" "__DEVICE__ double sinpi(double __a) { return __nv_sinpi(__a); }\n" "__DEVICE__ float sinpif(float __a) { return __nv_sinpif(__a); }\n" "__DEVICE__ double sqrt(double __a) { return __nv_sqrt(__a); }\n" "__DEVICE__ float sqrtf(float __a) { return __nv_sqrtf(__a); }\n" "__DEVICE__ double tan(double __a) { return __nv_tan(__a); }\n" "__DEVICE__ float tanf(float __a) { return __nv_tanf(__a); }\n" "__DEVICE__ double tanh(double __a) { return __nv_tanh(__a); }\n" "__DEVICE__ float tanhf(float __a) { return __nv_tanhf(__a); }\n" "__DEVICE__ double tgamma(double __a) { return __nv_tgamma(__a); }\n" "__DEVICE__ float tgammaf(float __a) { return __nv_tgammaf(__a); }\n" "__DEVICE__ double trunc(double __a) { return __nv_trunc(__a); }\n" "__DEVICE__ float truncf(float __a) { return __nv_truncf(__a); }\n" "__DEVICE__ unsigned long long ullmax(unsigned long long __a,\n" " unsigned long long __b) {\n" " return __nv_ullmax(__a, __b);\n" "}\n" "__DEVICE__ unsigned long long ullmin(unsigned long long __a,\n" " unsigned long long __b) {\n" " return __nv_ullmin(__a, __b);\n" "}\n" "__DEVICE__ unsigned int umax(unsigned int __a, 
unsigned int __b) {\n" " return __nv_umax(__a, __b);\n" "}\n" "__DEVICE__ unsigned int umin(unsigned int __a, unsigned int __b) {\n" " return __nv_umin(__a, __b);\n" "}\n" "__DEVICE__ double y0(double __a) { return __nv_y0(__a); }\n" "__DEVICE__ float y0f(float __a) { return __nv_y0f(__a); }\n" "__DEVICE__ double y1(double __a) { return __nv_y1(__a); }\n" "__DEVICE__ float y1f(float __a) { return __nv_y1f(__a); }\n" "__DEVICE__ double yn(int __a, double __b) { return __nv_yn(__a, __b); }\n" "__DEVICE__ float ynf(int __a, float __b) { return __nv_ynf(__a, __b); }\n" "\n" "#pragma pop_macro(\"__DEVICE__\")\n" "#pragma pop_macro(\"__DEVICE_VOID__\")\n" "#pragma pop_macro(\"__FAST_OR_SLOW\")\n" "\n" "#endif // __CLANG_CUDA_MATH_H__\n" "" } , { "/builtins/__clang_cuda_math_forward_declares.h" , "/*===- __clang_math_forward_declares.h - Prototypes of __device__ math fns --===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "#ifndef __CLANG__CUDA_MATH_FORWARD_DECLARES_H__\n" "#define __CLANG__CUDA_MATH_FORWARD_DECLARES_H__\n" "#if !defined(__CUDA__) && !__HIP__\n" "#error \"This file is for CUDA/HIP compilation only.\"\n" "#endif\n" "\n" "// This file forward-declares of some math functions we (or the CUDA headers)\n" "// will define later. We need to do this, and do it before cmath is included,\n" "// because the standard library may have constexpr math functions. In the\n" "// absence of a prior __device__ decl, those constexpr functions may become\n" "// implicitly host+device. 
host+device functions can't be overloaded, so that\n" "// would preclude the use of our own __device__ overloads for these functions.\n" "\n" "#pragma push_macro(\"__DEVICE__\")\n" "#define __DEVICE__ \\\n" " static __inline__ __attribute__((always_inline)) __attribute__((device))\n" "\n" "__DEVICE__ long abs(long);\n" "__DEVICE__ long long abs(long long);\n" "__DEVICE__ double abs(double);\n" "__DEVICE__ float abs(float);\n" "__DEVICE__ int abs(int);\n" "__DEVICE__ double acos(double);\n" "__DEVICE__ float acos(float);\n" "__DEVICE__ double acosh(double);\n" "__DEVICE__ float acosh(float);\n" "__DEVICE__ double asin(double);\n" "__DEVICE__ float asin(float);\n" "__DEVICE__ double asinh(double);\n" "__DEVICE__ float asinh(float);\n" "__DEVICE__ double atan2(double, double);\n" "__DEVICE__ float atan2(float, float);\n" "__DEVICE__ double atan(double);\n" "__DEVICE__ float atan(float);\n" "__DEVICE__ double atanh(double);\n" "__DEVICE__ float atanh(float);\n" "__DEVICE__ double cbrt(double);\n" "__DEVICE__ float cbrt(float);\n" "__DEVICE__ double ceil(double);\n" "__DEVICE__ float ceil(float);\n" "__DEVICE__ double copysign(double, double);\n" "__DEVICE__ float copysign(float, float);\n" "__DEVICE__ double cos(double);\n" "__DEVICE__ float cos(float);\n" "__DEVICE__ double cosh(double);\n" "__DEVICE__ float cosh(float);\n" "__DEVICE__ double erfc(double);\n" "__DEVICE__ float erfc(float);\n" "__DEVICE__ double erf(double);\n" "__DEVICE__ float erf(float);\n" "__DEVICE__ double exp2(double);\n" "__DEVICE__ float exp2(float);\n" "__DEVICE__ double exp(double);\n" "__DEVICE__ float exp(float);\n" "__DEVICE__ double expm1(double);\n" "__DEVICE__ float expm1(float);\n" "__DEVICE__ double fabs(double);\n" "__DEVICE__ float fabs(float);\n" "__DEVICE__ double fdim(double, double);\n" "__DEVICE__ float fdim(float, float);\n" "__DEVICE__ double floor(double);\n" "__DEVICE__ float floor(float);\n" "__DEVICE__ double fma(double, double, double);\n" "__DEVICE__ float fma(float, 
float, float);\n" "__DEVICE__ double fmax(double, double);\n" "__DEVICE__ float fmax(float, float);\n" "__DEVICE__ double fmin(double, double);\n" "__DEVICE__ float fmin(float, float);\n" "__DEVICE__ double fmod(double, double);\n" "__DEVICE__ float fmod(float, float);\n" "__DEVICE__ int fpclassify(double);\n" "__DEVICE__ int fpclassify(float);\n" "__DEVICE__ double frexp(double, int *);\n" "__DEVICE__ float frexp(float, int *);\n" "__DEVICE__ double hypot(double, double);\n" "__DEVICE__ float hypot(float, float);\n" "__DEVICE__ int ilogb(double);\n" "__DEVICE__ int ilogb(float);\n" "#ifdef _MSC_VER\n" "__DEVICE__ bool isfinite(long double);\n" "#endif\n" "__DEVICE__ bool isfinite(double);\n" "__DEVICE__ bool isfinite(float);\n" "__DEVICE__ bool isgreater(double, double);\n" "__DEVICE__ bool isgreaterequal(double, double);\n" "__DEVICE__ bool isgreaterequal(float, float);\n" "__DEVICE__ bool isgreater(float, float);\n" "#ifdef _MSC_VER\n" "__DEVICE__ bool isinf(long double);\n" "#endif\n" "__DEVICE__ bool isinf(double);\n" "__DEVICE__ bool isinf(float);\n" "__DEVICE__ bool isless(double, double);\n" "__DEVICE__ bool islessequal(double, double);\n" "__DEVICE__ bool islessequal(float, float);\n" "__DEVICE__ bool isless(float, float);\n" "__DEVICE__ bool islessgreater(double, double);\n" "__DEVICE__ bool islessgreater(float, float);\n" "#ifdef _MSC_VER\n" "__DEVICE__ bool isnan(long double);\n" "#endif\n" "__DEVICE__ bool isnan(double);\n" "__DEVICE__ bool isnan(float);\n" "__DEVICE__ bool isnormal(double);\n" "__DEVICE__ bool isnormal(float);\n" "__DEVICE__ bool isunordered(double, double);\n" "__DEVICE__ bool isunordered(float, float);\n" "__DEVICE__ long labs(long);\n" "__DEVICE__ double ldexp(double, int);\n" "__DEVICE__ float ldexp(float, int);\n" "__DEVICE__ double lgamma(double);\n" "__DEVICE__ float lgamma(float);\n" "__DEVICE__ long long llabs(long long);\n" "__DEVICE__ long long llrint(double);\n" "__DEVICE__ long long llrint(float);\n" "__DEVICE__ double 
log10(double);\n" "__DEVICE__ float log10(float);\n" "__DEVICE__ double log1p(double);\n" "__DEVICE__ float log1p(float);\n" "__DEVICE__ double log2(double);\n" "__DEVICE__ float log2(float);\n" "__DEVICE__ double logb(double);\n" "__DEVICE__ float logb(float);\n" "__DEVICE__ double log(double);\n" "__DEVICE__ float log(float);\n" "__DEVICE__ long lrint(double);\n" "__DEVICE__ long lrint(float);\n" "__DEVICE__ long lround(double);\n" "__DEVICE__ long lround(float);\n" "__DEVICE__ long long llround(float); // No llround(double).\n" "__DEVICE__ double modf(double, double *);\n" "__DEVICE__ float modf(float, float *);\n" "__DEVICE__ double nan(const char *);\n" "__DEVICE__ float nanf(const char *);\n" "__DEVICE__ double nearbyint(double);\n" "__DEVICE__ float nearbyint(float);\n" "__DEVICE__ double nextafter(double, double);\n" "__DEVICE__ float nextafter(float, float);\n" "__DEVICE__ double pow(double, double);\n" "__DEVICE__ double pow(double, int);\n" "__DEVICE__ float pow(float, float);\n" "__DEVICE__ float pow(float, int);\n" "__DEVICE__ double remainder(double, double);\n" "__DEVICE__ float remainder(float, float);\n" "__DEVICE__ double remquo(double, double, int *);\n" "__DEVICE__ float remquo(float, float, int *);\n" "__DEVICE__ double rint(double);\n" "__DEVICE__ float rint(float);\n" "__DEVICE__ double round(double);\n" "__DEVICE__ float round(float);\n" "__DEVICE__ double scalbln(double, long);\n" "__DEVICE__ float scalbln(float, long);\n" "__DEVICE__ double scalbn(double, int);\n" "__DEVICE__ float scalbn(float, int);\n" "#ifdef _MSC_VER\n" "__DEVICE__ bool signbit(long double);\n" "#endif\n" "__DEVICE__ bool signbit(double);\n" "__DEVICE__ bool signbit(float);\n" "__DEVICE__ double sin(double);\n" "__DEVICE__ float sin(float);\n" "__DEVICE__ double sinh(double);\n" "__DEVICE__ float sinh(float);\n" "__DEVICE__ double sqrt(double);\n" "__DEVICE__ float sqrt(float);\n" "__DEVICE__ double tan(double);\n" "__DEVICE__ float tan(float);\n" "__DEVICE__ double 
tanh(double);\n" "__DEVICE__ float tanh(float);\n" "__DEVICE__ double tgamma(double);\n" "__DEVICE__ float tgamma(float);\n" "__DEVICE__ double trunc(double);\n" "__DEVICE__ float trunc(float);\n" "\n" "// Notably missing above is nexttoward, which we don't define on\n" "// the device side because libdevice doesn't give us an implementation, and we\n" "// don't want to be in the business of writing one ourselves.\n" "\n" "// We need to define these overloads in exactly the namespace our standard\n" "// library uses (including the right inline namespace), otherwise they won't be\n" "// picked up by other functions in the standard library (e.g. functions in\n" "// ). Thus the ugliness below.\n" "#ifdef _LIBCPP_BEGIN_NAMESPACE_STD\n" "_LIBCPP_BEGIN_NAMESPACE_STD\n" "#else\n" "namespace std {\n" "#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION\n" "_GLIBCXX_BEGIN_NAMESPACE_VERSION\n" "#endif\n" "#endif\n" "\n" "using ::abs;\n" "using ::acos;\n" "using ::acosh;\n" "using ::asin;\n" "using ::asinh;\n" "using ::atan;\n" "using ::atan2;\n" "using ::atanh;\n" "using ::cbrt;\n" "using ::ceil;\n" "using ::copysign;\n" "using ::cos;\n" "using ::cosh;\n" "using ::erf;\n" "using ::erfc;\n" "using ::exp;\n" "using ::exp2;\n" "using ::expm1;\n" "using ::fabs;\n" "using ::fdim;\n" "using ::floor;\n" "using ::fma;\n" "using ::fmax;\n" "using ::fmin;\n" "using ::fmod;\n" "using ::fpclassify;\n" "using ::frexp;\n" "using ::hypot;\n" "using ::ilogb;\n" "using ::isfinite;\n" "using ::isgreater;\n" "using ::isgreaterequal;\n" "using ::isinf;\n" "using ::isless;\n" "using ::islessequal;\n" "using ::islessgreater;\n" "using ::isnan;\n" "using ::isnormal;\n" "using ::isunordered;\n" "using ::labs;\n" "using ::ldexp;\n" "using ::lgamma;\n" "using ::llabs;\n" "using ::llrint;\n" "using ::log;\n" "using ::log10;\n" "using ::log1p;\n" "using ::log2;\n" "using ::logb;\n" "using ::lrint;\n" "using ::lround;\n" "using ::llround;\n" "using ::modf;\n" "using ::nan;\n" "using ::nanf;\n" "using ::nearbyint;\n" 
"using ::nextafter;\n" "using ::pow;\n" "using ::remainder;\n" "using ::remquo;\n" "using ::rint;\n" "using ::round;\n" "using ::scalbln;\n" "using ::scalbn;\n" "using ::signbit;\n" "using ::sin;\n" "using ::sinh;\n" "using ::sqrt;\n" "using ::tan;\n" "using ::tanh;\n" "using ::tgamma;\n" "using ::trunc;\n" "\n" "#ifdef _LIBCPP_END_NAMESPACE_STD\n" "_LIBCPP_END_NAMESPACE_STD\n" "#else\n" "#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION\n" "_GLIBCXX_END_NAMESPACE_VERSION\n" "#endif\n" "} // namespace std\n" "#endif\n" "\n" "#pragma pop_macro(\"__DEVICE__\")\n" "\n" "#endif\n" "" } , { "/builtins/__clang_cuda_runtime_wrapper.h" , "/*===---- __clang_cuda_runtime_wrapper.h - CUDA runtime support -------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "/*\n" " * WARNING: This header is intended to be directly -include'd by\n" " * the compiler and is not supposed to be included by users.\n" " *\n" " * CUDA headers are implemented in a way that currently makes it\n" " * impossible for user code to #include directly when compiling with\n" " * Clang. They present different view of CUDA-supplied functions\n" " * depending on where in NVCC's compilation pipeline the headers are\n" " * included. 
Neither of these modes provides function definitions with\n" " * correct attributes, so we use preprocessor to force the headers\n" " * into a form that Clang can use.\n" " *\n" " * Similarly to NVCC which -include's cuda_runtime.h, Clang -include's\n" " * this file during every CUDA compilation.\n" " */\n" "\n" "#ifndef __CLANG_CUDA_RUNTIME_WRAPPER_H__\n" "#define __CLANG_CUDA_RUNTIME_WRAPPER_H__\n" "\n" "#if defined(__CUDA__) && defined(__clang__)\n" "\n" "// Include some forward declares that must come before cmath.\n" "#include <__clang_cuda_math_forward_declares.h>\n" "\n" "// Define __CUDACC__ early as libstdc++ standard headers with GNU extensions\n" "// enabled depend on it to avoid using __float128, which is unsupported in\n" "// CUDA.\n" "#define __CUDACC__\n" "\n" "// Include some standard headers to avoid CUDA headers including them\n" "// while some required macros (like __THROW) are in a weird state.\n" "#include \n" "#include \n" "#include \n" "#include \n" "#undef __CUDACC__\n" "\n" "// Preserve common macros that will be changed below by us or by CUDA\n" "// headers.\n" "#pragma push_macro(\"__THROW\")\n" "#pragma push_macro(\"__CUDA_ARCH__\")\n" "\n" "// WARNING: Preprocessor hacks below are based on specific details of\n" "// CUDA-7.x headers and are not expected to work with any other\n" "// version of CUDA headers.\n" "#include \"cuda.h\"\n" "#if !defined(CUDA_VERSION)\n" "#error \"cuda.h did not define CUDA_VERSION\"\n" "#elif CUDA_VERSION < 7000\n" "#error \"Unsupported CUDA version!\"\n" "#endif\n" "\n" "#pragma push_macro(\"__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__\")\n" "#if CUDA_VERSION >= 10000\n" "#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__\n" "#endif\n" "\n" "// Make largest subset of device functions available during host\n" "// compilation.\n" "#ifndef __CUDA_ARCH__\n" "#define __CUDA_ARCH__ 9999\n" "#endif\n" "\n" "#include \"__clang_cuda_builtin_vars.h\"\n" "\n" "// No need for device_launch_parameters.h as 
__clang_cuda_builtin_vars.h above\n" "// has taken care of builtin variables declared in the file.\n" "#define __DEVICE_LAUNCH_PARAMETERS_H__\n" "\n" "// {math,device}_functions.h only have declarations of the\n" "// functions. We don't need them as we're going to pull in their\n" "// definitions from .hpp files.\n" "#define __DEVICE_FUNCTIONS_H__\n" "#define __MATH_FUNCTIONS_H__\n" "#define __COMMON_FUNCTIONS_H__\n" "// device_functions_decls is replaced by __clang_cuda_device_functions.h\n" "// included below.\n" "#define __DEVICE_FUNCTIONS_DECLS_H__\n" "\n" "#undef __CUDACC__\n" "#if CUDA_VERSION < 9000\n" "#define __CUDABE__\n" "#else\n" "#define __CUDACC__\n" "#define __CUDA_LIBDEVICE__\n" "#endif\n" "// Disables definitions of device-side runtime support stubs in\n" "// cuda_device_runtime_api.h\n" "#include \"host_defines.h\"\n" "#undef __CUDACC__\n" "#include \"driver_types.h\"\n" "#include \"host_config.h\"\n" "\n" "// Temporarily replace \"nv_weak\" with weak, so __attribute__((nv_weak)) in\n" "// cuda_device_runtime_api.h ends up being __attribute__((weak)) which is the\n" "// functional equivalent of what we need.\n" "#pragma push_macro(\"nv_weak\")\n" "#define nv_weak weak\n" "#undef __CUDABE__\n" "#undef __CUDA_LIBDEVICE__\n" "#define __CUDACC__\n" "#include \"cuda_runtime.h\"\n" "\n" "#pragma pop_macro(\"nv_weak\")\n" "#undef __CUDACC__\n" "#define __CUDABE__\n" "\n" "// CUDA headers use __nvvm_memcpy and __nvvm_memset which Clang does\n" "// not have at the moment. 
Emulate them with a builtin memcpy/memset.\n" "#define __nvvm_memcpy(s, d, n, a) __builtin_memcpy(s, d, n)\n" "#define __nvvm_memset(d, c, n, a) __builtin_memset(d, c, n)\n" "\n" "#if CUDA_VERSION < 9000\n" "#include \"crt/device_runtime.h\"\n" "#endif\n" "#include \"crt/host_runtime.h\"\n" "// device_runtime.h defines __cxa_* macros that will conflict with\n" "// cxxabi.h.\n" "// FIXME: redefine these as __device__ functions.\n" "#undef __cxa_vec_ctor\n" "#undef __cxa_vec_cctor\n" "#undef __cxa_vec_dtor\n" "#undef __cxa_vec_new\n" "#undef __cxa_vec_new2\n" "#undef __cxa_vec_new3\n" "#undef __cxa_vec_delete2\n" "#undef __cxa_vec_delete\n" "#undef __cxa_vec_delete3\n" "#undef __cxa_pure_virtual\n" "\n" "// math_functions.hpp expects this host function be defined on MacOS, but it\n" "// ends up not being there because of the games we play here. Just define it\n" "// ourselves; it's simple enough.\n" "#ifdef __APPLE__\n" "inline __host__ double __signbitd(double x) {\n" " return std::signbit(x);\n" "}\n" "#endif\n" "\n" "// CUDA 9.1 no longer provides declarations for libdevice functions, so we need\n" "// to provide our own.\n" "#include <__clang_cuda_libdevice_declares.h>\n" "\n" "// Wrappers for many device-side standard library functions, incl. math\n" "// functions, became compiler builtins in CUDA-9 and have been removed from the\n" "// CUDA headers. Clang now provides its own implementation of the wrappers.\n" "#if CUDA_VERSION >= 9000\n" "#include <__clang_cuda_device_functions.h>\n" "#include <__clang_cuda_math.h>\n" "#endif\n" "\n" "// __THROW is redefined to be empty by device_functions_decls.h in CUDA. 
Clang's\n" "// counterpart does not do it, so we need to make it empty here to keep\n" "// following CUDA includes happy.\n" "#undef __THROW\n" "#define __THROW\n" "\n" "// CUDA 8.0.41 relies on __USE_FAST_MATH__ and __CUDA_PREC_DIV's values.\n" "// Previous versions used to check whether they are defined or not.\n" "// CU_DEVICE_INVALID macro is only defined in 8.0.41, so we use it\n" "// here to detect the switch.\n" "\n" "#if defined(CU_DEVICE_INVALID)\n" "#if !defined(__USE_FAST_MATH__)\n" "#define __USE_FAST_MATH__ 0\n" "#endif\n" "\n" "#if !defined(__CUDA_PREC_DIV)\n" "#define __CUDA_PREC_DIV 0\n" "#endif\n" "#endif\n" "\n" "// Temporarily poison __host__ macro to ensure it's not used by any of\n" "// the headers we're about to include.\n" "#pragma push_macro(\"__host__\")\n" "#define __host__ UNEXPECTED_HOST_ATTRIBUTE\n" "\n" "// device_functions.hpp and math_functions*.hpp use 'static\n" "// __forceinline__' (with no __device__) for definitions of device\n" "// functions. Temporarily redefine __forceinline__ to include\n" "// __device__.\n" "#pragma push_macro(\"__forceinline__\")\n" "#define __forceinline__ __device__ __inline__ __attribute__((always_inline))\n" "#if CUDA_VERSION < 9000\n" "#include \"device_functions.hpp\"\n" "#endif\n" "\n" "// math_function.hpp uses the __USE_FAST_MATH__ macro to determine whether we\n" "// get the slow-but-accurate or fast-but-inaccurate versions of functions like\n" "// sin and exp. 
This is controlled in clang by -fcuda-approx-transcendentals.\n" "//\n" "// device_functions.hpp uses __USE_FAST_MATH__ for a different purpose (fast vs.\n" "// slow divides), so we need to scope our define carefully here.\n" "#pragma push_macro(\"__USE_FAST_MATH__\")\n" "#if defined(__CLANG_CUDA_APPROX_TRANSCENDENTALS__)\n" "#define __USE_FAST_MATH__ 1\n" "#endif\n" "\n" "#if CUDA_VERSION >= 9000\n" "#include \"crt/math_functions.hpp\"\n" "#else\n" "#include \"math_functions.hpp\"\n" "#endif\n" "\n" "#pragma pop_macro(\"__USE_FAST_MATH__\")\n" "\n" "#if CUDA_VERSION < 9000\n" "#include \"math_functions_dbl_ptx3.hpp\"\n" "#endif\n" "#pragma pop_macro(\"__forceinline__\")\n" "\n" "// Pull in host-only functions that are only available when neither\n" "// __CUDACC__ nor __CUDABE__ are defined.\n" "#undef __MATH_FUNCTIONS_HPP__\n" "#undef __CUDABE__\n" "#if CUDA_VERSION < 9000\n" "#include \"math_functions.hpp\"\n" "#endif\n" "// Alas, additional overloads for these functions are hard to get to.\n" "// Considering that we only need these overloads for a few functions,\n" "// we can provide them here.\n" "static inline float rsqrt(float __a) { return rsqrtf(__a); }\n" "static inline float rcbrt(float __a) { return rcbrtf(__a); }\n" "static inline float sinpi(float __a) { return sinpif(__a); }\n" "static inline float cospi(float __a) { return cospif(__a); }\n" "static inline void sincospi(float __a, float *__b, float *__c) {\n" " return sincospif(__a, __b, __c);\n" "}\n" "static inline float erfcinv(float __a) { return erfcinvf(__a); }\n" "static inline float normcdfinv(float __a) { return normcdfinvf(__a); }\n" "static inline float normcdf(float __a) { return normcdff(__a); }\n" "static inline float erfcx(float __a) { return erfcxf(__a); }\n" "\n" "#if CUDA_VERSION < 9000\n" "// For some reason single-argument variant is not always declared by\n" "// CUDA headers. 
Alas, device_functions.hpp included below needs it.\n" "static inline __device__ void __brkpt(int __c) { __brkpt(); }\n" "#endif\n" "\n" "// Now include *.hpp with definitions of various GPU functions. Alas,\n" "// a lot of thins get declared/defined with __host__ attribute which\n" "// we don't want and we have to define it out. We also have to include\n" "// {device,math}_functions.hpp again in order to extract the other\n" "// branch of #if/else inside.\n" "#define __host__\n" "#undef __CUDABE__\n" "#define __CUDACC__\n" "#if CUDA_VERSION >= 9000\n" "// Some atomic functions became compiler builtins in CUDA-9 , so we need their\n" "// declarations.\n" "#include \"device_atomic_functions.h\"\n" "#endif\n" "#undef __DEVICE_FUNCTIONS_HPP__\n" "#include \"device_atomic_functions.hpp\"\n" "#if CUDA_VERSION >= 9000\n" "#include \"crt/device_functions.hpp\"\n" "#include \"crt/device_double_functions.hpp\"\n" "#else\n" "#include \"device_functions.hpp\"\n" "#define __CUDABE__\n" "#include \"device_double_functions.h\"\n" "#undef __CUDABE__\n" "#endif\n" "#include \"sm_20_atomic_functions.hpp\"\n" "// Predicate functions used in `__builtin_assume` need to have no side effect.\n" "// However, sm_20_intrinsics.hpp doesn't define them with neither pure nor\n" "// const attribute. 
Rename definitions from sm_20_intrinsics.hpp and re-define\n" "// them as pure ones.\n" "#pragma push_macro(\"__isGlobal\")\n" "#pragma push_macro(\"__isShared\")\n" "#pragma push_macro(\"__isConstant\")\n" "#pragma push_macro(\"__isLocal\")\n" "#define __isGlobal __ignored_cuda___isGlobal\n" "#define __isShared __ignored_cuda___isShared\n" "#define __isConstant __ignored_cuda___isConstant\n" "#define __isLocal __ignored_cuda___isLocal\n" "#include \"sm_20_intrinsics.hpp\"\n" "#pragma pop_macro(\"__isGlobal\")\n" "#pragma pop_macro(\"__isShared\")\n" "#pragma pop_macro(\"__isConstant\")\n" "#pragma pop_macro(\"__isLocal\")\n" "#pragma push_macro(\"__DEVICE__\")\n" "#define __DEVICE__ static __device__ __forceinline__ __attribute__((const))\n" "__DEVICE__ unsigned int __isGlobal(const void *p) {\n" " return __nvvm_isspacep_global(p);\n" "}\n" "__DEVICE__ unsigned int __isShared(const void *p) {\n" " return __nvvm_isspacep_shared(p);\n" "}\n" "__DEVICE__ unsigned int __isConstant(const void *p) {\n" " return __nvvm_isspacep_const(p);\n" "}\n" "__DEVICE__ unsigned int __isLocal(const void *p) {\n" " return __nvvm_isspacep_local(p);\n" "}\n" "#pragma pop_macro(\"__DEVICE__\")\n" "#include \"sm_32_atomic_functions.hpp\"\n" "\n" "// Don't include sm_30_intrinsics.h and sm_32_intrinsics.h. These define the\n" "// __shfl and __ldg intrinsics using inline (volatile) asm, but we want to\n" "// define them using builtins so that the optimizer can reason about and across\n" "// these instructions. 
In particular, using intrinsics for ldg gets us the\n" "// [addr+imm] addressing mode, which, although it doesn't actually exist in the\n" "// hardware, seems to generate faster machine code because ptxas can more easily\n" "// reason about our code.\n" "\n" "#if CUDA_VERSION >= 8000\n" "#pragma push_macro(\"__CUDA_ARCH__\")\n" "#undef __CUDA_ARCH__\n" "#include \"sm_60_atomic_functions.hpp\"\n" "#include \"sm_61_intrinsics.hpp\"\n" "#pragma pop_macro(\"__CUDA_ARCH__\")\n" "#endif\n" "\n" "#undef __MATH_FUNCTIONS_HPP__\n" "\n" "// math_functions.hpp defines ::signbit as a __host__ __device__ function. This\n" "// conflicts with libstdc++'s constexpr ::signbit, so we have to rename\n" "// math_function.hpp's ::signbit. It's guarded by #undef signbit, but that's\n" "// conditional on __GNUC__. :)\n" "#pragma push_macro(\"signbit\")\n" "#pragma push_macro(\"__GNUC__\")\n" "#undef __GNUC__\n" "#define signbit __ignored_cuda_signbit\n" "\n" "// CUDA-9 omits device-side definitions of some math functions if it sees\n" "// include guard from math.h wrapper from libstdc++. 
We have to undo the header\n" "// guard temporarily to get the definitions we need.\n" "#pragma push_macro(\"_GLIBCXX_MATH_H\")\n" "#pragma push_macro(\"_LIBCPP_VERSION\")\n" "#if CUDA_VERSION >= 9000\n" "#undef _GLIBCXX_MATH_H\n" "// We also need to undo another guard that checks for libc++ 3.8+\n" "#ifdef _LIBCPP_VERSION\n" "#define _LIBCPP_VERSION 3700\n" "#endif\n" "#endif\n" "\n" "#if CUDA_VERSION >= 9000\n" "#include \"crt/math_functions.hpp\"\n" "#else\n" "#include \"math_functions.hpp\"\n" "#endif\n" "#pragma pop_macro(\"_GLIBCXX_MATH_H\")\n" "#pragma pop_macro(\"_LIBCPP_VERSION\")\n" "#pragma pop_macro(\"__GNUC__\")\n" "#pragma pop_macro(\"signbit\")\n" "\n" "#pragma pop_macro(\"__host__\")\n" "\n" "// __clang_cuda_texture_intrinsics.h must be included first in order to provide\n" "// implementation for __nv_tex_surf_handler that CUDA's headers depend on.\n" "// The implementation requires c++11 and only works with CUDA-9 or newer.\n" "#if __cplusplus >= 201103L && CUDA_VERSION >= 9000\n" "// clang-format off\n" "#include <__clang_cuda_texture_intrinsics.h>\n" "// clang-format on\n" "#else\n" "#if CUDA_VERSION >= 9000\n" "// Provide a hint that texture support needs C++11.\n" "template struct __nv_tex_needs_cxx11 {\n" " const static bool value = false;\n" "};\n" "template \n" "__host__ __device__ void __nv_tex_surf_handler(const char *name, T *ptr,\n" " cudaTextureObject_t obj,\n" " float x) {\n" " _Static_assert(__nv_tex_needs_cxx11::value,\n" " \"Texture support requires C++11\");\n" "}\n" "#else\n" "// Textures in CUDA-8 and older are not supported by clang.There's no\n" "// convenient way to intercept texture use in these versions, so we can't\n" "// produce a meaningful error. 
The source code that attempts to use textures\n" "// will continue to fail as it does now.\n" "#endif // CUDA_VERSION\n" "#endif // __cplusplus >= 201103L && CUDA_VERSION >= 9000\n" "#include \"texture_fetch_functions.h\"\n" "#include \"texture_indirect_functions.h\"\n" "\n" "// Restore state of __CUDA_ARCH__ and __THROW we had on entry.\n" "#pragma pop_macro(\"__CUDA_ARCH__\")\n" "#pragma pop_macro(\"__THROW\")\n" "\n" "// Set up compiler macros expected to be seen during compilation.\n" "#undef __CUDABE__\n" "#define __CUDACC__\n" "\n" "extern \"C\" {\n" "// Device-side CUDA system calls.\n" "// http://docs.nvidia.com/cuda/ptx-writers-guide-to-interoperability/index.html#system-calls\n" "// We need these declarations and wrappers for device-side\n" "// malloc/free/printf calls to work without relying on\n" "// -fcuda-disable-target-call-checks option.\n" "__device__ int vprintf(const char *, const char *);\n" "__device__ void free(void *) __attribute((nothrow));\n" "__device__ void *malloc(size_t) __attribute((nothrow)) __attribute__((malloc));\n" "\n" "// __assertfail() used to have a `noreturn` attribute. Unfortunately that\n" "// contributed to triggering the longstanding bug in ptxas when assert was used\n" "// in sufficiently convoluted code. 
See\n" "// https://bugs.llvm.org/show_bug.cgi?id=27738 for the details.\n" "__device__ void __assertfail(const char *__message, const char *__file,\n" " unsigned __line, const char *__function,\n" " size_t __charSize);\n" "\n" "// In order for standard assert() macro on linux to work we need to\n" "// provide device-side __assert_fail()\n" "__device__ static inline void __assert_fail(const char *__message,\n" " const char *__file, unsigned __line,\n" " const char *__function) {\n" " __assertfail(__message, __file, __line, __function, sizeof(char));\n" "}\n" "\n" "// Clang will convert printf into vprintf, but we still need\n" "// device-side declaration for it.\n" "__device__ int printf(const char *, ...);\n" "} // extern \"C\"\n" "\n" "// We also need device-side std::malloc and std::free.\n" "namespace std {\n" "__device__ static inline void free(void *__ptr) { ::free(__ptr); }\n" "__device__ static inline void *malloc(size_t __size) {\n" " return ::malloc(__size);\n" "}\n" "} // namespace std\n" "\n" "// Out-of-line implementations from __clang_cuda_builtin_vars.h. 
These need to\n" "// come after we've pulled in the definition of uint3 and dim3.\n" "\n" "__device__ inline __cuda_builtin_threadIdx_t::operator dim3() const {\n" " return dim3(x, y, z);\n" "}\n" "\n" "__device__ inline __cuda_builtin_threadIdx_t::operator uint3() const {\n" " return {x, y, z};\n" "}\n" "\n" "__device__ inline __cuda_builtin_blockIdx_t::operator dim3() const {\n" " return dim3(x, y, z);\n" "}\n" "\n" "__device__ inline __cuda_builtin_blockIdx_t::operator uint3() const {\n" " return {x, y, z};\n" "}\n" "\n" "__device__ inline __cuda_builtin_blockDim_t::operator dim3() const {\n" " return dim3(x, y, z);\n" "}\n" "\n" "__device__ inline __cuda_builtin_blockDim_t::operator uint3() const {\n" " return {x, y, z};\n" "}\n" "\n" "__device__ inline __cuda_builtin_gridDim_t::operator dim3() const {\n" " return dim3(x, y, z);\n" "}\n" "\n" "__device__ inline __cuda_builtin_gridDim_t::operator uint3() const {\n" " return {x, y, z};\n" "}\n" "\n" "#include <__clang_cuda_cmath.h>\n" "#include <__clang_cuda_intrinsics.h>\n" "#include <__clang_cuda_complex_builtins.h>\n" "\n" "// curand_mtgp32_kernel helpfully redeclares blockDim and threadIdx in host\n" "// mode, giving them their \"proper\" types of dim3 and uint3. This is\n" "// incompatible with the types we give in __clang_cuda_builtin_vars.h. As as\n" "// hack, force-include the header (nvcc doesn't include it by default) but\n" "// redefine dim3 and uint3 to our builtin types. 
(Thankfully dim3 and uint3 are\n" "// only used here for the redeclarations of blockDim and threadIdx.)\n" "#pragma push_macro(\"dim3\")\n" "#pragma push_macro(\"uint3\")\n" "#define dim3 __cuda_builtin_blockDim_t\n" "#define uint3 __cuda_builtin_threadIdx_t\n" "#include \"curand_mtgp32_kernel.h\"\n" "#pragma pop_macro(\"dim3\")\n" "#pragma pop_macro(\"uint3\")\n" "#pragma pop_macro(\"__USE_FAST_MATH__\")\n" "#pragma pop_macro(\"__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__\")\n" "\n" "// CUDA runtime uses this undocumented function to access kernel launch\n" "// configuration. The declaration is in crt/device_functions.h but that file\n" "// includes a lot of other stuff we don't want. Instead, we'll provide our own\n" "// declaration for it here.\n" "#if CUDA_VERSION >= 9020\n" "extern \"C\" unsigned __cudaPushCallConfiguration(dim3 gridDim, dim3 blockDim,\n" " size_t sharedMem = 0,\n" " void *stream = 0);\n" "#endif\n" "\n" "#endif // __CUDA__\n" "#endif // __CLANG_CUDA_RUNTIME_WRAPPER_H__\n" "" } , { "/builtins/__clang_cuda_texture_intrinsics.h" , "/*===--- __clang_cuda_texture_intrinsics.h - Device-side texture support ---===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " *\n" " * This header provides in-header implmentations for NVCC's built-in\n" " * __nv_tex_surf_handler() which is used by CUDA's texture-related headers. 
The\n" " * built-in is unusual as it's actually a set of function overloads that use the\n" " * first string literal argument as one of the overload parameters.\n" " */\n" "#ifndef __CLANG_CUDA_TEXTURE_INTRINSICS_H__\n" "#define __CLANG_CUDA_TEXTURE_INTRINSICS_H__\n" "#ifndef __CUDA__\n" "#error \"This file is for CUDA compilation only.\"\n" "#endif\n" "\n" "// __nv_tex_surf_handler() provided by this header as a macro.\n" "#define __nv_tex_surf_handler(__op, __ptr, ...) \\\n" " ::__cuda_tex::__tex_fetch< \\\n" " ::__cuda_tex::__Tag<::__cuda_tex::__tex_op_hash(__op)>>(__ptr, \\\n" " __VA_ARGS__)\n" "\n" "#pragma push_macro(\"__ASM_OUT\")\n" "#pragma push_macro(\"__ASM_OUTP\")\n" "#pragma push_macro(\"__Args\")\n" "#pragma push_macro(\"__ID\")\n" "#pragma push_macro(\"__IDV\")\n" "#pragma push_macro(\"__IMPL_2DGATHER\")\n" "#pragma push_macro(\"__IMPL_ALIAS\")\n" "#pragma push_macro(\"__IMPL_ALIASI\")\n" "#pragma push_macro(\"__IMPL_F1\")\n" "#pragma push_macro(\"__IMPL_F3\")\n" "#pragma push_macro(\"__IMPL_F3N\")\n" "#pragma push_macro(\"__IMPL_F3S\")\n" "#pragma push_macro(\"__IMPL_S\")\n" "#pragma push_macro(\"__IMPL_S3\")\n" "#pragma push_macro(\"__IMPL_S3I\")\n" "#pragma push_macro(\"__IMPL_S3N\")\n" "#pragma push_macro(\"__IMPL_S3NI\")\n" "#pragma push_macro(\"__IMPL_S3S\")\n" "#pragma push_macro(\"__IMPL_S3SI\")\n" "#pragma push_macro(\"__IMPL_SI\")\n" "#pragma push_macro(\"__L\")\n" "#pragma push_macro(\"__STRIP_PARENS\")\n" "\n" "// Put all functions into anonymous namespace so they have internal linkage.\n" "// The device-only function here must be internal in order to avoid ODR\n" "// violations in case they are used from the files compiled with\n" "// -fgpu-rdc. E.g. 
a library and an app using it may be built with a different\n" "// version of this header file.\n" "namespace {\n" "\n" "// Put the implmentation into its own namespace so we don't pollute the TU.\n" "namespace __cuda_tex {\n" "\n" "// First, we need a perfect hash function and a few constexpr helper functions\n" "// for converting a string literal into a numeric value which can be used to\n" "// parametrize a template. We can not use string literals for that as that would\n" "// require C++20.\n" "//\n" "// The hash function was generated with 'gperf' and then manually converted into\n" "// its constexpr equivalent.\n" "//\n" "// NOTE: the perfect hashing scheme comes with inherent self-test. If the hash\n" "// function has a collision for any of the texture operations, the compilation\n" "// will fail due to an attempt to redefine a tag with the same value. If the\n" "// header compiles, then the hash function is good enough for the job.\n" "\n" "constexpr int __tex_len(const char *s) {\n" " return (s[0] == 0) ? 0\n" " : (s[1] == 0) ? 1\n" " : (s[2] == 0) ? 2\n" " : (s[3] == 0) ? 3\n" " : (s[4] == 0) ? 4\n" " : (s[5] == 0) ? 5\n" " : (s[6] == 0) ? 6\n" " : (s[7] == 0) ? 7\n" " : (s[8] == 0) ? 8\n" " : (s[9] == 0) ? 9\n" " : (s[10] == 0) ? 10\n" " : (s[11] == 0) ? 11\n" " : (s[12] == 0) ? 12\n" " : (s[13] == 0) ? 13\n" " : (s[14] == 0) ? 14\n" " : (s[15] == 0) ? 15\n" " : (s[16] == 0) ? 16\n" " : (s[17] == 0) ? 17\n" " : (s[18] == 0) ? 18\n" " : (s[19] == 0) ? 19\n" " : (s[20] == 0) ? 20\n" " : (s[21] == 0) ? 21\n" " : (s[22] == 0) ? 22\n" " : (s[23] == 0) ? 23\n" " : (s[24] == 0) ? 24\n" " : (s[25] == 0) ? 25\n" " : (s[26] == 0) ? 26\n" " : (s[27] == 0) ? 27\n" " : (s[28] == 0) ? 28\n" " : (s[29] == 0) ? 29\n" " : (s[30] == 0) ? 30\n" " : (s[31] == 0) ? 31\n" " : 32;\n" "}\n" "\n" "constexpr int __tex_hash_map(int c) {\n" " return (c == 49) ? 10\n" " : (c == 50) ? 0\n" " : (c == 51) ? 100\n" " : (c == 52) ? 30\n" " : (c == 67) ? 10\n" " : (c == 68) ? 
0\n" " : (c == 69) ? 25\n" " : (c == 72) ? 70\n" " : (c == 77) ? 0\n" " : (c == 96) ? 44\n" " : (c == 99) ? 10\n" " : (c == 100) ? 5\n" " : (c == 101) ? 60\n" " : (c == 102) ? 40\n" " : (c == 103) ? 70\n" " : (c == 104) ? 25\n" " : (c == 112) ? 0\n" " : (c == 114) ? 45\n" " : (c == 117) ? 5\n" " : (c == 118) ? 85\n" " : (c == 120) ? 20\n" " : 225;\n" "}\n" "\n" "constexpr int __tex_op_hash(const char *str) {\n" " return __tex_len(str) + __tex_hash_map(str[7] + 1) + __tex_hash_map(str[6]) +\n" " __tex_hash_map(str[5]) + __tex_hash_map(str[__tex_len(str) - 1]);\n" "}\n" "\n" "// Tag type to identify particular texture operation.\n" "template struct __Tag;\n" "#define __ID(__op) __Tag<__tex_op_hash(__op)>\n" "// Tags for variants of particular operation. E.g. tex2Dgather can translate\n" "// into 4 different instructions.\n" "#define __IDV(__op, __variant) \\\n" " __Tag<10000 + __tex_op_hash(__op) * 100 + __variant>\n" "\n" "// Helper classes for figuring out key data types for derived types.\n" "// E.g. 
char2 has __base_t = char, __fetch_t = char4\n" "template struct __TypeInfoT;\n" "// Type info for the fundamental types.\n" "template <> struct __TypeInfoT {\n" " using __base_t = float;\n" " using __fetch_t = float4;\n" "};\n" "template <> struct __TypeInfoT {\n" " using __base_t = char;\n" " using __fetch_t = int4;\n" "};\n" "template <> struct __TypeInfoT {\n" " using __base_t = signed char;\n" " using __fetch_t = int4;\n" "};\n" "template <> struct __TypeInfoT {\n" " using __base_t = unsigned char;\n" " using __fetch_t = uint4;\n" "};\n" "template <> struct __TypeInfoT {\n" " using __base_t = short;\n" " using __fetch_t = int4;\n" "};\n" "template <> struct __TypeInfoT {\n" " using __base_t = unsigned short;\n" " using __fetch_t = uint4;\n" "};\n" "template <> struct __TypeInfoT {\n" " using __base_t = int;\n" " using __fetch_t = int4;\n" "};\n" "template <> struct __TypeInfoT {\n" " using __base_t = unsigned int;\n" " using __fetch_t = uint4;\n" "};\n" "\n" "// Derived base/fetch types for N-element vectors.\n" "template struct __TypeInfoT {\n" " using __base_t = decltype(__T::x);\n" " using __fetch_t = typename __TypeInfoT<__base_t>::__fetch_t;\n" "};\n" "\n" "// Classes that implement specific texture ops.\n" "template struct __tex_fetch_v4;\n" "\n" "// Helper macros to strip parens from a macro argument.\n" "#define __Args(...) __VA_ARGS__\n" "#define __STRIP_PARENS(__X) __X\n" "#define __L(__X) __STRIP_PARENS(__Args __X)\n" "\n" "// Construct inline assembly output args.\n" "// Results are stored in a temp var __r.\n" "// isResident bool is pointed to by __ir\n" "// Asm args for return values. It's a 4-element vector\n" "#define __ASM_OUT(__t) \\\n" " (\"=\" __t(__r.x), \"=\" __t(__r.y), \"=\" __t(__r.z), \"=\" __t(__r.w))\n" "// .. 
possibly combined with a predicate.\n" "#define __ASM_OUTP(__t) (__L(__ASM_OUT(__t)), \"=h\"(*__ir))\n" "\n" "// Implements a single variant of texture fetch instruction.\n" "#define __IMPL_F1(__rt, __dt, __args, __asm_op, __asm_outs, __asm_args) \\\n" " template <> \\\n" " __device__ __rt __run<__dt>(cudaTextureObject_t __obj, __L(__args)) { \\\n" " __rt __r; \\\n" " asm(__asm_op : __L(__asm_outs) : \"l\"(__obj), __L(__asm_args)); \\\n" " return __r; \\\n" " }\n" "\n" "// Implements texture fetch instructions for int4/uint4/float4 data types.\n" "#define __IMPL_F3(__args, __asm_op, __ctype, __asm_op_args, __asm_args) \\\n" " __IMPL_F1(int4, int4, __args, __asm_op \".s32.\" __ctype \"\\t\" __asm_op_args, \\\n" " __ASM_OUT(\"r\"), __asm_args) \\\n" " __IMPL_F1(uint4, uint4, __args, __asm_op \".u32.\" __ctype \"\\t\" __asm_op_args, \\\n" " __ASM_OUT(\"r\"), __asm_args) \\\n" " __IMPL_F1(float4, float4, __args, \\\n" " __asm_op \".f32.\" __ctype \"\\t\" __asm_op_args, __ASM_OUT(\"f\"), \\\n" " __asm_args)\n" "// Implements 'sparse' texture fetch instructions for int4/uint4/float4 data\n" "// types. 
Similar to above, but returns a boolean 'isPresent' value in addition\n" "// to texture data,\n" "#define __IMPL_F3S(__args, __asm_op, __ctype, __asm_op_args, __asm_args) \\\n" " __IMPL_F1(int4, int4, __args, __asm_op \".s32.\" __ctype \"\\t\" __asm_op_args, \\\n" " __ASM_OUTP(\"r\"), __asm_args) \\\n" " __IMPL_F1(uint4, uint4, __args, __asm_op \".u32.\" __ctype \"\\t\" __asm_op_args, \\\n" " __ASM_OUTP(\"r\"), __asm_args) \\\n" " __IMPL_F1(float4, float4, __args, \\\n" " __asm_op \".f32.\" __ctype \"\\t\" __asm_op_args, __ASM_OUTP(\"f\"), \\\n" " __asm_args)\n" "\n" "// Similar to F3, but for integer data which is returned as normalized floats.\n" "// Only instantiates fetch functions for int4/uint4.\n" "#define __IMPL_F3N(__args, __asm_op, __ctype, __asm_op_args, __asm_args) \\\n" " __IMPL_F1(float4, int4, __args, __asm_op \".s32.\" __ctype \"\\t\" __asm_op_args, \\\n" " __ASM_OUT(\"r\"), __asm_args) \\\n" " __IMPL_F1(float4, uint4, __args, \\\n" " __asm_op \".u32.\" __ctype \"\\t\" __asm_op_args, __ASM_OUT(\"r\"), \\\n" " __asm_args)\n" "\n" "// Instantiates __tex_fetch_v4 with regular fetch functions.\n" "#define __IMPL_S3I(__op, __args, __asm_op, __ctype, __asm_op_args, __asm_args) \\\n" " template <> struct __tex_fetch_v4<__op> { \\\n" " template \\\n" " __device__ static T __run(cudaTextureObject_t __obj, __L(__args)); \\\n" " __IMPL_F3(__args, __asm_op, __ctype, __asm_op_args, __asm_args) \\\n" " }\n" "\n" "// Same, but for sparse ops. 
Only available on sm_60+\n" "#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 600)\n" "#define __IMPL_S3SI(__op, __args, __asm_op, __ctype, __asm_op_args, \\\n" " __asm_args) \\\n" " template <> struct __tex_fetch_v4<__op> { \\\n" " template \\\n" " __device__ static T __run(cudaTextureObject_t __obj, __L(__args)); \\\n" " __IMPL_F3S(__args, __asm_op, __ctype, __asm_op_args, __asm_args) \\\n" " }\n" "#else\n" "#define __IMPL_S3SI(__op, __args, __asm_op, __ctype, __asm_op_args, __asm_args)\n" "#endif\n" "\n" "// Same, but for normalized float ops.\n" "#define __IMPL_S3NI(__op, __args, __asm_op, __ctype, __asm_op_args, \\\n" " __asm_args) \\\n" " template <> struct __tex_fetch_v4<__op> { \\\n" " template \\\n" " __device__ static float4 __run(cudaTextureObject_t __obj, __L(__args)); \\\n" " __IMPL_F3N(__args, __asm_op, __ctype, __asm_op_args, __asm_args) \\\n" " }\n" "\n" "// Regular and normalized float ops share a lot of similarities. This macro\n" "// instantiates both variants -- normal for __op and normalized for __opn.\n" "#define __IMPL_SI(__op, __opn, __args, __asm_op, __ctype, __asm_op_args, \\\n" " __asm_args) \\\n" " __IMPL_S3I(__op, __args, __asm_op, __ctype, __asm_op_args, __asm_args); \\\n" " __IMPL_S3NI(__opn, __args, __asm_op, __ctype, __asm_op_args, __asm_args)\n" "\n" "// Convenience macros which converts string literal __op into a __Tag,\n" "#define __IMPL_S3(__op, __args, __asm_op, __ctype, __asm_op_args, __asm_args) \\\n" " __IMPL_S3I(__ID(__op), __args, __asm_op, __ctype, __asm_op_args, __asm_args)\n" "#define __IMPL_S3S(__op, __args, __asm_op, __ctype, __asm_op_args, __asm_args) \\\n" " __IMPL_S3SI(__ID(__op), __args, __asm_op, __ctype, __asm_op_args, __asm_args)\n" "#define __IMPL_S3N(__op, __args, __asm_op, __ctype, __asm_op_args, __asm_args) \\\n" " __IMPL_S3NI(__ID(__op), __args, __asm_op, __ctype, __asm_op_args, __asm_args)\n" "#define __IMPL_S(__op, __opn, __args, __asm_op, __ctype, __asm_op_args, \\\n" " __asm_args) \\\n" " 
__IMPL_SI(__ID(__op), __ID(__opn), __args, __asm_op, __ctype, __asm_op_args, \\\n" " __asm_args)\n" "\n" "// CUDA headers have some 'legacy' texture oprerations that duplicate\n" "// functionality. So, we just inherit it, instead of refining a copy.\n" "#define __IMPL_ALIASI(__op, __opn) \\\n" " template <> struct __tex_fetch_v4<__op> : __tex_fetch_v4<__opn> {}\n" "#define __IMPL_ALIAS(__op, __opn) __IMPL_ALIASI(__ID(__op), __ID(__opn))\n" "\n" "// Now we can instantiate everything we need for each specific texture fetch\n" "// variant.\n" "__IMPL_S(\"__tex1D_v2\", \"__tex1D_rmnf_v2\", (float __x), \"tex.1d.v4\", \"f32\",\n" " \"{%0, %1, %2, %3}, [%4, {%5}];\", (\"f\"(__x)));\n" "__IMPL_S(\"__tex1Dfetch_v2\", \"__tex1Dfetch_rmnf_v2\", (int __x), \"tex.1d.v4\",\n" " \"s32\", \"{%0, %1, %2, %3}, [%4, {%5}];\", (\"r\"(__x)));\n" "__IMPL_ALIAS(\"__itex1D\", \"__tex1D_v2\");\n" "__IMPL_ALIAS(\"__itex1Dfetch\", \"__tex1Dfetch_v2\");\n" "\n" "__IMPL_S(\"__tex1DGrad_v2\", \"__tex1DGrad_rmnf_v2\",\n" " (float __x, float __dPdx, float __dPdy), \"tex.grad.1d.v4\", \"f32\",\n" " \"{%0, %1, %2, %3}, [%4, {%5}], {%6}, {%7};\",\n" " (\"f\"(__x), \"f\"(__dPdx), \"f\"(__dPdy)));\n" "__IMPL_ALIAS(\"__itex1DGrad\", \"__tex1DGrad_v2\");\n" "\n" "__IMPL_S(\"__tex1DLayered_v2\", \"__tex1DLayered_rmnf_v2\",\n" " (float __x, int __layer), \"tex.a1d.v4\", \"f32\",\n" " \"{%0, %1, %2, %3}, [%4, {%5, %6}];\", (\"r\"(__layer), \"f\"(__x)));\n" "__IMPL_ALIAS(\"__itex1DLayered\", \"__tex1DLayered_v2\");\n" "\n" "__IMPL_S(\"__tex1DLayeredGrad_v2\", \"__tex1DLayeredGrad_rmnf_v2\",\n" " (float __x, int __layer, float __dPdx, float __dPdy),\n" " \"tex.grad.a1d.v4\", \"f32\",\n" " \"{%0, %1, %2, %3}, [%4, {%5, %6}], {%7}, {%8};\",\n" " (\"r\"(__layer), \"f\"(__x), \"f\"(__dPdx), \"f\"(__dPdy)));\n" "__IMPL_ALIAS(\"__itex1DLayeredGrad\", \"__tex1DLayeredGrad_v2\");\n" "\n" "__IMPL_S(\"__tex1DLayeredLod_v2\", \"__tex1DLayeredLod_rmnf_v2\",\n" " (float __x, int __layer, float __level), 
\"tex.level.a1d.v4\", \"f32\",\n" " \"{%0, %1, %2, %3}, [%4, {%5, %6}], %7;\",\n" " (\"r\"(__layer), \"f\"(__x), \"f\"(__level)));\n" "__IMPL_ALIAS(\"__itex1DLayeredLod\", \"__tex1DLayeredLod_v2\");\n" "\n" "__IMPL_S(\"__tex1DLod_v2\", \"__tex1DLod_rmnf_v2\", (float __x, float __level),\n" " \"tex.level.1d.v4\", \"f32\", \"{%0, %1, %2, %3}, [%4, {%5}], %6;\",\n" " (\"f\"(__x), \"f\"(__level)));\n" "__IMPL_ALIAS(\"__itex1DLod\", \"__tex1DLod_v2\");\n" "\n" "// 2D\n" "__IMPL_S(\"__tex2D_v2\", \"__tex2D_rmnf_v2\", (float __x, float __y), \"tex.2d.v4\",\n" " \"f32\", \"{%0, %1, %2, %3}, [%4, {%5, %6}];\", (\"f\"(__x), \"f\"(__y)));\n" "__IMPL_ALIAS(\"__itex2D\", \"__tex2D_v2\");\n" "\n" "__IMPL_S3S(\"__itex2D_sparse\", (float __x, float __y, unsigned char *__ir),\n" " \"{.reg .pred %%p0;\\n\\t\"\n" " \"tex.2d.v4\",\n" " \"f32\",\n" " \"{%0, %1, %2, %3}|%%p0, [%5, {%6, %7}];\\n\\t\"\n" " \" selp.u16 %4, 1, 0, %%p0; }\",\n" " (\"f\"(__x), \"f\"(__y)));\n" "\n" "__IMPL_S(\"__tex2DGrad_v2\", \"__tex2DGrad_rmnf_v2\",\n" " (float __x, float __y, const float2 *__dPdx, const float2 *__dPdy),\n" " \"tex.grad.2d.v4\", \"f32\",\n" " \"{%0, %1, %2, %3}, [%4, {%5, %6}], {%7, %8}, {%9, %10};\",\n" " (\"f\"(__x), \"f\"(__y), \"f\"(__dPdx->x), \"f\"(__dPdx->y), \"f\"(__dPdy->x),\n" " \"f\"(__dPdy->y)));\n" "__IMPL_ALIAS(\"__itex2DGrad_v2\", \"__tex2DGrad_v2\");\n" "\n" "__IMPL_S3S(\"__itex2DGrad_sparse\",\n" " (float __x, float __y, const float2 *__dPdx, const float2 *__dPdy,\n" " unsigned char *__ir),\n" " \"{.reg .pred %%p0;\\n\\t\"\n" " \"tex.grad.2d.v4\",\n" " \"f32\",\n" " \"{%0, %1, %2, %3}|%%p0, [%5, {%6, %7}], {%8, %9}, {%10, %11};\\n\\t\"\n" " \"selp.u16 %4, 1, 0, %%p0; }\",\n" " (\"f\"(__x), \"f\"(__y), \"f\"(__dPdx->x), \"f\"(__dPdx->y), \"f\"(__dPdy->x),\n" " \"f\"(__dPdy->y)));\n" "\n" "__IMPL_S(\"__tex2DLayered_v2\", \"__tex2DLayered_rmnf_v2\",\n" " (float __x, float __y, int __layer), \"tex.a2d.v4\", \"f32\",\n" " \"{%0, %1, %2, %3}, [%4, {%5, %6, %7, %7}];\",\n" " 
(\"r\"(__layer), \"f\"(__x), \"f\"(__y)));\n" "__IMPL_ALIAS(\"__itex2DLayered\", \"__tex2DLayered_v2\");\n" "\n" "__IMPL_S3S(\"__itex2DLayered_sparse\",\n" " (float __x, float __y, int __layer, unsigned char *__ir),\n" " \"{.reg .pred %%p0;\\n\\t\"\n" " \"tex.a2d.v4\",\n" " \"f32\",\n" " \"{%0, %1, %2, %3}|%%p0, [%5, {%6, %7, %8, %8}];\\n\\t\"\n" " \"selp.u16 %4, 1, 0, %%p0; }\",\n" " (\"r\"(__layer), \"f\"(__x), \"f\"(__y)));\n" "\n" "__IMPL_S(\"__tex2DLayeredGrad_v2\", \"__tex2DLayeredGrad_rmnf_v2\",\n" " (float __x, float __y, int __layer, const float2 *__dPdx,\n" " const float2 *__dPdy),\n" " \"tex.grad.a2d.v4\", \"f32\",\n" " \"{%0, %1, %2, %3}, [%4, {%5, %6, %7, %7}], {%8, %9}, {%10, %11};\",\n" " (\"r\"(__layer), \"f\"(__x), \"f\"(__y), \"f\"(__dPdx->x), \"f\"(__dPdx->y),\n" " \"f\"(__dPdy->x), \"f\"(__dPdy->y)));\n" "__IMPL_ALIAS(\"__itex2DLayeredGrad_v2\", \"__tex2DLayeredGrad_v2\");\n" "\n" "__IMPL_S3S(\n" " \"__itex2DLayeredGrad_sparse\",\n" " (float __x, float __y, int __layer, const float2 *__dPdx,\n" " const float2 *__dPdy, unsigned char *__ir),\n" " \"{.reg .pred %%p0;\\n\\t\"\n" " \"tex.grad.a2d.v4\",\n" " \"f32\",\n" " \"{%0, %1, %2, %3}|%%p0, [%5, {%6, %7, %8, %8}], {%9, %10}, {%11, %12};\\n\\t\"\n" " \"selp.u16 %4, 1, 0, %%p0; }\",\n" " (\"r\"(__layer), \"f\"(__x), \"f\"(__y), \"f\"(__dPdx->x), \"f\"(__dPdx->y),\n" " \"f\"(__dPdy->x), \"f\"(__dPdy->y)));\n" "\n" "__IMPL_S(\"__tex2DLayeredLod_v2\", \"__tex2DLayeredLod_rmnf_v2\",\n" " (float __x, float __y, int __layer, float __level), \"tex.level.a2d.v4\",\n" " \"f32\", \"{%0, %1, %2, %3}, [%4, {%5, %6, %7, %7}], %8;\",\n" " (\"r\"(__layer), \"f\"(__x), \"f\"(__y), \"f\"(__level)));\n" "__IMPL_ALIAS(\"__itex2DLayeredLod\", \"__tex2DLayeredLod_v2\");\n" "\n" "__IMPL_S3S(\"__itex2DLayeredLod_sparse\",\n" " (float __x, float __y, int __layer, float __level,\n" " unsigned char *__ir),\n" " \"{.reg .pred %%p0;\\n\\t\"\n" " \"tex.level.a2d.v4\",\n" " \"f32\",\n" " \"{%0, %1, %2, %3}|%%p0, [%5, {%6, %7, 
%8, %8}], %9;\\n\\t\"\n" " \"selp.u16 %4, 1, 0, %%p0; }\",\n" " (\"r\"(__layer), \"f\"(__x), \"f\"(__y), \"f\"(__level)));\n" "\n" "__IMPL_S(\"__tex2DLod_v2\", \"__tex2DLod_rmnf_v2\",\n" " (float __x, float __y, float __level), \"tex.level.2d.v4\", \"f32\",\n" " \"{%0, %1, %2, %3}, [%4, {%5, %6}], %7;\",\n" " (\"f\"(__x), \"f\"(__y), \"f\"(__level)));\n" "__IMPL_ALIAS(\"__itex2DLod\", \"__tex2DLod_v2\");\n" "\n" "__IMPL_S3S(\"__itex2DLod_sparse\",\n" " (float __x, float __y, float __level, unsigned char *__ir),\n" " \"{.reg .pred %%p0;\\n\\t\"\n" " \"tex.level.2d.v4\",\n" " \"f32\",\n" " \"{%0, %1, %2, %3}|%%p0, [%5, {%6, %7}], %8;\\n\\t\"\n" " \"selp.u16 %4, 1, 0, %%p0; }\",\n" " (\"f\"(__x), \"f\"(__y), \"f\"(__level)));\n" "\n" "// 2D gather is special. Unlike other variants that translate into exactly one\n" "// asm instruction, it uses one of the four different instructions selected by\n" "// __comp. We implement each instruction variant separately, and dispatch the\n" "// right one from the manually implemented 'umbrella' fetch.\n" "#define __IMPL_2DGATHER(variant, instr) \\\n" " __IMPL_SI(__IDV(\"__tex2Dgather_v2\", variant), \\\n" " __IDV(\"__tex2Dgather_rmnf_v2\", variant), \\\n" " (float __x, float __y, int __comp), instr, \"f32\", \\\n" " \"{%0, %1, %2, %3}, [%4, {%5, %6}];\", (\"f\"(__x), \"f\"(__y))); \\\n" " __IMPL_ALIASI(__IDV(\"__itex2Dgather\", variant), \\\n" " __IDV(\"__tex2Dgather_v2\", variant)); \\\n" " __IMPL_S3SI(__IDV(\"__itex2Dgather_sparse\", variant), \\\n" " (float __x, float __y, unsigned char *__ir, int __comp), \\\n" " \"{.reg .pred %%p0;\\n\\t\" instr, \"f32\", \\\n" " \"{%0, %1, %2, %3}|%%p0, [%5, {%6, %7}];\\n\\t\" \\\n" " \"selp.u16 %4, 1, 0, %%p0; }\", \\\n" " (\"f\"(__x), \"f\"(__y)));\n" "__IMPL_2DGATHER(0, \"tld4.r.2d.v4\");\n" "__IMPL_2DGATHER(1, \"tld4.g.2d.v4\");\n" "__IMPL_2DGATHER(2, \"tld4.b.2d.v4\");\n" "__IMPL_2DGATHER(3, \"tld4.a.2d.v4\");\n" "\n" "// Umbrella dispatcher -- calls into specific 2Dgather variant.\n" 
"template <> struct __tex_fetch_v4<__ID(\"__tex2Dgather_v2\")> {\n" " template \n" " __device__ static __T __run(cudaTextureObject_t __obj, float __x, float __y,\n" " int __comp) {\n" " switch (__comp) {\n" " case 0:\n" " return __tex_fetch_v4<__IDV(\"__tex2Dgather_v2\", 0)>::__run<__T>(\n" " __obj, __x, __y, __comp);\n" " case 1:\n" " return __tex_fetch_v4<__IDV(\"__tex2Dgather_v2\", 1)>::__run<__T>(\n" " __obj, __x, __y, __comp);\n" " case 2:\n" " return __tex_fetch_v4<__IDV(\"__tex2Dgather_v2\", 2)>::__run<__T>(\n" " __obj, __x, __y, __comp);\n" " case 3:\n" " return __tex_fetch_v4<__IDV(\"__tex2Dgather_v2\", 3)>::__run<__T>(\n" " __obj, __x, __y, __comp);\n" " }\n" " }\n" "};\n" "__IMPL_ALIAS(\"__itex2Dgather\", \"__tex2Dgather_v2\");\n" "\n" "template <> struct __tex_fetch_v4<__ID(\"__tex2Dgather_rmnf_v2\")> {\n" " template \n" " __device__ static float4 __run(cudaTextureObject_t __obj, float __x,\n" " float __y, int __comp) {\n" " switch (__comp) {\n" " case 0:\n" " return __tex_fetch_v4<__IDV(\"__tex2Dgather_rmnf_v2\", 0)>::__run<__T>(\n" " __obj, __x, __y, __comp);\n" " case 1:\n" " return __tex_fetch_v4<__IDV(\"__tex2Dgather_rmnf_v2\", 1)>::__run<__T>(\n" " __obj, __x, __y, __comp);\n" " case 2:\n" " return __tex_fetch_v4<__IDV(\"__tex2Dgather_rmnf_v2\", 2)>::__run<__T>(\n" " __obj, __x, __y, __comp);\n" " case 3:\n" " return __tex_fetch_v4<__IDV(\"__tex2Dgather_rmnf_v2\", 3)>::__run<__T>(\n" " __obj, __x, __y, __comp);\n" " }\n" " }\n" "};\n" "\n" "#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 600)\n" "template <> struct __tex_fetch_v4<__ID(\"__itex2Dgather_sparse\")> {\n" " template \n" " __device__ static __T __run(cudaTextureObject_t __obj, float __x, float __y,\n" " unsigned char *__ir, int __comp) {\n" " switch (__comp) {\n" " case 0:\n" " return __tex_fetch_v4<__IDV(\"__itex2Dgather_sparse\", 0)>::__run<__T>(\n" " __obj, __x, __y, __ir, __comp);\n" " case 1:\n" " return __tex_fetch_v4<__IDV(\"__itex2Dgather_sparse\", 1)>::__run<__T>(\n" " __obj, 
__x, __y, __ir, __comp);\n" " case 2:\n" " return __tex_fetch_v4<__IDV(\"__itex2Dgather_sparse\", 2)>::__run<__T>(\n" " __obj, __x, __y, __ir, __comp);\n" " case 3:\n" " return __tex_fetch_v4<__IDV(\"__itex2Dgather_sparse\", 3)>::__run<__T>(\n" " __obj, __x, __y, __ir, __comp);\n" " }\n" " }\n" "};\n" "#endif\n" "\n" "// 3D\n" "__IMPL_S(\"__tex3D_v2\", \"__tex3D_rmnf_v2\", (float __x, float __y, float __z),\n" " \"tex.3d.v4\", \"f32\", \"{%0, %1, %2, %3}, [%4, {%5, %6, %7, %7}];\",\n" " (\"f\"(__x), \"f\"(__y), \"f\"(__z)));\n" "__IMPL_ALIAS(\"__itex3D\", \"__tex3D_v2\");\n" "\n" "__IMPL_S3S(\"__itex3D_sparse\",\n" " (float __x, float __y, float __z, unsigned char *__ir),\n" " \"{.reg .pred %%p0;\\n\\t\"\n" " \"tex.3d.v4\",\n" " \"f32\",\n" " \"{%0, %1, %2, %3}|%%p0, [%5, {%6, %7, %8, %8}];\\n\\t\"\n" " \"selp.u16 %4, 1, 0, %%p0; }\",\n" " (\"f\"(__x), \"f\"(__y), \"f\"(__z)));\n" "\n" "__IMPL_S(\"__tex3DGrad_v2\", \"__tex3DGrad_rmnf_v2\",\n" " (float __x, float __y, float __z, const float4 *__dPdx,\n" " const float4 *__dPdy),\n" " \"tex.grad.3d.v4\", \"f32\",\n" " \"{%0, %1, %2, %3}, [%4, {%5, %6, %7, %7}], \"\n" " \"{%8, %9, %10, %10}, {%11, %12, %13, %13};\",\n" " (\"f\"(__x), \"f\"(__y), \"f\"(__z), \"f\"(__dPdx->x), \"f\"(__dPdx->y),\n" " \"f\"(__dPdx->z), \"f\"(__dPdy->x), \"f\"(__dPdy->y), \"f\"(__dPdy->z)));\n" "__IMPL_ALIAS(\"__itex3DGrad_v2\", \"__tex3DGrad_v2\");\n" "\n" "__IMPL_S3S(\"__itex3DGrad_sparse\",\n" " (float __x, float __y, float __z, const float4 *__dPdx,\n" " const float4 *__dPdy, unsigned char *__ir),\n" " \"{.reg .pred %%p0;\\n\\t\"\n" " \"tex.grad.3d.v4\",\n" " \"f32\",\n" " \"{%0, %1, %2, %3}|%%p0, [%5, {%6, %7, %8, %8}], \"\n" " \"{%9, %10, %11, %11}, {%12, %13, %14, %14};\\n\\t\"\n" " \"selp.u16 %4, 1, 0, %%p0; }\",\n" " (\"f\"(__x), \"f\"(__y), \"f\"(__z), \"f\"(__dPdx->x), \"f\"(__dPdx->y),\n" " \"f\"(__dPdx->z), \"f\"(__dPdy->x), \"f\"(__dPdy->y), \"f\"(__dPdy->z)));\n" "\n" "__IMPL_S(\"__tex3DLod_v2\", \"__tex3DLod_rmnf_v2\",\n" " 
(float __x, float __y, float __z, float __level), \"tex.level.3d.v4\",\n" " \"f32\", \"{%0, %1, %2, %3}, [%4, {%5, %6, %7, %7}], %8;\",\n" " (\"f\"(__x), \"f\"(__y), \"f\"(__z), \"f\"(__level)));\n" "__IMPL_ALIAS(\"__itex3DLod\", \"__tex3DLod_v2\");\n" "\n" "__IMPL_S3S(\"__itex3DLod_sparse\",\n" " (float __x, float __y, float __z, float __level,\n" " unsigned char *__ir),\n" " \"{.reg .pred %%p0;\\n\\t\"\n" " \"tex.level.3d.v4\",\n" " \"f32\",\n" " \"{%0, %1, %2, %3}|%%p0, [%5, {%6, %7, %8, %8}], %9;\\n\\t\"\n" " \"selp.u16 %4, 1, 0, %%p0; }\",\n" " (\"f\"(__x), \"f\"(__y), \"f\"(__z), \"f\"(__level)));\n" "\n" "// Cubemap\n" "__IMPL_S(\"__texCubemap_v2\", \"__texCubemap_rmnf_v2\",\n" " (float __x, float __y, float __z), \"tex.cube.v4\", \"f32\",\n" " \"{%0, %1, %2, %3}, [%4, {%5, %6, %7, %7}];\",\n" " (\"f\"(__x), \"f\"(__y), \"f\"(__z)));\n" "__IMPL_ALIAS(\"__itexCubemap\", \"__texCubemap_v2\");\n" "\n" "__IMPL_S3S(\"__itexCubemap_sparse\",\n" " (float __x, float __y, float __z, unsigned char *__ir),\n" " \"{.reg .pred %%p0;\\n\\t\"\n" " \"tex.cube.v4\",\n" " \"f32\",\n" " \"{%0, %1, %2, %3}|%%p0, [%5, {%6, %7, %8, %8}];\\n\\t\"\n" " \"selp.u16 %4, 1, 0, %%p0; }\",\n" " (\"f\"(__x), \"f\"(__y), \"f\"(__z)));\n" "\n" "__IMPL_S(\"__texCubemapGrad_v2\", \"__texCubemapGrad_rmnf_v2\",\n" " (float __x, float __y, float __z, const float4 *__dPdx,\n" " const float4 *__dPdy),\n" " \"tex.grad.cube.v4\", \"f32\",\n" " \"{%0, %1, %2, %3}, [%4, {%5, %6, %7, %7}], \"\n" " \"{%8, %9, %10, %10}, {%11, %12, %13, %13};\",\n" " (\"f\"(__x), \"f\"(__y), \"f\"(__z), \"f\"(__dPdx->x), \"f\"(__dPdx->y),\n" " \"f\"(__dPdx->z), \"f\"(__dPdy->x), \"f\"(__dPdy->y), \"f\"(__dPdy->z)));\n" "__IMPL_ALIAS(\"__itexCubemapGrad_v2\", \"__texCubemapGrad_v2\");\n" "\n" "__IMPL_S(\"__texCubemapLayered_v2\", \"__texCubemapLayered_rmnf_v2\",\n" " (float __x, float __y, float __z, int __layer), \"tex.acube.v4\", \"f32\",\n" " \"{%0, %1, %2, %3}, [%4, {%5, %6, %7, %8}];\",\n" " (\"r\"(__layer), 
\"f\"(__x), \"f\"(__y), \"f\"(__z)));\n" "__IMPL_ALIAS(\"__itexCubemapLayered\", \"__texCubemapLayered_v2\");\n" "\n" "__IMPL_S(\"__texCubemapLayeredGrad_v2\", \"__texCubemapLayeredGrad_rmnf_v2\",\n" " (float __x, float __y, float __z, int __layer, const float4 *__dPdx,\n" " const float4 *__dPdy),\n" " \"tex.grad.acube.v4\", \"f32\",\n" " \"{%0, %1, %2, %3}, [%4, {%5, %6, %7, %8}], \"\n" " \"{%9, %10, %11, %11}, {%12, %13, %14, %14};\",\n" " (\"r\"(__layer), \"f\"(__x), \"f\"(__y), \"f\"(__z), \"f\"(__dPdx->x),\n" " \"f\"(__dPdx->y), \"f\"(__dPdx->z), \"f\"(__dPdy->x), \"f\"(__dPdy->y),\n" " \"f\"(__dPdy->z)));\n" "__IMPL_ALIAS(\"__itexCubemapLayeredGrad_v2\", \"__texCubemapLayeredGrad_v2\");\n" "\n" "__IMPL_S(\"__texCubemapLayeredLod_v2\", \"__texCubemapLayeredLod_rmnf_v2\",\n" " (float __x, float __y, float __z, int __layer, float __level),\n" " \"tex.level.acube.v4\", \"f32\",\n" " \"{%0, %1, %2, %3}, [%4, {%5, %6, %7, %8}], %9;\",\n" " (\"r\"(__layer), \"f\"(__x), \"f\"(__y), \"f\"(__z), \"f\"(__level)));\n" "__IMPL_ALIAS(\"__itexCubemapLayeredLod\", \"__texCubemapLayeredLod_v2\");\n" "\n" "__IMPL_S(\"__texCubemapLod_v2\", \"__texCubemapLod_rmnf_v2\",\n" " (float __x, float __y, float __z, float __level), \"tex.level.cube.v4\",\n" " \"f32\", \"{%0, %1, %2, %3}, [%4, {%5, %6, %7, %7}], %8;\",\n" " (\"f\"(__x), \"f\"(__y), \"f\"(__z), \"f\"(__level)));\n" "__IMPL_ALIAS(\"__itexCubemapLod\", \"__texCubemapLod_v2\");\n" "\n" "// Helper class for extracting slice of data from V4 fetch results.\n" "template struct __convert {\n" " template ::__base_t)>\n" " __device__ static __DestT __run(__SrcT __v);\n" " template <> __device__ static __DestT __run<1>(__SrcT __v) { return {__v.x}; }\n" " template <> __device__ static __DestT __run<2>(__SrcT __v) {\n" " return {__v.x, __v.y};\n" " }\n" " template <> __device__ static __DestT __run<3>(__SrcT __v) {\n" " return {__v.x, __v.y, __v.z};\n" " }\n" " template <> __device__ static __DestT __run<4>(__SrcT __v) {\n" " return 
{__v.x, __v.y, __v.z, __v.w};\n" " }\n" "};\n" "\n" "// These are the top-level function overloads the __nv_tex_surf_handler expands\n" "// to. Each overload deals with one of the several ways __nv_tex_surf_handler\n" "// is called by CUDA headers. In the end, each of the overloads does the same\n" "// job -- it figures out which `__tex_fetch_v4::run` variant should be used to\n" "// fetch texture data and which `__convert::run` is needed to convert it into\n" "// appropriate return type.\n" "\n" "// __nv_tex_surf_handler(\"__tex...\", &ret, cudaTextureObject_t handle, args...);\n" "// Data type and return type are based on ret.\n" "template \n" "__device__ static void __tex_fetch(__T *__ptr, cudaTextureObject_t __handle,\n" " __Args... __args) {\n" " using __FetchT = typename __TypeInfoT<__T>::__fetch_t;\n" " *__ptr = __convert<__T, __FetchT>::__run(\n" " __tex_fetch_v4<__op>::template __run<__FetchT>(__handle, __args...));\n" "}\n" "\n" "#if CUDA_VERSION < 12000\n" "// texture<> objects get magically converted into a texture reference. However,\n" "// there's no way to convert them to cudaTextureObject_t on C++ level. So, we\n" "// cheat a bit and use inline assembly to do it. It costs us an extra register\n" "// and a move, but that is easy for ptxas to optimize away.\n" "template \n" "__device__ cudaTextureObject_t __tex_handle_to_obj(__T __handle) {\n" " cudaTextureObject_t __obj;\n" " asm(\"mov.b64 %0, %1; \" : \"=l\"(__obj) : \"l\"(__handle));\n" " return __obj;\n" "}\n" "\n" "// __nv_tex_surf_handler (\"__tex...\", &ret, textureReference, args...);\n" "// Data type and return type is based on ret.\n" "template \n" "__device__ static void __tex_fetch(__T *__ptr, __HandleT __handle,\n" " __Args... 
__args) {\n" " using __FetchT = typename __TypeInfoT<__T>::__fetch_t;\n" " *__ptr = __convert<__T, __FetchT>::__run(\n" " __tex_fetch_v4<__op>::template __run<__FetchT>(\n" " __tex_handle_to_obj(__handle), __args...));\n" "}\n" "\n" "// __nv_tex_surf_handler (\"__tex...\", &type_dummy, &ret, texture<...>, args...);\n" "// cudaReadModeNormalizedFloat fetches always return float4.\n" "template \n" "__device__ static void\n" "__tex_fetch(__DataT *, __RetT *__ptr,\n" " texture<__DataT, __TexT, cudaReadModeNormalizedFloat> __handle,\n" " __Args... __args) {\n" " using __FetchT = typename __TypeInfoT<__DataT>::__fetch_t;\n" " *__ptr = __convert<__RetT, float4>::__run(\n" " __tex_fetch_v4<__op>::template __run<__FetchT>(\n" " __tex_handle_to_obj(__handle), __args...));\n" "}\n" "\n" "// __nv_tex_surf_handler (\"__tex...\", &type_dummy, &ret, texture<...>, args...);\n" "// For cudaReadModeElementType fetch return type is based on type_dummy.\n" "template \n" "__device__ static void\n" "__tex_fetch(__DataT *, __RetT *__ptr,\n" " texture<__DataT, __TexT, cudaReadModeElementType> __handle,\n" " __Args... 
__args) {\n" " using __FetchT = typename __TypeInfoT<__DataT>::__fetch_t;\n" " *__ptr = __convert<__RetT, __FetchT>::__run(\n" " __tex_fetch_v4<__op>::template __run<__FetchT>(\n" " __tex_handle_to_obj(__handle), __args...));\n" "}\n" "#endif // CUDA_VERSION\n" "} // namespace __cuda_tex\n" "} // namespace\n" "#pragma pop_macro(\"__ASM_OUT\")\n" "#pragma pop_macro(\"__ASM_OUTP\")\n" "#pragma pop_macro(\"__Args\")\n" "#pragma pop_macro(\"__ID\")\n" "#pragma pop_macro(\"__IDV\")\n" "#pragma pop_macro(\"__IMPL_2DGATHER\")\n" "#pragma pop_macro(\"__IMPL_ALIAS\")\n" "#pragma pop_macro(\"__IMPL_ALIASI\")\n" "#pragma pop_macro(\"__IMPL_F1\")\n" "#pragma pop_macro(\"__IMPL_F3\")\n" "#pragma pop_macro(\"__IMPL_F3N\")\n" "#pragma pop_macro(\"__IMPL_F3S\")\n" "#pragma pop_macro(\"__IMPL_S\")\n" "#pragma pop_macro(\"__IMPL_S3\")\n" "#pragma pop_macro(\"__IMPL_S3I\")\n" "#pragma pop_macro(\"__IMPL_S3N\")\n" "#pragma pop_macro(\"__IMPL_S3NI\")\n" "#pragma pop_macro(\"__IMPL_S3S\")\n" "#pragma pop_macro(\"__IMPL_S3SI\")\n" "#pragma pop_macro(\"__IMPL_SI\")\n" "#pragma pop_macro(\"__L\")\n" "#pragma pop_macro(\"__STRIP_PARENS\")\n" "#endif // __CLANG_CUDA_TEXTURE_INTRINSICS_H__\n" "" } , { "/builtins/__clang_hip_cmath.h" , "/*===---- __clang_hip_cmath.h - HIP cmath decls -----------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __CLANG_HIP_CMATH_H__\n" "#define __CLANG_HIP_CMATH_H__\n" "\n" "#if !defined(__HIP__) && !defined(__OPENMP_AMDGCN__)\n" "#error \"This file is for HIP and OpenMP AMDGCN device compilation only.\"\n" "#endif\n" "\n" "#if !defined(__HIPCC_RTC__)\n" "#if defined(__cplusplus)\n" "#include \n" "#include \n" "#include \n" "#endif\n" "#include \n" "#include 
\n" "#endif // !defined(__HIPCC_RTC__)\n" "\n" "#pragma push_macro(\"__DEVICE__\")\n" "#pragma push_macro(\"__CONSTEXPR__\")\n" "#ifdef __OPENMP_AMDGCN__\n" "#define __DEVICE__ static __attribute__((always_inline, nothrow))\n" "#define __CONSTEXPR__ constexpr\n" "#else\n" "#define __DEVICE__ static __device__ inline __attribute__((always_inline))\n" "#define __CONSTEXPR__\n" "#endif // __OPENMP_AMDGCN__\n" "\n" "// Start with functions that cannot be defined by DEF macros below.\n" "#if defined(__cplusplus)\n" "#if defined __OPENMP_AMDGCN__\n" "__DEVICE__ __CONSTEXPR__ float fabs(float __x) { return ::fabsf(__x); }\n" "__DEVICE__ __CONSTEXPR__ float sin(float __x) { return ::sinf(__x); }\n" "__DEVICE__ __CONSTEXPR__ float cos(float __x) { return ::cosf(__x); }\n" "#endif\n" "__DEVICE__ __CONSTEXPR__ double abs(double __x) { return ::fabs(__x); }\n" "__DEVICE__ __CONSTEXPR__ float abs(float __x) { return ::fabsf(__x); }\n" "__DEVICE__ __CONSTEXPR__ long long abs(long long __n) { return ::llabs(__n); }\n" "__DEVICE__ __CONSTEXPR__ long abs(long __n) { return ::labs(__n); }\n" "__DEVICE__ __CONSTEXPR__ float fma(float __x, float __y, float __z) {\n" " return ::fmaf(__x, __y, __z);\n" "}\n" "#if !defined(__HIPCC_RTC__)\n" "// The value returned by fpclassify is platform dependent, therefore it is not\n" "// supported by hipRTC.\n" "__DEVICE__ __CONSTEXPR__ int fpclassify(float __x) {\n" " return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,\n" " FP_ZERO, __x);\n" "}\n" "__DEVICE__ __CONSTEXPR__ int fpclassify(double __x) {\n" " return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,\n" " FP_ZERO, __x);\n" "}\n" "#endif // !defined(__HIPCC_RTC__)\n" "\n" "__DEVICE__ __CONSTEXPR__ float frexp(float __arg, int *__exp) {\n" " return ::frexpf(__arg, __exp);\n" "}\n" "\n" "#if defined(__OPENMP_AMDGCN__)\n" "// For OpenMP we work around some old system headers that have non-conforming\n" "// `isinf(float)` and `isnan(float)` 
implementations that return an `int`. We do\n" "// this by providing two versions of these functions, differing only in the\n" "// return type. To avoid conflicting definitions we disable implicit base\n" "// function generation. That means we will end up with two specializations, one\n" "// per type, but only one has a base function defined by the system header.\n" "#pragma omp begin declare variant match( \\\n" " implementation = {extension(disable_implicit_base)})\n" "\n" "// FIXME: We lack an extension to customize the mangling of the variants, e.g.,\n" "// add a suffix. This means we would clash with the names of the variants\n" "// (note that we do not create implicit base functions here). To avoid\n" "// this clash we add a new trait to some of them that is always true\n" "// (this is LLVM after all ;)). It will only influence the mangled name\n" "// of the variants inside the inner region and avoid the clash.\n" "#pragma omp begin declare variant match(implementation = {vendor(llvm)})\n" "\n" "__DEVICE__ __CONSTEXPR__ int isinf(float __x) { return ::__isinff(__x); }\n" "__DEVICE__ __CONSTEXPR__ int isinf(double __x) { return ::__isinf(__x); }\n" "__DEVICE__ __CONSTEXPR__ int isfinite(float __x) { return ::__finitef(__x); }\n" "__DEVICE__ __CONSTEXPR__ int isfinite(double __x) { return ::__finite(__x); }\n" "__DEVICE__ __CONSTEXPR__ int isnan(float __x) { return ::__isnanf(__x); }\n" "__DEVICE__ __CONSTEXPR__ int isnan(double __x) { return ::__isnan(__x); }\n" "\n" "#pragma omp end declare variant\n" "#endif // defined(__OPENMP_AMDGCN__)\n" "\n" "__DEVICE__ __CONSTEXPR__ bool isinf(float __x) { return ::__isinff(__x); }\n" "__DEVICE__ __CONSTEXPR__ bool isinf(double __x) { return ::__isinf(__x); }\n" "__DEVICE__ __CONSTEXPR__ bool isfinite(float __x) { return ::__finitef(__x); }\n" "__DEVICE__ __CONSTEXPR__ bool isfinite(double __x) { return ::__finite(__x); }\n" "__DEVICE__ __CONSTEXPR__ bool isnan(float __x) { return ::__isnanf(__x); }\n" "__DEVICE__ 
__CONSTEXPR__ bool isnan(double __x) { return ::__isnan(__x); }\n" "\n" "#if defined(__OPENMP_AMDGCN__)\n" "#pragma omp end declare variant\n" "#endif // defined(__OPENMP_AMDGCN__)\n" "\n" "__DEVICE__ __CONSTEXPR__ bool isgreater(float __x, float __y) {\n" " return __builtin_isgreater(__x, __y);\n" "}\n" "__DEVICE__ __CONSTEXPR__ bool isgreater(double __x, double __y) {\n" " return __builtin_isgreater(__x, __y);\n" "}\n" "__DEVICE__ __CONSTEXPR__ bool isgreaterequal(float __x, float __y) {\n" " return __builtin_isgreaterequal(__x, __y);\n" "}\n" "__DEVICE__ __CONSTEXPR__ bool isgreaterequal(double __x, double __y) {\n" " return __builtin_isgreaterequal(__x, __y);\n" "}\n" "__DEVICE__ __CONSTEXPR__ bool isless(float __x, float __y) {\n" " return __builtin_isless(__x, __y);\n" "}\n" "__DEVICE__ __CONSTEXPR__ bool isless(double __x, double __y) {\n" " return __builtin_isless(__x, __y);\n" "}\n" "__DEVICE__ __CONSTEXPR__ bool islessequal(float __x, float __y) {\n" " return __builtin_islessequal(__x, __y);\n" "}\n" "__DEVICE__ __CONSTEXPR__ bool islessequal(double __x, double __y) {\n" " return __builtin_islessequal(__x, __y);\n" "}\n" "__DEVICE__ __CONSTEXPR__ bool islessgreater(float __x, float __y) {\n" " return __builtin_islessgreater(__x, __y);\n" "}\n" "__DEVICE__ __CONSTEXPR__ bool islessgreater(double __x, double __y) {\n" " return __builtin_islessgreater(__x, __y);\n" "}\n" "__DEVICE__ __CONSTEXPR__ bool isnormal(float __x) {\n" " return __builtin_isnormal(__x);\n" "}\n" "__DEVICE__ __CONSTEXPR__ bool isnormal(double __x) {\n" " return __builtin_isnormal(__x);\n" "}\n" "__DEVICE__ __CONSTEXPR__ bool isunordered(float __x, float __y) {\n" " return __builtin_isunordered(__x, __y);\n" "}\n" "__DEVICE__ __CONSTEXPR__ bool isunordered(double __x, double __y) {\n" " return __builtin_isunordered(__x, __y);\n" "}\n" "__DEVICE__ __CONSTEXPR__ float modf(float __x, float *__iptr) {\n" " return ::modff(__x, __iptr);\n" "}\n" "__DEVICE__ __CONSTEXPR__ float pow(float 
__base, int __iexp) {\n" " return ::powif(__base, __iexp);\n" "}\n" "__DEVICE__ __CONSTEXPR__ double pow(double __base, int __iexp) {\n" " return ::powi(__base, __iexp);\n" "}\n" "__DEVICE__ __CONSTEXPR__ float remquo(float __x, float __y, int *__quo) {\n" " return ::remquof(__x, __y, __quo);\n" "}\n" "__DEVICE__ __CONSTEXPR__ float scalbln(float __x, long int __n) {\n" " return ::scalblnf(__x, __n);\n" "}\n" "__DEVICE__ __CONSTEXPR__ bool signbit(float __x) { return ::__signbitf(__x); }\n" "__DEVICE__ __CONSTEXPR__ bool signbit(double __x) { return ::__signbit(__x); }\n" "\n" "// Notably missing above is nexttoward. We omit it because\n" "// ocml doesn't provide an implementation, and we don't want to be in the\n" "// business of implementing tricky libm functions in this header.\n" "\n" "// Other functions.\n" "__DEVICE__ __CONSTEXPR__ _Float16 fma(_Float16 __x, _Float16 __y,\n" " _Float16 __z) {\n" " return __builtin_fmaf16(__x, __y, __z);\n" "}\n" "__DEVICE__ __CONSTEXPR__ _Float16 pow(_Float16 __base, int __iexp) {\n" " return __ocml_pown_f16(__base, __iexp);\n" "}\n" "\n" "#ifndef __OPENMP_AMDGCN__\n" "// BEGIN DEF_FUN and HIP_OVERLOAD\n" "\n" "// BEGIN DEF_FUN\n" "\n" "#pragma push_macro(\"__DEF_FUN1\")\n" "#pragma push_macro(\"__DEF_FUN2\")\n" "#pragma push_macro(\"__DEF_FUN2_FI\")\n" "\n" "// Define cmath functions with float argument and returns __retty.\n" "#define __DEF_FUN1(__retty, __func) \\\n" " __DEVICE__ __CONSTEXPR__ __retty __func(float __x) { return __func##f(__x); }\n" "\n" "// Define cmath functions with two float arguments and returns __retty.\n" "#define __DEF_FUN2(__retty, __func) \\\n" " __DEVICE__ __CONSTEXPR__ __retty __func(float __x, float __y) { \\\n" " return __func##f(__x, __y); \\\n" " }\n" "\n" "// Define cmath functions with a float and an int argument and returns __retty.\n" "#define __DEF_FUN2_FI(__retty, __func) \\\n" " __DEVICE__ __CONSTEXPR__ __retty __func(float __x, int __y) { \\\n" " return __func##f(__x, __y); \\\n" " 
}\n" "\n" "__DEF_FUN1(float, acos)\n" "__DEF_FUN1(float, acosh)\n" "__DEF_FUN1(float, asin)\n" "__DEF_FUN1(float, asinh)\n" "__DEF_FUN1(float, atan)\n" "__DEF_FUN2(float, atan2)\n" "__DEF_FUN1(float, atanh)\n" "__DEF_FUN1(float, cbrt)\n" "__DEF_FUN1(float, ceil)\n" "__DEF_FUN2(float, copysign)\n" "__DEF_FUN1(float, cos)\n" "__DEF_FUN1(float, cosh)\n" "__DEF_FUN1(float, erf)\n" "__DEF_FUN1(float, erfc)\n" "__DEF_FUN1(float, exp)\n" "__DEF_FUN1(float, exp2)\n" "__DEF_FUN1(float, expm1)\n" "__DEF_FUN1(float, fabs)\n" "__DEF_FUN2(float, fdim)\n" "__DEF_FUN1(float, floor)\n" "__DEF_FUN2(float, fmax)\n" "__DEF_FUN2(float, fmin)\n" "__DEF_FUN2(float, fmod)\n" "__DEF_FUN2(float, hypot)\n" "__DEF_FUN1(int, ilogb)\n" "__DEF_FUN2_FI(float, ldexp)\n" "__DEF_FUN1(float, lgamma)\n" "__DEF_FUN1(float, log)\n" "__DEF_FUN1(float, log10)\n" "__DEF_FUN1(float, log1p)\n" "__DEF_FUN1(float, log2)\n" "__DEF_FUN1(float, logb)\n" "__DEF_FUN1(long long, llrint)\n" "__DEF_FUN1(long long, llround)\n" "__DEF_FUN1(long, lrint)\n" "__DEF_FUN1(long, lround)\n" "__DEF_FUN1(float, nearbyint)\n" "__DEF_FUN2(float, nextafter)\n" "__DEF_FUN2(float, pow)\n" "__DEF_FUN2(float, remainder)\n" "__DEF_FUN1(float, rint)\n" "__DEF_FUN1(float, round)\n" "__DEF_FUN2_FI(float, scalbn)\n" "__DEF_FUN1(float, sin)\n" "__DEF_FUN1(float, sinh)\n" "__DEF_FUN1(float, sqrt)\n" "__DEF_FUN1(float, tan)\n" "__DEF_FUN1(float, tanh)\n" "__DEF_FUN1(float, tgamma)\n" "__DEF_FUN1(float, trunc)\n" "\n" "#pragma pop_macro(\"__DEF_FUN1\")\n" "#pragma pop_macro(\"__DEF_FUN2\")\n" "#pragma pop_macro(\"__DEF_FUN2_FI\")\n" "\n" "// END DEF_FUN\n" "\n" "// BEGIN HIP_OVERLOAD\n" "\n" "#pragma push_macro(\"__HIP_OVERLOAD1\")\n" "#pragma push_macro(\"__HIP_OVERLOAD2\")\n" "\n" "// __hip_enable_if::type is a type function which returns __T if __B is true.\n" "template struct __hip_enable_if {};\n" "\n" "template struct __hip_enable_if { typedef __T type; };\n" "\n" "namespace __hip {\n" "template struct is_integral {\n" " enum { value = 0 
};\n" "};\n" "template <> struct is_integral {\n" " enum { value = 1 };\n" "};\n" "template <> struct is_integral {\n" " enum { value = 1 };\n" "};\n" "template <> struct is_integral {\n" " enum { value = 1 };\n" "};\n" "template <> struct is_integral {\n" " enum { value = 1 };\n" "};\n" "template <> struct is_integral {\n" " enum { value = 1 };\n" "};\n" "template <> struct is_integral {\n" " enum { value = 1 };\n" "};\n" "template <> struct is_integral {\n" " enum { value = 1 };\n" "};\n" "template <> struct is_integral {\n" " enum { value = 1 };\n" "};\n" "template <> struct is_integral {\n" " enum { value = 1 };\n" "};\n" "template <> struct is_integral {\n" " enum { value = 1 };\n" "};\n" "template <> struct is_integral {\n" " enum { value = 1 };\n" "};\n" "template <> struct is_integral {\n" " enum { value = 1 };\n" "};\n" "template <> struct is_integral {\n" " enum { value = 1 };\n" "};\n" "\n" "// ToDo: specializes is_arithmetic<_Float16>\n" "template struct is_arithmetic {\n" " enum { value = 0 };\n" "};\n" "template <> struct is_arithmetic {\n" " enum { value = 1 };\n" "};\n" "template <> struct is_arithmetic {\n" " enum { value = 1 };\n" "};\n" "template <> struct is_arithmetic {\n" " enum { value = 1 };\n" "};\n" "template <> struct is_arithmetic {\n" " enum { value = 1 };\n" "};\n" "template <> struct is_arithmetic {\n" " enum { value = 1 };\n" "};\n" "template <> struct is_arithmetic {\n" " enum { value = 1 };\n" "};\n" "template <> struct is_arithmetic {\n" " enum { value = 1 };\n" "};\n" "template <> struct is_arithmetic {\n" " enum { value = 1 };\n" "};\n" "template <> struct is_arithmetic {\n" " enum { value = 1 };\n" "};\n" "template <> struct is_arithmetic {\n" " enum { value = 1 };\n" "};\n" "template <> struct is_arithmetic {\n" " enum { value = 1 };\n" "};\n" "template <> struct is_arithmetic {\n" " enum { value = 1 };\n" "};\n" "template <> struct is_arithmetic {\n" " enum { value = 1 };\n" "};\n" "template <> struct is_arithmetic {\n" " 
enum { value = 1 };\n" "};\n" "template <> struct is_arithmetic {\n" " enum { value = 1 };\n" "};\n" "\n" "struct true_type {\n" " static const __constant__ bool value = true;\n" "};\n" "struct false_type {\n" " static const __constant__ bool value = false;\n" "};\n" "\n" "template struct is_same : public false_type {};\n" "template struct is_same<__T, __T> : public true_type {};\n" "\n" "template struct add_rvalue_reference { typedef __T &&type; };\n" "\n" "template typename add_rvalue_reference<__T>::type declval();\n" "\n" "// decltype is only available in C++11 and above.\n" "#if __cplusplus >= 201103L\n" "// __hip_promote\n" "template struct __numeric_type {\n" " static void __test(...);\n" " static _Float16 __test(_Float16);\n" " static float __test(float);\n" " static double __test(char);\n" " static double __test(int);\n" " static double __test(unsigned);\n" " static double __test(long);\n" " static double __test(unsigned long);\n" " static double __test(long long);\n" " static double __test(unsigned long long);\n" " static double __test(double);\n" " // No support for long double, use double instead.\n" " static double __test(long double);\n" "\n" " typedef decltype(__test(declval<_Tp>())) type;\n" " static const bool value = !is_same::value;\n" "};\n" "\n" "template <> struct __numeric_type { static const bool value = true; };\n" "\n" "template ::value &&__numeric_type<_A2>::value\n" " &&__numeric_type<_A3>::value>\n" "class __promote_imp {\n" "public:\n" " static const bool value = false;\n" "};\n" "\n" "template \n" "class __promote_imp<_A1, _A2, _A3, true> {\n" "private:\n" " typedef typename __promote_imp<_A1>::type __type1;\n" " typedef typename __promote_imp<_A2>::type __type2;\n" " typedef typename __promote_imp<_A3>::type __type3;\n" "\n" "public:\n" " typedef decltype(__type1() + __type2() + __type3()) type;\n" " static const bool value = true;\n" "};\n" "\n" "template class __promote_imp<_A1, _A2, void, true> {\n" "private:\n" " typedef typename 
__promote_imp<_A1>::type __type1;\n" " typedef typename __promote_imp<_A2>::type __type2;\n" "\n" "public:\n" " typedef decltype(__type1() + __type2()) type;\n" " static const bool value = true;\n" "};\n" "\n" "template class __promote_imp<_A1, void, void, true> {\n" "public:\n" " typedef typename __numeric_type<_A1>::type type;\n" " static const bool value = true;\n" "};\n" "\n" "template \n" "class __promote : public __promote_imp<_A1, _A2, _A3> {};\n" "#endif //__cplusplus >= 201103L\n" "} // namespace __hip\n" "\n" "// __HIP_OVERLOAD1 is used to resolve function calls with integer argument to\n" "// avoid compilation error due to ambibuity. e.g. floor(5) is resolved with\n" "// floor(double).\n" "#define __HIP_OVERLOAD1(__retty, __fn) \\\n" " template \\\n" " __DEVICE__ __CONSTEXPR__ \\\n" " typename __hip_enable_if<__hip::is_integral<__T>::value, __retty>::type \\\n" " __fn(__T __x) { \\\n" " return ::__fn((double)__x); \\\n" " }\n" "\n" "// __HIP_OVERLOAD2 is used to resolve function calls with mixed float/double\n" "// or integer argument to avoid compilation error due to ambibuity. 
e.g.\n" "// max(5.0f, 6.0) is resolved with max(double, double).\n" "#if __cplusplus >= 201103L\n" "#define __HIP_OVERLOAD2(__retty, __fn) \\\n" " template \\\n" " __DEVICE__ __CONSTEXPR__ typename __hip_enable_if< \\\n" " __hip::is_arithmetic<__T1>::value && __hip::is_arithmetic<__T2>::value, \\\n" " typename __hip::__promote<__T1, __T2>::type>::type \\\n" " __fn(__T1 __x, __T2 __y) { \\\n" " typedef typename __hip::__promote<__T1, __T2>::type __result_type; \\\n" " return __fn((__result_type)__x, (__result_type)__y); \\\n" " }\n" "#else\n" "#define __HIP_OVERLOAD2(__retty, __fn) \\\n" " template \\\n" " __DEVICE__ __CONSTEXPR__ \\\n" " typename __hip_enable_if<__hip::is_arithmetic<__T1>::value && \\\n" " __hip::is_arithmetic<__T2>::value, \\\n" " __retty>::type \\\n" " __fn(__T1 __x, __T2 __y) { \\\n" " return __fn((double)__x, (double)__y); \\\n" " }\n" "#endif\n" "\n" "__HIP_OVERLOAD1(double, acos)\n" "__HIP_OVERLOAD1(double, acosh)\n" "__HIP_OVERLOAD1(double, asin)\n" "__HIP_OVERLOAD1(double, asinh)\n" "__HIP_OVERLOAD1(double, atan)\n" "__HIP_OVERLOAD2(double, atan2)\n" "__HIP_OVERLOAD1(double, atanh)\n" "__HIP_OVERLOAD1(double, cbrt)\n" "__HIP_OVERLOAD1(double, ceil)\n" "__HIP_OVERLOAD2(double, copysign)\n" "__HIP_OVERLOAD1(double, cos)\n" "__HIP_OVERLOAD1(double, cosh)\n" "__HIP_OVERLOAD1(double, erf)\n" "__HIP_OVERLOAD1(double, erfc)\n" "__HIP_OVERLOAD1(double, exp)\n" "__HIP_OVERLOAD1(double, exp2)\n" "__HIP_OVERLOAD1(double, expm1)\n" "__HIP_OVERLOAD1(double, fabs)\n" "__HIP_OVERLOAD2(double, fdim)\n" "__HIP_OVERLOAD1(double, floor)\n" "__HIP_OVERLOAD2(double, fmax)\n" "__HIP_OVERLOAD2(double, fmin)\n" "__HIP_OVERLOAD2(double, fmod)\n" "#if !defined(__HIPCC_RTC__)\n" "__HIP_OVERLOAD1(int, fpclassify)\n" "#endif // !defined(__HIPCC_RTC__)\n" "__HIP_OVERLOAD2(double, hypot)\n" "__HIP_OVERLOAD1(int, ilogb)\n" "__HIP_OVERLOAD1(bool, isfinite)\n" "__HIP_OVERLOAD2(bool, isgreater)\n" "__HIP_OVERLOAD2(bool, isgreaterequal)\n" "__HIP_OVERLOAD1(bool, isinf)\n" 
"__HIP_OVERLOAD2(bool, isless)\n" "__HIP_OVERLOAD2(bool, islessequal)\n" "__HIP_OVERLOAD2(bool, islessgreater)\n" "__HIP_OVERLOAD1(bool, isnan)\n" "__HIP_OVERLOAD1(bool, isnormal)\n" "__HIP_OVERLOAD2(bool, isunordered)\n" "__HIP_OVERLOAD1(double, lgamma)\n" "__HIP_OVERLOAD1(double, log)\n" "__HIP_OVERLOAD1(double, log10)\n" "__HIP_OVERLOAD1(double, log1p)\n" "__HIP_OVERLOAD1(double, log2)\n" "__HIP_OVERLOAD1(double, logb)\n" "__HIP_OVERLOAD1(long long, llrint)\n" "__HIP_OVERLOAD1(long long, llround)\n" "__HIP_OVERLOAD1(long, lrint)\n" "__HIP_OVERLOAD1(long, lround)\n" "__HIP_OVERLOAD1(double, nearbyint)\n" "__HIP_OVERLOAD2(double, nextafter)\n" "__HIP_OVERLOAD2(double, pow)\n" "__HIP_OVERLOAD2(double, remainder)\n" "__HIP_OVERLOAD1(double, rint)\n" "__HIP_OVERLOAD1(double, round)\n" "__HIP_OVERLOAD1(bool, signbit)\n" "__HIP_OVERLOAD1(double, sin)\n" "__HIP_OVERLOAD1(double, sinh)\n" "__HIP_OVERLOAD1(double, sqrt)\n" "__HIP_OVERLOAD1(double, tan)\n" "__HIP_OVERLOAD1(double, tanh)\n" "__HIP_OVERLOAD1(double, tgamma)\n" "__HIP_OVERLOAD1(double, trunc)\n" "\n" "// Overload these but don't add them to std, they are not part of cmath.\n" "__HIP_OVERLOAD2(double, max)\n" "__HIP_OVERLOAD2(double, min)\n" "\n" "// Additional Overloads that don't quite match HIP_OVERLOAD.\n" "#if __cplusplus >= 201103L\n" "template \n" "__DEVICE__ __CONSTEXPR__ typename __hip_enable_if<\n" " __hip::is_arithmetic<__T1>::value && __hip::is_arithmetic<__T2>::value &&\n" " __hip::is_arithmetic<__T3>::value,\n" " typename __hip::__promote<__T1, __T2, __T3>::type>::type\n" "fma(__T1 __x, __T2 __y, __T3 __z) {\n" " typedef typename __hip::__promote<__T1, __T2, __T3>::type __result_type;\n" " return ::fma((__result_type)__x, (__result_type)__y, (__result_type)__z);\n" "}\n" "#else\n" "template \n" "__DEVICE__ __CONSTEXPR__\n" " typename __hip_enable_if<__hip::is_arithmetic<__T1>::value &&\n" " __hip::is_arithmetic<__T2>::value &&\n" " __hip::is_arithmetic<__T3>::value,\n" " double>::type\n" " 
fma(__T1 __x, __T2 __y, __T3 __z) {\n" " return ::fma((double)__x, (double)__y, (double)__z);\n" "}\n" "#endif\n" "\n" "template \n" "__DEVICE__ __CONSTEXPR__\n" " typename __hip_enable_if<__hip::is_integral<__T>::value, double>::type\n" " frexp(__T __x, int *__exp) {\n" " return ::frexp((double)__x, __exp);\n" "}\n" "\n" "template \n" "__DEVICE__ __CONSTEXPR__\n" " typename __hip_enable_if<__hip::is_integral<__T>::value, double>::type\n" " ldexp(__T __x, int __exp) {\n" " return ::ldexp((double)__x, __exp);\n" "}\n" "\n" "template \n" "__DEVICE__ __CONSTEXPR__\n" " typename __hip_enable_if<__hip::is_integral<__T>::value, double>::type\n" " modf(__T __x, double *__exp) {\n" " return ::modf((double)__x, __exp);\n" "}\n" "\n" "#if __cplusplus >= 201103L\n" "template \n" "__DEVICE__ __CONSTEXPR__\n" " typename __hip_enable_if<__hip::is_arithmetic<__T1>::value &&\n" " __hip::is_arithmetic<__T2>::value,\n" " typename __hip::__promote<__T1, __T2>::type>::type\n" " remquo(__T1 __x, __T2 __y, int *__quo) {\n" " typedef typename __hip::__promote<__T1, __T2>::type __result_type;\n" " return ::remquo((__result_type)__x, (__result_type)__y, __quo);\n" "}\n" "#else\n" "template \n" "__DEVICE__ __CONSTEXPR__\n" " typename __hip_enable_if<__hip::is_arithmetic<__T1>::value &&\n" " __hip::is_arithmetic<__T2>::value,\n" " double>::type\n" " remquo(__T1 __x, __T2 __y, int *__quo) {\n" " return ::remquo((double)__x, (double)__y, __quo);\n" "}\n" "#endif\n" "\n" "template \n" "__DEVICE__ __CONSTEXPR__\n" " typename __hip_enable_if<__hip::is_integral<__T>::value, double>::type\n" " scalbln(__T __x, long int __exp) {\n" " return ::scalbln((double)__x, __exp);\n" "}\n" "\n" "template \n" "__DEVICE__ __CONSTEXPR__\n" " typename __hip_enable_if<__hip::is_integral<__T>::value, double>::type\n" " scalbn(__T __x, int __exp) {\n" " return ::scalbn((double)__x, __exp);\n" "}\n" "\n" "#pragma pop_macro(\"__HIP_OVERLOAD1\")\n" "#pragma pop_macro(\"__HIP_OVERLOAD2\")\n" "\n" "// END HIP_OVERLOAD\n" 
"\n" "// END DEF_FUN and HIP_OVERLOAD\n" "\n" "#endif // ifndef __OPENMP_AMDGCN__\n" "#endif // defined(__cplusplus)\n" "\n" "#ifndef __OPENMP_AMDGCN__\n" "// Define these overloads inside the namespace our standard library uses.\n" "#if !defined(__HIPCC_RTC__)\n" "#ifdef _LIBCPP_BEGIN_NAMESPACE_STD\n" "_LIBCPP_BEGIN_NAMESPACE_STD\n" "#else\n" "namespace std {\n" "#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION\n" "_GLIBCXX_BEGIN_NAMESPACE_VERSION\n" "#endif // _GLIBCXX_BEGIN_NAMESPACE_VERSION\n" "#endif // _LIBCPP_BEGIN_NAMESPACE_STD\n" "\n" "// Pull the new overloads we defined above into namespace std.\n" "// using ::abs; - This may be considered for C++.\n" "using ::acos;\n" "using ::acosh;\n" "using ::asin;\n" "using ::asinh;\n" "using ::atan;\n" "using ::atan2;\n" "using ::atanh;\n" "using ::cbrt;\n" "using ::ceil;\n" "using ::copysign;\n" "using ::cos;\n" "using ::cosh;\n" "using ::erf;\n" "using ::erfc;\n" "using ::exp;\n" "using ::exp2;\n" "using ::expm1;\n" "using ::fabs;\n" "using ::fdim;\n" "using ::floor;\n" "using ::fma;\n" "using ::fmax;\n" "using ::fmin;\n" "using ::fmod;\n" "using ::fpclassify;\n" "using ::frexp;\n" "using ::hypot;\n" "using ::ilogb;\n" "using ::isfinite;\n" "using ::isgreater;\n" "using ::isgreaterequal;\n" "using ::isless;\n" "using ::islessequal;\n" "using ::islessgreater;\n" "using ::isnormal;\n" "using ::isunordered;\n" "using ::ldexp;\n" "using ::lgamma;\n" "using ::llrint;\n" "using ::llround;\n" "using ::log;\n" "using ::log10;\n" "using ::log1p;\n" "using ::log2;\n" "using ::logb;\n" "using ::lrint;\n" "using ::lround;\n" "using ::modf;\n" "// using ::nan; - This may be considered for C++.\n" "// using ::nanf; - This may be considered for C++.\n" "// using ::nanl; - This is not yet defined.\n" "using ::nearbyint;\n" "using ::nextafter;\n" "// using ::nexttoward; - Omit this since we do not have a definition.\n" "using ::pow;\n" "using ::remainder;\n" "using ::remquo;\n" "using ::rint;\n" "using ::round;\n" "using ::scalbln;\n" 
"using ::scalbn;\n" "using ::signbit;\n" "using ::sin;\n" "using ::sinh;\n" "using ::sqrt;\n" "using ::tan;\n" "using ::tanh;\n" "using ::tgamma;\n" "using ::trunc;\n" "\n" "// Well this is fun: We need to pull these symbols in for libc++, but we can't\n" "// pull them in with libstdc++, because its ::isinf and ::isnan are different\n" "// than its std::isinf and std::isnan.\n" "#ifndef __GLIBCXX__\n" "using ::isinf;\n" "using ::isnan;\n" "#endif\n" "\n" "// Finally, pull the \"foobarf\" functions that HIP defines into std.\n" "using ::acosf;\n" "using ::acoshf;\n" "using ::asinf;\n" "using ::asinhf;\n" "using ::atan2f;\n" "using ::atanf;\n" "using ::atanhf;\n" "using ::cbrtf;\n" "using ::ceilf;\n" "using ::copysignf;\n" "using ::cosf;\n" "using ::coshf;\n" "using ::erfcf;\n" "using ::erff;\n" "using ::exp2f;\n" "using ::expf;\n" "using ::expm1f;\n" "using ::fabsf;\n" "using ::fdimf;\n" "using ::floorf;\n" "using ::fmaf;\n" "using ::fmaxf;\n" "using ::fminf;\n" "using ::fmodf;\n" "using ::frexpf;\n" "using ::hypotf;\n" "using ::ilogbf;\n" "using ::ldexpf;\n" "using ::lgammaf;\n" "using ::llrintf;\n" "using ::llroundf;\n" "using ::log10f;\n" "using ::log1pf;\n" "using ::log2f;\n" "using ::logbf;\n" "using ::logf;\n" "using ::lrintf;\n" "using ::lroundf;\n" "using ::modff;\n" "using ::nearbyintf;\n" "using ::nextafterf;\n" "// using ::nexttowardf; - Omit this since we do not have a definition.\n" "using ::powf;\n" "using ::remainderf;\n" "using ::remquof;\n" "using ::rintf;\n" "using ::roundf;\n" "using ::scalblnf;\n" "using ::scalbnf;\n" "using ::sinf;\n" "using ::sinhf;\n" "using ::sqrtf;\n" "using ::tanf;\n" "using ::tanhf;\n" "using ::tgammaf;\n" "using ::truncf;\n" "\n" "#ifdef _LIBCPP_END_NAMESPACE_STD\n" "_LIBCPP_END_NAMESPACE_STD\n" "#else\n" "#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION\n" "_GLIBCXX_END_NAMESPACE_VERSION\n" "#endif // _GLIBCXX_BEGIN_NAMESPACE_VERSION\n" "} // namespace std\n" "#endif // _LIBCPP_END_NAMESPACE_STD\n" "#endif // 
!defined(__HIPCC_RTC__)\n" "\n" "// Define device-side math functions from on MSVC.\n" "#if !defined(__HIPCC_RTC__)\n" "#if defined(_MSC_VER)\n" "\n" "// Before VS2019, `` is also included in `` and other headers.\n" "// But, from VS2019, it's only included in ``. Need to include\n" "// `` here to ensure C functions declared there won't be markded as\n" "// `__host__` and `__device__` through `` wrapper.\n" "#include \n" "\n" "#if defined(__cplusplus)\n" "extern \"C\" {\n" "#endif // defined(__cplusplus)\n" "__DEVICE__ __CONSTEXPR__ __attribute__((overloadable)) double _Cosh(double x,\n" " double y) {\n" " return cosh(x) * y;\n" "}\n" "__DEVICE__ __CONSTEXPR__ __attribute__((overloadable)) float _FCosh(float x,\n" " float y) {\n" " return coshf(x) * y;\n" "}\n" "__DEVICE__ __CONSTEXPR__ __attribute__((overloadable)) short _Dtest(double *p) {\n" " return fpclassify(*p);\n" "}\n" "__DEVICE__ __CONSTEXPR__ __attribute__((overloadable)) short _FDtest(float *p) {\n" " return fpclassify(*p);\n" "}\n" "__DEVICE__ __CONSTEXPR__ __attribute__((overloadable)) double _Sinh(double x,\n" " double y) {\n" " return sinh(x) * y;\n" "}\n" "__DEVICE__ __CONSTEXPR__ __attribute__((overloadable)) float _FSinh(float x,\n" " float y) {\n" " return sinhf(x) * y;\n" "}\n" "#if defined(__cplusplus)\n" "}\n" "#endif // defined(__cplusplus)\n" "#endif // defined(_MSC_VER)\n" "#endif // !defined(__HIPCC_RTC__)\n" "#endif // ifndef __OPENMP_AMDGCN__\n" "\n" "#pragma pop_macro(\"__DEVICE__\")\n" "#pragma pop_macro(\"__CONSTEXPR__\")\n" "\n" "#endif // __CLANG_HIP_CMATH_H__\n" "" } , { "/builtins/__clang_hip_libdevice_declares.h" , "/*===---- __clang_hip_libdevice_declares.h - HIP device library decls -------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " 
*===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __CLANG_HIP_LIBDEVICE_DECLARES_H__\n" "#define __CLANG_HIP_LIBDEVICE_DECLARES_H__\n" "\n" "#if !defined(__HIPCC_RTC__) && __has_include(\"hip/hip_version.h\")\n" "#include \"hip/hip_version.h\"\n" "#endif // __has_include(\"hip/hip_version.h\")\n" "\n" "#ifdef __cplusplus\n" "extern \"C\" {\n" "#endif\n" "\n" "// BEGIN FLOAT\n" "__device__ __attribute__((const)) float __ocml_acos_f32(float);\n" "__device__ __attribute__((pure)) float __ocml_acosh_f32(float);\n" "__device__ __attribute__((const)) float __ocml_asin_f32(float);\n" "__device__ __attribute__((pure)) float __ocml_asinh_f32(float);\n" "__device__ __attribute__((const)) float __ocml_atan2_f32(float, float);\n" "__device__ __attribute__((const)) float __ocml_atan_f32(float);\n" "__device__ __attribute__((pure)) float __ocml_atanh_f32(float);\n" "__device__ __attribute__((pure)) float __ocml_cbrt_f32(float);\n" "__device__ __attribute__((const)) float __ocml_ceil_f32(float);\n" "__device__ __attribute__((const)) __device__ float __ocml_copysign_f32(float,\n" " float);\n" "__device__ float __ocml_cos_f32(float);\n" "__device__ float __ocml_native_cos_f32(float);\n" "__device__ __attribute__((pure)) __device__ float __ocml_cosh_f32(float);\n" "__device__ float __ocml_cospi_f32(float);\n" "__device__ float __ocml_i0_f32(float);\n" "__device__ float __ocml_i1_f32(float);\n" "__device__ __attribute__((pure)) float __ocml_erfc_f32(float);\n" "__device__ __attribute__((pure)) float __ocml_erfcinv_f32(float);\n" "__device__ __attribute__((pure)) float __ocml_erfcx_f32(float);\n" "__device__ __attribute__((pure)) float __ocml_erf_f32(float);\n" "__device__ __attribute__((pure)) float __ocml_erfinv_f32(float);\n" "__device__ __attribute__((pure)) float __ocml_exp10_f32(float);\n" "__device__ __attribute__((pure)) float __ocml_native_exp10_f32(float);\n" "__device__ __attribute__((pure)) float 
__ocml_exp2_f32(float);\n" "__device__ __attribute__((pure)) float __ocml_exp_f32(float);\n" "__device__ __attribute__((pure)) float __ocml_native_exp_f32(float);\n" "__device__ __attribute__((pure)) float __ocml_expm1_f32(float);\n" "__device__ __attribute__((const)) float __ocml_fabs_f32(float);\n" "__device__ __attribute__((const)) float __ocml_fdim_f32(float, float);\n" "__device__ __attribute__((const)) float __ocml_floor_f32(float);\n" "__device__ __attribute__((const)) float __ocml_fma_f32(float, float, float);\n" "__device__ __attribute__((const)) float __ocml_fmax_f32(float, float);\n" "__device__ __attribute__((const)) float __ocml_fmin_f32(float, float);\n" "__device__ __attribute__((const)) __device__ float __ocml_fmod_f32(float,\n" " float);\n" "__device__ float __ocml_frexp_f32(float,\n" " __attribute__((address_space(5))) int *);\n" "__device__ __attribute__((const)) float __ocml_hypot_f32(float, float);\n" "__device__ __attribute__((const)) int __ocml_ilogb_f32(float);\n" "__device__ __attribute__((const)) int __ocml_isfinite_f32(float);\n" "__device__ __attribute__((const)) int __ocml_isinf_f32(float);\n" "__device__ __attribute__((const)) int __ocml_isnan_f32(float);\n" "__device__ float __ocml_j0_f32(float);\n" "__device__ float __ocml_j1_f32(float);\n" "__device__ __attribute__((const)) float __ocml_ldexp_f32(float, int);\n" "__device__ float __ocml_lgamma_f32(float);\n" "__device__ __attribute__((pure)) float __ocml_log10_f32(float);\n" "__device__ __attribute__((pure)) float __ocml_native_log10_f32(float);\n" "__device__ __attribute__((pure)) float __ocml_log1p_f32(float);\n" "__device__ __attribute__((pure)) float __ocml_log2_f32(float);\n" "__device__ __attribute__((pure)) float __ocml_native_log2_f32(float);\n" "__device__ __attribute__((const)) float __ocml_logb_f32(float);\n" "__device__ __attribute__((pure)) float __ocml_log_f32(float);\n" "__device__ __attribute__((pure)) float __ocml_native_log_f32(float);\n" "__device__ float 
__ocml_modf_f32(float,\n" " __attribute__((address_space(5))) float *);\n" "__device__ __attribute__((const)) float __ocml_nearbyint_f32(float);\n" "__device__ __attribute__((const)) float __ocml_nextafter_f32(float, float);\n" "__device__ __attribute__((const)) float __ocml_len3_f32(float, float, float);\n" "__device__ __attribute__((const)) float __ocml_len4_f32(float, float, float,\n" " float);\n" "__device__ __attribute__((pure)) float __ocml_ncdf_f32(float);\n" "__device__ __attribute__((pure)) float __ocml_ncdfinv_f32(float);\n" "__device__ __attribute__((pure)) float __ocml_pow_f32(float, float);\n" "__device__ __attribute__((pure)) float __ocml_pown_f32(float, int);\n" "__device__ __attribute__((pure)) float __ocml_rcbrt_f32(float);\n" "__device__ __attribute__((const)) float __ocml_remainder_f32(float, float);\n" "__device__ float __ocml_remquo_f32(float, float,\n" " __attribute__((address_space(5))) int *);\n" "__device__ __attribute__((const)) float __ocml_rhypot_f32(float, float);\n" "__device__ __attribute__((const)) float __ocml_rint_f32(float);\n" "__device__ __attribute__((const)) float __ocml_rlen3_f32(float, float, float);\n" "__device__ __attribute__((const)) float __ocml_rlen4_f32(float, float, float,\n" " float);\n" "__device__ __attribute__((const)) float __ocml_round_f32(float);\n" "__device__ __attribute__((pure)) float __ocml_rsqrt_f32(float);\n" "__device__ __attribute__((const)) float __ocml_scalb_f32(float, float);\n" "__device__ __attribute__((const)) float __ocml_scalbn_f32(float, int);\n" "__device__ __attribute__((const)) int __ocml_signbit_f32(float);\n" "__device__ float __ocml_sincos_f32(float,\n" " __attribute__((address_space(5))) float *);\n" "__device__ float __ocml_sincospi_f32(float,\n" " __attribute__((address_space(5))) float *);\n" "__device__ float __ocml_sin_f32(float);\n" "__device__ float __ocml_native_sin_f32(float);\n" "__device__ __attribute__((pure)) float __ocml_sinh_f32(float);\n" "__device__ float 
__ocml_sinpi_f32(float);\n" "__device__ __attribute__((const)) float __ocml_sqrt_f32(float);\n" "__device__ __attribute__((const)) float __ocml_native_sqrt_f32(float);\n" "__device__ float __ocml_tan_f32(float);\n" "__device__ __attribute__((pure)) float __ocml_tanh_f32(float);\n" "__device__ float __ocml_tgamma_f32(float);\n" "__device__ __attribute__((const)) float __ocml_trunc_f32(float);\n" "__device__ float __ocml_y0_f32(float);\n" "__device__ float __ocml_y1_f32(float);\n" "\n" "// BEGIN INTRINSICS\n" "__device__ __attribute__((const)) float __ocml_add_rte_f32(float, float);\n" "__device__ __attribute__((const)) float __ocml_add_rtn_f32(float, float);\n" "__device__ __attribute__((const)) float __ocml_add_rtp_f32(float, float);\n" "__device__ __attribute__((const)) float __ocml_add_rtz_f32(float, float);\n" "__device__ __attribute__((const)) float __ocml_sub_rte_f32(float, float);\n" "__device__ __attribute__((const)) float __ocml_sub_rtn_f32(float, float);\n" "__device__ __attribute__((const)) float __ocml_sub_rtp_f32(float, float);\n" "__device__ __attribute__((const)) float __ocml_sub_rtz_f32(float, float);\n" "__device__ __attribute__((const)) float __ocml_mul_rte_f32(float, float);\n" "__device__ __attribute__((const)) float __ocml_mul_rtn_f32(float, float);\n" "__device__ __attribute__((const)) float __ocml_mul_rtp_f32(float, float);\n" "__device__ __attribute__((const)) float __ocml_mul_rtz_f32(float, float);\n" "__device__ __attribute__((const)) float __ocml_div_rte_f32(float, float);\n" "__device__ __attribute__((const)) float __ocml_div_rtn_f32(float, float);\n" "__device__ __attribute__((const)) float __ocml_div_rtp_f32(float, float);\n" "__device__ __attribute__((const)) float __ocml_div_rtz_f32(float, float);\n" "__device__ __attribute__((const)) float __ocml_sqrt_rte_f32(float);\n" "__device__ __attribute__((const)) float __ocml_sqrt_rtn_f32(float);\n" "__device__ __attribute__((const)) float __ocml_sqrt_rtp_f32(float);\n" "__device__ 
__attribute__((const)) float __ocml_sqrt_rtz_f32(float);\n" "__device__ __attribute__((const)) float __ocml_fma_rte_f32(float, float, float);\n" "__device__ __attribute__((const)) float __ocml_fma_rtn_f32(float, float, float);\n" "__device__ __attribute__((const)) float __ocml_fma_rtp_f32(float, float, float);\n" "__device__ __attribute__((const)) float __ocml_fma_rtz_f32(float, float, float);\n" "// END INTRINSICS\n" "// END FLOAT\n" "\n" "// BEGIN DOUBLE\n" "__device__ __attribute__((const)) double __ocml_acos_f64(double);\n" "__device__ __attribute__((pure)) double __ocml_acosh_f64(double);\n" "__device__ __attribute__((const)) double __ocml_asin_f64(double);\n" "__device__ __attribute__((pure)) double __ocml_asinh_f64(double);\n" "__device__ __attribute__((const)) double __ocml_atan2_f64(double, double);\n" "__device__ __attribute__((const)) double __ocml_atan_f64(double);\n" "__device__ __attribute__((pure)) double __ocml_atanh_f64(double);\n" "__device__ __attribute__((pure)) double __ocml_cbrt_f64(double);\n" "__device__ __attribute__((const)) double __ocml_ceil_f64(double);\n" "__device__ __attribute__((const)) double __ocml_copysign_f64(double, double);\n" "__device__ double __ocml_cos_f64(double);\n" "__device__ __attribute__((pure)) double __ocml_cosh_f64(double);\n" "__device__ double __ocml_cospi_f64(double);\n" "__device__ double __ocml_i0_f64(double);\n" "__device__ double __ocml_i1_f64(double);\n" "__device__ __attribute__((pure)) double __ocml_erfc_f64(double);\n" "__device__ __attribute__((pure)) double __ocml_erfcinv_f64(double);\n" "__device__ __attribute__((pure)) double __ocml_erfcx_f64(double);\n" "__device__ __attribute__((pure)) double __ocml_erf_f64(double);\n" "__device__ __attribute__((pure)) double __ocml_erfinv_f64(double);\n" "__device__ __attribute__((pure)) double __ocml_exp10_f64(double);\n" "__device__ __attribute__((pure)) double __ocml_exp2_f64(double);\n" "__device__ __attribute__((pure)) double __ocml_exp_f64(double);\n" 
"__device__ __attribute__((pure)) double __ocml_expm1_f64(double);\n" "__device__ __attribute__((const)) double __ocml_fabs_f64(double);\n" "__device__ __attribute__((const)) double __ocml_fdim_f64(double, double);\n" "__device__ __attribute__((const)) double __ocml_floor_f64(double);\n" "__device__ __attribute__((const)) double __ocml_fma_f64(double, double, double);\n" "__device__ __attribute__((const)) double __ocml_fmax_f64(double, double);\n" "__device__ __attribute__((const)) double __ocml_fmin_f64(double, double);\n" "__device__ __attribute__((const)) double __ocml_fmod_f64(double, double);\n" "__device__ double __ocml_frexp_f64(double,\n" " __attribute__((address_space(5))) int *);\n" "__device__ __attribute__((const)) double __ocml_hypot_f64(double, double);\n" "__device__ __attribute__((const)) int __ocml_ilogb_f64(double);\n" "__device__ __attribute__((const)) int __ocml_isfinite_f64(double);\n" "__device__ __attribute__((const)) int __ocml_isinf_f64(double);\n" "__device__ __attribute__((const)) int __ocml_isnan_f64(double);\n" "__device__ double __ocml_j0_f64(double);\n" "__device__ double __ocml_j1_f64(double);\n" "__device__ __attribute__((const)) double __ocml_ldexp_f64(double, int);\n" "__device__ double __ocml_lgamma_f64(double);\n" "__device__ __attribute__((pure)) double __ocml_log10_f64(double);\n" "__device__ __attribute__((pure)) double __ocml_log1p_f64(double);\n" "__device__ __attribute__((pure)) double __ocml_log2_f64(double);\n" "__device__ __attribute__((const)) double __ocml_logb_f64(double);\n" "__device__ __attribute__((pure)) double __ocml_log_f64(double);\n" "__device__ double __ocml_modf_f64(double,\n" " __attribute__((address_space(5))) double *);\n" "__device__ __attribute__((const)) double __ocml_nearbyint_f64(double);\n" "__device__ __attribute__((const)) double __ocml_nextafter_f64(double, double);\n" "__device__ __attribute__((const)) double __ocml_len3_f64(double, double,\n" " double);\n" "__device__ __attribute__((const)) 
double __ocml_len4_f64(double, double, double,\n" " double);\n" "__device__ __attribute__((pure)) double __ocml_ncdf_f64(double);\n" "__device__ __attribute__((pure)) double __ocml_ncdfinv_f64(double);\n" "__device__ __attribute__((pure)) double __ocml_pow_f64(double, double);\n" "__device__ __attribute__((pure)) double __ocml_pown_f64(double, int);\n" "__device__ __attribute__((pure)) double __ocml_rcbrt_f64(double);\n" "__device__ __attribute__((const)) double __ocml_remainder_f64(double, double);\n" "__device__ double __ocml_remquo_f64(double, double,\n" " __attribute__((address_space(5))) int *);\n" "__device__ __attribute__((const)) double __ocml_rhypot_f64(double, double);\n" "__device__ __attribute__((const)) double __ocml_rint_f64(double);\n" "__device__ __attribute__((const)) double __ocml_rlen3_f64(double, double,\n" " double);\n" "__device__ __attribute__((const)) double __ocml_rlen4_f64(double, double,\n" " double, double);\n" "__device__ __attribute__((const)) double __ocml_round_f64(double);\n" "__device__ __attribute__((pure)) double __ocml_rsqrt_f64(double);\n" "__device__ __attribute__((const)) double __ocml_scalb_f64(double, double);\n" "__device__ __attribute__((const)) double __ocml_scalbn_f64(double, int);\n" "__device__ __attribute__((const)) int __ocml_signbit_f64(double);\n" "__device__ double __ocml_sincos_f64(double,\n" " __attribute__((address_space(5))) double *);\n" "__device__ double\n" "__ocml_sincospi_f64(double, __attribute__((address_space(5))) double *);\n" "__device__ double __ocml_sin_f64(double);\n" "__device__ __attribute__((pure)) double __ocml_sinh_f64(double);\n" "__device__ double __ocml_sinpi_f64(double);\n" "__device__ __attribute__((const)) double __ocml_sqrt_f64(double);\n" "__device__ double __ocml_tan_f64(double);\n" "__device__ __attribute__((pure)) double __ocml_tanh_f64(double);\n" "__device__ double __ocml_tgamma_f64(double);\n" "__device__ __attribute__((const)) double __ocml_trunc_f64(double);\n" "__device__ 
double __ocml_y0_f64(double);\n" "__device__ double __ocml_y1_f64(double);\n" "\n" "// BEGIN INTRINSICS\n" "__device__ __attribute__((const)) double __ocml_add_rte_f64(double, double);\n" "__device__ __attribute__((const)) double __ocml_add_rtn_f64(double, double);\n" "__device__ __attribute__((const)) double __ocml_add_rtp_f64(double, double);\n" "__device__ __attribute__((const)) double __ocml_add_rtz_f64(double, double);\n" "__device__ __attribute__((const)) double __ocml_sub_rte_f64(double, double);\n" "__device__ __attribute__((const)) double __ocml_sub_rtn_f64(double, double);\n" "__device__ __attribute__((const)) double __ocml_sub_rtp_f64(double, double);\n" "__device__ __attribute__((const)) double __ocml_sub_rtz_f64(double, double);\n" "__device__ __attribute__((const)) double __ocml_mul_rte_f64(double, double);\n" "__device__ __attribute__((const)) double __ocml_mul_rtn_f64(double, double);\n" "__device__ __attribute__((const)) double __ocml_mul_rtp_f64(double, double);\n" "__device__ __attribute__((const)) double __ocml_mul_rtz_f64(double, double);\n" "__device__ __attribute__((const)) double __ocml_div_rte_f64(double, double);\n" "__device__ __attribute__((const)) double __ocml_div_rtn_f64(double, double);\n" "__device__ __attribute__((const)) double __ocml_div_rtp_f64(double, double);\n" "__device__ __attribute__((const)) double __ocml_div_rtz_f64(double, double);\n" "__device__ __attribute__((const)) double __ocml_sqrt_rte_f64(double);\n" "__device__ __attribute__((const)) double __ocml_sqrt_rtn_f64(double);\n" "__device__ __attribute__((const)) double __ocml_sqrt_rtp_f64(double);\n" "__device__ __attribute__((const)) double __ocml_sqrt_rtz_f64(double);\n" "__device__ __attribute__((const)) double __ocml_fma_rte_f64(double, double,\n" " double);\n" "__device__ __attribute__((const)) double __ocml_fma_rtn_f64(double, double,\n" " double);\n" "__device__ __attribute__((const)) double __ocml_fma_rtp_f64(double, double,\n" " double);\n" "__device__ 
__attribute__((const)) double __ocml_fma_rtz_f64(double, double,\n" " double);\n" "\n" "__device__ __attribute__((const)) _Float16 __ocml_ceil_f16(_Float16);\n" "__device__ _Float16 __ocml_cos_f16(_Float16);\n" "__device__ __attribute__((const)) _Float16 __ocml_cvtrtn_f16_f32(float);\n" "__device__ __attribute__((const)) _Float16 __ocml_cvtrtp_f16_f32(float);\n" "__device__ __attribute__((const)) _Float16 __ocml_cvtrtz_f16_f32(float);\n" "__device__ __attribute__((pure)) _Float16 __ocml_exp_f16(_Float16);\n" "__device__ __attribute__((pure)) _Float16 __ocml_exp10_f16(_Float16);\n" "__device__ __attribute__((pure)) _Float16 __ocml_exp2_f16(_Float16);\n" "__device__ __attribute__((const)) _Float16 __ocml_floor_f16(_Float16);\n" "__device__ __attribute__((const)) _Float16 __ocml_fma_f16(_Float16, _Float16,\n" " _Float16);\n" "__device__ __attribute__((const)) _Float16 __ocml_fmax_f16(_Float16, _Float16);\n" "__device__ __attribute__((const)) _Float16 __ocml_fmin_f16(_Float16, _Float16);\n" "__device__ __attribute__((const)) _Float16 __ocml_fabs_f16(_Float16);\n" "__device__ __attribute__((const)) int __ocml_isinf_f16(_Float16);\n" "__device__ __attribute__((const)) int __ocml_isnan_f16(_Float16);\n" "__device__ __attribute__((pure)) _Float16 __ocml_log_f16(_Float16);\n" "__device__ __attribute__((pure)) _Float16 __ocml_log10_f16(_Float16);\n" "__device__ __attribute__((pure)) _Float16 __ocml_log2_f16(_Float16);\n" "__device__ __attribute__((const)) _Float16 __ocml_rint_f16(_Float16);\n" "__device__ __attribute__((const)) _Float16 __ocml_rsqrt_f16(_Float16);\n" "__device__ _Float16 __ocml_sin_f16(_Float16);\n" "__device__ __attribute__((const)) _Float16 __ocml_sqrt_f16(_Float16);\n" "__device__ __attribute__((const)) _Float16 __ocml_trunc_f16(_Float16);\n" "__device__ __attribute__((pure)) _Float16 __ocml_pown_f16(_Float16, int);\n" "\n" "typedef _Float16 __2f16 __attribute__((ext_vector_type(2)));\n" "typedef short __2i16 __attribute__((ext_vector_type(2)));\n" "\n" 
"// We need to match C99's bool and get an i1 in the IR.\n" "#ifdef __cplusplus\n" "typedef bool __ockl_bool;\n" "#else\n" "typedef _Bool __ockl_bool;\n" "#endif\n" "\n" "__device__ __attribute__((const)) float __ockl_fdot2(__2f16 a, __2f16 b,\n" " float c, __ockl_bool s);\n" "__device__ __attribute__((const)) __2f16 __ocml_ceil_2f16(__2f16);\n" "__device__ __attribute__((const)) __2f16 __ocml_fabs_2f16(__2f16);\n" "__device__ __2f16 __ocml_cos_2f16(__2f16);\n" "__device__ __attribute__((pure)) __2f16 __ocml_exp_2f16(__2f16);\n" "__device__ __attribute__((pure)) __2f16 __ocml_exp10_2f16(__2f16);\n" "__device__ __attribute__((pure)) __2f16 __ocml_exp2_2f16(__2f16);\n" "__device__ __attribute__((const)) __2f16 __ocml_floor_2f16(__2f16);\n" "__device__ __attribute__((const))\n" "__2f16 __ocml_fma_2f16(__2f16, __2f16, __2f16);\n" "__device__ __attribute__((const)) __2i16 __ocml_isinf_2f16(__2f16);\n" "__device__ __attribute__((const)) __2i16 __ocml_isnan_2f16(__2f16);\n" "__device__ __attribute__((pure)) __2f16 __ocml_log_2f16(__2f16);\n" "__device__ __attribute__((pure)) __2f16 __ocml_log10_2f16(__2f16);\n" "__device__ __attribute__((pure)) __2f16 __ocml_log2_2f16(__2f16);\n" "\n" "#if HIP_VERSION_MAJOR * 100 + HIP_VERSION_MINOR >= 560\n" "#define __DEPRECATED_SINCE_HIP_560(X) __attribute__((deprecated(X)))\n" "#else\n" "#define __DEPRECATED_SINCE_HIP_560(X)\n" "#endif\n" "\n" "// Deprecated, should be removed when rocm releases using it are no longer\n" "// relevant.\n" "__DEPRECATED_SINCE_HIP_560(\"use ((_Float16)1.0) / \")\n" "__device__ inline _Float16 __llvm_amdgcn_rcp_f16(_Float16 x) {\n" " return ((_Float16)1.0f) / x;\n" "}\n" "\n" "__DEPRECATED_SINCE_HIP_560(\"use ((__2f16)1.0) / \")\n" "__device__ inline __2f16\n" "__llvm_amdgcn_rcp_2f16(__2f16 __x)\n" "{\n" " return ((__2f16)1.0f) / __x;\n" "}\n" "\n" "#undef __DEPRECATED_SINCE_HIP_560\n" "\n" "__device__ __attribute__((const)) __2f16 __ocml_rint_2f16(__2f16);\n" "__device__ __attribute__((const)) __2f16 
__ocml_rsqrt_2f16(__2f16);\n" "__device__ __2f16 __ocml_sin_2f16(__2f16);\n" "__device__ __attribute__((const)) __2f16 __ocml_sqrt_2f16(__2f16);\n" "__device__ __attribute__((const)) __2f16 __ocml_trunc_2f16(__2f16);\n" "__device__ __attribute__((const)) __2f16 __ocml_pown_2f16(__2f16, __2i16);\n" "\n" "#ifdef __cplusplus\n" "} // extern \"C\"\n" "#endif\n" "\n" "#endif // __CLANG_HIP_LIBDEVICE_DECLARES_H__\n" "" } , { "/builtins/__clang_hip_math.h" , "/*===---- __clang_hip_math.h - Device-side HIP math support ----------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "#ifndef __CLANG_HIP_MATH_H__\n" "#define __CLANG_HIP_MATH_H__\n" "\n" "#if !defined(__HIP__) && !defined(__OPENMP_AMDGCN__)\n" "#error \"This file is for HIP and OpenMP AMDGCN device compilation only.\"\n" "#endif\n" "\n" "#if !defined(__HIPCC_RTC__)\n" "#if defined(__cplusplus)\n" "#include \n" "#endif\n" "#include \n" "#include \n" "#ifdef __OPENMP_AMDGCN__\n" "#include \n" "#endif\n" "#endif // !defined(__HIPCC_RTC__)\n" "\n" "#pragma push_macro(\"__DEVICE__\")\n" "\n" "#ifdef __OPENMP_AMDGCN__\n" "#define __DEVICE__ static inline __attribute__((always_inline, nothrow))\n" "#else\n" "#define __DEVICE__ static __device__ inline __attribute__((always_inline))\n" "#endif\n" "\n" "// A few functions return bool type starting only in C++11.\n" "#pragma push_macro(\"__RETURN_TYPE\")\n" "#ifdef __OPENMP_AMDGCN__\n" "#define __RETURN_TYPE int\n" "#else\n" "#if defined(__cplusplus)\n" "#define __RETURN_TYPE bool\n" "#else\n" "#define __RETURN_TYPE int\n" "#endif\n" "#endif // __OPENMP_AMDGCN__\n" "\n" "#if defined (__cplusplus) && __cplusplus < 201103L\n" "// emulate static_assert on type sizes\n" "template\n" "struct 
__compare_result{};\n" "template<>\n" "struct __compare_result {\n" " static const __device__ bool valid;\n" "};\n" "\n" "__DEVICE__\n" "void __suppress_unused_warning(bool b){};\n" "template \n" "__DEVICE__ void __static_assert_equal_size() {\n" " __suppress_unused_warning(__compare_result::valid);\n" "}\n" "\n" "#define __static_assert_type_size_equal(A, B) \\\n" " __static_assert_equal_size()\n" "\n" "#else\n" "#define __static_assert_type_size_equal(A,B) \\\n" " static_assert((A) == (B), \"\")\n" "\n" "#endif\n" "\n" "__DEVICE__\n" "uint64_t __make_mantissa_base8(const char *__tagp __attribute__((nonnull))) {\n" " uint64_t __r = 0;\n" " while (*__tagp != '\\0') {\n" " char __tmp = *__tagp;\n" "\n" " if (__tmp >= '0' && __tmp <= '7')\n" " __r = (__r * 8u) + __tmp - '0';\n" " else\n" " return 0;\n" "\n" " ++__tagp;\n" " }\n" "\n" " return __r;\n" "}\n" "\n" "__DEVICE__\n" "uint64_t __make_mantissa_base10(const char *__tagp __attribute__((nonnull))) {\n" " uint64_t __r = 0;\n" " while (*__tagp != '\\0') {\n" " char __tmp = *__tagp;\n" "\n" " if (__tmp >= '0' && __tmp <= '9')\n" " __r = (__r * 10u) + __tmp - '0';\n" " else\n" " return 0;\n" "\n" " ++__tagp;\n" " }\n" "\n" " return __r;\n" "}\n" "\n" "__DEVICE__\n" "uint64_t __make_mantissa_base16(const char *__tagp __attribute__((nonnull))) {\n" " uint64_t __r = 0;\n" " while (*__tagp != '\\0') {\n" " char __tmp = *__tagp;\n" "\n" " if (__tmp >= '0' && __tmp <= '9')\n" " __r = (__r * 16u) + __tmp - '0';\n" " else if (__tmp >= 'a' && __tmp <= 'f')\n" " __r = (__r * 16u) + __tmp - 'a' + 10;\n" " else if (__tmp >= 'A' && __tmp <= 'F')\n" " __r = (__r * 16u) + __tmp - 'A' + 10;\n" " else\n" " return 0;\n" "\n" " ++__tagp;\n" " }\n" "\n" " return __r;\n" "}\n" "\n" "__DEVICE__\n" "uint64_t __make_mantissa(const char *__tagp __attribute__((nonnull))) {\n" " if (*__tagp == '0') {\n" " ++__tagp;\n" "\n" " if (*__tagp == 'x' || *__tagp == 'X')\n" " return __make_mantissa_base16(__tagp);\n" " else\n" " return 
__make_mantissa_base8(__tagp);\n" " }\n" "\n" " return __make_mantissa_base10(__tagp);\n" "}\n" "\n" "// BEGIN FLOAT\n" "#if defined(__cplusplus)\n" "__DEVICE__\n" "int abs(int __x) {\n" " int __sgn = __x >> (sizeof(int) * CHAR_BIT - 1);\n" " return (__x ^ __sgn) - __sgn;\n" "}\n" "__DEVICE__\n" "long labs(long __x) {\n" " long __sgn = __x >> (sizeof(long) * CHAR_BIT - 1);\n" " return (__x ^ __sgn) - __sgn;\n" "}\n" "__DEVICE__\n" "long long llabs(long long __x) {\n" " long long __sgn = __x >> (sizeof(long long) * CHAR_BIT - 1);\n" " return (__x ^ __sgn) - __sgn;\n" "}\n" "#endif\n" "\n" "__DEVICE__\n" "float acosf(float __x) { return __ocml_acos_f32(__x); }\n" "\n" "__DEVICE__\n" "float acoshf(float __x) { return __ocml_acosh_f32(__x); }\n" "\n" "__DEVICE__\n" "float asinf(float __x) { return __ocml_asin_f32(__x); }\n" "\n" "__DEVICE__\n" "float asinhf(float __x) { return __ocml_asinh_f32(__x); }\n" "\n" "__DEVICE__\n" "float atan2f(float __x, float __y) { return __ocml_atan2_f32(__x, __y); }\n" "\n" "__DEVICE__\n" "float atanf(float __x) { return __ocml_atan_f32(__x); }\n" "\n" "__DEVICE__\n" "float atanhf(float __x) { return __ocml_atanh_f32(__x); }\n" "\n" "__DEVICE__\n" "float cbrtf(float __x) { return __ocml_cbrt_f32(__x); }\n" "\n" "__DEVICE__\n" "float ceilf(float __x) { return __builtin_ceilf(__x); }\n" "\n" "__DEVICE__\n" "float copysignf(float __x, float __y) { return __builtin_copysignf(__x, __y); }\n" "\n" "__DEVICE__\n" "float cosf(float __x) { return __ocml_cos_f32(__x); }\n" "\n" "__DEVICE__\n" "float coshf(float __x) { return __ocml_cosh_f32(__x); }\n" "\n" "__DEVICE__\n" "float cospif(float __x) { return __ocml_cospi_f32(__x); }\n" "\n" "__DEVICE__\n" "float cyl_bessel_i0f(float __x) { return __ocml_i0_f32(__x); }\n" "\n" "__DEVICE__\n" "float cyl_bessel_i1f(float __x) { return __ocml_i1_f32(__x); }\n" "\n" "__DEVICE__\n" "float erfcf(float __x) { return __ocml_erfc_f32(__x); }\n" "\n" "__DEVICE__\n" "float erfcinvf(float __x) { return 
__ocml_erfcinv_f32(__x); }\n" "\n" "__DEVICE__\n" "float erfcxf(float __x) { return __ocml_erfcx_f32(__x); }\n" "\n" "__DEVICE__\n" "float erff(float __x) { return __ocml_erf_f32(__x); }\n" "\n" "__DEVICE__\n" "float erfinvf(float __x) { return __ocml_erfinv_f32(__x); }\n" "\n" "__DEVICE__\n" "float exp10f(float __x) { return __ocml_exp10_f32(__x); }\n" "\n" "__DEVICE__\n" "float exp2f(float __x) { return __builtin_exp2f(__x); }\n" "\n" "__DEVICE__\n" "float expf(float __x) { return __builtin_expf(__x); }\n" "\n" "__DEVICE__\n" "float expm1f(float __x) { return __ocml_expm1_f32(__x); }\n" "\n" "__DEVICE__\n" "float fabsf(float __x) { return __builtin_fabsf(__x); }\n" "\n" "__DEVICE__\n" "float fdimf(float __x, float __y) { return __ocml_fdim_f32(__x, __y); }\n" "\n" "__DEVICE__\n" "float fdividef(float __x, float __y) { return __x / __y; }\n" "\n" "__DEVICE__\n" "float floorf(float __x) { return __builtin_floorf(__x); }\n" "\n" "__DEVICE__\n" "float fmaf(float __x, float __y, float __z) {\n" " return __builtin_fmaf(__x, __y, __z);\n" "}\n" "\n" "__DEVICE__\n" "float fmaxf(float __x, float __y) { return __builtin_fmaxf(__x, __y); }\n" "\n" "__DEVICE__\n" "float fminf(float __x, float __y) { return __builtin_fminf(__x, __y); }\n" "\n" "__DEVICE__\n" "float fmodf(float __x, float __y) { return __ocml_fmod_f32(__x, __y); }\n" "\n" "__DEVICE__\n" "float frexpf(float __x, int *__nptr) {\n" " return __builtin_frexpf(__x, __nptr);\n" "}\n" "\n" "__DEVICE__\n" "float hypotf(float __x, float __y) { return __ocml_hypot_f32(__x, __y); }\n" "\n" "__DEVICE__\n" "int ilogbf(float __x) { return __ocml_ilogb_f32(__x); }\n" "\n" "__DEVICE__\n" "__RETURN_TYPE __finitef(float __x) { return __builtin_isfinite(__x); }\n" "\n" "__DEVICE__\n" "__RETURN_TYPE __isinff(float __x) { return __builtin_isinf(__x); }\n" "\n" "__DEVICE__\n" "__RETURN_TYPE __isnanf(float __x) { return __builtin_isnan(__x); }\n" "\n" "__DEVICE__\n" "float j0f(float __x) { return __ocml_j0_f32(__x); }\n" "\n" 
"__DEVICE__\n" "float j1f(float __x) { return __ocml_j1_f32(__x); }\n" "\n" "__DEVICE__\n" "float jnf(int __n, float __x) { // TODO: we could use Ahmes multiplication\n" " // and the Miller & Brown algorithm\n" " // for linear recurrences to get O(log n) steps, but it's unclear if\n" " // it'd be beneficial in this case.\n" " if (__n == 0)\n" " return j0f(__x);\n" " if (__n == 1)\n" " return j1f(__x);\n" "\n" " float __x0 = j0f(__x);\n" " float __x1 = j1f(__x);\n" " for (int __i = 1; __i < __n; ++__i) {\n" " float __x2 = (2 * __i) / __x * __x1 - __x0;\n" " __x0 = __x1;\n" " __x1 = __x2;\n" " }\n" "\n" " return __x1;\n" "}\n" "\n" "__DEVICE__\n" "float ldexpf(float __x, int __e) { return __builtin_amdgcn_ldexpf(__x, __e); }\n" "\n" "__DEVICE__\n" "float lgammaf(float __x) { return __ocml_lgamma_f32(__x); }\n" "\n" "__DEVICE__\n" "long long int llrintf(float __x) { return __builtin_rintf(__x); }\n" "\n" "__DEVICE__\n" "long long int llroundf(float __x) { return __builtin_roundf(__x); }\n" "\n" "__DEVICE__\n" "float log10f(float __x) { return __builtin_log10f(__x); }\n" "\n" "__DEVICE__\n" "float log1pf(float __x) { return __ocml_log1p_f32(__x); }\n" "\n" "__DEVICE__\n" "float log2f(float __x) { return __builtin_log2f(__x); }\n" "\n" "__DEVICE__\n" "float logbf(float __x) { return __ocml_logb_f32(__x); }\n" "\n" "__DEVICE__\n" "float logf(float __x) { return __builtin_logf(__x); }\n" "\n" "__DEVICE__\n" "long int lrintf(float __x) { return __builtin_rintf(__x); }\n" "\n" "__DEVICE__\n" "long int lroundf(float __x) { return __builtin_roundf(__x); }\n" "\n" "__DEVICE__\n" "float modff(float __x, float *__iptr) {\n" " float __tmp;\n" "#ifdef __OPENMP_AMDGCN__\n" "#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)\n" "#endif\n" " float __r =\n" " __ocml_modf_f32(__x, (__attribute__((address_space(5))) float *)&__tmp);\n" " *__iptr = __tmp;\n" " return __r;\n" "}\n" "\n" "__DEVICE__\n" "float nanf(const char *__tagp __attribute__((nonnull))) {\n" " union {\n" " 
float val;\n" " struct ieee_float {\n" " unsigned int mantissa : 22;\n" " unsigned int quiet : 1;\n" " unsigned int exponent : 8;\n" " unsigned int sign : 1;\n" " } bits;\n" " } __tmp;\n" " __static_assert_type_size_equal(sizeof(__tmp.val), sizeof(__tmp.bits));\n" "\n" " __tmp.bits.sign = 0u;\n" " __tmp.bits.exponent = ~0u;\n" " __tmp.bits.quiet = 1u;\n" " __tmp.bits.mantissa = __make_mantissa(__tagp);\n" "\n" " return __tmp.val;\n" "}\n" "\n" "__DEVICE__\n" "float nearbyintf(float __x) { return __builtin_nearbyintf(__x); }\n" "\n" "__DEVICE__\n" "float nextafterf(float __x, float __y) {\n" " return __ocml_nextafter_f32(__x, __y);\n" "}\n" "\n" "__DEVICE__\n" "float norm3df(float __x, float __y, float __z) {\n" " return __ocml_len3_f32(__x, __y, __z);\n" "}\n" "\n" "__DEVICE__\n" "float norm4df(float __x, float __y, float __z, float __w) {\n" " return __ocml_len4_f32(__x, __y, __z, __w);\n" "}\n" "\n" "__DEVICE__\n" "float normcdff(float __x) { return __ocml_ncdf_f32(__x); }\n" "\n" "__DEVICE__\n" "float normcdfinvf(float __x) { return __ocml_ncdfinv_f32(__x); }\n" "\n" "__DEVICE__\n" "float normf(int __dim,\n" " const float *__a) { // TODO: placeholder until OCML adds support.\n" " float __r = 0;\n" " while (__dim--) {\n" " __r += __a[0] * __a[0];\n" " ++__a;\n" " }\n" "\n" " return __ocml_sqrt_f32(__r);\n" "}\n" "\n" "__DEVICE__\n" "float powf(float __x, float __y) { return __ocml_pow_f32(__x, __y); }\n" "\n" "__DEVICE__\n" "float powif(float __x, int __y) { return __ocml_pown_f32(__x, __y); }\n" "\n" "__DEVICE__\n" "float rcbrtf(float __x) { return __ocml_rcbrt_f32(__x); }\n" "\n" "__DEVICE__\n" "float remainderf(float __x, float __y) {\n" " return __ocml_remainder_f32(__x, __y);\n" "}\n" "\n" "__DEVICE__\n" "float remquof(float __x, float __y, int *__quo) {\n" " int __tmp;\n" "#ifdef __OPENMP_AMDGCN__\n" "#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)\n" "#endif\n" " float __r = __ocml_remquo_f32(\n" " __x, __y, (__attribute__((address_space(5))) 
int *)&__tmp);\n" " *__quo = __tmp;\n" "\n" " return __r;\n" "}\n" "\n" "__DEVICE__\n" "float rhypotf(float __x, float __y) { return __ocml_rhypot_f32(__x, __y); }\n" "\n" "__DEVICE__\n" "float rintf(float __x) { return __builtin_rintf(__x); }\n" "\n" "__DEVICE__\n" "float rnorm3df(float __x, float __y, float __z) {\n" " return __ocml_rlen3_f32(__x, __y, __z);\n" "}\n" "\n" "__DEVICE__\n" "float rnorm4df(float __x, float __y, float __z, float __w) {\n" " return __ocml_rlen4_f32(__x, __y, __z, __w);\n" "}\n" "\n" "__DEVICE__\n" "float rnormf(int __dim,\n" " const float *__a) { // TODO: placeholder until OCML adds support.\n" " float __r = 0;\n" " while (__dim--) {\n" " __r += __a[0] * __a[0];\n" " ++__a;\n" " }\n" "\n" " return __ocml_rsqrt_f32(__r);\n" "}\n" "\n" "__DEVICE__\n" "float roundf(float __x) { return __builtin_roundf(__x); }\n" "\n" "__DEVICE__\n" "float rsqrtf(float __x) { return __ocml_rsqrt_f32(__x); }\n" "\n" "__DEVICE__\n" "float scalblnf(float __x, long int __n) {\n" " return (__n < INT_MAX) ? 
__builtin_amdgcn_ldexpf(__x, __n)\n" " : __ocml_scalb_f32(__x, __n);\n" "}\n" "\n" "__DEVICE__\n" "float scalbnf(float __x, int __n) { return __builtin_amdgcn_ldexpf(__x, __n); }\n" "\n" "__DEVICE__\n" "__RETURN_TYPE __signbitf(float __x) { return __builtin_signbitf(__x); }\n" "\n" "__DEVICE__\n" "void sincosf(float __x, float *__sinptr, float *__cosptr) {\n" " float __tmp;\n" "#ifdef __OPENMP_AMDGCN__\n" "#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)\n" "#endif\n" " *__sinptr =\n" " __ocml_sincos_f32(__x, (__attribute__((address_space(5))) float *)&__tmp);\n" " *__cosptr = __tmp;\n" "}\n" "\n" "__DEVICE__\n" "void sincospif(float __x, float *__sinptr, float *__cosptr) {\n" " float __tmp;\n" "#ifdef __OPENMP_AMDGCN__\n" "#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)\n" "#endif\n" " *__sinptr = __ocml_sincospi_f32(\n" " __x, (__attribute__((address_space(5))) float *)&__tmp);\n" " *__cosptr = __tmp;\n" "}\n" "\n" "__DEVICE__\n" "float sinf(float __x) { return __ocml_sin_f32(__x); }\n" "\n" "__DEVICE__\n" "float sinhf(float __x) { return __ocml_sinh_f32(__x); }\n" "\n" "__DEVICE__\n" "float sinpif(float __x) { return __ocml_sinpi_f32(__x); }\n" "\n" "__DEVICE__\n" "float sqrtf(float __x) { return __ocml_sqrt_f32(__x); }\n" "\n" "__DEVICE__\n" "float tanf(float __x) { return __ocml_tan_f32(__x); }\n" "\n" "__DEVICE__\n" "float tanhf(float __x) { return __ocml_tanh_f32(__x); }\n" "\n" "__DEVICE__\n" "float tgammaf(float __x) { return __ocml_tgamma_f32(__x); }\n" "\n" "__DEVICE__\n" "float truncf(float __x) { return __builtin_truncf(__x); }\n" "\n" "__DEVICE__\n" "float y0f(float __x) { return __ocml_y0_f32(__x); }\n" "\n" "__DEVICE__\n" "float y1f(float __x) { return __ocml_y1_f32(__x); }\n" "\n" "__DEVICE__\n" "float ynf(int __n, float __x) { // TODO: we could use Ahmes multiplication\n" " // and the Miller & Brown algorithm\n" " // for linear recurrences to get O(log n) steps, but it's unclear if\n" " // it'd be beneficial in this case. 
Placeholder until OCML adds\n" " // support.\n" " if (__n == 0)\n" " return y0f(__x);\n" " if (__n == 1)\n" " return y1f(__x);\n" "\n" " float __x0 = y0f(__x);\n" " float __x1 = y1f(__x);\n" " for (int __i = 1; __i < __n; ++__i) {\n" " float __x2 = (2 * __i) / __x * __x1 - __x0;\n" " __x0 = __x1;\n" " __x1 = __x2;\n" " }\n" "\n" " return __x1;\n" "}\n" "\n" "// BEGIN INTRINSICS\n" "\n" "__DEVICE__\n" "float __cosf(float __x) { return __ocml_native_cos_f32(__x); }\n" "\n" "__DEVICE__\n" "float __exp10f(float __x) { return __ocml_native_exp10_f32(__x); }\n" "\n" "__DEVICE__\n" "float __expf(float __x) { return __ocml_native_exp_f32(__x); }\n" "\n" "#if defined OCML_BASIC_ROUNDED_OPERATIONS\n" "__DEVICE__\n" "float __fadd_rd(float __x, float __y) { return __ocml_add_rtn_f32(__x, __y); }\n" "__DEVICE__\n" "float __fadd_rn(float __x, float __y) { return __ocml_add_rte_f32(__x, __y); }\n" "__DEVICE__\n" "float __fadd_ru(float __x, float __y) { return __ocml_add_rtp_f32(__x, __y); }\n" "__DEVICE__\n" "float __fadd_rz(float __x, float __y) { return __ocml_add_rtz_f32(__x, __y); }\n" "#else\n" "__DEVICE__\n" "float __fadd_rn(float __x, float __y) { return __x + __y; }\n" "#endif\n" "\n" "#if defined OCML_BASIC_ROUNDED_OPERATIONS\n" "__DEVICE__\n" "float __fdiv_rd(float __x, float __y) { return __ocml_div_rtn_f32(__x, __y); }\n" "__DEVICE__\n" "float __fdiv_rn(float __x, float __y) { return __ocml_div_rte_f32(__x, __y); }\n" "__DEVICE__\n" "float __fdiv_ru(float __x, float __y) { return __ocml_div_rtp_f32(__x, __y); }\n" "__DEVICE__\n" "float __fdiv_rz(float __x, float __y) { return __ocml_div_rtz_f32(__x, __y); }\n" "#else\n" "__DEVICE__\n" "float __fdiv_rn(float __x, float __y) { return __x / __y; }\n" "#endif\n" "\n" "__DEVICE__\n" "float __fdividef(float __x, float __y) { return __x / __y; }\n" "\n" "#if defined OCML_BASIC_ROUNDED_OPERATIONS\n" "__DEVICE__\n" "float __fmaf_rd(float __x, float __y, float __z) {\n" " return __ocml_fma_rtn_f32(__x, __y, __z);\n" "}\n" 
"__DEVICE__\n" "float __fmaf_rn(float __x, float __y, float __z) {\n" " return __ocml_fma_rte_f32(__x, __y, __z);\n" "}\n" "__DEVICE__\n" "float __fmaf_ru(float __x, float __y, float __z) {\n" " return __ocml_fma_rtp_f32(__x, __y, __z);\n" "}\n" "__DEVICE__\n" "float __fmaf_rz(float __x, float __y, float __z) {\n" " return __ocml_fma_rtz_f32(__x, __y, __z);\n" "}\n" "#else\n" "__DEVICE__\n" "float __fmaf_rn(float __x, float __y, float __z) {\n" " return __builtin_fmaf(__x, __y, __z);\n" "}\n" "#endif\n" "\n" "#if defined OCML_BASIC_ROUNDED_OPERATIONS\n" "__DEVICE__\n" "float __fmul_rd(float __x, float __y) { return __ocml_mul_rtn_f32(__x, __y); }\n" "__DEVICE__\n" "float __fmul_rn(float __x, float __y) { return __ocml_mul_rte_f32(__x, __y); }\n" "__DEVICE__\n" "float __fmul_ru(float __x, float __y) { return __ocml_mul_rtp_f32(__x, __y); }\n" "__DEVICE__\n" "float __fmul_rz(float __x, float __y) { return __ocml_mul_rtz_f32(__x, __y); }\n" "#else\n" "__DEVICE__\n" "float __fmul_rn(float __x, float __y) { return __x * __y; }\n" "#endif\n" "\n" "#if defined OCML_BASIC_ROUNDED_OPERATIONS\n" "__DEVICE__\n" "float __frcp_rd(float __x) { return __ocml_div_rtn_f32(1.0f, __x); }\n" "__DEVICE__\n" "float __frcp_rn(float __x) { return __ocml_div_rte_f32(1.0f, __x); }\n" "__DEVICE__\n" "float __frcp_ru(float __x) { return __ocml_div_rtp_f32(1.0f, __x); }\n" "__DEVICE__\n" "float __frcp_rz(float __x) { return __ocml_div_rtz_f32(1.0f, __x); }\n" "#else\n" "__DEVICE__\n" "float __frcp_rn(float __x) { return 1.0f / __x; }\n" "#endif\n" "\n" "__DEVICE__\n" "float __frsqrt_rn(float __x) { return __builtin_amdgcn_rsqf(__x); }\n" "\n" "#if defined OCML_BASIC_ROUNDED_OPERATIONS\n" "__DEVICE__\n" "float __fsqrt_rd(float __x) { return __ocml_sqrt_rtn_f32(__x); }\n" "__DEVICE__\n" "float __fsqrt_rn(float __x) { return __ocml_sqrt_rte_f32(__x); }\n" "__DEVICE__\n" "float __fsqrt_ru(float __x) { return __ocml_sqrt_rtp_f32(__x); }\n" "__DEVICE__\n" "float __fsqrt_rz(float __x) { return 
__ocml_sqrt_rtz_f32(__x); }\n" "#else\n" "__DEVICE__\n" "float __fsqrt_rn(float __x) { return __ocml_native_sqrt_f32(__x); }\n" "#endif\n" "\n" "#if defined OCML_BASIC_ROUNDED_OPERATIONS\n" "__DEVICE__\n" "float __fsub_rd(float __x, float __y) { return __ocml_sub_rtn_f32(__x, __y); }\n" "__DEVICE__\n" "float __fsub_rn(float __x, float __y) { return __ocml_sub_rte_f32(__x, __y); }\n" "__DEVICE__\n" "float __fsub_ru(float __x, float __y) { return __ocml_sub_rtp_f32(__x, __y); }\n" "__DEVICE__\n" "float __fsub_rz(float __x, float __y) { return __ocml_sub_rtz_f32(__x, __y); }\n" "#else\n" "__DEVICE__\n" "float __fsub_rn(float __x, float __y) { return __x - __y; }\n" "#endif\n" "\n" "__DEVICE__\n" "float __log10f(float __x) { return __ocml_native_log10_f32(__x); }\n" "\n" "__DEVICE__\n" "float __log2f(float __x) { return __ocml_native_log2_f32(__x); }\n" "\n" "__DEVICE__\n" "float __logf(float __x) { return __ocml_native_log_f32(__x); }\n" "\n" "__DEVICE__\n" "float __powf(float __x, float __y) { return __ocml_pow_f32(__x, __y); }\n" "\n" "__DEVICE__\n" "float __saturatef(float __x) { return (__x < 0) ? 0 : ((__x > 1) ? 
1 : __x); }\n" "\n" "__DEVICE__\n" "void __sincosf(float __x, float *__sinptr, float *__cosptr) {\n" " *__sinptr = __ocml_native_sin_f32(__x);\n" " *__cosptr = __ocml_native_cos_f32(__x);\n" "}\n" "\n" "__DEVICE__\n" "float __sinf(float __x) { return __ocml_native_sin_f32(__x); }\n" "\n" "__DEVICE__\n" "float __tanf(float __x) { return __ocml_tan_f32(__x); }\n" "// END INTRINSICS\n" "// END FLOAT\n" "\n" "// BEGIN DOUBLE\n" "__DEVICE__\n" "double acos(double __x) { return __ocml_acos_f64(__x); }\n" "\n" "__DEVICE__\n" "double acosh(double __x) { return __ocml_acosh_f64(__x); }\n" "\n" "__DEVICE__\n" "double asin(double __x) { return __ocml_asin_f64(__x); }\n" "\n" "__DEVICE__\n" "double asinh(double __x) { return __ocml_asinh_f64(__x); }\n" "\n" "__DEVICE__\n" "double atan(double __x) { return __ocml_atan_f64(__x); }\n" "\n" "__DEVICE__\n" "double atan2(double __x, double __y) { return __ocml_atan2_f64(__x, __y); }\n" "\n" "__DEVICE__\n" "double atanh(double __x) { return __ocml_atanh_f64(__x); }\n" "\n" "__DEVICE__\n" "double cbrt(double __x) { return __ocml_cbrt_f64(__x); }\n" "\n" "__DEVICE__\n" "double ceil(double __x) { return __builtin_ceil(__x); }\n" "\n" "__DEVICE__\n" "double copysign(double __x, double __y) {\n" " return __builtin_copysign(__x, __y);\n" "}\n" "\n" "__DEVICE__\n" "double cos(double __x) { return __ocml_cos_f64(__x); }\n" "\n" "__DEVICE__\n" "double cosh(double __x) { return __ocml_cosh_f64(__x); }\n" "\n" "__DEVICE__\n" "double cospi(double __x) { return __ocml_cospi_f64(__x); }\n" "\n" "__DEVICE__\n" "double cyl_bessel_i0(double __x) { return __ocml_i0_f64(__x); }\n" "\n" "__DEVICE__\n" "double cyl_bessel_i1(double __x) { return __ocml_i1_f64(__x); }\n" "\n" "__DEVICE__\n" "double erf(double __x) { return __ocml_erf_f64(__x); }\n" "\n" "__DEVICE__\n" "double erfc(double __x) { return __ocml_erfc_f64(__x); }\n" "\n" "__DEVICE__\n" "double erfcinv(double __x) { return __ocml_erfcinv_f64(__x); }\n" "\n" "__DEVICE__\n" "double erfcx(double 
__x) { return __ocml_erfcx_f64(__x); }\n" "\n" "__DEVICE__\n" "double erfinv(double __x) { return __ocml_erfinv_f64(__x); }\n" "\n" "__DEVICE__\n" "double exp(double __x) { return __ocml_exp_f64(__x); }\n" "\n" "__DEVICE__\n" "double exp10(double __x) { return __ocml_exp10_f64(__x); }\n" "\n" "__DEVICE__\n" "double exp2(double __x) { return __ocml_exp2_f64(__x); }\n" "\n" "__DEVICE__\n" "double expm1(double __x) { return __ocml_expm1_f64(__x); }\n" "\n" "__DEVICE__\n" "double fabs(double __x) { return __builtin_fabs(__x); }\n" "\n" "__DEVICE__\n" "double fdim(double __x, double __y) { return __ocml_fdim_f64(__x, __y); }\n" "\n" "__DEVICE__\n" "double floor(double __x) { return __builtin_floor(__x); }\n" "\n" "__DEVICE__\n" "double fma(double __x, double __y, double __z) {\n" " return __builtin_fma(__x, __y, __z);\n" "}\n" "\n" "__DEVICE__\n" "double fmax(double __x, double __y) { return __builtin_fmax(__x, __y); }\n" "\n" "__DEVICE__\n" "double fmin(double __x, double __y) { return __builtin_fmin(__x, __y); }\n" "\n" "__DEVICE__\n" "double fmod(double __x, double __y) { return __ocml_fmod_f64(__x, __y); }\n" "\n" "__DEVICE__\n" "double frexp(double __x, int *__nptr) {\n" " return __builtin_frexp(__x, __nptr);\n" "}\n" "\n" "__DEVICE__\n" "double hypot(double __x, double __y) { return __ocml_hypot_f64(__x, __y); }\n" "\n" "__DEVICE__\n" "int ilogb(double __x) { return __ocml_ilogb_f64(__x); }\n" "\n" "__DEVICE__\n" "__RETURN_TYPE __finite(double __x) { return __builtin_isfinite(__x); }\n" "\n" "__DEVICE__\n" "__RETURN_TYPE __isinf(double __x) { return __builtin_isinf(__x); }\n" "\n" "__DEVICE__\n" "__RETURN_TYPE __isnan(double __x) { return __builtin_isnan(__x); }\n" "\n" "__DEVICE__\n" "double j0(double __x) { return __ocml_j0_f64(__x); }\n" "\n" "__DEVICE__\n" "double j1(double __x) { return __ocml_j1_f64(__x); }\n" "\n" "__DEVICE__\n" "double jn(int __n, double __x) { // TODO: we could use Ahmes multiplication\n" " // and the Miller & Brown algorithm\n" " // for 
linear recurrences to get O(log n) steps, but it's unclear if\n" " // it'd be beneficial in this case. Placeholder until OCML adds\n" " // support.\n" " if (__n == 0)\n" " return j0(__x);\n" " if (__n == 1)\n" " return j1(__x);\n" "\n" " double __x0 = j0(__x);\n" " double __x1 = j1(__x);\n" " for (int __i = 1; __i < __n; ++__i) {\n" " double __x2 = (2 * __i) / __x * __x1 - __x0;\n" " __x0 = __x1;\n" " __x1 = __x2;\n" " }\n" " return __x1;\n" "}\n" "\n" "__DEVICE__\n" "double ldexp(double __x, int __e) { return __builtin_amdgcn_ldexp(__x, __e); }\n" "\n" "__DEVICE__\n" "double lgamma(double __x) { return __ocml_lgamma_f64(__x); }\n" "\n" "__DEVICE__\n" "long long int llrint(double __x) { return __builtin_rint(__x); }\n" "\n" "__DEVICE__\n" "long long int llround(double __x) { return __builtin_round(__x); }\n" "\n" "__DEVICE__\n" "double log(double __x) { return __ocml_log_f64(__x); }\n" "\n" "__DEVICE__\n" "double log10(double __x) { return __ocml_log10_f64(__x); }\n" "\n" "__DEVICE__\n" "double log1p(double __x) { return __ocml_log1p_f64(__x); }\n" "\n" "__DEVICE__\n" "double log2(double __x) { return __ocml_log2_f64(__x); }\n" "\n" "__DEVICE__\n" "double logb(double __x) { return __ocml_logb_f64(__x); }\n" "\n" "__DEVICE__\n" "long int lrint(double __x) { return __builtin_rint(__x); }\n" "\n" "__DEVICE__\n" "long int lround(double __x) { return __builtin_round(__x); }\n" "\n" "__DEVICE__\n" "double modf(double __x, double *__iptr) {\n" " double __tmp;\n" "#ifdef __OPENMP_AMDGCN__\n" "#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)\n" "#endif\n" " double __r =\n" " __ocml_modf_f64(__x, (__attribute__((address_space(5))) double *)&__tmp);\n" " *__iptr = __tmp;\n" "\n" " return __r;\n" "}\n" "\n" "__DEVICE__\n" "double nan(const char *__tagp) {\n" "#if !_WIN32\n" " union {\n" " double val;\n" " struct ieee_double {\n" " uint64_t mantissa : 51;\n" " uint32_t quiet : 1;\n" " uint32_t exponent : 11;\n" " uint32_t sign : 1;\n" " } bits;\n" " } __tmp;\n" " 
__static_assert_type_size_equal(sizeof(__tmp.val), sizeof(__tmp.bits));\n" "\n" " __tmp.bits.sign = 0u;\n" " __tmp.bits.exponent = ~0u;\n" " __tmp.bits.quiet = 1u;\n" " __tmp.bits.mantissa = __make_mantissa(__tagp);\n" "\n" " return __tmp.val;\n" "#else\n" " __static_assert_type_size_equal(sizeof(uint64_t), sizeof(double));\n" " uint64_t __val = __make_mantissa(__tagp);\n" " __val |= 0xFFF << 51;\n" " return *reinterpret_cast(&__val);\n" "#endif\n" "}\n" "\n" "__DEVICE__\n" "double nearbyint(double __x) { return __builtin_nearbyint(__x); }\n" "\n" "__DEVICE__\n" "double nextafter(double __x, double __y) {\n" " return __ocml_nextafter_f64(__x, __y);\n" "}\n" "\n" "__DEVICE__\n" "double norm(int __dim,\n" " const double *__a) { // TODO: placeholder until OCML adds support.\n" " double __r = 0;\n" " while (__dim--) {\n" " __r += __a[0] * __a[0];\n" " ++__a;\n" " }\n" "\n" " return __ocml_sqrt_f64(__r);\n" "}\n" "\n" "__DEVICE__\n" "double norm3d(double __x, double __y, double __z) {\n" " return __ocml_len3_f64(__x, __y, __z);\n" "}\n" "\n" "__DEVICE__\n" "double norm4d(double __x, double __y, double __z, double __w) {\n" " return __ocml_len4_f64(__x, __y, __z, __w);\n" "}\n" "\n" "__DEVICE__\n" "double normcdf(double __x) { return __ocml_ncdf_f64(__x); }\n" "\n" "__DEVICE__\n" "double normcdfinv(double __x) { return __ocml_ncdfinv_f64(__x); }\n" "\n" "__DEVICE__\n" "double pow(double __x, double __y) { return __ocml_pow_f64(__x, __y); }\n" "\n" "__DEVICE__\n" "double powi(double __x, int __y) { return __ocml_pown_f64(__x, __y); }\n" "\n" "__DEVICE__\n" "double rcbrt(double __x) { return __ocml_rcbrt_f64(__x); }\n" "\n" "__DEVICE__\n" "double remainder(double __x, double __y) {\n" " return __ocml_remainder_f64(__x, __y);\n" "}\n" "\n" "__DEVICE__\n" "double remquo(double __x, double __y, int *__quo) {\n" " int __tmp;\n" "#ifdef __OPENMP_AMDGCN__\n" "#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)\n" "#endif\n" " double __r = __ocml_remquo_f64(\n" " __x, 
__y, (__attribute__((address_space(5))) int *)&__tmp);\n" " *__quo = __tmp;\n" "\n" " return __r;\n" "}\n" "\n" "__DEVICE__\n" "double rhypot(double __x, double __y) { return __ocml_rhypot_f64(__x, __y); }\n" "\n" "__DEVICE__\n" "double rint(double __x) { return __builtin_rint(__x); }\n" "\n" "__DEVICE__\n" "double rnorm(int __dim,\n" " const double *__a) { // TODO: placeholder until OCML adds support.\n" " double __r = 0;\n" " while (__dim--) {\n" " __r += __a[0] * __a[0];\n" " ++__a;\n" " }\n" "\n" " return __ocml_rsqrt_f64(__r);\n" "}\n" "\n" "__DEVICE__\n" "double rnorm3d(double __x, double __y, double __z) {\n" " return __ocml_rlen3_f64(__x, __y, __z);\n" "}\n" "\n" "__DEVICE__\n" "double rnorm4d(double __x, double __y, double __z, double __w) {\n" " return __ocml_rlen4_f64(__x, __y, __z, __w);\n" "}\n" "\n" "__DEVICE__\n" "double round(double __x) { return __builtin_round(__x); }\n" "\n" "__DEVICE__\n" "double rsqrt(double __x) { return __ocml_rsqrt_f64(__x); }\n" "\n" "__DEVICE__\n" "double scalbln(double __x, long int __n) {\n" " return (__n < INT_MAX) ? 
__builtin_amdgcn_ldexp(__x, __n)\n" " : __ocml_scalb_f64(__x, __n);\n" "}\n" "__DEVICE__\n" "double scalbn(double __x, int __n) { return __builtin_amdgcn_ldexp(__x, __n); }\n" "\n" "__DEVICE__\n" "__RETURN_TYPE __signbit(double __x) { return __builtin_signbit(__x); }\n" "\n" "__DEVICE__\n" "double sin(double __x) { return __ocml_sin_f64(__x); }\n" "\n" "__DEVICE__\n" "void sincos(double __x, double *__sinptr, double *__cosptr) {\n" " double __tmp;\n" "#ifdef __OPENMP_AMDGCN__\n" "#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)\n" "#endif\n" " *__sinptr = __ocml_sincos_f64(\n" " __x, (__attribute__((address_space(5))) double *)&__tmp);\n" " *__cosptr = __tmp;\n" "}\n" "\n" "__DEVICE__\n" "void sincospi(double __x, double *__sinptr, double *__cosptr) {\n" " double __tmp;\n" "#ifdef __OPENMP_AMDGCN__\n" "#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)\n" "#endif\n" " *__sinptr = __ocml_sincospi_f64(\n" " __x, (__attribute__((address_space(5))) double *)&__tmp);\n" " *__cosptr = __tmp;\n" "}\n" "\n" "__DEVICE__\n" "double sinh(double __x) { return __ocml_sinh_f64(__x); }\n" "\n" "__DEVICE__\n" "double sinpi(double __x) { return __ocml_sinpi_f64(__x); }\n" "\n" "__DEVICE__\n" "double sqrt(double __x) { return __ocml_sqrt_f64(__x); }\n" "\n" "__DEVICE__\n" "double tan(double __x) { return __ocml_tan_f64(__x); }\n" "\n" "__DEVICE__\n" "double tanh(double __x) { return __ocml_tanh_f64(__x); }\n" "\n" "__DEVICE__\n" "double tgamma(double __x) { return __ocml_tgamma_f64(__x); }\n" "\n" "__DEVICE__\n" "double trunc(double __x) { return __builtin_trunc(__x); }\n" "\n" "__DEVICE__\n" "double y0(double __x) { return __ocml_y0_f64(__x); }\n" "\n" "__DEVICE__\n" "double y1(double __x) { return __ocml_y1_f64(__x); }\n" "\n" "__DEVICE__\n" "double yn(int __n, double __x) { // TODO: we could use Ahmes multiplication\n" " // and the Miller & Brown algorithm\n" " // for linear recurrences to get O(log n) steps, but it's unclear if\n" " // it'd be beneficial in 
this case. Placeholder until OCML adds\n" " // support.\n" " if (__n == 0)\n" " return y0(__x);\n" " if (__n == 1)\n" " return y1(__x);\n" "\n" " double __x0 = y0(__x);\n" " double __x1 = y1(__x);\n" " for (int __i = 1; __i < __n; ++__i) {\n" " double __x2 = (2 * __i) / __x * __x1 - __x0;\n" " __x0 = __x1;\n" " __x1 = __x2;\n" " }\n" "\n" " return __x1;\n" "}\n" "\n" "// BEGIN INTRINSICS\n" "#if defined OCML_BASIC_ROUNDED_OPERATIONS\n" "__DEVICE__\n" "double __dadd_rd(double __x, double __y) {\n" " return __ocml_add_rtn_f64(__x, __y);\n" "}\n" "__DEVICE__\n" "double __dadd_rn(double __x, double __y) {\n" " return __ocml_add_rte_f64(__x, __y);\n" "}\n" "__DEVICE__\n" "double __dadd_ru(double __x, double __y) {\n" " return __ocml_add_rtp_f64(__x, __y);\n" "}\n" "__DEVICE__\n" "double __dadd_rz(double __x, double __y) {\n" " return __ocml_add_rtz_f64(__x, __y);\n" "}\n" "#else\n" "__DEVICE__\n" "double __dadd_rn(double __x, double __y) { return __x + __y; }\n" "#endif\n" "\n" "#if defined OCML_BASIC_ROUNDED_OPERATIONS\n" "__DEVICE__\n" "double __ddiv_rd(double __x, double __y) {\n" " return __ocml_div_rtn_f64(__x, __y);\n" "}\n" "__DEVICE__\n" "double __ddiv_rn(double __x, double __y) {\n" " return __ocml_div_rte_f64(__x, __y);\n" "}\n" "__DEVICE__\n" "double __ddiv_ru(double __x, double __y) {\n" " return __ocml_div_rtp_f64(__x, __y);\n" "}\n" "__DEVICE__\n" "double __ddiv_rz(double __x, double __y) {\n" " return __ocml_div_rtz_f64(__x, __y);\n" "}\n" "#else\n" "__DEVICE__\n" "double __ddiv_rn(double __x, double __y) { return __x / __y; }\n" "#endif\n" "\n" "#if defined OCML_BASIC_ROUNDED_OPERATIONS\n" "__DEVICE__\n" "double __dmul_rd(double __x, double __y) {\n" " return __ocml_mul_rtn_f64(__x, __y);\n" "}\n" "__DEVICE__\n" "double __dmul_rn(double __x, double __y) {\n" " return __ocml_mul_rte_f64(__x, __y);\n" "}\n" "__DEVICE__\n" "double __dmul_ru(double __x, double __y) {\n" " return __ocml_mul_rtp_f64(__x, __y);\n" "}\n" "__DEVICE__\n" "double __dmul_rz(double 
__x, double __y) {\n" " return __ocml_mul_rtz_f64(__x, __y);\n" "}\n" "#else\n" "__DEVICE__\n" "double __dmul_rn(double __x, double __y) { return __x * __y; }\n" "#endif\n" "\n" "#if defined OCML_BASIC_ROUNDED_OPERATIONS\n" "__DEVICE__\n" "double __drcp_rd(double __x) { return __ocml_div_rtn_f64(1.0, __x); }\n" "__DEVICE__\n" "double __drcp_rn(double __x) { return __ocml_div_rte_f64(1.0, __x); }\n" "__DEVICE__\n" "double __drcp_ru(double __x) { return __ocml_div_rtp_f64(1.0, __x); }\n" "__DEVICE__\n" "double __drcp_rz(double __x) { return __ocml_div_rtz_f64(1.0, __x); }\n" "#else\n" "__DEVICE__\n" "double __drcp_rn(double __x) { return 1.0 / __x; }\n" "#endif\n" "\n" "#if defined OCML_BASIC_ROUNDED_OPERATIONS\n" "__DEVICE__\n" "double __dsqrt_rd(double __x) { return __ocml_sqrt_rtn_f64(__x); }\n" "__DEVICE__\n" "double __dsqrt_rn(double __x) { return __ocml_sqrt_rte_f64(__x); }\n" "__DEVICE__\n" "double __dsqrt_ru(double __x) { return __ocml_sqrt_rtp_f64(__x); }\n" "__DEVICE__\n" "double __dsqrt_rz(double __x) { return __ocml_sqrt_rtz_f64(__x); }\n" "#else\n" "__DEVICE__\n" "double __dsqrt_rn(double __x) { return __ocml_sqrt_f64(__x); }\n" "#endif\n" "\n" "#if defined OCML_BASIC_ROUNDED_OPERATIONS\n" "__DEVICE__\n" "double __dsub_rd(double __x, double __y) {\n" " return __ocml_sub_rtn_f64(__x, __y);\n" "}\n" "__DEVICE__\n" "double __dsub_rn(double __x, double __y) {\n" " return __ocml_sub_rte_f64(__x, __y);\n" "}\n" "__DEVICE__\n" "double __dsub_ru(double __x, double __y) {\n" " return __ocml_sub_rtp_f64(__x, __y);\n" "}\n" "__DEVICE__\n" "double __dsub_rz(double __x, double __y) {\n" " return __ocml_sub_rtz_f64(__x, __y);\n" "}\n" "#else\n" "__DEVICE__\n" "double __dsub_rn(double __x, double __y) { return __x - __y; }\n" "#endif\n" "\n" "#if defined OCML_BASIC_ROUNDED_OPERATIONS\n" "__DEVICE__\n" "double __fma_rd(double __x, double __y, double __z) {\n" " return __ocml_fma_rtn_f64(__x, __y, __z);\n" "}\n" "__DEVICE__\n" "double __fma_rn(double __x, double __y, 
double __z) {\n" " return __ocml_fma_rte_f64(__x, __y, __z);\n" "}\n" "__DEVICE__\n" "double __fma_ru(double __x, double __y, double __z) {\n" " return __ocml_fma_rtp_f64(__x, __y, __z);\n" "}\n" "__DEVICE__\n" "double __fma_rz(double __x, double __y, double __z) {\n" " return __ocml_fma_rtz_f64(__x, __y, __z);\n" "}\n" "#else\n" "__DEVICE__\n" "double __fma_rn(double __x, double __y, double __z) {\n" " return __builtin_fma(__x, __y, __z);\n" "}\n" "#endif\n" "// END INTRINSICS\n" "// END DOUBLE\n" "\n" "// C only macros\n" "#if !defined(__cplusplus) && __STDC_VERSION__ >= 201112L\n" "#define isfinite(__x) _Generic((__x), float : __finitef, double : __finite)(__x)\n" "#define isinf(__x) _Generic((__x), float : __isinff, double : __isinf)(__x)\n" "#define isnan(__x) _Generic((__x), float : __isnanf, double : __isnan)(__x)\n" "#define signbit(__x) \\\n" " _Generic((__x), float : __signbitf, double : __signbit)(__x)\n" "#endif // !defined(__cplusplus) && __STDC_VERSION__ >= 201112L\n" "\n" "#if defined(__cplusplus)\n" "template __DEVICE__ T min(T __arg1, T __arg2) {\n" " return (__arg1 < __arg2) ? __arg1 : __arg2;\n" "}\n" "\n" "template __DEVICE__ T max(T __arg1, T __arg2) {\n" " return (__arg1 > __arg2) ? __arg1 : __arg2;\n" "}\n" "\n" "__DEVICE__ int min(int __arg1, int __arg2) {\n" " return (__arg1 < __arg2) ? __arg1 : __arg2;\n" "}\n" "__DEVICE__ int max(int __arg1, int __arg2) {\n" " return (__arg1 > __arg2) ? 
__arg1 : __arg2;\n" "}\n" "\n" "__DEVICE__\n" "float max(float __x, float __y) { return __builtin_fmaxf(__x, __y); }\n" "\n" "__DEVICE__\n" "double max(double __x, double __y) { return __builtin_fmax(__x, __y); }\n" "\n" "__DEVICE__\n" "float min(float __x, float __y) { return __builtin_fminf(__x, __y); }\n" "\n" "__DEVICE__\n" "double min(double __x, double __y) { return __builtin_fmin(__x, __y); }\n" "\n" "#if !defined(__HIPCC_RTC__) && !defined(__OPENMP_AMDGCN__)\n" "__host__ inline static int min(int __arg1, int __arg2) {\n" " return std::min(__arg1, __arg2);\n" "}\n" "\n" "__host__ inline static int max(int __arg1, int __arg2) {\n" " return std::max(__arg1, __arg2);\n" "}\n" "#endif // !defined(__HIPCC_RTC__) && !defined(__OPENMP_AMDGCN__)\n" "#endif\n" "\n" "#pragma pop_macro(\"__DEVICE__\")\n" "#pragma pop_macro(\"__RETURN_TYPE\")\n" "\n" "#endif // __CLANG_HIP_MATH_H__\n" "" } , { "/builtins/__clang_hip_runtime_wrapper.h" , "/*===---- __clang_hip_runtime_wrapper.h - HIP runtime support ---------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "/*\n" " * WARNING: This header is intended to be directly -include'd by\n" " * the compiler and is not supposed to be included by users.\n" " *\n" " */\n" "\n" "#ifndef __CLANG_HIP_RUNTIME_WRAPPER_H__\n" "#define __CLANG_HIP_RUNTIME_WRAPPER_H__\n" "\n" "#if __HIP__\n" "\n" "#define __host__ __attribute__((host))\n" "#define __device__ __attribute__((device))\n" "#define __global__ __attribute__((global))\n" "#define __shared__ __attribute__((shared))\n" "#define __constant__ __attribute__((constant))\n" "#define __managed__ __attribute__((managed))\n" "\n" "#if !defined(__cplusplus) || __cplusplus < 201103L\n" " #define nullptr 
NULL;\n" "#endif\n" "\n" "#ifdef __cplusplus\n" "extern \"C\" {\n" " __attribute__((__visibility__(\"default\")))\n" " __attribute__((weak))\n" " __attribute__((noreturn))\n" " __device__ void __cxa_pure_virtual(void) {\n" " __builtin_trap();\n" " }\n" " __attribute__((__visibility__(\"default\")))\n" " __attribute__((weak))\n" " __attribute__((noreturn))\n" " __device__ void __cxa_deleted_virtual(void) {\n" " __builtin_trap();\n" " }\n" "}\n" "#endif //__cplusplus\n" "\n" "#if !defined(__HIPCC_RTC__)\n" "#include \n" "#include \n" "#include \n" "#if __has_include(\"hip/hip_version.h\")\n" "#include \"hip/hip_version.h\"\n" "#endif // __has_include(\"hip/hip_version.h\")\n" "#else\n" "typedef __SIZE_TYPE__ size_t;\n" "// Define macros which are needed to declare HIP device API's without standard\n" "// C/C++ headers. This is for readability so that these API's can be written\n" "// the same way as non-hipRTC use case. These macros need to be popped so that\n" "// they do not pollute users' name space.\n" "#pragma push_macro(\"NULL\")\n" "#pragma push_macro(\"uint32_t\")\n" "#pragma push_macro(\"uint64_t\")\n" "#pragma push_macro(\"CHAR_BIT\")\n" "#pragma push_macro(\"INT_MAX\")\n" "#define NULL (void *)0\n" "#define uint32_t __UINT32_TYPE__\n" "#define uint64_t __UINT64_TYPE__\n" "#define CHAR_BIT __CHAR_BIT__\n" "#define INT_MAX __INTMAX_MAX__\n" "#endif // __HIPCC_RTC__\n" "\n" "typedef __SIZE_TYPE__ __hip_size_t;\n" "\n" "#ifdef __cplusplus\n" "extern \"C\" {\n" "#endif //__cplusplus\n" "\n" "#if HIP_VERSION_MAJOR * 100 + HIP_VERSION_MINOR >= 405\n" "extern \"C\" __device__ unsigned long long __ockl_dm_alloc(unsigned long long __size);\n" "extern \"C\" __device__ void __ockl_dm_dealloc(unsigned long long __addr);\n" "#if __has_feature(address_sanitizer)\n" "extern \"C\" __device__ unsigned long long __asan_malloc_impl(unsigned long long __size, unsigned long long __pc);\n" "extern \"C\" __device__ void __asan_free_impl(unsigned long long __addr, unsigned long 
long __pc);\n" "__attribute__((noinline, weak)) __device__ void *malloc(__hip_size_t __size) {\n" " unsigned long long __pc = (unsigned long long)__builtin_return_address(0);\n" " return (void *)__asan_malloc_impl(__size, __pc);\n" "}\n" "__attribute__((noinline, weak)) __device__ void free(void *__ptr) {\n" " unsigned long long __pc = (unsigned long long)__builtin_return_address(0);\n" " __asan_free_impl((unsigned long long)__ptr, __pc);\n" "}\n" "#else\n" "__attribute__((weak)) inline __device__ void *malloc(__hip_size_t __size) {\n" " return (void *) __ockl_dm_alloc(__size);\n" "}\n" "__attribute__((weak)) inline __device__ void free(void *__ptr) {\n" " __ockl_dm_dealloc((unsigned long long)__ptr);\n" "}\n" "#endif // __has_feature(address_sanitizer)\n" "#else // HIP version check\n" "#if __HIP_ENABLE_DEVICE_MALLOC__\n" "__device__ void *__hip_malloc(__hip_size_t __size);\n" "__device__ void *__hip_free(void *__ptr);\n" "__attribute__((weak)) inline __device__ void *malloc(__hip_size_t __size) {\n" " return __hip_malloc(__size);\n" "}\n" "__attribute__((weak)) inline __device__ void free(void *__ptr) {\n" " __hip_free(__ptr);\n" "}\n" "#else\n" "__attribute__((weak)) inline __device__ void *malloc(__hip_size_t __size) {\n" " __builtin_trap();\n" " return (void *)0;\n" "}\n" "__attribute__((weak)) inline __device__ void free(void *__ptr) {\n" " __builtin_trap();\n" "}\n" "#endif\n" "#endif // HIP version check\n" "\n" "#ifdef __cplusplus\n" "} // extern \"C\"\n" "#endif //__cplusplus\n" "\n" "#include <__clang_hip_libdevice_declares.h>\n" "#include <__clang_hip_math.h>\n" "#include <__clang_hip_stdlib.h>\n" "\n" "#if defined(__HIPCC_RTC__)\n" "#include <__clang_hip_cmath.h>\n" "#else\n" "#include <__clang_cuda_math_forward_declares.h>\n" "#include <__clang_hip_cmath.h>\n" "#include <__clang_cuda_complex_builtins.h>\n" "#include \n" "#include \n" "#include \n" "#endif // __HIPCC_RTC__\n" "\n" "#define __CLANG_HIP_RUNTIME_WRAPPER_INCLUDED__ 1\n" "#if 
defined(__HIPCC_RTC__)\n" "#pragma pop_macro(\"NULL\")\n" "#pragma pop_macro(\"uint32_t\")\n" "#pragma pop_macro(\"uint64_t\")\n" "#pragma pop_macro(\"CHAR_BIT\")\n" "#pragma pop_macro(\"INT_MAX\")\n" "#endif // __HIPCC_RTC__\n" "#endif // __HIP__\n" "#endif // __CLANG_HIP_RUNTIME_WRAPPER_H__\n" "" } , { "/builtins/__clang_hip_stdlib.h" , "/*===---- __clang_hip_stdlib.h - Device-side HIP math support --------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "#ifndef __CLANG_HIP_STDLIB_H__\n" "\n" "#if !defined(__HIP__) && !defined(__OPENMP_AMDGCN__)\n" "#error \"This file is for HIP and OpenMP AMDGCN device compilation only.\"\n" "#endif\n" "\n" "#if !defined(__cplusplus)\n" "\n" "#include \n" "\n" "#ifdef __OPENMP_AMDGCN__\n" "#define __DEVICE__ static inline __attribute__((always_inline, nothrow))\n" "#else\n" "#define __DEVICE__ static __device__ inline __attribute__((always_inline))\n" "#endif\n" "\n" "__DEVICE__\n" "int abs(int __x) {\n" " int __sgn = __x >> (sizeof(int) * CHAR_BIT - 1);\n" " return (__x ^ __sgn) - __sgn;\n" "}\n" "__DEVICE__\n" "long labs(long __x) {\n" " long __sgn = __x >> (sizeof(long) * CHAR_BIT - 1);\n" " return (__x ^ __sgn) - __sgn;\n" "}\n" "__DEVICE__\n" "long long llabs(long long __x) {\n" " long long __sgn = __x >> (sizeof(long long) * CHAR_BIT - 1);\n" " return (__x ^ __sgn) - __sgn;\n" "}\n" "\n" "#endif // !defined(__cplusplus)\n" "\n" "#endif // #define __CLANG_HIP_STDLIB_H__\n" "" } , { "/builtins/__stddef_max_align_t.h" , "/*===---- __stddef_max_align_t.h - Definition of max_align_t for modules ---===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license 
information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __CLANG_MAX_ALIGN_T_DEFINED\n" "#define __CLANG_MAX_ALIGN_T_DEFINED\n" "\n" "#if defined(_MSC_VER)\n" "typedef double max_align_t;\n" "#elif defined(__APPLE__)\n" "typedef long double max_align_t;\n" "#else\n" "// Define 'max_align_t' to match the GCC definition.\n" "typedef struct {\n" " long long __clang_max_align_nonce1\n" " __attribute__((__aligned__(__alignof__(long long))));\n" " long double __clang_max_align_nonce2\n" " __attribute__((__aligned__(__alignof__(long double))));\n" "} max_align_t;\n" "#endif\n" "\n" "#endif\n" "" } , { "/builtins/__wmmintrin_aes.h" , "/*===---- __wmmintrin_aes.h - AES intrinsics -------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __WMMINTRIN_H\n" "#error \"Never use <__wmmintrin_aes.h> directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __WMMINTRIN_AES_H\n" "#define __WMMINTRIN_AES_H\n" "\n" "/* Define the default attributes for the functions in this file. 
*/\n" "#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__(\"aes\"), __min_vector_width__(128)))\n" "\n" "/// Performs a single round of AES encryption using the Equivalent\n" "/// Inverse Cipher, transforming the state value from the first source\n" "/// operand using a 128-bit round key value contained in the second source\n" "/// operand, and writes the result to the destination.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VAESENC instruction.\n" "///\n" "/// \\param __V\n" "/// A 128-bit integer vector containing the state value.\n" "/// \\param __R\n" "/// A 128-bit integer vector containing the round key value.\n" "/// \\returns A 128-bit integer vector containing the encrypted value.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_aesenc_si128(__m128i __V, __m128i __R)\n" "{\n" " return (__m128i)__builtin_ia32_aesenc128((__v2di)__V, (__v2di)__R);\n" "}\n" "\n" "/// Performs the final round of AES encryption using the Equivalent\n" "/// Inverse Cipher, transforming the state value from the first source\n" "/// operand using a 128-bit round key value contained in the second source\n" "/// operand, and writes the result to the destination.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VAESENCLAST instruction.\n" "///\n" "/// \\param __V\n" "/// A 128-bit integer vector containing the state value.\n" "/// \\param __R\n" "/// A 128-bit integer vector containing the round key value.\n" "/// \\returns A 128-bit integer vector containing the encrypted value.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_aesenclast_si128(__m128i __V, __m128i __R)\n" "{\n" " return (__m128i)__builtin_ia32_aesenclast128((__v2di)__V, (__v2di)__R);\n" "}\n" "\n" "/// Performs a single round of AES decryption using the Equivalent\n" "/// Inverse Cipher, transforming the state value from the first source\n" "/// operand using a 128-bit round key value contained in the 
second source\n" "/// operand, and writes the result to the destination.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VAESDEC instruction.\n" "///\n" "/// \\param __V\n" "/// A 128-bit integer vector containing the state value.\n" "/// \\param __R\n" "/// A 128-bit integer vector containing the round key value.\n" "/// \\returns A 128-bit integer vector containing the decrypted value.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_aesdec_si128(__m128i __V, __m128i __R)\n" "{\n" " return (__m128i)__builtin_ia32_aesdec128((__v2di)__V, (__v2di)__R);\n" "}\n" "\n" "/// Performs the final round of AES decryption using the Equivalent\n" "/// Inverse Cipher, transforming the state value from the first source\n" "/// operand using a 128-bit round key value contained in the second source\n" "/// operand, and writes the result to the destination.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VAESDECLAST instruction.\n" "///\n" "/// \\param __V\n" "/// A 128-bit integer vector containing the state value.\n" "/// \\param __R\n" "/// A 128-bit integer vector containing the round key value.\n" "/// \\returns A 128-bit integer vector containing the decrypted value.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_aesdeclast_si128(__m128i __V, __m128i __R)\n" "{\n" " return (__m128i)__builtin_ia32_aesdeclast128((__v2di)__V, (__v2di)__R);\n" "}\n" "\n" "/// Applies the AES InvMixColumns() transformation to an expanded key\n" "/// contained in the source operand, and writes the result to the\n" "/// destination.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VAESIMC instruction.\n" "///\n" "/// \\param __V\n" "/// A 128-bit integer vector containing the expanded key.\n" "/// \\returns A 128-bit integer vector containing the transformed value.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_aesimc_si128(__m128i __V)\n" "{\n" " return 
(__m128i)__builtin_ia32_aesimc128((__v2di)__V);\n" "}\n" "\n" "/// Generates a round key for AES encryption, operating on 128-bit data\n" "/// specified in the first source operand and using an 8-bit round constant\n" "/// specified by the second source operand, and writes the result to the\n" "/// destination.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm_aeskeygenassist_si128(__m128i C, const int R);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the AESKEYGENASSIST instruction.\n" "///\n" "/// \\param C\n" "/// A 128-bit integer vector that is used to generate the AES encryption key.\n" "/// \\param R\n" "/// An 8-bit round constant used to generate the AES encryption key.\n" "/// \\returns A 128-bit round key for AES encryption.\n" "#define _mm_aeskeygenassist_si128(C, R) \\\n" " ((__m128i)__builtin_ia32_aeskeygenassist128((__v2di)(__m128i)(C), (int)(R)))\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "\n" "#endif /* __WMMINTRIN_AES_H */\n" "" } , { "/builtins/__wmmintrin_pclmul.h" , "/*===---- __wmmintrin_pclmul.h - PCMUL intrinsics ---------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __WMMINTRIN_H\n" "#error \"Never use <__wmmintrin_pclmul.h> directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __WMMINTRIN_PCLMUL_H\n" "#define __WMMINTRIN_PCLMUL_H\n" "\n" "/// Multiplies two 64-bit integer values, which are selected from source\n" "/// operands using the immediate-value operand. 
The multiplication is a\n" "/// carry-less multiplication, and the 128-bit integer product is stored in\n" "/// the destination.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm_clmulepi64_si128(__m128i X, __m128i Y, const int I);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VPCLMULQDQ instruction.\n" "///\n" "/// \\param X\n" "/// A 128-bit vector of [2 x i64] containing one of the source operands.\n" "/// \\param Y\n" "/// A 128-bit vector of [2 x i64] containing one of the source operands.\n" "/// \\param I\n" "/// An immediate value specifying which 64-bit values to select from the\n" "/// operands. Bit 0 is used to select a value from operand \\a X, and bit\n" "/// 4 is used to select a value from operand \\a Y: \\n\n" "/// Bit[0]=0 indicates that bits[63:0] of operand \\a X are used. \\n\n" "/// Bit[0]=1 indicates that bits[127:64] of operand \\a X are used. \\n\n" "/// Bit[4]=0 indicates that bits[63:0] of operand \\a Y are used. \\n\n" "/// Bit[4]=1 indicates that bits[127:64] of operand \\a Y are used.\n" "/// \\returns The 128-bit integer vector containing the result of the carry-less\n" "/// multiplication of the selected 64-bit values.\n" "#define _mm_clmulepi64_si128(X, Y, I) \\\n" " ((__m128i)__builtin_ia32_pclmulqdq128((__v2di)(__m128i)(X), \\\n" " (__v2di)(__m128i)(Y), (char)(I)))\n" "\n" "#endif /* __WMMINTRIN_PCLMUL_H */\n" "" } , { "/builtins/adxintrin.h" , "/*===---- adxintrin.h - ADX intrinsics -------------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __ADXINTRIN_H\n" "#define __ADXINTRIN_H\n" 
"\n" "/* Define the default attributes for the functions in this file. */\n" "#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))\n" "\n" "/* Use C++ inline semantics in C++, GNU inline for C mode. */\n" "#if defined(__cplusplus)\n" "#define __INLINE __inline\n" "#else\n" "#define __INLINE static __inline\n" "#endif\n" "\n" "#if defined(__cplusplus)\n" "extern \"C\" {\n" "#endif\n" "\n" "/* Intrinsics that are available only if __ADX__ is defined. */\n" "\n" "/// Adds unsigned 32-bit integers \\a __x and \\a __y, plus 0 or 1 as indicated\n" "/// by the carry flag \\a __cf. Stores the unsigned 32-bit sum in the memory\n" "/// at \\a __p, and returns the 8-bit carry-out (carry flag).\n" "///\n" "/// \\code{.operation}\n" "/// temp := (__cf == 0) ? 0 : 1\n" "/// Store32(__p, __x + __y + temp)\n" "/// result := CF\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c ADCX instruction.\n" "///\n" "/// \\param __cf\n" "/// The 8-bit unsigned carry flag; any non-zero value indicates carry.\n" "/// \\param __x\n" "/// A 32-bit unsigned addend.\n" "/// \\param __y\n" "/// A 32-bit unsigned addend.\n" "/// \\param __p\n" "/// Pointer to memory for storing the sum.\n" "/// \\returns The 8-bit unsigned carry-out value.\n" "__INLINE unsigned char\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"adx\")))\n" " _addcarryx_u32(unsigned char __cf, unsigned int __x, unsigned int __y,\n" " unsigned int *__p) {\n" " return __builtin_ia32_addcarryx_u32(__cf, __x, __y, __p);\n" "}\n" "\n" "#ifdef __x86_64__\n" "/// Adds unsigned 64-bit integers \\a __x and \\a __y, plus 0 or 1 as indicated\n" "/// by the carry flag \\a __cf. Stores the unsigned 64-bit sum in the memory\n" "/// at \\a __p, and returns the 8-bit carry-out (carry flag).\n" "///\n" "/// \\code{.operation}\n" "/// temp := (__cf == 0) ? 
0 : 1\n" "/// Store64(__p, __x + __y + temp)\n" "/// result := CF\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c ADCX instruction.\n" "///\n" "/// \\param __cf\n" "/// The 8-bit unsigned carry flag; any non-zero value indicates carry.\n" "/// \\param __x\n" "/// A 64-bit unsigned addend.\n" "/// \\param __y\n" "/// A 64-bit unsigned addend.\n" "/// \\param __p\n" "/// Pointer to memory for storing the sum.\n" "/// \\returns The 8-bit unsigned carry-out value.\n" "__INLINE unsigned char\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"adx\")))\n" " _addcarryx_u64(unsigned char __cf, unsigned long long __x,\n" " unsigned long long __y, unsigned long long *__p) {\n" " return __builtin_ia32_addcarryx_u64(__cf, __x, __y, __p);\n" "}\n" "#endif\n" "\n" "/* Intrinsics that are also available if __ADX__ is undefined. */\n" "\n" "/// Adds unsigned 32-bit integers \\a __x and \\a __y, plus 0 or 1 as indicated\n" "/// by the carry flag \\a __cf. Stores the unsigned 32-bit sum in the memory\n" "/// at \\a __p, and returns the 8-bit carry-out (carry flag).\n" "///\n" "/// \\code{.operation}\n" "/// temp := (__cf == 0) ? 
0 : 1\n" "/// Store32(__p, __x + __y + temp)\n" "/// result := CF\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c ADC instruction.\n" "///\n" "/// \\param __cf\n" "/// The 8-bit unsigned carry flag; any non-zero value indicates carry.\n" "/// \\param __x\n" "/// A 32-bit unsigned addend.\n" "/// \\param __y\n" "/// A 32-bit unsigned addend.\n" "/// \\param __p\n" "/// Pointer to memory for storing the sum.\n" "/// \\returns The 8-bit unsigned carry-out value.\n" "__INLINE unsigned char __DEFAULT_FN_ATTRS _addcarry_u32(unsigned char __cf,\n" " unsigned int __x,\n" " unsigned int __y,\n" " unsigned int *__p) {\n" " return __builtin_ia32_addcarryx_u32(__cf, __x, __y, __p);\n" "}\n" "\n" "#ifdef __x86_64__\n" "/// Adds unsigned 64-bit integers \\a __x and \\a __y, plus 0 or 1 as indicated\n" "/// by the carry flag \\a __cf. Stores the unsigned 64-bit sum in the memory\n" "/// at \\a __p, and returns the 8-bit carry-out (carry flag).\n" "///\n" "/// \\code{.operation}\n" "/// temp := (__cf == 0) ? 
0 : 1\n" "/// Store64(__p, __x + __y + temp)\n" "/// result := CF\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c ADC instruction.\n" "///\n" "/// \\param __cf\n" "/// The 8-bit unsigned carry flag; any non-zero value indicates carry.\n" "/// \\param __x\n" "/// A 64-bit unsigned addend.\n" "/// \\param __y\n" "/// A 64-bit unsigned addend.\n" "/// \\param __p\n" "/// Pointer to memory for storing the sum.\n" "/// \\returns The 8-bit unsigned carry-out value.\n" "__INLINE unsigned char __DEFAULT_FN_ATTRS\n" "_addcarry_u64(unsigned char __cf, unsigned long long __x,\n" " unsigned long long __y, unsigned long long *__p) {\n" " return __builtin_ia32_addcarryx_u64(__cf, __x, __y, __p);\n" "}\n" "#endif\n" "\n" "/// Adds unsigned 32-bit integer \\a __y to 0 or 1 as indicated by the carry\n" "/// flag \\a __cf, and subtracts the result from unsigned 32-bit integer\n" "/// \\a __x. Stores the unsigned 32-bit difference in the memory at \\a __p,\n" "/// and returns the 8-bit carry-out (carry or overflow flag).\n" "///\n" "/// \\code{.operation}\n" "/// temp := (__cf == 0) ? 
0 : 1\n" "/// Store32(__p, __x - (__y + temp))\n" "/// result := CF\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c SBB instruction.\n" "///\n" "/// \\param __cf\n" "/// The 8-bit unsigned carry flag; any non-zero value indicates carry.\n" "/// \\param __x\n" "/// The 32-bit unsigned minuend.\n" "/// \\param __y\n" "/// The 32-bit unsigned subtrahend.\n" "/// \\param __p\n" "/// Pointer to memory for storing the difference.\n" "/// \\returns The 8-bit unsigned carry-out value.\n" "__INLINE unsigned char __DEFAULT_FN_ATTRS _subborrow_u32(unsigned char __cf,\n" " unsigned int __x,\n" " unsigned int __y,\n" " unsigned int *__p) {\n" " return __builtin_ia32_subborrow_u32(__cf, __x, __y, __p);\n" "}\n" "\n" "#ifdef __x86_64__\n" "/// Adds unsigned 64-bit integer \\a __y to 0 or 1 as indicated by the carry\n" "/// flag \\a __cf, and subtracts the result from unsigned 64-bit integer\n" "/// \\a __x. Stores the unsigned 64-bit difference in the memory at \\a __p,\n" "/// and returns the 8-bit carry-out (carry or overflow flag).\n" "///\n" "/// \\code{.operation}\n" "/// temp := (__cf == 0) ? 
0 : 1\n" "/// Store64(__p, __x - (__y + temp))\n" "/// result := CF\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c ADC instruction.\n" "///\n" "/// \\param __cf\n" "/// The 8-bit unsigned carry flag; any non-zero value indicates carry.\n" "/// \\param __x\n" "/// The 64-bit unsigned minuend.\n" "/// \\param __y\n" "/// The 64-bit unsigned subtrahend.\n" "/// \\param __p\n" "/// Pointer to memory for storing the difference.\n" "/// \\returns The 8-bit unsigned carry-out value.\n" "__INLINE unsigned char __DEFAULT_FN_ATTRS\n" "_subborrow_u64(unsigned char __cf, unsigned long long __x,\n" " unsigned long long __y, unsigned long long *__p) {\n" " return __builtin_ia32_subborrow_u64(__cf, __x, __y, __p);\n" "}\n" "#endif\n" "\n" "#if defined(__cplusplus)\n" "}\n" "#endif\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "\n" "#endif /* __ADXINTRIN_H */\n" "" } , { "/builtins/ammintrin.h" , "/*===---- ammintrin.h - SSE4a intrinsics -----------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __AMMINTRIN_H\n" "#define __AMMINTRIN_H\n" "\n" "#if !defined(__i386__) && !defined(__x86_64__)\n" "#error \"This header is only meant to be used on x86 and x64 architecture\"\n" "#endif\n" "\n" "#include \n" "\n" "/* Define the default attributes for the functions in this file. 
*/\n" "#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__(\"sse4a\"), __min_vector_width__(128)))\n" "\n" "/// Extracts the specified bits from the lower 64 bits of the 128-bit\n" "/// integer vector operand at the index \\a idx and of the length \\a len.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm_extracti_si64(__m128i x, const int len, const int idx);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the EXTRQ instruction.\n" "///\n" "/// \\param x\n" "/// The value from which bits are extracted.\n" "/// \\param len\n" "/// Bits [5:0] specify the length; the other bits are ignored. If bits [5:0]\n" "/// are zero, the length is interpreted as 64.\n" "/// \\param idx\n" "/// Bits [5:0] specify the index of the least significant bit; the other\n" "/// bits are ignored. If the sum of the index and length is greater than 64,\n" "/// the result is undefined. If the length and index are both zero, bits\n" "/// [63:0] of parameter \\a x are extracted. If the length is zero but the\n" "/// index is non-zero, the result is undefined.\n" "/// \\returns A 128-bit integer vector whose lower 64 bits contain the bits\n" "/// extracted from the source operand.\n" "#define _mm_extracti_si64(x, len, idx) \\\n" " ((__m128i)__builtin_ia32_extrqi((__v2di)(__m128i)(x), \\\n" " (char)(len), (char)(idx)))\n" "\n" "/// Extracts the specified bits from the lower 64 bits of the 128-bit\n" "/// integer vector operand at the index and of the length specified by\n" "/// \\a __y.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the EXTRQ instruction.\n" "///\n" "/// \\param __x\n" "/// The value from which bits are extracted.\n" "/// \\param __y\n" "/// Specifies the index of the least significant bit at [13:8] and the\n" "/// length at [5:0]; all other bits are ignored. If bits [5:0] are zero, the\n" "/// length is interpreted as 64. 
If the sum of the index and length is\n" "/// greater than 64, the result is undefined. If the length and index are\n" "/// both zero, bits [63:0] of parameter \\a __x are extracted. If the length\n" "/// is zero but the index is non-zero, the result is undefined.\n" "/// \\returns A 128-bit vector whose lower 64 bits contain the bits extracted\n" "/// from the source operand.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_extract_si64(__m128i __x, __m128i __y)\n" "{\n" " return (__m128i)__builtin_ia32_extrq((__v2di)__x, (__v16qi)__y);\n" "}\n" "\n" "/// Inserts bits of a specified length from the source integer vector\n" "/// \\a y into the lower 64 bits of the destination integer vector \\a x at\n" "/// the index \\a idx and of the length \\a len.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm_inserti_si64(__m128i x, __m128i y, const int len,\n" "/// const int idx);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the INSERTQ instruction.\n" "///\n" "/// \\param x\n" "/// The destination operand where bits will be inserted. The inserted bits\n" "/// are defined by the length \\a len and by the index \\a idx specifying the\n" "/// least significant bit.\n" "/// \\param y\n" "/// The source operand containing the bits to be extracted. The extracted\n" "/// bits are the least significant bits of operand \\a y of length \\a len.\n" "/// \\param len\n" "/// Bits [5:0] specify the length; the other bits are ignored. If bits [5:0]\n" "/// are zero, the length is interpreted as 64.\n" "/// \\param idx\n" "/// Bits [5:0] specify the index of the least significant bit; the other\n" "/// bits are ignored. If the sum of the index and length is greater than 64,\n" "/// the result is undefined. If the length and index are both zero, bits\n" "/// [63:0] of parameter \\a y are inserted into parameter \\a x. 
If the length\n" "/// is zero but the index is non-zero, the result is undefined.\n" "/// \\returns A 128-bit integer vector containing the original lower 64-bits of\n" "/// destination operand \\a x with the specified bitfields replaced by the\n" "/// lower bits of source operand \\a y. The upper 64 bits of the return value\n" "/// are undefined.\n" "#define _mm_inserti_si64(x, y, len, idx) \\\n" " ((__m128i)__builtin_ia32_insertqi((__v2di)(__m128i)(x), \\\n" " (__v2di)(__m128i)(y), \\\n" " (char)(len), (char)(idx)))\n" "\n" "/// Inserts bits of a specified length from the source integer vector\n" "/// \\a __y into the lower 64 bits of the destination integer vector \\a __x\n" "/// at the index and of the length specified by \\a __y.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the INSERTQ instruction.\n" "///\n" "/// \\param __x\n" "/// The destination operand where bits will be inserted. The inserted bits\n" "/// are defined by the length and by the index of the least significant bit\n" "/// specified by operand \\a __y.\n" "/// \\param __y\n" "/// The source operand containing the bits to be extracted. The extracted\n" "/// bits are the least significant bits of operand \\a __y with length\n" "/// specified by bits [69:64]. These are inserted into the destination at the\n" "/// index specified by bits [77:72]; all other bits are ignored. If bits\n" "/// [69:64] are zero, the length is interpreted as 64. If the sum of the\n" "/// index and length is greater than 64, the result is undefined. If the\n" "/// length and index are both zero, bits [63:0] of parameter \\a __y are\n" "/// inserted into parameter \\a __x. If the length is zero but the index is\n" "/// non-zero, the result is undefined.\n" "/// \\returns A 128-bit integer vector containing the original lower 64-bits of\n" "/// destination operand \\a __x with the specified bitfields replaced by the\n" "/// lower bits of source operand \\a __y. 
The upper 64 bits of the return\n" "/// value are undefined.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_insert_si64(__m128i __x, __m128i __y)\n" "{\n" " return (__m128i)__builtin_ia32_insertq((__v2di)__x, (__v2di)__y);\n" "}\n" "\n" "/// Stores a 64-bit double-precision value in a 64-bit memory location.\n" "/// To minimize caching, the data is flagged as non-temporal (unlikely to be\n" "/// used again soon).\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the MOVNTSD instruction.\n" "///\n" "/// \\param __p\n" "/// The 64-bit memory location used to store the register value.\n" "/// \\param __a\n" "/// The 64-bit double-precision floating-point register value to be stored.\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_mm_stream_sd(double *__p, __m128d __a)\n" "{\n" " __builtin_ia32_movntsd(__p, (__v2df)__a);\n" "}\n" "\n" "/// Stores a 32-bit single-precision floating-point value in a 32-bit\n" "/// memory location. To minimize caching, the data is flagged as\n" "/// non-temporal (unlikely to be used again soon).\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the MOVNTSS instruction.\n" "///\n" "/// \\param __p\n" "/// The 32-bit memory location used to store the register value.\n" "/// \\param __a\n" "/// The 32-bit single-precision floating-point register value to be stored.\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_mm_stream_ss(float *__p, __m128 __a)\n" "{\n" " __builtin_ia32_movntss(__p, (__v4sf)__a);\n" "}\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "\n" "#endif /* __AMMINTRIN_H */\n" "" } , { "/builtins/amxcomplexintrin.h" , "/*===--------- amxcomplexintrin.h - AMXCOMPLEX intrinsics -*- C++ -*---------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " 
*===------------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif // __IMMINTRIN_H\n" "\n" "#ifndef __AMX_COMPLEXINTRIN_H\n" "#define __AMX_COMPLEXINTRIN_H\n" "#ifdef __x86_64__\n" "\n" "#define __DEFAULT_FN_ATTRS_COMPLEX \\\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"amx-complex\")))\n" "\n" "/// Perform matrix multiplication of two tiles containing complex elements and\n" "/// accumulate the results into a packed single precision tile. Each dword\n" "/// element in input tiles \\a a and \\a b is interpreted as a complex number\n" "/// with FP16 real part and FP16 imaginary part.\n" "/// Calculates the imaginary part of the result. For each possible combination\n" "/// of (row of \\a a, column of \\a b), it performs a set of multiplication\n" "/// and accumulations on all corresponding complex numbers (one from \\a a\n" "/// and one from \\a b). The imaginary part of the \\a a element is multiplied\n" "/// with the real part of the corresponding \\a b element, and the real part\n" "/// of the \\a a element is multiplied with the imaginary part of the\n" "/// corresponding \\a b elements. 
The two accumulated results are added, and\n" "/// then accumulated into the corresponding row and column of \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// void _tile_cmmimfp16ps(__tile dst, __tile a, __tile b);\n" "/// \\endcode\n" "///\n" "/// \\code{.operation}\n" "/// FOR m := 0 TO dst.rows - 1\n" "/// tmp := dst.row[m]\n" "/// FOR k := 0 TO (a.colsb / 4) - 1\n" "/// FOR n := 0 TO (dst.colsb / 4) - 1\n" "/// tmp.fp32[n] += FP32(a.row[m].fp16[2*k+0]) * FP32(b.row[k].fp16[2*n+1])\n" "/// tmp.fp32[n] += FP32(a.row[m].fp16[2*k+1]) * FP32(b.row[k].fp16[2*n+0])\n" "/// ENDFOR\n" "/// ENDFOR\n" "/// write_row_and_zero(dst, m, tmp, dst.colsb)\n" "/// ENDFOR\n" "/// zero_upper_rows(dst, dst.rows)\n" "/// zero_tileconfig_start()\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c TCMMIMFP16PS instruction.\n" "///\n" "/// \\param dst\n" "/// The destination tile. Max size is 1024 Bytes.\n" "/// \\param a\n" "/// The 1st source tile. Max size is 1024 Bytes.\n" "/// \\param b\n" "/// The 2nd source tile. Max size is 1024 Bytes.\n" "#define _tile_cmmimfp16ps(dst, a, b) __builtin_ia32_tcmmimfp16ps(dst, a, b)\n" "\n" "/// Perform matrix multiplication of two tiles containing complex elements and\n" "/// accumulate the results into a packed single precision tile. Each dword\n" "/// element in input tiles \\a a and \\a b is interpreted as a complex number\n" "/// with FP16 real part and FP16 imaginary part.\n" "/// Calculates the real part of the result. For each possible combination\n" "/// of (row of \\a a, column of \\a b), it performs a set of multiplication\n" "/// and accumulations on all corresponding complex numbers (one from \\a a\n" "/// and one from \\a b). The real part of the \\a a element is multiplied\n" "/// with the real part of the corresponding \\a b element, and the negated\n" "/// imaginary part of the \\a a element is multiplied with the imaginary\n" "/// part of the corresponding \\a b elements. 
The two accumulated results\n" "/// are added, and then accumulated into the corresponding row and column\n" "/// of \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// void _tile_cmmrlfp16ps(__tile dst, __tile a, __tile b);\n" "/// \\endcode\n" "///\n" "/// \\code{.operation}\n" "/// FOR m := 0 TO dst.rows - 1\n" "/// tmp := dst.row[m]\n" "/// FOR k := 0 TO (a.colsb / 4) - 1\n" "/// FOR n := 0 TO (dst.colsb / 4) - 1\n" "/// tmp.fp32[n] += FP32(a.row[m].fp16[2*k+0]) * FP32(b.row[k].fp16[2*n+0])\n" "/// tmp.fp32[n] += FP32(-a.row[m].fp16[2*k+1]) * FP32(b.row[k].fp16[2*n+1])\n" "/// ENDFOR\n" "/// ENDFOR\n" "/// write_row_and_zero(dst, m, tmp, dst.colsb)\n" "/// ENDFOR\n" "/// zero_upper_rows(dst, dst.rows)\n" "/// zero_tileconfig_start()\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c TCMMIMFP16PS instruction.\n" "///\n" "/// \\param dst\n" "/// The destination tile. Max size is 1024 Bytes.\n" "/// \\param a\n" "/// The 1st source tile. Max size is 1024 Bytes.\n" "/// \\param b\n" "/// The 2nd source tile. Max size is 1024 Bytes.\n" "#define _tile_cmmrlfp16ps(dst, a, b) __builtin_ia32_tcmmrlfp16ps(dst, a, b)\n" "\n" "static __inline__ _tile1024i __DEFAULT_FN_ATTRS_COMPLEX\n" "_tile_cmmimfp16ps_internal(unsigned short m, unsigned short n, unsigned short k,\n" " _tile1024i dst, _tile1024i src1, _tile1024i src2) {\n" " return __builtin_ia32_tcmmimfp16ps_internal(m, n, k, dst, src1, src2);\n" "}\n" "\n" "static __inline__ _tile1024i __DEFAULT_FN_ATTRS_COMPLEX\n" "_tile_cmmrlfp16ps_internal(unsigned short m, unsigned short n, unsigned short k,\n" " _tile1024i dst, _tile1024i src1, _tile1024i src2) {\n" " return __builtin_ia32_tcmmrlfp16ps_internal(m, n, k, dst, src1, src2);\n" "}\n" "\n" "/// Perform matrix multiplication of two tiles containing complex elements and\n" "/// accumulate the results into a packed single precision tile. 
Each dword\n" "/// element in input tiles src0 and src1 is interpreted as a complex number with\n" "/// FP16 real part and FP16 imaginary part.\n" "/// This function calculates the imaginary part of the result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the TCMMIMFP16PS instruction.\n" "///\n" "/// \\param dst\n" "/// The destination tile. Max size is 1024 Bytes.\n" "/// \\param src0\n" "/// The 1st source tile. Max size is 1024 Bytes.\n" "/// \\param src1\n" "/// The 2nd source tile. Max size is 1024 Bytes.\n" "__DEFAULT_FN_ATTRS_COMPLEX\n" "static void __tile_cmmimfp16ps(__tile1024i *dst, __tile1024i src0,\n" " __tile1024i src1) {\n" " dst->tile = _tile_cmmimfp16ps_internal(src0.row, src1.col, src0.col,\n" " dst->tile, src0.tile, src1.tile);\n" "}\n" "\n" "/// Perform matrix multiplication of two tiles containing complex elements and\n" "/// accumulate the results into a packed single precision tile. Each dword\n" "/// element in input tiles src0 and src1 is interpreted as a complex number with\n" "/// FP16 real part and FP16 imaginary part.\n" "/// This function calculates the real part of the result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the TCMMRLFP16PS instruction.\n" "///\n" "/// \\param dst\n" "/// The destination tile. Max size is 1024 Bytes.\n" "/// \\param src0\n" "/// The 1st source tile. Max size is 1024 Bytes.\n" "/// \\param src1\n" "/// The 2nd source tile. 
Max size is 1024 Bytes.\n" "__DEFAULT_FN_ATTRS_COMPLEX\n" "static void __tile_cmmrlfp16ps(__tile1024i *dst, __tile1024i src0,\n" " __tile1024i src1) {\n" " dst->tile = _tile_cmmrlfp16ps_internal(src0.row, src1.col, src0.col,\n" " dst->tile, src0.tile, src1.tile);\n" "}\n" "\n" "#endif // __x86_64__\n" "#endif // __AMX_COMPLEXINTRIN_H\n" "" } , { "/builtins/amxfp16intrin.h" , "/*===------------- amxfp16intrin.h - AMX_FP16 intrinsics -*- C++ -*---------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===------------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __IMMINTRIN_H\n" "#error \"Never use directly; use instead.\"\n" "#endif /* __IMMINTRIN_H */\n" "\n" "#ifndef __AMX_FP16INTRIN_H\n" "#define __AMX_FP16INTRIN_H\n" "#ifdef __x86_64__\n" "\n" "/// Compute dot-product of FP16 (16-bit) floating-point pairs in tiles \\a a\n" "/// and \\a b, accumulating the intermediate single-precision (32-bit)\n" "/// floating-point elements with elements in \\a dst, and store the 32-bit\n" "/// result back to tile \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// void _tile_dpfp16ps (__tile dst, __tile a, __tile b)\n" "/// \\endcode\n" "///\n" "/// \\code{.operation}\n" "/// FOR m := 0 TO dst.rows - 1\n" "/// tmp := dst.row[m]\n" "/// FOR k := 0 TO (a.colsb / 4) - 1\n" "/// FOR n := 0 TO (dst.colsb / 4) - 1\n" "/// tmp.fp32[n] += FP32(a.row[m].fp16[2*k+0]) *\n" "/// FP32(b.row[k].fp16[2*n+0])\n" "/// tmp.fp32[n] += FP32(a.row[m].fp16[2*k+1]) *\n" "/// FP32(b.row[k].fp16[2*n+1])\n" "/// ENDFOR\n" "/// ENDFOR\n" "/// write_row_and_zero(dst, m, tmp, dst.colsb)\n" "/// ENDFOR\n" "/// zero_upper_rows(dst, dst.rows)\n" "/// zero_tileconfig_start()\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c TDPFP16PS 
instruction.\n" "///\n" "/// \\param dst\n" "/// The destination tile. Max size is 1024 Bytes.\n" "/// \\param a\n" "/// The 1st source tile. Max size is 1024 Bytes.\n" "/// \\param b\n" "/// The 2nd source tile. Max size is 1024 Bytes.\n" "#define _tile_dpfp16ps(dst, a, b) \\\n" " __builtin_ia32_tdpfp16ps(dst, a, b)\n" "\n" "#endif /* __x86_64__ */\n" "#endif /* __AMX_FP16INTRIN_H */\n" "" } , { "/builtins/amxintrin.h" , "/*===--------------- amxintrin.h - AMX intrinsics -*- C/C++ -*---------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===------------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif /* __IMMINTRIN_H */\n" "\n" "#ifndef __AMXINTRIN_H\n" "#define __AMXINTRIN_H\n" "#ifdef __x86_64__\n" "\n" "/* Define the default attributes for the functions in this file. */\n" "#define __DEFAULT_FN_ATTRS_TILE \\\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"amx-tile\")))\n" "#define __DEFAULT_FN_ATTRS_INT8 \\\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"amx-int8\")))\n" "#define __DEFAULT_FN_ATTRS_BF16 \\\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"amx-bf16\")))\n" "#define __DEFAULT_FN_ATTRS_FP16 \\\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"amx-fp16\")))\n" "\n" "/// Load tile configuration from a 64-byte memory location specified by\n" "/// \"mem_addr\". The tile configuration includes the tile type palette, the\n" "/// number of bytes per row, and the number of rows. If the specified\n" "/// palette_id is zero, that signifies the init state for both the tile\n" "/// config and the tile data, and the tiles are zeroed. 
Any invalid\n" "/// configurations will result in #GP fault.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the LDTILECFG instruction.\n" "///\n" "/// \\param __config\n" "/// A pointer to 512-bits configuration\n" "static __inline__ void __DEFAULT_FN_ATTRS_TILE\n" "_tile_loadconfig(const void *__config) {\n" " __builtin_ia32_tile_loadconfig(__config);\n" "}\n" "\n" "/// Stores the current tile configuration to a 64-byte memory location\n" "/// specified by \"mem_addr\". The tile configuration includes the tile type\n" "/// palette, the number of bytes per row, and the number of rows. If tiles\n" "/// are not configured, all zeroes will be stored to memory.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the STTILECFG instruction.\n" "///\n" "/// \\param __config\n" "/// A pointer to 512-bits configuration\n" "static __inline__ void __DEFAULT_FN_ATTRS_TILE\n" "_tile_storeconfig(void *__config) {\n" " __builtin_ia32_tile_storeconfig(__config);\n" "}\n" "\n" "/// Release the tile configuration to return to the init state, which\n" "/// releases all storage it currently holds.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the TILERELEASE instruction.\n" "static __inline__ void __DEFAULT_FN_ATTRS_TILE _tile_release(void) {\n" " __builtin_ia32_tilerelease();\n" "}\n" "\n" "/// Load tile rows from memory specifieid by \"base\" address and \"stride\" into\n" "/// destination tile \"dst\" using the tile configuration previously configured\n" "/// via \"_tile_loadconfig\".\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the TILELOADD instruction.\n" "///\n" "/// \\param dst\n" "/// A destination tile. 
Max size is 1024 Bytes.\n" "/// \\param base\n" "/// A pointer to base address.\n" "/// \\param stride\n" "/// The stride between the rows' data to be loaded in memory.\n" "#define _tile_loadd(dst, base, stride) \\\n" " __builtin_ia32_tileloadd64((dst), ((const void *)(base)), \\\n" " (__SIZE_TYPE__)(stride))\n" "\n" "/// Load tile rows from memory specifieid by \"base\" address and \"stride\" into\n" "/// destination tile \"dst\" using the tile configuration previously configured\n" "/// via \"_tile_loadconfig\". This intrinsic provides a hint to the implementation\n" "/// that the data will likely not be reused in the near future and the data\n" "/// caching can be optimized accordingly.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the TILELOADDT1 instruction.\n" "///\n" "/// \\param dst\n" "/// A destination tile. Max size is 1024 Bytes.\n" "/// \\param base\n" "/// A pointer to base address.\n" "/// \\param stride\n" "/// The stride between the rows' data to be loaded in memory.\n" "#define _tile_stream_loadd(dst, base, stride) \\\n" " __builtin_ia32_tileloaddt164((dst), ((const void *)(base)), \\\n" " (__SIZE_TYPE__)(stride))\n" "\n" "/// Store the tile specified by \"src\" to memory specifieid by \"base\" address and\n" "/// \"stride\" using the tile configuration previously configured via\n" "/// \"_tile_loadconfig\".\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the TILESTORED instruction.\n" "///\n" "/// \\param dst\n" "/// A destination tile. 
Max size is 1024 Bytes.\n" "/// \\param base\n" "/// A pointer to base address.\n" "/// \\param stride\n" "/// The stride between the rows' data to be stored in memory.\n" "#define _tile_stored(dst, base, stride) \\\n" " __builtin_ia32_tilestored64((dst), ((void *)(base)), (__SIZE_TYPE__)(stride))\n" "\n" "/// Zero the tile specified by \"tdest\".\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the TILEZERO instruction.\n" "///\n" "/// \\param tile\n" "/// The destination tile to be zero. Max size is 1024 Bytes.\n" "#define _tile_zero(tile) __builtin_ia32_tilezero((tile))\n" "\n" "/// Compute dot-product of bytes in tiles with a source/destination accumulator.\n" "/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in src0 with\n" "/// corresponding signed 8-bit integers in src1, producing 4 intermediate 32-bit\n" "/// results. Sum these 4 results with the corresponding 32-bit integer in \"dst\",\n" "/// and store the 32-bit result back to tile \"dst\".\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the TDPBSSD instruction.\n" "///\n" "/// \\param dst\n" "/// The destination tile. Max size is 1024 Bytes.\n" "/// \\param src0\n" "/// The 1st source tile. Max size is 1024 Bytes.\n" "/// \\param src1\n" "/// The 2nd source tile. Max size is 1024 Bytes.\n" "#define _tile_dpbssd(dst, src0, src1) \\\n" " __builtin_ia32_tdpbssd((dst), (src0), (src1))\n" "\n" "/// Compute dot-product of bytes in tiles with a source/destination accumulator.\n" "/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in src0 with\n" "/// corresponding unsigned 8-bit integers in src1, producing 4 intermediate\n" "/// 32-bit results. Sum these 4 results with the corresponding 32-bit integer\n" "/// in \"dst\", and store the 32-bit result back to tile \"dst\".\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the TDPBSUD instruction.\n" "///\n" "/// \\param dst\n" "/// The destination tile. 
Max size is 1024 Bytes.\n" "/// \\param src0\n" "/// The 1st source tile. Max size is 1024 Bytes.\n" "/// \\param src1\n" "/// The 2nd source tile. Max size is 1024 Bytes.\n" "#define _tile_dpbsud(dst, src0, src1) \\\n" " __builtin_ia32_tdpbsud((dst), (src0), (src1))\n" "\n" "/// Compute dot-product of bytes in tiles with a source/destination accumulator.\n" "/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in src0 with\n" "/// corresponding signed 8-bit integers in src1, producing 4 intermediate 32-bit\n" "/// results. Sum these 4 results with the corresponding 32-bit integer in \"dst\",\n" "/// and store the 32-bit result back to tile \"dst\".\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the TDPBUSD instruction.\n" "///\n" "/// \\param dst\n" "/// The destination tile. Max size is 1024 Bytes.\n" "/// \\param src0\n" "/// The 1st source tile. Max size is 1024 Bytes.\n" "/// \\param src1\n" "/// The 2nd source tile. Max size is 1024 Bytes.\n" "#define _tile_dpbusd(dst, src0, src1) \\\n" " __builtin_ia32_tdpbusd((dst), (src0), (src1))\n" "\n" "/// Compute dot-product of bytes in tiles with a source/destination accumulator.\n" "/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in src0 with\n" "/// corresponding unsigned 8-bit integers in src1, producing 4 intermediate\n" "/// 32-bit results. Sum these 4 results with the corresponding 32-bit integer in\n" "/// \"dst\", and store the 32-bit result back to tile \"dst\".\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the TDPBUUD instruction.\n" "///\n" "/// \\param dst\n" "/// The destination tile. Max size is 1024 Bytes.\n" "/// \\param src0\n" "/// The 1st source tile. Max size is 1024 Bytes.\n" "/// \\param src1\n" "/// The 2nd source tile. 
Max size is 1024 Bytes.\n" "#define _tile_dpbuud(dst, src0, src1) \\\n" " __builtin_ia32_tdpbuud((dst), (src0), (src1))\n" "\n" "/// Compute dot-product of BF16 (16-bit) floating-point pairs in tiles src0 and\n" "/// src1, accumulating the intermediate single-precision (32-bit) floating-point\n" "/// elements with elements in \"dst\", and store the 32-bit result back to tile\n" "/// \"dst\".\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the TDPBF16PS instruction.\n" "///\n" "/// \\param dst\n" "/// The destination tile. Max size is 1024 Bytes.\n" "/// \\param src0\n" "/// The 1st source tile. Max size is 1024 Bytes.\n" "/// \\param src1\n" "/// The 2nd source tile. Max size is 1024 Bytes.\n" "#define _tile_dpbf16ps(dst, src0, src1) \\\n" " __builtin_ia32_tdpbf16ps((dst), (src0), (src1))\n" "\n" "/// AMX tile register size can be configured, the maximum size is 16x64=1024\n" "/// bytes. Since there is no 2D type in llvm IR, we use vector type to\n" "/// represent 2D tile and the fixed size is maximum amx tile register size.\n" "typedef int _tile1024i __attribute__((__vector_size__(1024), __aligned__(64)));\n" "\n" "/// This is internal intrinsic. C/C++ user should avoid calling it directly.\n" "static __inline__ _tile1024i __DEFAULT_FN_ATTRS_INT8\n" "_tile_loadd_internal(unsigned short m, unsigned short n, const void *base,\n" " __SIZE_TYPE__ stride) {\n" " return __builtin_ia32_tileloadd64_internal(m, n, base,\n" " (__SIZE_TYPE__)(stride));\n" "}\n" "\n" "/// This is internal intrinsic. C/C++ user should avoid calling it directly.\n" "static __inline__ _tile1024i __DEFAULT_FN_ATTRS_INT8\n" "_tile_loaddt1_internal(unsigned short m, unsigned short n, const void *base,\n" " __SIZE_TYPE__ stride) {\n" " return __builtin_ia32_tileloaddt164_internal(m, n, base,\n" " (__SIZE_TYPE__)(stride));\n" "}\n" "\n" "/// This is internal intrinsic. 
C/C++ user should avoid calling it directly.\n" "static __inline__ _tile1024i __DEFAULT_FN_ATTRS_INT8\n" "_tile_dpbssd_internal(unsigned short m, unsigned short n, unsigned short k,\n" " _tile1024i dst, _tile1024i src1, _tile1024i src2) {\n" " return __builtin_ia32_tdpbssd_internal(m, n, k, dst, src1, src2);\n" "}\n" "\n" "/// This is internal intrinsic. C/C++ user should avoid calling it directly.\n" "static __inline__ _tile1024i __DEFAULT_FN_ATTRS_INT8\n" "_tile_dpbsud_internal(unsigned short m, unsigned short n, unsigned short k,\n" " _tile1024i dst, _tile1024i src1, _tile1024i src2) {\n" " return __builtin_ia32_tdpbsud_internal(m, n, k, dst, src1, src2);\n" "}\n" "\n" "/// This is internal intrinsic. C/C++ user should avoid calling it directly.\n" "static __inline__ _tile1024i __DEFAULT_FN_ATTRS_INT8\n" "_tile_dpbusd_internal(unsigned short m, unsigned short n, unsigned short k,\n" " _tile1024i dst, _tile1024i src1, _tile1024i src2) {\n" " return __builtin_ia32_tdpbusd_internal(m, n, k, dst, src1, src2);\n" "}\n" "\n" "/// This is internal intrinsic. C/C++ user should avoid calling it directly.\n" "static __inline__ _tile1024i __DEFAULT_FN_ATTRS_INT8\n" "_tile_dpbuud_internal(unsigned short m, unsigned short n, unsigned short k,\n" " _tile1024i dst, _tile1024i src1, _tile1024i src2) {\n" " return __builtin_ia32_tdpbuud_internal(m, n, k, dst, src1, src2);\n" "}\n" "\n" "/// This is internal intrinsic. C/C++ user should avoid calling it directly.\n" "static __inline__ void __DEFAULT_FN_ATTRS_INT8\n" "_tile_stored_internal(unsigned short m, unsigned short n, void *base,\n" " __SIZE_TYPE__ stride, _tile1024i tile) {\n" " return __builtin_ia32_tilestored64_internal(m, n, base,\n" " (__SIZE_TYPE__)(stride), tile);\n" "}\n" "\n" "/// This is internal intrinsic. 
C/C++ user should avoid calling it directly.\n" "static __inline__ _tile1024i __DEFAULT_FN_ATTRS_BF16\n" "_tile_dpbf16ps_internal(unsigned short m, unsigned short n, unsigned short k,\n" " _tile1024i dst, _tile1024i src1, _tile1024i src2) {\n" " return __builtin_ia32_tdpbf16ps_internal(m, n, k, dst, src1, src2);\n" "}\n" "\n" "/// This is internal intrinsic. C/C++ user should avoid calling it directly.\n" "static __inline__ _tile1024i __DEFAULT_FN_ATTRS_FP16\n" "_tile_dpfp16ps_internal(unsigned short m, unsigned short n, unsigned short k,\n" " _tile1024i dst, _tile1024i src1, _tile1024i src2) {\n" " return __builtin_ia32_tdpfp16ps_internal(m, n, k, dst, src1, src2);\n" "}\n" "\n" "/// This struct pack the shape and tile data together for user. We suggest\n" "/// initializing the struct as early as possible, because compiler depends\n" "/// on the shape information to do configure. The constant value is preferred\n" "/// for optimization by compiler.\n" "typedef struct __tile1024i_str {\n" " const unsigned short row;\n" " const unsigned short col;\n" " _tile1024i tile;\n" "} __tile1024i;\n" "\n" "/// Load tile rows from memory specifieid by \"base\" address and \"stride\" into\n" "/// destination tile \"dst\".\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the TILELOADD instruction.\n" "///\n" "/// \\param dst\n" "/// A destination tile. Max size is 1024 Bytes.\n" "/// \\param base\n" "/// A pointer to base address.\n" "/// \\param stride\n" "/// The stride between the rows' data to be loaded in memory.\n" "__DEFAULT_FN_ATTRS_TILE\n" "static __inline__ void __tile_loadd(__tile1024i *dst, const void *base,\n" " __SIZE_TYPE__ stride) {\n" " dst->tile = _tile_loadd_internal(dst->row, dst->col, base, stride);\n" "}\n" "\n" "/// Load tile rows from memory specifieid by \"base\" address and \"stride\" into\n" "/// destination tile \"dst\". 
This intrinsic provides a hint to the implementation\n" "/// that the data will likely not be reused in the near future and the data\n" "/// caching can be optimized accordingly.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the TILELOADDT1 instruction.\n" "///\n" "/// \\param dst\n" "/// A destination tile. Max size is 1024 Bytes.\n" "/// \\param base\n" "/// A pointer to base address.\n" "/// \\param stride\n" "/// The stride between the rows' data to be loaded in memory.\n" "__DEFAULT_FN_ATTRS_TILE\n" "static __inline__ void __tile_stream_loadd(__tile1024i *dst, const void *base,\n" " __SIZE_TYPE__ stride) {\n" " dst->tile = _tile_loaddt1_internal(dst->row, dst->col, base, stride);\n" "}\n" "\n" "/// Compute dot-product of bytes in tiles with a source/destination accumulator.\n" "/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in src0 with\n" "/// corresponding signed 8-bit integers in src1, producing 4 intermediate 32-bit\n" "/// results. Sum these 4 results with the corresponding 32-bit integer in \"dst\",\n" "/// and store the 32-bit result back to tile \"dst\".\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the TDPBSSD instruction.\n" "///\n" "/// \\param dst\n" "/// The destination tile. Max size is 1024 Bytes.\n" "/// \\param src0\n" "/// The 1st source tile. Max size is 1024 Bytes.\n" "/// \\param src1\n" "/// The 2nd source tile. Max size is 1024 Bytes.\n" "__DEFAULT_FN_ATTRS_INT8\n" "static __inline__ void __tile_dpbssd(__tile1024i *dst, __tile1024i src0,\n" " __tile1024i src1) {\n" " dst->tile = _tile_dpbssd_internal(src0.row, src1.col, src0.col, dst->tile,\n" " src0.tile, src1.tile);\n" "}\n" "\n" "/// Compute dot-product of bytes in tiles with a source/destination accumulator.\n" "/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in src0 with\n" "/// corresponding unsigned 8-bit integers in src1, producing 4 intermediate\n" "/// 32-bit results. 
Sum these 4 results with the corresponding 32-bit integer\n" "/// in \"dst\", and store the 32-bit result back to tile \"dst\".\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the TDPBSUD instruction.\n" "///\n" "/// \\param dst\n" "/// The destination tile. Max size is 1024 Bytes.\n" "/// \\param src0\n" "/// The 1st source tile. Max size is 1024 Bytes.\n" "/// \\param src1\n" "/// The 2nd source tile. Max size is 1024 Bytes.\n" "__DEFAULT_FN_ATTRS_INT8\n" "static __inline__ void __tile_dpbsud(__tile1024i *dst, __tile1024i src0,\n" " __tile1024i src1) {\n" " dst->tile = _tile_dpbsud_internal(src0.row, src1.col, src0.col, dst->tile,\n" " src0.tile, src1.tile);\n" "}\n" "\n" "/// Compute dot-product of bytes in tiles with a source/destination accumulator.\n" "/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in src0 with\n" "/// corresponding signed 8-bit integers in src1, producing 4 intermediate 32-bit\n" "/// results. Sum these 4 results with the corresponding 32-bit integer in \"dst\",\n" "/// and store the 32-bit result back to tile \"dst\".\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the TDPBUSD instruction.\n" "///\n" "/// \\param dst\n" "/// The destination tile. Max size is 1024 Bytes.\n" "/// \\param src0\n" "/// The 1st source tile. Max size is 1024 Bytes.\n" "/// \\param src1\n" "/// The 2nd source tile. Max size is 1024 Bytes.\n" "__DEFAULT_FN_ATTRS_INT8\n" "static __inline__ void __tile_dpbusd(__tile1024i *dst, __tile1024i src0,\n" " __tile1024i src1) {\n" " dst->tile = _tile_dpbusd_internal(src0.row, src1.col, src0.col, dst->tile,\n" " src0.tile, src1.tile);\n" "}\n" "\n" "/// Compute dot-product of bytes in tiles with a source/destination accumulator.\n" "/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in src0 with\n" "/// corresponding unsigned 8-bit integers in src1, producing 4 intermediate\n" "/// 32-bit results. 
Sum these 4 results with the corresponding 32-bit integer in\n" "/// \"dst\", and store the 32-bit result back to tile \"dst\".\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the TDPBUUD instruction.\n" "///\n" "/// \\param dst\n" "/// The destination tile. Max size is 1024 Bytes.\n" "/// \\param src0\n" "/// The 1st source tile. Max size is 1024 Bytes.\n" "/// \\param src1\n" "/// The 2nd source tile. Max size is 1024 Bytes.\n" "__DEFAULT_FN_ATTRS_INT8\n" "static __inline__ void __tile_dpbuud(__tile1024i *dst, __tile1024i src0,\n" " __tile1024i src1) {\n" " dst->tile = _tile_dpbuud_internal(src0.row, src1.col, src0.col, dst->tile,\n" " src0.tile, src1.tile);\n" "}\n" "\n" "/// Store the tile specified by \"src\" to memory specifieid by \"base\" address and\n" "/// \"stride\".\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the TILESTORED instruction.\n" "///\n" "/// \\param base\n" "/// A pointer to base address.\n" "/// \\param stride\n" "/// The stride between the rows' data to be stored in memory.\n" "__DEFAULT_FN_ATTRS_TILE\n" "static __inline__ void __tile_stored(void *base, __SIZE_TYPE__ stride,\n" " __tile1024i src) {\n" " _tile_stored_internal(src.row, src.col, base, stride, src.tile);\n" "}\n" "\n" "/// Zero the tile specified by \"dst\".\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the TILEZERO instruction.\n" "///\n" "/// \\param dst\n" "/// The destination tile to be zero. 
Max size is 1024 Bytes.\n" "__DEFAULT_FN_ATTRS_TILE\n" "static __inline__ void __tile_zero(__tile1024i *dst) {\n" " dst->tile = __builtin_ia32_tilezero_internal(dst->row, dst->col);\n" "}\n" "\n" "/// Compute dot-product of BF16 (16-bit) floating-point pairs in tiles src0 and\n" "/// src1, accumulating the intermediate single-precision (32-bit) floating-point\n" "/// elements with elements in \"dst\", and store the 32-bit result back to tile\n" "/// \"dst\".\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the TDPBF16PS instruction.\n" "///\n" "/// \\param dst\n" "/// The destination tile. Max size is 1024 Bytes.\n" "/// \\param src0\n" "/// The 1st source tile. Max size is 1024 Bytes.\n" "/// \\param src1\n" "/// The 2nd source tile. Max size is 1024 Bytes.\n" "__DEFAULT_FN_ATTRS_BF16\n" "static __inline__ void __tile_dpbf16ps(__tile1024i *dst, __tile1024i src0,\n" " __tile1024i src1) {\n" " dst->tile = _tile_dpbf16ps_internal(src0.row, src1.col, src0.col, dst->tile,\n" " src0.tile, src1.tile);\n" "}\n" "\n" "/// Compute dot-product of FP16 (16-bit) floating-point pairs in tiles src0 and\n" "/// src1, accumulating the intermediate single-precision (32-bit) floating-point\n" "/// elements with elements in \"dst\", and store the 32-bit result back to tile\n" "/// \"dst\".\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the TDPFP16PS instruction.\n" "///\n" "/// \\param dst\n" "/// The destination tile. Max size is 1024 Bytes.\n" "/// \\param src0\n" "/// The 1st source tile. Max size is 1024 Bytes.\n" "/// \\param src1\n" "/// The 2nd source tile. 
Max size is 1024 Bytes.\n" "__DEFAULT_FN_ATTRS_FP16\n" "static __inline__ void __tile_dpfp16ps(__tile1024i *dst, __tile1024i src0,\n" " __tile1024i src1) {\n" " dst->tile = _tile_dpfp16ps_internal(src0.row, src1.col, src0.col, dst->tile,\n" " src0.tile, src1.tile);\n" "}\n" "\n" "#undef __DEFAULT_FN_ATTRS_TILE\n" "#undef __DEFAULT_FN_ATTRS_INT8\n" "#undef __DEFAULT_FN_ATTRS_BF16\n" "#undef __DEFAULT_FN_ATTRS_FP16\n" "\n" "#endif /* __x86_64__ */\n" "#endif /* __AMXINTRIN_H */\n" "" } , { "/builtins/arm64intr.h" , "/*===---- arm64intr.h - ARM64 Windows intrinsics -------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "/* Only include this if we're compiling for the windows platform. */\n" "#ifndef _MSC_VER\n" "#include_next \n" "#else\n" "\n" "#ifndef __ARM64INTR_H\n" "#define __ARM64INTR_H\n" "\n" "typedef enum\n" "{\n" " _ARM64_BARRIER_SY = 0xF,\n" " _ARM64_BARRIER_ST = 0xE,\n" " _ARM64_BARRIER_LD = 0xD,\n" " _ARM64_BARRIER_ISH = 0xB,\n" " _ARM64_BARRIER_ISHST = 0xA,\n" " _ARM64_BARRIER_ISHLD = 0x9,\n" " _ARM64_BARRIER_NSH = 0x7,\n" " _ARM64_BARRIER_NSHST = 0x6,\n" " _ARM64_BARRIER_NSHLD = 0x5,\n" " _ARM64_BARRIER_OSH = 0x3,\n" " _ARM64_BARRIER_OSHST = 0x2,\n" " _ARM64_BARRIER_OSHLD = 0x1\n" "} _ARM64INTR_BARRIER_TYPE;\n" "\n" "#endif /* __ARM64INTR_H */\n" "#endif /* _MSC_VER */\n" "" } , { "/builtins/arm_acle.h" , "/*===---- arm_acle.h - ARM Non-Neon intrinsics -----------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " 
*===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __ARM_ACLE_H\n" "#define __ARM_ACLE_H\n" "\n" "#ifndef __ARM_ACLE\n" "#error \"ACLE intrinsics support not enabled.\"\n" "#endif\n" "\n" "#include \n" "\n" "#if defined(__cplusplus)\n" "extern \"C\" {\n" "#endif\n" "\n" "/* 8 SYNCHRONIZATION, BARRIER AND HINT INTRINSICS */\n" "/* 8.3 Memory barriers */\n" "#if !__has_builtin(__dmb)\n" "#define __dmb(i) __builtin_arm_dmb(i)\n" "#endif\n" "#if !__has_builtin(__dsb)\n" "#define __dsb(i) __builtin_arm_dsb(i)\n" "#endif\n" "#if !__has_builtin(__isb)\n" "#define __isb(i) __builtin_arm_isb(i)\n" "#endif\n" "\n" "/* 8.4 Hints */\n" "\n" "#if !__has_builtin(__wfi)\n" "static __inline__ void __attribute__((__always_inline__, __nodebug__)) __wfi(void) {\n" " __builtin_arm_wfi();\n" "}\n" "#endif\n" "\n" "#if !__has_builtin(__wfe)\n" "static __inline__ void __attribute__((__always_inline__, __nodebug__)) __wfe(void) {\n" " __builtin_arm_wfe();\n" "}\n" "#endif\n" "\n" "#if !__has_builtin(__sev)\n" "static __inline__ void __attribute__((__always_inline__, __nodebug__)) __sev(void) {\n" " __builtin_arm_sev();\n" "}\n" "#endif\n" "\n" "#if !__has_builtin(__sevl)\n" "static __inline__ void __attribute__((__always_inline__, __nodebug__)) __sevl(void) {\n" " __builtin_arm_sevl();\n" "}\n" "#endif\n" "\n" "#if !__has_builtin(__yield)\n" "static __inline__ void __attribute__((__always_inline__, __nodebug__)) __yield(void) {\n" " __builtin_arm_yield();\n" "}\n" "#endif\n" "\n" "#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE\n" "#define __dbg(t) __builtin_arm_dbg(t)\n" "#endif\n" "\n" "/* 8.5 Swap */\n" "static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))\n" "__swp(uint32_t __x, volatile uint32_t *__p) {\n" " uint32_t v;\n" " do\n" " v = __builtin_arm_ldrex(__p);\n" " while (__builtin_arm_strex(__x, __p));\n" " return v;\n" "}\n" "\n" "/* 8.6 Memory prefetch intrinsics */\n" "/* 8.6.1 Data prefetch */\n" 
"#define __pld(addr) __pldx(0, 0, 0, addr)\n" "\n" "#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE\n" "#define __pldx(access_kind, cache_level, retention_policy, addr) \\\n" " __builtin_arm_prefetch(addr, access_kind, 1)\n" "#else\n" "#define __pldx(access_kind, cache_level, retention_policy, addr) \\\n" " __builtin_arm_prefetch(addr, access_kind, cache_level, retention_policy, 1)\n" "#endif\n" "\n" "/* 8.6.2 Instruction prefetch */\n" "#define __pli(addr) __plix(0, 0, addr)\n" "\n" "#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE\n" "#define __plix(cache_level, retention_policy, addr) \\\n" " __builtin_arm_prefetch(addr, 0, 0)\n" "#else\n" "#define __plix(cache_level, retention_policy, addr) \\\n" " __builtin_arm_prefetch(addr, 0, cache_level, retention_policy, 0)\n" "#endif\n" "\n" "/* 8.7 NOP */\n" "#if !defined(_MSC_VER) || !defined(__aarch64__)\n" "static __inline__ void __attribute__((__always_inline__, __nodebug__)) __nop(void) {\n" " __builtin_arm_nop();\n" "}\n" "#endif\n" "\n" "/* 9 DATA-PROCESSING INTRINSICS */\n" "/* 9.2 Miscellaneous data-processing intrinsics */\n" "/* ROR */\n" "static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))\n" "__ror(uint32_t __x, uint32_t __y) {\n" " __y %= 32;\n" " if (__y == 0)\n" " return __x;\n" " return (__x >> __y) | (__x << (32 - __y));\n" "}\n" "\n" "static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))\n" "__rorll(uint64_t __x, uint32_t __y) {\n" " __y %= 64;\n" " if (__y == 0)\n" " return __x;\n" " return (__x >> __y) | (__x << (64 - __y));\n" "}\n" "\n" "static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))\n" "__rorl(unsigned long __x, uint32_t __y) {\n" "#if __SIZEOF_LONG__ == 4\n" " return __ror(__x, __y);\n" "#else\n" " return __rorll(__x, __y);\n" "#endif\n" "}\n" "\n" "\n" "/* CLZ */\n" "static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))\n" "__clz(uint32_t __t) {\n" " return __builtin_arm_clz(__t);\n" 
"}\n" "\n" "static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))\n" "__clzl(unsigned long __t) {\n" "#if __SIZEOF_LONG__ == 4\n" " return __builtin_arm_clz(__t);\n" "#else\n" " return __builtin_arm_clz64(__t);\n" "#endif\n" "}\n" "\n" "static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))\n" "__clzll(uint64_t __t) {\n" " return __builtin_arm_clz64(__t);\n" "}\n" "\n" "/* CLS */\n" "static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))\n" "__cls(uint32_t __t) {\n" " return __builtin_arm_cls(__t);\n" "}\n" "\n" "static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))\n" "__clsl(unsigned long __t) {\n" "#if __SIZEOF_LONG__ == 4\n" " return __builtin_arm_cls(__t);\n" "#else\n" " return __builtin_arm_cls64(__t);\n" "#endif\n" "}\n" "\n" "static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))\n" "__clsll(uint64_t __t) {\n" " return __builtin_arm_cls64(__t);\n" "}\n" "\n" "/* REV */\n" "static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))\n" "__rev(uint32_t __t) {\n" " return __builtin_bswap32(__t);\n" "}\n" "\n" "static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))\n" "__revl(unsigned long __t) {\n" "#if __SIZEOF_LONG__ == 4\n" " return __builtin_bswap32(__t);\n" "#else\n" " return __builtin_bswap64(__t);\n" "#endif\n" "}\n" "\n" "static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))\n" "__revll(uint64_t __t) {\n" " return __builtin_bswap64(__t);\n" "}\n" "\n" "/* REV16 */\n" "static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))\n" "__rev16(uint32_t __t) {\n" " return __ror(__rev(__t), 16);\n" "}\n" "\n" "static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))\n" "__rev16ll(uint64_t __t) {\n" " return (((uint64_t)__rev16(__t >> 32)) << 32) | (uint64_t)__rev16((uint32_t)__t);\n" "}\n" "\n" "static __inline__ unsigned long 
__attribute__((__always_inline__, __nodebug__))\n" "__rev16l(unsigned long __t) {\n" "#if __SIZEOF_LONG__ == 4\n" " return __rev16(__t);\n" "#else\n" " return __rev16ll(__t);\n" "#endif\n" "}\n" "\n" "/* REVSH */\n" "static __inline__ int16_t __attribute__((__always_inline__, __nodebug__))\n" "__revsh(int16_t __t) {\n" " return (int16_t)__builtin_bswap16((uint16_t)__t);\n" "}\n" "\n" "/* RBIT */\n" "static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))\n" "__rbit(uint32_t __t) {\n" " return __builtin_arm_rbit(__t);\n" "}\n" "\n" "static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))\n" "__rbitll(uint64_t __t) {\n" "#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE\n" " return (((uint64_t)__builtin_arm_rbit(__t)) << 32) |\n" " __builtin_arm_rbit(__t >> 32);\n" "#else\n" " return __builtin_arm_rbit64(__t);\n" "#endif\n" "}\n" "\n" "static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))\n" "__rbitl(unsigned long __t) {\n" "#if __SIZEOF_LONG__ == 4\n" " return __rbit(__t);\n" "#else\n" " return __rbitll(__t);\n" "#endif\n" "}\n" "\n" "/*\n" " * 9.3 16-bit multiplications\n" " */\n" "#if defined(__ARM_FEATURE_DSP) && __ARM_FEATURE_DSP\n" "static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))\n" "__smulbb(int32_t __a, int32_t __b) {\n" " return __builtin_arm_smulbb(__a, __b);\n" "}\n" "static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))\n" "__smulbt(int32_t __a, int32_t __b) {\n" " return __builtin_arm_smulbt(__a, __b);\n" "}\n" "static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))\n" "__smultb(int32_t __a, int32_t __b) {\n" " return __builtin_arm_smultb(__a, __b);\n" "}\n" "static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))\n" "__smultt(int32_t __a, int32_t __b) {\n" " return __builtin_arm_smultt(__a, __b);\n" "}\n" "static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))\n" "__smulwb(int32_t __a, int32_t 
__b) {\n" " return __builtin_arm_smulwb(__a, __b);\n" "}\n" "static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))\n" "__smulwt(int32_t __a, int32_t __b) {\n" " return __builtin_arm_smulwt(__a, __b);\n" "}\n" "#endif\n" "\n" "/*\n" " * 9.4 Saturating intrinsics\n" " *\n" " * FIXME: Change guard to their corresponding __ARM_FEATURE flag when Q flag\n" " * intrinsics are implemented and the flag is enabled.\n" " */\n" "/* 9.4.1 Width-specified saturation intrinsics */\n" "#if defined(__ARM_FEATURE_SAT) && __ARM_FEATURE_SAT\n" "#define __ssat(x, y) __builtin_arm_ssat(x, y)\n" "#define __usat(x, y) __builtin_arm_usat(x, y)\n" "#endif\n" "\n" "/* 9.4.2 Saturating addition and subtraction intrinsics */\n" "#if defined(__ARM_FEATURE_DSP) && __ARM_FEATURE_DSP\n" "static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))\n" "__qadd(int32_t __t, int32_t __v) {\n" " return __builtin_arm_qadd(__t, __v);\n" "}\n" "\n" "static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))\n" "__qsub(int32_t __t, int32_t __v) {\n" " return __builtin_arm_qsub(__t, __v);\n" "}\n" "\n" "static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))\n" "__qdbl(int32_t __t) {\n" " return __builtin_arm_qadd(__t, __t);\n" "}\n" "#endif\n" "\n" "/* 9.4.3 Accumultating multiplications */\n" "#if defined(__ARM_FEATURE_DSP) && __ARM_FEATURE_DSP\n" "static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))\n" "__smlabb(int32_t __a, int32_t __b, int32_t __c) {\n" " return __builtin_arm_smlabb(__a, __b, __c);\n" "}\n" "static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))\n" "__smlabt(int32_t __a, int32_t __b, int32_t __c) {\n" " return __builtin_arm_smlabt(__a, __b, __c);\n" "}\n" "static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))\n" "__smlatb(int32_t __a, int32_t __b, int32_t __c) {\n" " return __builtin_arm_smlatb(__a, __b, __c);\n" "}\n" "static __inline__ int32_t 
__attribute__((__always_inline__, __nodebug__))\n" "__smlatt(int32_t __a, int32_t __b, int32_t __c) {\n" " return __builtin_arm_smlatt(__a, __b, __c);\n" "}\n" "static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))\n" "__smlawb(int32_t __a, int32_t __b, int32_t __c) {\n" " return __builtin_arm_smlawb(__a, __b, __c);\n" "}\n" "static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))\n" "__smlawt(int32_t __a, int32_t __b, int32_t __c) {\n" " return __builtin_arm_smlawt(__a, __b, __c);\n" "}\n" "#endif\n" "\n" "\n" "/* 9.5.4 Parallel 16-bit saturation */\n" "#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32\n" "#define __ssat16(x, y) __builtin_arm_ssat16(x, y)\n" "#define __usat16(x, y) __builtin_arm_usat16(x, y)\n" "#endif\n" "\n" "/* 9.5.5 Packing and unpacking */\n" "#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32\n" "typedef int32_t int8x4_t;\n" "typedef int32_t int16x2_t;\n" "typedef uint32_t uint8x4_t;\n" "typedef uint32_t uint16x2_t;\n" "\n" "static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))\n" "__sxtab16(int16x2_t __a, int8x4_t __b) {\n" " return __builtin_arm_sxtab16(__a, __b);\n" "}\n" "static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))\n" "__sxtb16(int8x4_t __a) {\n" " return __builtin_arm_sxtb16(__a);\n" "}\n" "static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))\n" "__uxtab16(int16x2_t __a, int8x4_t __b) {\n" " return __builtin_arm_uxtab16(__a, __b);\n" "}\n" "static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))\n" "__uxtb16(int8x4_t __a) {\n" " return __builtin_arm_uxtb16(__a);\n" "}\n" "#endif\n" "\n" "/* 9.5.6 Parallel selection */\n" "#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32\n" "static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))\n" "__sel(uint8x4_t __a, uint8x4_t __b) {\n" " return __builtin_arm_sel(__a, __b);\n" "}\n" "#endif\n" "\n" "/* 9.5.7 Parallel 8-bit 
addition and subtraction */\n" "#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32\n" "static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))\n" "__qadd8(int8x4_t __a, int8x4_t __b) {\n" " return __builtin_arm_qadd8(__a, __b);\n" "}\n" "static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))\n" "__qsub8(int8x4_t __a, int8x4_t __b) {\n" " return __builtin_arm_qsub8(__a, __b);\n" "}\n" "static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))\n" "__sadd8(int8x4_t __a, int8x4_t __b) {\n" " return __builtin_arm_sadd8(__a, __b);\n" "}\n" "static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))\n" "__shadd8(int8x4_t __a, int8x4_t __b) {\n" " return __builtin_arm_shadd8(__a, __b);\n" "}\n" "static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))\n" "__shsub8(int8x4_t __a, int8x4_t __b) {\n" " return __builtin_arm_shsub8(__a, __b);\n" "}\n" "static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))\n" "__ssub8(int8x4_t __a, int8x4_t __b) {\n" " return __builtin_arm_ssub8(__a, __b);\n" "}\n" "static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))\n" "__uadd8(uint8x4_t __a, uint8x4_t __b) {\n" " return __builtin_arm_uadd8(__a, __b);\n" "}\n" "static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))\n" "__uhadd8(uint8x4_t __a, uint8x4_t __b) {\n" " return __builtin_arm_uhadd8(__a, __b);\n" "}\n" "static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))\n" "__uhsub8(uint8x4_t __a, uint8x4_t __b) {\n" " return __builtin_arm_uhsub8(__a, __b);\n" "}\n" "static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))\n" "__uqadd8(uint8x4_t __a, uint8x4_t __b) {\n" " return __builtin_arm_uqadd8(__a, __b);\n" "}\n" "static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))\n" "__uqsub8(uint8x4_t __a, uint8x4_t __b) {\n" " return __builtin_arm_uqsub8(__a, __b);\n" "}\n" "static 
__inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))\n" "__usub8(uint8x4_t __a, uint8x4_t __b) {\n" " return __builtin_arm_usub8(__a, __b);\n" "}\n" "#endif\n" "\n" "/* 9.5.8 Sum of 8-bit absolute differences */\n" "#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32\n" "static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))\n" "__usad8(uint8x4_t __a, uint8x4_t __b) {\n" " return __builtin_arm_usad8(__a, __b);\n" "}\n" "static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))\n" "__usada8(uint8x4_t __a, uint8x4_t __b, uint32_t __c) {\n" " return __builtin_arm_usada8(__a, __b, __c);\n" "}\n" "#endif\n" "\n" "/* 9.5.9 Parallel 16-bit addition and subtraction */\n" "#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32\n" "static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))\n" "__qadd16(int16x2_t __a, int16x2_t __b) {\n" " return __builtin_arm_qadd16(__a, __b);\n" "}\n" "static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))\n" "__qasx(int16x2_t __a, int16x2_t __b) {\n" " return __builtin_arm_qasx(__a, __b);\n" "}\n" "static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))\n" "__qsax(int16x2_t __a, int16x2_t __b) {\n" " return __builtin_arm_qsax(__a, __b);\n" "}\n" "static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))\n" "__qsub16(int16x2_t __a, int16x2_t __b) {\n" " return __builtin_arm_qsub16(__a, __b);\n" "}\n" "static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))\n" "__sadd16(int16x2_t __a, int16x2_t __b) {\n" " return __builtin_arm_sadd16(__a, __b);\n" "}\n" "static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))\n" "__sasx(int16x2_t __a, int16x2_t __b) {\n" " return __builtin_arm_sasx(__a, __b);\n" "}\n" "static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))\n" "__shadd16(int16x2_t __a, int16x2_t __b) {\n" " return __builtin_arm_shadd16(__a, 
__b);\n" "}\n" "static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))\n" "__shasx(int16x2_t __a, int16x2_t __b) {\n" " return __builtin_arm_shasx(__a, __b);\n" "}\n" "static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))\n" "__shsax(int16x2_t __a, int16x2_t __b) {\n" " return __builtin_arm_shsax(__a, __b);\n" "}\n" "static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))\n" "__shsub16(int16x2_t __a, int16x2_t __b) {\n" " return __builtin_arm_shsub16(__a, __b);\n" "}\n" "static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))\n" "__ssax(int16x2_t __a, int16x2_t __b) {\n" " return __builtin_arm_ssax(__a, __b);\n" "}\n" "static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))\n" "__ssub16(int16x2_t __a, int16x2_t __b) {\n" " return __builtin_arm_ssub16(__a, __b);\n" "}\n" "static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))\n" "__uadd16(uint16x2_t __a, uint16x2_t __b) {\n" " return __builtin_arm_uadd16(__a, __b);\n" "}\n" "static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))\n" "__uasx(uint16x2_t __a, uint16x2_t __b) {\n" " return __builtin_arm_uasx(__a, __b);\n" "}\n" "static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))\n" "__uhadd16(uint16x2_t __a, uint16x2_t __b) {\n" " return __builtin_arm_uhadd16(__a, __b);\n" "}\n" "static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))\n" "__uhasx(uint16x2_t __a, uint16x2_t __b) {\n" " return __builtin_arm_uhasx(__a, __b);\n" "}\n" "static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))\n" "__uhsax(uint16x2_t __a, uint16x2_t __b) {\n" " return __builtin_arm_uhsax(__a, __b);\n" "}\n" "static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))\n" "__uhsub16(uint16x2_t __a, uint16x2_t __b) {\n" " return __builtin_arm_uhsub16(__a, __b);\n" "}\n" "static __inline__ uint16x2_t 
__attribute__((__always_inline__, __nodebug__))\n" "__uqadd16(uint16x2_t __a, uint16x2_t __b) {\n" " return __builtin_arm_uqadd16(__a, __b);\n" "}\n" "static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))\n" "__uqasx(uint16x2_t __a, uint16x2_t __b) {\n" " return __builtin_arm_uqasx(__a, __b);\n" "}\n" "static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))\n" "__uqsax(uint16x2_t __a, uint16x2_t __b) {\n" " return __builtin_arm_uqsax(__a, __b);\n" "}\n" "static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))\n" "__uqsub16(uint16x2_t __a, uint16x2_t __b) {\n" " return __builtin_arm_uqsub16(__a, __b);\n" "}\n" "static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))\n" "__usax(uint16x2_t __a, uint16x2_t __b) {\n" " return __builtin_arm_usax(__a, __b);\n" "}\n" "static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))\n" "__usub16(uint16x2_t __a, uint16x2_t __b) {\n" " return __builtin_arm_usub16(__a, __b);\n" "}\n" "#endif\n" "\n" "/* 9.5.10 Parallel 16-bit multiplications */\n" "#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32\n" "static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))\n" "__smlad(int16x2_t __a, int16x2_t __b, int32_t __c) {\n" " return __builtin_arm_smlad(__a, __b, __c);\n" "}\n" "static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))\n" "__smladx(int16x2_t __a, int16x2_t __b, int32_t __c) {\n" " return __builtin_arm_smladx(__a, __b, __c);\n" "}\n" "static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))\n" "__smlald(int16x2_t __a, int16x2_t __b, int64_t __c) {\n" " return __builtin_arm_smlald(__a, __b, __c);\n" "}\n" "static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))\n" "__smlaldx(int16x2_t __a, int16x2_t __b, int64_t __c) {\n" " return __builtin_arm_smlaldx(__a, __b, __c);\n" "}\n" "static __inline__ int32_t __attribute__((__always_inline__, 
__nodebug__))\n" "__smlsd(int16x2_t __a, int16x2_t __b, int32_t __c) {\n" " return __builtin_arm_smlsd(__a, __b, __c);\n" "}\n" "static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))\n" "__smlsdx(int16x2_t __a, int16x2_t __b, int32_t __c) {\n" " return __builtin_arm_smlsdx(__a, __b, __c);\n" "}\n" "static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))\n" "__smlsld(int16x2_t __a, int16x2_t __b, int64_t __c) {\n" " return __builtin_arm_smlsld(__a, __b, __c);\n" "}\n" "static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))\n" "__smlsldx(int16x2_t __a, int16x2_t __b, int64_t __c) {\n" " return __builtin_arm_smlsldx(__a, __b, __c);\n" "}\n" "static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))\n" "__smuad(int16x2_t __a, int16x2_t __b) {\n" " return __builtin_arm_smuad(__a, __b);\n" "}\n" "static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))\n" "__smuadx(int16x2_t __a, int16x2_t __b) {\n" " return __builtin_arm_smuadx(__a, __b);\n" "}\n" "static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))\n" "__smusd(int16x2_t __a, int16x2_t __b) {\n" " return __builtin_arm_smusd(__a, __b);\n" "}\n" "static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))\n" "__smusdx(int16x2_t __a, int16x2_t __b) {\n" " return __builtin_arm_smusdx(__a, __b);\n" "}\n" "#endif\n" "\n" "/* 9.7 CRC32 intrinsics */\n" "#if (defined(__ARM_FEATURE_CRC32) && __ARM_FEATURE_CRC32) || \\\n" " (defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE)\n" "static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target(\"crc\")))\n" "__crc32b(uint32_t __a, uint8_t __b) {\n" " return __builtin_arm_crc32b(__a, __b);\n" "}\n" "\n" "static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target(\"crc\")))\n" "__crc32h(uint32_t __a, uint16_t __b) {\n" " return __builtin_arm_crc32h(__a, __b);\n" "}\n" "\n" "static __inline__ uint32_t 
__attribute__((__always_inline__, __nodebug__, target(\"crc\")))\n" "__crc32w(uint32_t __a, uint32_t __b) {\n" " return __builtin_arm_crc32w(__a, __b);\n" "}\n" "\n" "static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target(\"crc\")))\n" "__crc32d(uint32_t __a, uint64_t __b) {\n" " return __builtin_arm_crc32d(__a, __b);\n" "}\n" "\n" "static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target(\"crc\")))\n" "__crc32cb(uint32_t __a, uint8_t __b) {\n" " return __builtin_arm_crc32cb(__a, __b);\n" "}\n" "\n" "static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target(\"crc\")))\n" "__crc32ch(uint32_t __a, uint16_t __b) {\n" " return __builtin_arm_crc32ch(__a, __b);\n" "}\n" "\n" "static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target(\"crc\")))\n" "__crc32cw(uint32_t __a, uint32_t __b) {\n" " return __builtin_arm_crc32cw(__a, __b);\n" "}\n" "\n" "static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target(\"crc\")))\n" "__crc32cd(uint32_t __a, uint64_t __b) {\n" " return __builtin_arm_crc32cd(__a, __b);\n" "}\n" "#endif\n" "\n" "/* Armv8.3-A Javascript conversion intrinsic */\n" "#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE\n" "static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target(\"v8.3a\")))\n" "__jcvt(double __a) {\n" " return __builtin_arm_jcvt(__a);\n" "}\n" "#endif\n" "\n" "/* Armv8.5-A FP rounding intrinsics */\n" "#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE\n" "static __inline__ float __attribute__((__always_inline__, __nodebug__, target(\"v8.5a\")))\n" "__rint32zf(float __a) {\n" " return __builtin_arm_rint32zf(__a);\n" "}\n" "\n" "static __inline__ double __attribute__((__always_inline__, __nodebug__, target(\"v8.5a\")))\n" "__rint32z(double __a) {\n" " return __builtin_arm_rint32z(__a);\n" "}\n" "\n" "static __inline__ float __attribute__((__always_inline__, __nodebug__, target(\"v8.5a\")))\n" 
"__rint64zf(float __a) {\n" " return __builtin_arm_rint64zf(__a);\n" "}\n" "\n" "static __inline__ double __attribute__((__always_inline__, __nodebug__, target(\"v8.5a\")))\n" "__rint64z(double __a) {\n" " return __builtin_arm_rint64z(__a);\n" "}\n" "\n" "static __inline__ float __attribute__((__always_inline__, __nodebug__, target(\"v8.5a\")))\n" "__rint32xf(float __a) {\n" " return __builtin_arm_rint32xf(__a);\n" "}\n" "\n" "static __inline__ double __attribute__((__always_inline__, __nodebug__, target(\"v8.5a\")))\n" "__rint32x(double __a) {\n" " return __builtin_arm_rint32x(__a);\n" "}\n" "\n" "static __inline__ float __attribute__((__always_inline__, __nodebug__, target(\"v8.5a\")))\n" "__rint64xf(float __a) {\n" " return __builtin_arm_rint64xf(__a);\n" "}\n" "\n" "static __inline__ double __attribute__((__always_inline__, __nodebug__, target(\"v8.5a\")))\n" "__rint64x(double __a) {\n" " return __builtin_arm_rint64x(__a);\n" "}\n" "#endif\n" "\n" "/* Armv8.7-A load/store 64-byte intrinsics */\n" "#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE\n" "typedef struct {\n" " uint64_t val[8];\n" "} data512_t;\n" "\n" "static __inline__ data512_t __attribute__((__always_inline__, __nodebug__, target(\"ls64\")))\n" "__arm_ld64b(const void *__addr) {\n" " data512_t __value;\n" " __builtin_arm_ld64b(__addr, __value.val);\n" " return __value;\n" "}\n" "static __inline__ void __attribute__((__always_inline__, __nodebug__, target(\"ls64\")))\n" "__arm_st64b(void *__addr, data512_t __value) {\n" " __builtin_arm_st64b(__addr, __value.val);\n" "}\n" "static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__, target(\"ls64\")))\n" "__arm_st64bv(void *__addr, data512_t __value) {\n" " return __builtin_arm_st64bv(__addr, __value.val);\n" "}\n" "static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__, target(\"ls64\")))\n" "__arm_st64bv0(void *__addr, data512_t __value) {\n" " return __builtin_arm_st64bv0(__addr, __value.val);\n" "}\n" 
"#endif\n" "\n" "/* 10.1 Special register intrinsics */\n" "#define __arm_rsr(sysreg) __builtin_arm_rsr(sysreg)\n" "#define __arm_rsr64(sysreg) __builtin_arm_rsr64(sysreg)\n" "#define __arm_rsr128(sysreg) __builtin_arm_rsr128(sysreg)\n" "#define __arm_rsrp(sysreg) __builtin_arm_rsrp(sysreg)\n" "#define __arm_rsrf(sysreg) __builtin_bit_cast(float, __arm_rsr(sysreg))\n" "#define __arm_rsrf64(sysreg) __builtin_bit_cast(double, __arm_rsr64(sysreg))\n" "#define __arm_wsr(sysreg, v) __builtin_arm_wsr(sysreg, v)\n" "#define __arm_wsr64(sysreg, v) __builtin_arm_wsr64(sysreg, v)\n" "#define __arm_wsr128(sysreg, v) __builtin_arm_wsr128(sysreg, v)\n" "#define __arm_wsrp(sysreg, v) __builtin_arm_wsrp(sysreg, v)\n" "#define __arm_wsrf(sysreg, v) __arm_wsr(sysreg, __builtin_bit_cast(uint32_t, v))\n" "#define __arm_wsrf64(sysreg, v) __arm_wsr64(sysreg, __builtin_bit_cast(uint64_t, v))\n" "\n" "/* Memory Tagging Extensions (MTE) Intrinsics */\n" "#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE\n" "#define __arm_mte_create_random_tag(__ptr, __mask) __builtin_arm_irg(__ptr, __mask)\n" "#define __arm_mte_increment_tag(__ptr, __tag_offset) __builtin_arm_addg(__ptr, __tag_offset)\n" "#define __arm_mte_exclude_tag(__ptr, __excluded) __builtin_arm_gmi(__ptr, __excluded)\n" "#define __arm_mte_get_tag(__ptr) __builtin_arm_ldg(__ptr)\n" "#define __arm_mte_set_tag(__ptr) __builtin_arm_stg(__ptr)\n" "#define __arm_mte_ptrdiff(__ptra, __ptrb) __builtin_arm_subp(__ptra, __ptrb)\n" "\n" "/* Memory Operations Intrinsics */\n" "#define __arm_mops_memset_tag(__tagged_address, __value, __size) \\\n" " __builtin_arm_mops_memset_tag(__tagged_address, __value, __size)\n" "#endif\n" "\n" "/* Transactional Memory Extension (TME) Intrinsics */\n" "#if defined(__ARM_FEATURE_TME) && __ARM_FEATURE_TME\n" "\n" "#define _TMFAILURE_REASON 0x00007fffu\n" "#define _TMFAILURE_RTRY 0x00008000u\n" "#define _TMFAILURE_CNCL 0x00010000u\n" "#define _TMFAILURE_MEM 0x00020000u\n" "#define _TMFAILURE_IMP 
0x00040000u\n" "#define _TMFAILURE_ERR 0x00080000u\n" "#define _TMFAILURE_SIZE 0x00100000u\n" "#define _TMFAILURE_NEST 0x00200000u\n" "#define _TMFAILURE_DBG 0x00400000u\n" "#define _TMFAILURE_INT 0x00800000u\n" "#define _TMFAILURE_TRIVIAL 0x01000000u\n" "\n" "#define __tstart() __builtin_arm_tstart()\n" "#define __tcommit() __builtin_arm_tcommit()\n" "#define __tcancel(__arg) __builtin_arm_tcancel(__arg)\n" "#define __ttest() __builtin_arm_ttest()\n" "\n" "#endif /* __ARM_FEATURE_TME */\n" "\n" "/* Armv8.5-A Random number generation intrinsics */\n" "#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE\n" "static __inline__ int __attribute__((__always_inline__, __nodebug__, target(\"rand\")))\n" "__rndr(uint64_t *__p) {\n" " return __builtin_arm_rndr(__p);\n" "}\n" "static __inline__ int __attribute__((__always_inline__, __nodebug__, target(\"rand\")))\n" "__rndrrs(uint64_t *__p) {\n" " return __builtin_arm_rndrrs(__p);\n" "}\n" "#endif\n" "\n" "#if defined(__cplusplus)\n" "}\n" "#endif\n" "\n" "#endif /* __ARM_ACLE_H */\n" "" } , { "/builtins/arm_bf16.h" , "/*===---- arm_bf16.h - ARM BF16 intrinsics -----------------------------------===\n" " *\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __ARM_BF16_H\n" "#define __ARM_BF16_H\n" "\n" "typedef __bf16 bfloat16_t;\n" "#define __ai static __inline__ __attribute__((__always_inline__, __nodebug__))\n" "\n" "\n" "#undef __ai\n" "\n" "#endif\n" "" } , { "/builtins/arm_cde.h" , "/*===---- arm_cde.h - ARM CDE intrinsics -----------------------------------===\n" " *\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * 
SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __ARM_CDE_H\n" "#define __ARM_CDE_H\n" "\n" "#if !__ARM_FEATURE_CDE\n" "#error \"CDE support not enabled\"\n" "#endif\n" "\n" "#include \n" "\n" "#ifdef __cplusplus\n" "extern \"C\" {\n" "#endif\n" "\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx1)))\n" "uint32_t __arm_cx1(int, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx1a)))\n" "uint32_t __arm_cx1a(int, uint32_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx1d)))\n" "uint64_t __arm_cx1d(int, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx1da)))\n" "uint64_t __arm_cx1da(int, uint64_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx2)))\n" "uint32_t __arm_cx2(int, uint32_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx2a)))\n" "uint32_t __arm_cx2a(int, uint32_t, uint32_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx2d)))\n" "uint64_t __arm_cx2d(int, uint32_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx2da)))\n" "uint64_t __arm_cx2da(int, uint64_t, uint32_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx3)))\n" "uint32_t __arm_cx3(int, uint32_t, uint32_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx3a)))\n" "uint32_t __arm_cx3a(int, uint32_t, uint32_t, uint32_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx3d)))\n" "uint64_t __arm_cx3d(int, uint32_t, uint32_t, uint32_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx3da)))\n" "uint64_t __arm_cx3da(int, uint64_t, uint32_t, uint32_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx1_u32)))\n" "uint32_t __arm_vcx1_u32(int, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx1a_u32)))\n" "uint32_t __arm_vcx1a_u32(int, uint32_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx1d_u64)))\n" "uint64_t __arm_vcx1d_u64(int, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx1da_u64)))\n" "uint64_t __arm_vcx1da_u64(int, uint64_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx2_u32)))\n" "uint32_t __arm_vcx2_u32(int, uint32_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx2a_u32)))\n" "uint32_t __arm_vcx2a_u32(int, uint32_t, uint32_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx2d_u64)))\n" "uint64_t __arm_vcx2d_u64(int, uint64_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx2da_u64)))\n" "uint64_t __arm_vcx2da_u64(int, uint64_t, uint64_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx3_u32)))\n" "uint32_t __arm_vcx3_u32(int, uint32_t, uint32_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx3a_u32)))\n" "uint32_t __arm_vcx3a_u32(int, uint32_t, uint32_t, uint32_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx3d_u64)))\n" "uint64_t __arm_vcx3d_u64(int, uint64_t, uint64_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx3da_u64)))\n" "uint64_t __arm_vcx3da_u64(int, uint64_t, uint64_t, uint64_t, uint32_t);\n" "\n" "#if 
__ARM_FEATURE_MVE\n" "\n" "typedef uint16_t mve_pred16_t;\n" "typedef __attribute__((__neon_vector_type__(8), __clang_arm_mve_strict_polymorphism)) int16_t int16x8_t;\n" "typedef __attribute__((__neon_vector_type__(4), __clang_arm_mve_strict_polymorphism)) int32_t int32x4_t;\n" "typedef __attribute__((__neon_vector_type__(2), __clang_arm_mve_strict_polymorphism)) int64_t int64x2_t;\n" "typedef __attribute__((__neon_vector_type__(16), __clang_arm_mve_strict_polymorphism)) int8_t int8x16_t;\n" "typedef __attribute__((__neon_vector_type__(8), __clang_arm_mve_strict_polymorphism)) uint16_t uint16x8_t;\n" "typedef __attribute__((__neon_vector_type__(4), __clang_arm_mve_strict_polymorphism)) uint32_t uint32x4_t;\n" "typedef __attribute__((__neon_vector_type__(2), __clang_arm_mve_strict_polymorphism)) uint64_t uint64x2_t;\n" "typedef __attribute__((__neon_vector_type__(16), __clang_arm_mve_strict_polymorphism)) uint8_t uint8x16_t;\n" "\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_s16)))\n" "int16x8_t __arm_vcx1q_m(int, int16x8_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_s32)))\n" "int32x4_t __arm_vcx1q_m(int, int32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_s64)))\n" "int64x2_t __arm_vcx1q_m(int, int64x2_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_s8)))\n" "int8x16_t __arm_vcx1q_m(int, int8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_u16)))\n" "uint16x8_t __arm_vcx1q_m(int, uint16x8_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_u32)))\n" "uint32x4_t 
__arm_vcx1q_m(int, uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_u64)))\n" "uint64x2_t __arm_vcx1q_m(int, uint64x2_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_u8)))\n" "uint8x16_t __arm_vcx1q_m(int, uint8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_u8)))\n" "uint8x16_t __arm_vcx1q_u8(int, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_s16)))\n" "int16x8_t __arm_vcx1qa_m(int, int16x8_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_s32)))\n" "int32x4_t __arm_vcx1qa_m(int, int32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_s64)))\n" "int64x2_t __arm_vcx1qa_m(int, int64x2_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_s8)))\n" "int8x16_t __arm_vcx1qa_m(int, int8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_u16)))\n" "uint16x8_t __arm_vcx1qa_m(int, uint16x8_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_u32)))\n" "uint32x4_t __arm_vcx1qa_m(int, uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_u64)))\n" "uint64x2_t __arm_vcx1qa_m(int, uint64x2_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_u8)))\n" "uint8x16_t 
__arm_vcx1qa_m(int, uint8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_s16)))\n" "int16x8_t __arm_vcx1qa(int, int16x8_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_s32)))\n" "int32x4_t __arm_vcx1qa(int, int32x4_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_s64)))\n" "int64x2_t __arm_vcx1qa(int, int64x2_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_s8)))\n" "int8x16_t __arm_vcx1qa(int, int8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_u16)))\n" "uint16x8_t __arm_vcx1qa(int, uint16x8_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_u32)))\n" "uint32x4_t __arm_vcx1qa(int, uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_u64)))\n" "uint64x2_t __arm_vcx1qa(int, uint64x2_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_u8)))\n" "uint8x16_t __arm_vcx1qa(int, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_s16)))\n" "int16x8_t __arm_vcx2q_m_impl(int, int16x8_t, uint8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_s32)))\n" "int32x4_t __arm_vcx2q_m_impl(int, int32x4_t, uint8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_s64)))\n" "int64x2_t __arm_vcx2q_m_impl(int, int64x2_t, uint8x16_t, uint32_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_s8)))\n" "int8x16_t __arm_vcx2q_m_impl(int, int8x16_t, uint8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_u16)))\n" "uint16x8_t __arm_vcx2q_m_impl(int, uint16x8_t, uint8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_u32)))\n" "uint32x4_t __arm_vcx2q_m_impl(int, uint32x4_t, uint8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_u64)))\n" "uint64x2_t __arm_vcx2q_m_impl(int, uint64x2_t, uint8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_u8)))\n" "uint8x16_t __arm_vcx2q_m_impl(int, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_s16)))\n" "int16x8_t __arm_vcx2q(int, int16x8_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_s32)))\n" "int32x4_t __arm_vcx2q(int, int32x4_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_s64)))\n" "int64x2_t __arm_vcx2q(int, int64x2_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_s8)))\n" "int8x16_t __arm_vcx2q(int, int8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u16)))\n" "uint16x8_t __arm_vcx2q(int, uint16x8_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u32)))\n" "uint32x4_t 
__arm_vcx2q(int, uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u64)))\n" "uint64x2_t __arm_vcx2q(int, uint64x2_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8)))\n" "uint8x16_t __arm_vcx2q(int, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_s16)))\n" "uint8x16_t __arm_vcx2q_u8(int, int16x8_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_s32)))\n" "uint8x16_t __arm_vcx2q_u8(int, int32x4_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_s64)))\n" "uint8x16_t __arm_vcx2q_u8(int, int64x2_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_s8)))\n" "uint8x16_t __arm_vcx2q_u8(int, int8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_u16)))\n" "uint8x16_t __arm_vcx2q_u8(int, uint16x8_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_u32)))\n" "uint8x16_t __arm_vcx2q_u8(int, uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_u64)))\n" "uint8x16_t __arm_vcx2q_u8(int, uint64x2_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_u8)))\n" "uint8x16_t __arm_vcx2q_u8(int, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_s16)))\n" "int16x8_t __arm_vcx2qa_impl(int, int16x8_t, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_s32)))\n" "int32x4_t __arm_vcx2qa_impl(int, int32x4_t, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_s64)))\n" "int64x2_t __arm_vcx2qa_impl(int, int64x2_t, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_s8)))\n" "int8x16_t __arm_vcx2qa_impl(int, int8x16_t, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_u16)))\n" "uint16x8_t __arm_vcx2qa_impl(int, uint16x8_t, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_u32)))\n" "uint32x4_t __arm_vcx2qa_impl(int, uint32x4_t, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_u64)))\n" "uint64x2_t __arm_vcx2qa_impl(int, uint64x2_t, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_u8)))\n" "uint8x16_t __arm_vcx2qa_impl(int, uint8x16_t, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_s16)))\n" "int16x8_t __arm_vcx2qa_m_impl(int, int16x8_t, uint8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_s32)))\n" "int32x4_t __arm_vcx2qa_m_impl(int, int32x4_t, uint8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_s64)))\n" "int64x2_t __arm_vcx2qa_m_impl(int, int64x2_t, uint8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_s8)))\n" "int8x16_t __arm_vcx2qa_m_impl(int, int8x16_t, uint8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_u16)))\n" "uint16x8_t __arm_vcx2qa_m_impl(int, uint16x8_t, uint8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_u32)))\n" "uint32x4_t __arm_vcx2qa_m_impl(int, uint32x4_t, uint8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_u64)))\n" "uint64x2_t __arm_vcx2qa_m_impl(int, uint64x2_t, uint8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_u8)))\n" "uint8x16_t __arm_vcx2qa_m_impl(int, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_s16)))\n" "int16x8_t __arm_vcx3q_impl(int, int16x8_t, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_s32)))\n" "int32x4_t __arm_vcx3q_impl(int, int32x4_t, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_s64)))\n" "int64x2_t __arm_vcx3q_impl(int, int64x2_t, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_s8)))\n" "int8x16_t __arm_vcx3q_impl(int, int8x16_t, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_u16)))\n" "uint16x8_t __arm_vcx3q_impl(int, uint16x8_t, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_u32)))\n" "uint32x4_t __arm_vcx3q_impl(int, uint32x4_t, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_u64)))\n" "uint64x2_t __arm_vcx3q_impl(int, uint64x2_t, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_u8)))\n" "uint8x16_t __arm_vcx3q_impl(int, uint8x16_t, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_s16)))\n" "int16x8_t __arm_vcx3q_m_impl(int, int16x8_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_s32)))\n" "int32x4_t __arm_vcx3q_m_impl(int, int32x4_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_s64)))\n" "int64x2_t __arm_vcx3q_m_impl(int, int64x2_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_s8)))\n" "int8x16_t __arm_vcx3q_m_impl(int, int8x16_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_u16)))\n" "uint16x8_t __arm_vcx3q_m_impl(int, uint16x8_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_u32)))\n" "uint32x4_t __arm_vcx3q_m_impl(int, uint32x4_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_u64)))\n" "uint64x2_t __arm_vcx3q_m_impl(int, uint64x2_t, uint8x16_t, 
uint8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_u8)))\n" "uint8x16_t __arm_vcx3q_m_impl(int, uint8x16_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_s16)))\n" "uint8x16_t __arm_vcx3q_u8_impl(int, int16x8_t, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_s32)))\n" "uint8x16_t __arm_vcx3q_u8_impl(int, int32x4_t, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_s64)))\n" "uint8x16_t __arm_vcx3q_u8_impl(int, int64x2_t, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_s8)))\n" "uint8x16_t __arm_vcx3q_u8_impl(int, int8x16_t, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_u16)))\n" "uint8x16_t __arm_vcx3q_u8_impl(int, uint16x8_t, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_u32)))\n" "uint8x16_t __arm_vcx3q_u8_impl(int, uint32x4_t, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_u64)))\n" "uint8x16_t __arm_vcx3q_u8_impl(int, uint64x2_t, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_u8)))\n" "uint8x16_t __arm_vcx3q_u8_impl(int, uint8x16_t, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_s16)))\n" "int16x8_t __arm_vcx3qa_impl(int, int16x8_t, uint8x16_t, uint8x16_t, 
uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_s32)))\n" "int32x4_t __arm_vcx3qa_impl(int, int32x4_t, uint8x16_t, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_s64)))\n" "int64x2_t __arm_vcx3qa_impl(int, int64x2_t, uint8x16_t, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_s8)))\n" "int8x16_t __arm_vcx3qa_impl(int, int8x16_t, uint8x16_t, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_u16)))\n" "uint16x8_t __arm_vcx3qa_impl(int, uint16x8_t, uint8x16_t, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_u32)))\n" "uint32x4_t __arm_vcx3qa_impl(int, uint32x4_t, uint8x16_t, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_u64)))\n" "uint64x2_t __arm_vcx3qa_impl(int, uint64x2_t, uint8x16_t, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_u8)))\n" "uint8x16_t __arm_vcx3qa_impl(int, uint8x16_t, uint8x16_t, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_s16)))\n" "int16x8_t __arm_vcx3qa_m_impl(int, int16x8_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_s32)))\n" "int32x4_t __arm_vcx3qa_m_impl(int, int32x4_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_s64)))\n" "int64x2_t 
__arm_vcx3qa_m_impl(int, int64x2_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_s8)))\n" "int8x16_t __arm_vcx3qa_m_impl(int, int8x16_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_u16)))\n" "uint16x8_t __arm_vcx3qa_m_impl(int, uint16x8_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_u32)))\n" "uint32x4_t __arm_vcx3qa_m_impl(int, uint32x4_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_u64)))\n" "uint64x2_t __arm_vcx3qa_m_impl(int, uint64x2_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_u8)))\n" "uint8x16_t __arm_vcx3qa_m_impl(int, uint8x16_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u8)))\n" "int16x8_t __arm_vreinterpretq_s16_u8(uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u8)))\n" "int32x4_t __arm_vreinterpretq_s32_u8(uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u8)))\n" "int64x2_t __arm_vreinterpretq_s64_u8(uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u8)))\n" "int8x16_t __arm_vreinterpretq_s8_u8(uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u8)))\n" "uint16x8_t __arm_vreinterpretq_u16_u8(uint8x16_t);\n" "static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u8)))\n" "uint32x4_t __arm_vreinterpretq_u32_u8(uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u8)))\n" "uint64x2_t __arm_vreinterpretq_u64_u8(uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s16)))\n" "uint8x16_t __arm_vreinterpretq_u8(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s32)))\n" "uint8x16_t __arm_vreinterpretq_u8(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s64)))\n" "uint8x16_t __arm_vreinterpretq_u8(int64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s8)))\n" "uint8x16_t __arm_vreinterpretq_u8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u16)))\n" "uint8x16_t __arm_vreinterpretq_u8(uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u32)))\n" "uint8x16_t __arm_vreinterpretq_u8(uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u64)))\n" "uint8x16_t __arm_vreinterpretq_u8(uint64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vreinterpretq_u8_u8)))\n" "uint8x16_t __arm_vreinterpretq_u8(uint8x16_t);\n" "#define __arm_vcx2q_m(cp, inactive, n, imm, pred) __arm_vcx2q_m_impl((cp), (inactive), __arm_vreinterpretq_u8(n), (imm), (pred))\n" "#define __arm_vcx2qa(cp, acc, n, imm) __arm_vcx2qa_impl((cp), (acc), __arm_vreinterpretq_u8(n), (imm))\n" "#define __arm_vcx2qa_m(cp, acc, n, imm, pred) 
__arm_vcx2qa_m_impl((cp), (acc), __arm_vreinterpretq_u8(n), (imm), (pred))\n" "#define __arm_vcx3q(cp, n, m, imm) __arm_vcx3q_impl((cp), (n), __arm_vreinterpretq_u8(m), (imm))\n" "#define __arm_vcx3q_m(cp, inactive, n, m, imm, pred) __arm_vcx3q_m_impl((cp), (inactive), __arm_vreinterpretq_u8(n), __arm_vreinterpretq_u8(m), (imm), (pred))\n" "#define __arm_vcx3q_u8(cp, n, m, imm) __arm_vcx3q_u8_impl((cp), (n), __arm_vreinterpretq_u8(m), (imm))\n" "#define __arm_vcx3qa(cp, acc, n, m, imm) __arm_vcx3qa_impl((cp), (acc), __arm_vreinterpretq_u8(n), __arm_vreinterpretq_u8(m), (imm))\n" "#define __arm_vcx3qa_m(cp, acc, n, m, imm, pred) __arm_vcx3qa_m_impl((cp), (acc), __arm_vreinterpretq_u8(n), __arm_vreinterpretq_u8(m), (imm), (pred))\n" "\n" "#endif /* __ARM_FEATURE_MVE */\n" "\n" "#if __ARM_FEATURE_MVE & 2\n" "\n" "typedef __fp16 float16_t;\n" "typedef float float32_t;\n" "typedef __attribute__((__neon_vector_type__(8), __clang_arm_mve_strict_polymorphism)) float16_t float16x8_t;\n" "typedef __attribute__((__neon_vector_type__(4), __clang_arm_mve_strict_polymorphism)) float32_t float32x4_t;\n" "\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_f16)))\n" "float16x8_t __arm_vcx1q_m(int, float16x8_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_f32)))\n" "float32x4_t __arm_vcx1q_m(int, float32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_f16)))\n" "float16x8_t __arm_vcx1qa(int, float16x8_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_f32)))\n" "float32x4_t __arm_vcx1qa(int, float32x4_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_f16)))\n" "float16x8_t __arm_vcx1qa_m(int, float16x8_t, uint32_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_f32)))\n" "float32x4_t __arm_vcx1qa_m(int, float32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_f16)))\n" "float16x8_t __arm_vcx2q(int, float16x8_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_f32)))\n" "float32x4_t __arm_vcx2q(int, float32x4_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_f16)))\n" "float16x8_t __arm_vcx2q_m_impl(int, float16x8_t, uint8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_f32)))\n" "float32x4_t __arm_vcx2q_m_impl(int, float32x4_t, uint8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_f16)))\n" "uint8x16_t __arm_vcx2q_u8(int, float16x8_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_f32)))\n" "uint8x16_t __arm_vcx2q_u8(int, float32x4_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_f16)))\n" "float16x8_t __arm_vcx2qa_impl(int, float16x8_t, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_f32)))\n" "float32x4_t __arm_vcx2qa_impl(int, float32x4_t, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_f16)))\n" "float16x8_t __arm_vcx2qa_m_impl(int, float16x8_t, uint8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_f32)))\n" "float32x4_t __arm_vcx2qa_m_impl(int, float32x4_t, uint8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_f16)))\n" "float16x8_t __arm_vcx3q_impl(int, float16x8_t, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_f32)))\n" "float32x4_t __arm_vcx3q_impl(int, float32x4_t, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_f16)))\n" "float16x8_t __arm_vcx3q_m_impl(int, float16x8_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_f32)))\n" "float32x4_t __arm_vcx3q_m_impl(int, float32x4_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_f16)))\n" "uint8x16_t __arm_vcx3q_u8_impl(int, float16x8_t, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_f32)))\n" "uint8x16_t __arm_vcx3q_u8_impl(int, float32x4_t, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_f16)))\n" "float16x8_t __arm_vcx3qa_impl(int, float16x8_t, uint8x16_t, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_f32)))\n" "float32x4_t __arm_vcx3qa_impl(int, float32x4_t, uint8x16_t, uint8x16_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_f16)))\n" "float16x8_t __arm_vcx3qa_m_impl(int, float16x8_t, uint8x16_t, uint8x16_t, uint32_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_f32)))\n" "float32x4_t __arm_vcx3qa_m_impl(int, float32x4_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u8)))\n" "float16x8_t __arm_vreinterpretq_f16_u8(uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u8)))\n" "float32x4_t __arm_vreinterpretq_f32_u8(uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f16)))\n" "uint8x16_t __arm_vreinterpretq_u8(float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f32)))\n" "uint8x16_t __arm_vreinterpretq_u8(float32x4_t);\n" "\n" "#endif /* __ARM_FEATURE_MVE & 2 */\n" "\n" "#ifdef __cplusplus\n" "} /* extern \"C\" */\n" "#endif\n" "\n" "#endif /* __ARM_CDE_H */\n" "" } , { "/builtins/arm_cmse.h" , "//===---- arm_cmse.h - Arm CMSE support -----------------------------------===//\n" "//\n" "// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" "// See https://llvm.org/LICENSE.txt for license information.\n" "// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" "//\n" "//===----------------------------------------------------------------------===//\n" "\n" "#ifndef __ARM_CMSE_H\n" "#define __ARM_CMSE_H\n" "\n" "#if (__ARM_FEATURE_CMSE & 0x1)\n" "#include \n" "#include \n" "\n" "#define __ARM_CMSE_SECURE_MODE (__ARM_FEATURE_CMSE & 0x2)\n" "#define CMSE_MPU_READWRITE 1 /* checks if readwrite_ok field is set */\n" "#define CMSE_AU_NONSECURE 2 /* checks if permissions have secure field unset */\n" "#define CMSE_MPU_UNPRIV 4 /* sets T flag on TT insrtuction */\n" "#define CMSE_MPU_READ 8 /* checks if read_ok field is set */\n" "#define CMSE_MPU_NONSECURE 16 /* 
sets A flag, checks if secure field unset */\n" "#define CMSE_NONSECURE (CMSE_AU_NONSECURE | CMSE_MPU_NONSECURE)\n" "\n" "#define cmse_check_pointed_object(p, f) \\\n" " cmse_check_address_range((p), sizeof(*(p)), (f))\n" "\n" "#if defined(__cplusplus)\n" "extern \"C\" {\n" "#endif\n" "\n" "typedef union {\n" " struct cmse_address_info {\n" "#ifdef __ARM_BIG_ENDIAN\n" " /* __ARM_BIG_ENDIAN */\n" "#if (__ARM_CMSE_SECURE_MODE)\n" " unsigned idau_region : 8;\n" " unsigned idau_region_valid : 1;\n" " unsigned secure : 1;\n" " unsigned nonsecure_readwrite_ok : 1;\n" " unsigned nonsecure_read_ok : 1;\n" "#else\n" " unsigned : 12;\n" "#endif\n" " unsigned readwrite_ok : 1;\n" " unsigned read_ok : 1;\n" "#if (__ARM_CMSE_SECURE_MODE)\n" " unsigned sau_region_valid : 1;\n" "#else\n" " unsigned : 1;\n" "#endif\n" " unsigned mpu_region_valid : 1;\n" "#if (__ARM_CMSE_SECURE_MODE)\n" " unsigned sau_region : 8;\n" "#else\n" " unsigned : 8;\n" "#endif\n" " unsigned mpu_region : 8;\n" "\n" "#else /* __ARM_LITTLE_ENDIAN */\n" " unsigned mpu_region : 8;\n" "#if (__ARM_CMSE_SECURE_MODE)\n" " unsigned sau_region : 8;\n" "#else\n" " unsigned : 8;\n" "#endif\n" " unsigned mpu_region_valid : 1;\n" "#if (__ARM_CMSE_SECURE_MODE)\n" " unsigned sau_region_valid : 1;\n" "#else\n" " unsigned : 1;\n" "#endif\n" " unsigned read_ok : 1;\n" " unsigned readwrite_ok : 1;\n" "#if (__ARM_CMSE_SECURE_MODE)\n" " unsigned nonsecure_read_ok : 1;\n" " unsigned nonsecure_readwrite_ok : 1;\n" " unsigned secure : 1;\n" " unsigned idau_region_valid : 1;\n" " unsigned idau_region : 8;\n" "#else\n" " unsigned : 12;\n" "#endif\n" "#endif /*__ARM_LITTLE_ENDIAN */\n" " } flags;\n" " unsigned value;\n" "} cmse_address_info_t;\n" "\n" "static cmse_address_info_t __attribute__((__always_inline__, __nodebug__))\n" "cmse_TT(void *__p) {\n" " cmse_address_info_t __u;\n" " __u.value = __builtin_arm_cmse_TT(__p);\n" " return __u;\n" "}\n" "static cmse_address_info_t __attribute__((__always_inline__, __nodebug__))\n" 
"cmse_TTT(void *__p) {\n" " cmse_address_info_t __u;\n" " __u.value = __builtin_arm_cmse_TTT(__p);\n" " return __u;\n" "}\n" "\n" "#if __ARM_CMSE_SECURE_MODE\n" "static cmse_address_info_t __attribute__((__always_inline__, __nodebug__))\n" "cmse_TTA(void *__p) {\n" " cmse_address_info_t __u;\n" " __u.value = __builtin_arm_cmse_TTA(__p);\n" " return __u;\n" "}\n" "static cmse_address_info_t __attribute__((__always_inline__, __nodebug__))\n" "cmse_TTAT(void *__p) {\n" " cmse_address_info_t __u;\n" " __u.value = __builtin_arm_cmse_TTAT(__p);\n" " return __u;\n" "}\n" "#endif\n" "\n" "#define cmse_TT_fptr(p) cmse_TT(__builtin_bit_cast(void *, (p)))\n" "#define cmse_TTT_fptr(p) cmse_TTT(__builtin_bit_cast(void *, (p)))\n" "\n" "#if __ARM_CMSE_SECURE_MODE\n" "#define cmse_TTA_fptr(p) cmse_TTA(__builtin_bit_cast(void *, (p)))\n" "#define cmse_TTAT_fptr(p) cmse_TTAT(__builtin_bit_cast(void *, (p)))\n" "#endif\n" "\n" "static void *__attribute__((__always_inline__))\n" "cmse_check_address_range(void *__pb, size_t __s, int __flags) {\n" " uintptr_t __begin = (uintptr_t)__pb;\n" " uintptr_t __end = __begin + __s - 1;\n" "\n" " if (__end < __begin)\n" " return NULL; /* wrap around check */\n" "\n" " /* Check whether the range crosses a 32-bytes aligned address */\n" " const int __single_check = (__begin ^ __end) < 0x20u;\n" "\n" " /* execute the right variant of the TT instructions */\n" " void *__pe = (void *)__end;\n" " cmse_address_info_t __permb, __perme;\n" " switch (__flags & (CMSE_MPU_UNPRIV | CMSE_MPU_NONSECURE)) {\n" " case 0:\n" " __permb = cmse_TT(__pb);\n" " __perme = __single_check ? __permb : cmse_TT(__pe);\n" " break;\n" " case CMSE_MPU_UNPRIV:\n" " __permb = cmse_TTT(__pb);\n" " __perme = __single_check ? __permb : cmse_TTT(__pe);\n" " break;\n" "#if __ARM_CMSE_SECURE_MODE\n" " case CMSE_MPU_NONSECURE:\n" " __permb = cmse_TTA(__pb);\n" " __perme = __single_check ? 
__permb : cmse_TTA(__pe);\n" " break;\n" " case CMSE_MPU_UNPRIV | CMSE_MPU_NONSECURE:\n" " __permb = cmse_TTAT(__pb);\n" " __perme = __single_check ? __permb : cmse_TTAT(__pe);\n" " break;\n" "#endif\n" " /* if CMSE_NONSECURE is specified w/o __ARM_CMSE_SECURE_MODE */\n" " default:\n" " return NULL;\n" " }\n" "\n" " /* check that the range does not cross MPU, SAU, or IDAU region boundaries */\n" " if (__permb.value != __perme.value)\n" " return NULL;\n" "#if !(__ARM_CMSE_SECURE_MODE)\n" " /* CMSE_AU_NONSECURE is only supported when __ARM_FEATURE_CMSE & 0x2 */\n" " if (__flags & CMSE_AU_NONSECURE)\n" " return NULL;\n" "#endif\n" "\n" " /* check the permission on the range */\n" " switch (__flags & ~(CMSE_MPU_UNPRIV | CMSE_MPU_NONSECURE)) {\n" "#if (__ARM_CMSE_SECURE_MODE)\n" " case CMSE_MPU_READ | CMSE_MPU_READWRITE | CMSE_AU_NONSECURE:\n" " case CMSE_MPU_READWRITE | CMSE_AU_NONSECURE:\n" " return __permb.flags.nonsecure_readwrite_ok ? __pb : NULL;\n" "\n" " case CMSE_MPU_READ | CMSE_AU_NONSECURE:\n" " return __permb.flags.nonsecure_read_ok ? __pb : NULL;\n" "\n" " case CMSE_AU_NONSECURE:\n" " return __permb.flags.secure ? NULL : __pb;\n" "#endif\n" " case CMSE_MPU_READ | CMSE_MPU_READWRITE:\n" " case CMSE_MPU_READWRITE:\n" " return __permb.flags.readwrite_ok ? __pb : NULL;\n" "\n" " case CMSE_MPU_READ:\n" " return __permb.flags.read_ok ? 
__pb : NULL;\n" "\n" " default:\n" " return NULL;\n" " }\n" "}\n" "\n" "#if __ARM_CMSE_SECURE_MODE\n" "static int __attribute__((__always_inline__, __nodebug__))\n" "cmse_nonsecure_caller(void) {\n" " return !((uintptr_t)__builtin_return_address(0) & 1);\n" "}\n" "\n" "#define cmse_nsfptr_create(p) \\\n" " __builtin_bit_cast(__typeof__(p), \\\n" " (__builtin_bit_cast(uintptr_t, p) & ~(uintptr_t)1))\n" "\n" "#define cmse_is_nsfptr(p) ((__builtin_bit_cast(uintptr_t, p) & 1) == 0)\n" "\n" "#endif /* __ARM_CMSE_SECURE_MODE */\n" "\n" "void __attribute__((__noreturn__)) cmse_abort(void);\n" "#if defined(__cplusplus)\n" "}\n" "#endif\n" "\n" "#endif /* (__ARM_FEATURE_CMSE & 0x1) */\n" "\n" "#endif /* __ARM_CMSE_H */\n" "" } , { "/builtins/arm_fp16.h" , "/*===---- arm_fp16.h - ARM FP16 intrinsics ---------------------------------===\n" " *\n" " * Permission is hereby granted, free of charge, to any person obtaining a copy\n" " * of this software and associated documentation files (the \"Software\"), to deal\n" " * in the Software without restriction, including without limitation the rights\n" " * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n" " * copies of the Software, and to permit persons to whom the Software is\n" " * furnished to do so, subject to the following conditions:\n" " *\n" " * The above copyright notice and this permission notice shall be included in\n" " * all copies or substantial portions of the Software.\n" " *\n" " * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n" " * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n" " * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n" " * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n" " * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n" " * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n" " * THE SOFTWARE.\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __ARM_FP16_H\n" "#define __ARM_FP16_H\n" "\n" "#include \n" "\n" "typedef __fp16 float16_t;\n" "#define __ai static __inline__ __attribute__((__always_inline__, __nodebug__))\n" "\n" "#if defined(__aarch64__)\n" "#define vabdh_f16(__p0, __p1) __extension__ ({ \\\n" " float16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " float16_t __s1 = __p1; \\\n" " __ret = (float16_t) __builtin_neon_vabdh_f16(__s0, __s1); \\\n" " __ret; \\\n" "})\n" "#define vabsh_f16(__p0) __extension__ ({ \\\n" " float16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (float16_t) __builtin_neon_vabsh_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vaddh_f16(__p0, __p1) __extension__ ({ \\\n" " float16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " float16_t __s1 = __p1; \\\n" " __ret = (float16_t) __builtin_neon_vaddh_f16(__s0, __s1); \\\n" " __ret; \\\n" "})\n" "#define vcageh_f16(__p0, __p1) __extension__ ({ \\\n" " uint16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " float16_t __s1 = __p1; \\\n" " __ret = (uint16_t) __builtin_neon_vcageh_f16(__s0, __s1); \\\n" " __ret; \\\n" "})\n" "#define vcagth_f16(__p0, __p1) __extension__ ({ \\\n" " uint16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " float16_t __s1 = __p1; \\\n" " __ret = (uint16_t) __builtin_neon_vcagth_f16(__s0, __s1); \\\n" " __ret; \\\n" "})\n" "#define vcaleh_f16(__p0, __p1) __extension__ ({ \\\n" " uint16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " float16_t __s1 = __p1; \\\n" " __ret = (uint16_t) __builtin_neon_vcaleh_f16(__s0, __s1); \\\n" " __ret; \\\n" "})\n" "#define vcalth_f16(__p0, __p1) __extension__ ({ 
\\\n" " uint16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " float16_t __s1 = __p1; \\\n" " __ret = (uint16_t) __builtin_neon_vcalth_f16(__s0, __s1); \\\n" " __ret; \\\n" "})\n" "#define vceqh_f16(__p0, __p1) __extension__ ({ \\\n" " uint16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " float16_t __s1 = __p1; \\\n" " __ret = (uint16_t) __builtin_neon_vceqh_f16(__s0, __s1); \\\n" " __ret; \\\n" "})\n" "#define vceqzh_f16(__p0) __extension__ ({ \\\n" " uint16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (uint16_t) __builtin_neon_vceqzh_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vcgeh_f16(__p0, __p1) __extension__ ({ \\\n" " uint16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " float16_t __s1 = __p1; \\\n" " __ret = (uint16_t) __builtin_neon_vcgeh_f16(__s0, __s1); \\\n" " __ret; \\\n" "})\n" "#define vcgezh_f16(__p0) __extension__ ({ \\\n" " uint16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (uint16_t) __builtin_neon_vcgezh_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vcgth_f16(__p0, __p1) __extension__ ({ \\\n" " uint16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " float16_t __s1 = __p1; \\\n" " __ret = (uint16_t) __builtin_neon_vcgth_f16(__s0, __s1); \\\n" " __ret; \\\n" "})\n" "#define vcgtzh_f16(__p0) __extension__ ({ \\\n" " uint16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (uint16_t) __builtin_neon_vcgtzh_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vcleh_f16(__p0, __p1) __extension__ ({ \\\n" " uint16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " float16_t __s1 = __p1; \\\n" " __ret = (uint16_t) __builtin_neon_vcleh_f16(__s0, __s1); \\\n" " __ret; \\\n" "})\n" "#define vclezh_f16(__p0) __extension__ ({ \\\n" " uint16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (uint16_t) __builtin_neon_vclezh_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vclth_f16(__p0, __p1) __extension__ ({ \\\n" " uint16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " float16_t __s1 = __p1; \\\n" " __ret = (uint16_t) 
__builtin_neon_vclth_f16(__s0, __s1); \\\n" " __ret; \\\n" "})\n" "#define vcltzh_f16(__p0) __extension__ ({ \\\n" " uint16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (uint16_t) __builtin_neon_vcltzh_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vcvth_n_s16_f16(__p0, __p1) __extension__ ({ \\\n" " int16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (int16_t) __builtin_neon_vcvth_n_s16_f16(__s0, __p1); \\\n" " __ret; \\\n" "})\n" "#define vcvth_n_s32_f16(__p0, __p1) __extension__ ({ \\\n" " int32_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (int32_t) __builtin_neon_vcvth_n_s32_f16(__s0, __p1); \\\n" " __ret; \\\n" "})\n" "#define vcvth_n_s64_f16(__p0, __p1) __extension__ ({ \\\n" " int64_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (int64_t) __builtin_neon_vcvth_n_s64_f16(__s0, __p1); \\\n" " __ret; \\\n" "})\n" "#define vcvth_n_u16_f16(__p0, __p1) __extension__ ({ \\\n" " uint16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (uint16_t) __builtin_neon_vcvth_n_u16_f16(__s0, __p1); \\\n" " __ret; \\\n" "})\n" "#define vcvth_n_u32_f16(__p0, __p1) __extension__ ({ \\\n" " uint32_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (uint32_t) __builtin_neon_vcvth_n_u32_f16(__s0, __p1); \\\n" " __ret; \\\n" "})\n" "#define vcvth_n_u64_f16(__p0, __p1) __extension__ ({ \\\n" " uint64_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (uint64_t) __builtin_neon_vcvth_n_u64_f16(__s0, __p1); \\\n" " __ret; \\\n" "})\n" "#define vcvth_s16_f16(__p0) __extension__ ({ \\\n" " int16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (int16_t) __builtin_neon_vcvth_s16_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vcvth_s32_f16(__p0) __extension__ ({ \\\n" " int32_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (int32_t) __builtin_neon_vcvth_s32_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vcvth_s64_f16(__p0) __extension__ ({ \\\n" " int64_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (int64_t) 
__builtin_neon_vcvth_s64_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vcvth_u16_f16(__p0) __extension__ ({ \\\n" " uint16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (uint16_t) __builtin_neon_vcvth_u16_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vcvth_u32_f16(__p0) __extension__ ({ \\\n" " uint32_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (uint32_t) __builtin_neon_vcvth_u32_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vcvth_u64_f16(__p0) __extension__ ({ \\\n" " uint64_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (uint64_t) __builtin_neon_vcvth_u64_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vcvtah_s16_f16(__p0) __extension__ ({ \\\n" " int16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (int16_t) __builtin_neon_vcvtah_s16_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vcvtah_s32_f16(__p0) __extension__ ({ \\\n" " int32_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (int32_t) __builtin_neon_vcvtah_s32_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vcvtah_s64_f16(__p0) __extension__ ({ \\\n" " int64_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (int64_t) __builtin_neon_vcvtah_s64_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vcvtah_u16_f16(__p0) __extension__ ({ \\\n" " uint16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (uint16_t) __builtin_neon_vcvtah_u16_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vcvtah_u32_f16(__p0) __extension__ ({ \\\n" " uint32_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (uint32_t) __builtin_neon_vcvtah_u32_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vcvtah_u64_f16(__p0) __extension__ ({ \\\n" " uint64_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (uint64_t) __builtin_neon_vcvtah_u64_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vcvth_f16_u16(__p0) __extension__ ({ \\\n" " float16_t __ret; \\\n" " uint16_t __s0 = __p0; \\\n" " __ret = (float16_t) __builtin_neon_vcvth_f16_u16(__s0); \\\n" " __ret; \\\n" "})\n" "#define 
vcvth_f16_s16(__p0) __extension__ ({ \\\n" " float16_t __ret; \\\n" " int16_t __s0 = __p0; \\\n" " __ret = (float16_t) __builtin_neon_vcvth_f16_s16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vcvth_f16_u32(__p0) __extension__ ({ \\\n" " float16_t __ret; \\\n" " uint32_t __s0 = __p0; \\\n" " __ret = (float16_t) __builtin_neon_vcvth_f16_u32(__s0); \\\n" " __ret; \\\n" "})\n" "#define vcvth_f16_s32(__p0) __extension__ ({ \\\n" " float16_t __ret; \\\n" " int32_t __s0 = __p0; \\\n" " __ret = (float16_t) __builtin_neon_vcvth_f16_s32(__s0); \\\n" " __ret; \\\n" "})\n" "#define vcvth_f16_u64(__p0) __extension__ ({ \\\n" " float16_t __ret; \\\n" " uint64_t __s0 = __p0; \\\n" " __ret = (float16_t) __builtin_neon_vcvth_f16_u64(__s0); \\\n" " __ret; \\\n" "})\n" "#define vcvth_f16_s64(__p0) __extension__ ({ \\\n" " float16_t __ret; \\\n" " int64_t __s0 = __p0; \\\n" " __ret = (float16_t) __builtin_neon_vcvth_f16_s64(__s0); \\\n" " __ret; \\\n" "})\n" "#define vcvth_n_f16_u32(__p0, __p1) __extension__ ({ \\\n" " float16_t __ret; \\\n" " uint32_t __s0 = __p0; \\\n" " __ret = (float16_t) __builtin_neon_vcvth_n_f16_u32(__s0, __p1); \\\n" " __ret; \\\n" "})\n" "#define vcvth_n_f16_s32(__p0, __p1) __extension__ ({ \\\n" " float16_t __ret; \\\n" " int32_t __s0 = __p0; \\\n" " __ret = (float16_t) __builtin_neon_vcvth_n_f16_s32(__s0, __p1); \\\n" " __ret; \\\n" "})\n" "#define vcvth_n_f16_u64(__p0, __p1) __extension__ ({ \\\n" " float16_t __ret; \\\n" " uint64_t __s0 = __p0; \\\n" " __ret = (float16_t) __builtin_neon_vcvth_n_f16_u64(__s0, __p1); \\\n" " __ret; \\\n" "})\n" "#define vcvth_n_f16_s64(__p0, __p1) __extension__ ({ \\\n" " float16_t __ret; \\\n" " int64_t __s0 = __p0; \\\n" " __ret = (float16_t) __builtin_neon_vcvth_n_f16_s64(__s0, __p1); \\\n" " __ret; \\\n" "})\n" "#define vcvth_n_f16_u16(__p0, __p1) __extension__ ({ \\\n" " float16_t __ret; \\\n" " uint16_t __s0 = __p0; \\\n" " __ret = (float16_t) __builtin_neon_vcvth_n_f16_u16(__s0, __p1); \\\n" " __ret; \\\n" "})\n" 
"#define vcvth_n_f16_s16(__p0, __p1) __extension__ ({ \\\n" " float16_t __ret; \\\n" " int16_t __s0 = __p0; \\\n" " __ret = (float16_t) __builtin_neon_vcvth_n_f16_s16(__s0, __p1); \\\n" " __ret; \\\n" "})\n" "#define vcvtmh_s16_f16(__p0) __extension__ ({ \\\n" " int16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (int16_t) __builtin_neon_vcvtmh_s16_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vcvtmh_s32_f16(__p0) __extension__ ({ \\\n" " int32_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (int32_t) __builtin_neon_vcvtmh_s32_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vcvtmh_s64_f16(__p0) __extension__ ({ \\\n" " int64_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (int64_t) __builtin_neon_vcvtmh_s64_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vcvtmh_u16_f16(__p0) __extension__ ({ \\\n" " uint16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (uint16_t) __builtin_neon_vcvtmh_u16_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vcvtmh_u32_f16(__p0) __extension__ ({ \\\n" " uint32_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (uint32_t) __builtin_neon_vcvtmh_u32_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vcvtmh_u64_f16(__p0) __extension__ ({ \\\n" " uint64_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (uint64_t) __builtin_neon_vcvtmh_u64_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vcvtnh_s16_f16(__p0) __extension__ ({ \\\n" " int16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (int16_t) __builtin_neon_vcvtnh_s16_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vcvtnh_s32_f16(__p0) __extension__ ({ \\\n" " int32_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (int32_t) __builtin_neon_vcvtnh_s32_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vcvtnh_s64_f16(__p0) __extension__ ({ \\\n" " int64_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (int64_t) __builtin_neon_vcvtnh_s64_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vcvtnh_u16_f16(__p0) __extension__ ({ \\\n" " 
uint16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (uint16_t) __builtin_neon_vcvtnh_u16_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vcvtnh_u32_f16(__p0) __extension__ ({ \\\n" " uint32_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (uint32_t) __builtin_neon_vcvtnh_u32_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vcvtnh_u64_f16(__p0) __extension__ ({ \\\n" " uint64_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (uint64_t) __builtin_neon_vcvtnh_u64_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vcvtph_s16_f16(__p0) __extension__ ({ \\\n" " int16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (int16_t) __builtin_neon_vcvtph_s16_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vcvtph_s32_f16(__p0) __extension__ ({ \\\n" " int32_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (int32_t) __builtin_neon_vcvtph_s32_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vcvtph_s64_f16(__p0) __extension__ ({ \\\n" " int64_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (int64_t) __builtin_neon_vcvtph_s64_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vcvtph_u16_f16(__p0) __extension__ ({ \\\n" " uint16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (uint16_t) __builtin_neon_vcvtph_u16_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vcvtph_u32_f16(__p0) __extension__ ({ \\\n" " uint32_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (uint32_t) __builtin_neon_vcvtph_u32_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vcvtph_u64_f16(__p0) __extension__ ({ \\\n" " uint64_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (uint64_t) __builtin_neon_vcvtph_u64_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vdivh_f16(__p0, __p1) __extension__ ({ \\\n" " float16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " float16_t __s1 = __p1; \\\n" " __ret = (float16_t) __builtin_neon_vdivh_f16(__s0, __s1); \\\n" " __ret; \\\n" "})\n" "#define vfmah_f16(__p0, __p1, __p2) __extension__ ({ \\\n" " float16_t __ret; \\\n" 
" float16_t __s0 = __p0; \\\n" " float16_t __s1 = __p1; \\\n" " float16_t __s2 = __p2; \\\n" " __ret = (float16_t) __builtin_neon_vfmah_f16(__s0, __s1, __s2); \\\n" " __ret; \\\n" "})\n" "#define vfmsh_f16(__p0, __p1, __p2) __extension__ ({ \\\n" " float16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " float16_t __s1 = __p1; \\\n" " float16_t __s2 = __p2; \\\n" " __ret = (float16_t) __builtin_neon_vfmsh_f16(__s0, __s1, __s2); \\\n" " __ret; \\\n" "})\n" "#define vmaxh_f16(__p0, __p1) __extension__ ({ \\\n" " float16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " float16_t __s1 = __p1; \\\n" " __ret = (float16_t) __builtin_neon_vmaxh_f16(__s0, __s1); \\\n" " __ret; \\\n" "})\n" "#define vmaxnmh_f16(__p0, __p1) __extension__ ({ \\\n" " float16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " float16_t __s1 = __p1; \\\n" " __ret = (float16_t) __builtin_neon_vmaxnmh_f16(__s0, __s1); \\\n" " __ret; \\\n" "})\n" "#define vminh_f16(__p0, __p1) __extension__ ({ \\\n" " float16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " float16_t __s1 = __p1; \\\n" " __ret = (float16_t) __builtin_neon_vminh_f16(__s0, __s1); \\\n" " __ret; \\\n" "})\n" "#define vminnmh_f16(__p0, __p1) __extension__ ({ \\\n" " float16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " float16_t __s1 = __p1; \\\n" " __ret = (float16_t) __builtin_neon_vminnmh_f16(__s0, __s1); \\\n" " __ret; \\\n" "})\n" "#define vmulh_f16(__p0, __p1) __extension__ ({ \\\n" " float16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " float16_t __s1 = __p1; \\\n" " __ret = (float16_t) __builtin_neon_vmulh_f16(__s0, __s1); \\\n" " __ret; \\\n" "})\n" "#define vmulxh_f16(__p0, __p1) __extension__ ({ \\\n" " float16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " float16_t __s1 = __p1; \\\n" " __ret = (float16_t) __builtin_neon_vmulxh_f16(__s0, __s1); \\\n" " __ret; \\\n" "})\n" "#define vnegh_f16(__p0) __extension__ ({ \\\n" " float16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (float16_t) 
__builtin_neon_vnegh_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vrecpeh_f16(__p0) __extension__ ({ \\\n" " float16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (float16_t) __builtin_neon_vrecpeh_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vrecpsh_f16(__p0, __p1) __extension__ ({ \\\n" " float16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " float16_t __s1 = __p1; \\\n" " __ret = (float16_t) __builtin_neon_vrecpsh_f16(__s0, __s1); \\\n" " __ret; \\\n" "})\n" "#define vrecpxh_f16(__p0) __extension__ ({ \\\n" " float16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (float16_t) __builtin_neon_vrecpxh_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vrndh_f16(__p0) __extension__ ({ \\\n" " float16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (float16_t) __builtin_neon_vrndh_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vrndah_f16(__p0) __extension__ ({ \\\n" " float16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (float16_t) __builtin_neon_vrndah_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vrndih_f16(__p0) __extension__ ({ \\\n" " float16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (float16_t) __builtin_neon_vrndih_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vrndmh_f16(__p0) __extension__ ({ \\\n" " float16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (float16_t) __builtin_neon_vrndmh_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vrndnh_f16(__p0) __extension__ ({ \\\n" " float16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (float16_t) __builtin_neon_vrndnh_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vrndph_f16(__p0) __extension__ ({ \\\n" " float16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (float16_t) __builtin_neon_vrndph_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vrndxh_f16(__p0) __extension__ ({ \\\n" " float16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (float16_t) __builtin_neon_vrndxh_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define 
vrsqrteh_f16(__p0) __extension__ ({ \\\n" " float16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (float16_t) __builtin_neon_vrsqrteh_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vrsqrtsh_f16(__p0, __p1) __extension__ ({ \\\n" " float16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " float16_t __s1 = __p1; \\\n" " __ret = (float16_t) __builtin_neon_vrsqrtsh_f16(__s0, __s1); \\\n" " __ret; \\\n" "})\n" "#define vsqrth_f16(__p0) __extension__ ({ \\\n" " float16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " __ret = (float16_t) __builtin_neon_vsqrth_f16(__s0); \\\n" " __ret; \\\n" "})\n" "#define vsubh_f16(__p0, __p1) __extension__ ({ \\\n" " float16_t __ret; \\\n" " float16_t __s0 = __p0; \\\n" " float16_t __s1 = __p1; \\\n" " __ret = (float16_t) __builtin_neon_vsubh_f16(__s0, __s1); \\\n" " __ret; \\\n" "})\n" "#endif\n" "\n" "#undef __ai\n" "\n" "#endif /* __ARM_FP16_H */\n" "" } , { "/builtins/arm_mve.h" , "/*===---- arm_mve.h - ARM MVE intrinsics -----------------------------------===\n" " *\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __ARM_MVE_H\n" "#define __ARM_MVE_H\n" "\n" "#if !__ARM_FEATURE_MVE\n" "#error \"MVE support not enabled\"\n" "#endif\n" "\n" "#include \n" "\n" "#ifdef __cplusplus\n" "extern \"C\" {\n" "#endif\n" "\n" "typedef uint16_t mve_pred16_t;\n" "typedef __attribute__((__neon_vector_type__(8), __clang_arm_mve_strict_polymorphism)) int16_t int16x8_t;\n" "typedef struct { int16x8_t val[2]; } int16x8x2_t;\n" "typedef struct { int16x8_t val[4]; } int16x8x4_t;\n" "typedef __attribute__((__neon_vector_type__(4), __clang_arm_mve_strict_polymorphism)) int32_t int32x4_t;\n" "typedef struct { int32x4_t val[2]; } int32x4x2_t;\n" "typedef struct { 
int32x4_t val[4]; } int32x4x4_t;\n" "typedef __attribute__((__neon_vector_type__(2), __clang_arm_mve_strict_polymorphism)) int64_t int64x2_t;\n" "typedef struct { int64x2_t val[2]; } int64x2x2_t;\n" "typedef struct { int64x2_t val[4]; } int64x2x4_t;\n" "typedef __attribute__((__neon_vector_type__(16), __clang_arm_mve_strict_polymorphism)) int8_t int8x16_t;\n" "typedef struct { int8x16_t val[2]; } int8x16x2_t;\n" "typedef struct { int8x16_t val[4]; } int8x16x4_t;\n" "typedef __attribute__((__neon_vector_type__(8), __clang_arm_mve_strict_polymorphism)) uint16_t uint16x8_t;\n" "typedef struct { uint16x8_t val[2]; } uint16x8x2_t;\n" "typedef struct { uint16x8_t val[4]; } uint16x8x4_t;\n" "typedef __attribute__((__neon_vector_type__(4), __clang_arm_mve_strict_polymorphism)) uint32_t uint32x4_t;\n" "typedef struct { uint32x4_t val[2]; } uint32x4x2_t;\n" "typedef struct { uint32x4_t val[4]; } uint32x4x4_t;\n" "typedef __attribute__((__neon_vector_type__(2), __clang_arm_mve_strict_polymorphism)) uint64_t uint64x2_t;\n" "typedef struct { uint64x2_t val[2]; } uint64x2x2_t;\n" "typedef struct { uint64x2_t val[4]; } uint64x2x4_t;\n" "typedef __attribute__((__neon_vector_type__(16), __clang_arm_mve_strict_polymorphism)) uint8_t uint8x16_t;\n" "typedef struct { uint8x16_t val[2]; } uint8x16x2_t;\n" "typedef struct { uint8x16_t val[4]; } uint8x16x4_t;\n" "\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_asrl)))\n" "int64_t __arm_asrl(int64_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_lsll)))\n" "uint64_t __arm_lsll(uint64_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqrshr)))\n" "int32_t __arm_sqrshr(int32_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqrshrl)))\n" "int64_t __arm_sqrshrl(int64_t, int32_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqrshrl_sat48)))\n" "int64_t __arm_sqrshrl_sat48(int64_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqshl)))\n" "int32_t __arm_sqshl(int32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqshll)))\n" "int64_t __arm_sqshll(int64_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_srshr)))\n" "int32_t __arm_srshr(int32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_srshrl)))\n" "int64_t __arm_srshrl(int64_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqrshl)))\n" "uint32_t __arm_uqrshl(uint32_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqrshll)))\n" "uint64_t __arm_uqrshll(uint64_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqrshll_sat48)))\n" "uint64_t __arm_uqrshll_sat48(uint64_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqshl)))\n" "uint32_t __arm_uqshl(uint32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqshll)))\n" "uint64_t __arm_uqshll(uint64_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_urshr)))\n" "uint32_t __arm_urshr(uint32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_urshrl)))\n" "uint64_t __arm_urshrl(uint64_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s16)))\n" "uint32_t __arm_vabavq_p_s16(uint32_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s16)))\n" "uint32_t __arm_vabavq_p(uint32_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s32)))\n" "uint32_t __arm_vabavq_p_s32(uint32_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s32)))\n" "uint32_t __arm_vabavq_p(uint32_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s8)))\n" "uint32_t __arm_vabavq_p_s8(uint32_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s8)))\n" "uint32_t __arm_vabavq_p(uint32_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u16)))\n" "uint32_t __arm_vabavq_p_u16(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u16)))\n" "uint32_t __arm_vabavq_p(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u32)))\n" "uint32_t __arm_vabavq_p_u32(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u32)))\n" "uint32_t __arm_vabavq_p(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u8)))\n" "uint32_t __arm_vabavq_p_u8(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u8)))\n" "uint32_t __arm_vabavq_p(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s16)))\n" "uint32_t __arm_vabavq_s16(uint32_t, int16x8_t, int16x8_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s16)))\n" "uint32_t __arm_vabavq(uint32_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s32)))\n" "uint32_t __arm_vabavq_s32(uint32_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s32)))\n" "uint32_t __arm_vabavq(uint32_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s8)))\n" "uint32_t __arm_vabavq_s8(uint32_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s8)))\n" "uint32_t __arm_vabavq(uint32_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u16)))\n" "uint32_t __arm_vabavq_u16(uint32_t, uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u16)))\n" "uint32_t __arm_vabavq(uint32_t, uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u32)))\n" "uint32_t __arm_vabavq_u32(uint32_t, uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u32)))\n" "uint32_t __arm_vabavq(uint32_t, uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u8)))\n" "uint32_t __arm_vabavq_u8(uint32_t, uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u8)))\n" "uint32_t __arm_vabavq(uint32_t, uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s16)))\n" "int16x8_t __arm_vabdq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s16)))\n" "int16x8_t __arm_vabdq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s32)))\n" "int32x4_t __arm_vabdq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s32)))\n" "int32x4_t __arm_vabdq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s8)))\n" "int8x16_t __arm_vabdq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s8)))\n" "int8x16_t __arm_vabdq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u16)))\n" "uint16x8_t __arm_vabdq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u16)))\n" "uint16x8_t __arm_vabdq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u32)))\n" "uint32x4_t __arm_vabdq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u32)))\n" "uint32x4_t __arm_vabdq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u8)))\n" "uint8x16_t __arm_vabdq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u8)))\n" "uint8x16_t __arm_vabdq_m(uint8x16_t, uint8x16_t, 
uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s16)))\n" "int16x8_t __arm_vabdq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s16)))\n" "int16x8_t __arm_vabdq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s32)))\n" "int32x4_t __arm_vabdq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s32)))\n" "int32x4_t __arm_vabdq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s8)))\n" "int8x16_t __arm_vabdq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s8)))\n" "int8x16_t __arm_vabdq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u16)))\n" "uint16x8_t __arm_vabdq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u16)))\n" "uint16x8_t __arm_vabdq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u32)))\n" "uint32x4_t __arm_vabdq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u32)))\n" "uint32x4_t __arm_vabdq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u8)))\n" "uint8x16_t __arm_vabdq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u8)))\n" "uint8x16_t __arm_vabdq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s16)))\n" "int16x8_t 
__arm_vabdq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s16)))\n" "int16x8_t __arm_vabdq_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s32)))\n" "int32x4_t __arm_vabdq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s32)))\n" "int32x4_t __arm_vabdq_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s8)))\n" "int8x16_t __arm_vabdq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s8)))\n" "int8x16_t __arm_vabdq_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u16)))\n" "uint16x8_t __arm_vabdq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u16)))\n" "uint16x8_t __arm_vabdq_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u32)))\n" "uint32x4_t __arm_vabdq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u32)))\n" "uint32x4_t __arm_vabdq_x(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u8)))\n" "uint8x16_t __arm_vabdq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u8)))\n" "uint8x16_t __arm_vabdq_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s16)))\n" "int16x8_t __arm_vabsq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s16)))\n" "int16x8_t __arm_vabsq_m(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s32)))\n" "int32x4_t __arm_vabsq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s32)))\n" "int32x4_t __arm_vabsq_m(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s8)))\n" "int8x16_t __arm_vabsq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s8)))\n" "int8x16_t __arm_vabsq_m(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s16)))\n" "int16x8_t __arm_vabsq_s16(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s16)))\n" "int16x8_t __arm_vabsq(int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s32)))\n" "int32x4_t __arm_vabsq_s32(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s32)))\n" "int32x4_t __arm_vabsq(int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s8)))\n" "int8x16_t __arm_vabsq_s8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s8)))\n" "int8x16_t __arm_vabsq(int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s16)))\n" "int16x8_t 
__arm_vabsq_x_s16(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s16)))\n" "int16x8_t __arm_vabsq_x(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s32)))\n" "int32x4_t __arm_vabsq_x_s32(int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s32)))\n" "int32x4_t __arm_vabsq_x(int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s8)))\n" "int8x16_t __arm_vabsq_x_s8(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s8)))\n" "int8x16_t __arm_vabsq_x(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadciq_m_s32)))\n" "int32x4_t __arm_vadciq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadciq_m_s32)))\n" "int32x4_t __arm_vadciq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadciq_m_u32)))\n" "uint32x4_t __arm_vadciq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadciq_m_u32)))\n" "uint32x4_t __arm_vadciq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadciq_s32)))\n" "int32x4_t __arm_vadciq_s32(int32x4_t, int32x4_t, unsigned *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadciq_s32)))\n" "int32x4_t __arm_vadciq(int32x4_t, int32x4_t, unsigned *);\n" "static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadciq_u32)))\n" "uint32x4_t __arm_vadciq_u32(uint32x4_t, uint32x4_t, unsigned *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadciq_u32)))\n" "uint32x4_t __arm_vadciq(uint32x4_t, uint32x4_t, unsigned *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadcq_m_s32)))\n" "int32x4_t __arm_vadcq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadcq_m_s32)))\n" "int32x4_t __arm_vadcq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadcq_m_u32)))\n" "uint32x4_t __arm_vadcq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadcq_m_u32)))\n" "uint32x4_t __arm_vadcq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadcq_s32)))\n" "int32x4_t __arm_vadcq_s32(int32x4_t, int32x4_t, unsigned *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadcq_s32)))\n" "int32x4_t __arm_vadcq(int32x4_t, int32x4_t, unsigned *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadcq_u32)))\n" "uint32x4_t __arm_vadcq_u32(uint32x4_t, uint32x4_t, unsigned *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadcq_u32)))\n" "uint32x4_t __arm_vadcq(uint32x4_t, uint32x4_t, unsigned *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_p_s32)))\n" "int64_t __arm_vaddlvaq_p_s32(int64_t, int32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_p_s32)))\n" "int64_t __arm_vaddlvaq_p(int64_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_p_u32)))\n" "uint64_t __arm_vaddlvaq_p_u32(uint64_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_p_u32)))\n" "uint64_t __arm_vaddlvaq_p(uint64_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_s32)))\n" "int64_t __arm_vaddlvaq_s32(int64_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_s32)))\n" "int64_t __arm_vaddlvaq(int64_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_u32)))\n" "uint64_t __arm_vaddlvaq_u32(uint64_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_u32)))\n" "uint64_t __arm_vaddlvaq(uint64_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_p_s32)))\n" "int64_t __arm_vaddlvq_p_s32(int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_p_s32)))\n" "int64_t __arm_vaddlvq_p(int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_p_u32)))\n" "uint64_t __arm_vaddlvq_p_u32(uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_p_u32)))\n" "uint64_t __arm_vaddlvq_p(uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_s32)))\n" "int64_t __arm_vaddlvq_s32(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_s32)))\n" "int64_t __arm_vaddlvq(int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_u32)))\n" "uint64_t __arm_vaddlvq_u32(uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_u32)))\n" "uint64_t __arm_vaddlvq(uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s16)))\n" "int16x8_t __arm_vaddq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s16)))\n" "int16x8_t __arm_vaddq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s32)))\n" "int32x4_t __arm_vaddq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s32)))\n" "int32x4_t __arm_vaddq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s8)))\n" "int8x16_t __arm_vaddq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s8)))\n" "int8x16_t __arm_vaddq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u16)))\n" "uint16x8_t __arm_vaddq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u16)))\n" "uint16x8_t __arm_vaddq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u32)))\n" "uint32x4_t 
__arm_vaddq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u32)))\n" "uint32x4_t __arm_vaddq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u8)))\n" "uint8x16_t __arm_vaddq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u8)))\n" "uint8x16_t __arm_vaddq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s16)))\n" "int16x8_t __arm_vaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s16)))\n" "int16x8_t __arm_vaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s32)))\n" "int32x4_t __arm_vaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s32)))\n" "int32x4_t __arm_vaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s8)))\n" "int8x16_t __arm_vaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s8)))\n" "int8x16_t __arm_vaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u16)))\n" "uint16x8_t __arm_vaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u16)))\n" "uint16x8_t __arm_vaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u32)))\n" "uint32x4_t __arm_vaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u32)))\n" "uint32x4_t __arm_vaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u8)))\n" "uint8x16_t __arm_vaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u8)))\n" "uint8x16_t __arm_vaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s16)))\n" "int16x8_t __arm_vaddq_n_s16(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s16)))\n" "int16x8_t __arm_vaddq(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s32)))\n" "int32x4_t __arm_vaddq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s32)))\n" "int32x4_t __arm_vaddq(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s8)))\n" "int8x16_t __arm_vaddq_n_s8(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s8)))\n" "int8x16_t __arm_vaddq(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u16)))\n" "uint16x8_t __arm_vaddq_n_u16(uint16x8_t, uint16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u16)))\n" "uint16x8_t __arm_vaddq(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u32)))\n" "uint32x4_t __arm_vaddq_n_u32(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u32)))\n" "uint32x4_t __arm_vaddq(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u8)))\n" "uint8x16_t __arm_vaddq_n_u8(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u8)))\n" "uint8x16_t __arm_vaddq(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s16)))\n" "int16x8_t __arm_vaddq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s16)))\n" "int16x8_t __arm_vaddq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s32)))\n" "int32x4_t __arm_vaddq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s32)))\n" "int32x4_t __arm_vaddq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s8)))\n" "int8x16_t __arm_vaddq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s8)))\n" "int8x16_t __arm_vaddq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u16)))\n" "uint16x8_t __arm_vaddq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u16)))\n" "uint16x8_t __arm_vaddq(uint16x8_t, 
uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u32)))\n" "uint32x4_t __arm_vaddq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u32)))\n" "uint32x4_t __arm_vaddq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u8)))\n" "uint8x16_t __arm_vaddq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u8)))\n" "uint8x16_t __arm_vaddq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s16)))\n" "int16x8_t __arm_vaddq_x_n_s16(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s16)))\n" "int16x8_t __arm_vaddq_x(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s32)))\n" "int32x4_t __arm_vaddq_x_n_s32(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s32)))\n" "int32x4_t __arm_vaddq_x(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s8)))\n" "int8x16_t __arm_vaddq_x_n_s8(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s8)))\n" "int8x16_t __arm_vaddq_x(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u16)))\n" "uint16x8_t __arm_vaddq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u16)))\n" "uint16x8_t __arm_vaddq_x(uint16x8_t, 
uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u32)))\n" "uint32x4_t __arm_vaddq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u32)))\n" "uint32x4_t __arm_vaddq_x(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u8)))\n" "uint8x16_t __arm_vaddq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u8)))\n" "uint8x16_t __arm_vaddq_x(uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s16)))\n" "int16x8_t __arm_vaddq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s16)))\n" "int16x8_t __arm_vaddq_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s32)))\n" "int32x4_t __arm_vaddq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s32)))\n" "int32x4_t __arm_vaddq_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s8)))\n" "int8x16_t __arm_vaddq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s8)))\n" "int8x16_t __arm_vaddq_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u16)))\n" "uint16x8_t __arm_vaddq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u16)))\n" "uint16x8_t __arm_vaddq_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u32)))\n" "uint32x4_t __arm_vaddq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u32)))\n" "uint32x4_t __arm_vaddq_x(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u8)))\n" "uint8x16_t __arm_vaddq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u8)))\n" "uint8x16_t __arm_vaddq_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s16)))\n" "int32_t __arm_vaddvaq_p_s16(int32_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s16)))\n" "int32_t __arm_vaddvaq_p(int32_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s32)))\n" "int32_t __arm_vaddvaq_p_s32(int32_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s32)))\n" "int32_t __arm_vaddvaq_p(int32_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s8)))\n" "int32_t __arm_vaddvaq_p_s8(int32_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s8)))\n" "int32_t __arm_vaddvaq_p(int32_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u16)))\n" "uint32_t __arm_vaddvaq_p_u16(uint32_t, 
uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u16)))\n" "uint32_t __arm_vaddvaq_p(uint32_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u32)))\n" "uint32_t __arm_vaddvaq_p_u32(uint32_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u32)))\n" "uint32_t __arm_vaddvaq_p(uint32_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u8)))\n" "uint32_t __arm_vaddvaq_p_u8(uint32_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u8)))\n" "uint32_t __arm_vaddvaq_p(uint32_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s16)))\n" "int32_t __arm_vaddvaq_s16(int32_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s16)))\n" "int32_t __arm_vaddvaq(int32_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s32)))\n" "int32_t __arm_vaddvaq_s32(int32_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s32)))\n" "int32_t __arm_vaddvaq(int32_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s8)))\n" "int32_t __arm_vaddvaq_s8(int32_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s8)))\n" "int32_t __arm_vaddvaq(int32_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u16)))\n" "uint32_t __arm_vaddvaq_u16(uint32_t, uint16x8_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u16)))\n" "uint32_t __arm_vaddvaq(uint32_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u32)))\n" "uint32_t __arm_vaddvaq_u32(uint32_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u32)))\n" "uint32_t __arm_vaddvaq(uint32_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u8)))\n" "uint32_t __arm_vaddvaq_u8(uint32_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u8)))\n" "uint32_t __arm_vaddvaq(uint32_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s16)))\n" "int32_t __arm_vaddvq_p_s16(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s16)))\n" "int32_t __arm_vaddvq_p(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s32)))\n" "int32_t __arm_vaddvq_p_s32(int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s32)))\n" "int32_t __arm_vaddvq_p(int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s8)))\n" "int32_t __arm_vaddvq_p_s8(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s8)))\n" "int32_t __arm_vaddvq_p(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u16)))\n" "uint32_t __arm_vaddvq_p_u16(uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u16)))\n" "uint32_t __arm_vaddvq_p(uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u32)))\n" "uint32_t __arm_vaddvq_p_u32(uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u32)))\n" "uint32_t __arm_vaddvq_p(uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u8)))\n" "uint32_t __arm_vaddvq_p_u8(uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u8)))\n" "uint32_t __arm_vaddvq_p(uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s16)))\n" "int32_t __arm_vaddvq_s16(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s16)))\n" "int32_t __arm_vaddvq(int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s32)))\n" "int32_t __arm_vaddvq_s32(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s32)))\n" "int32_t __arm_vaddvq(int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s8)))\n" "int32_t __arm_vaddvq_s8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s8)))\n" "int32_t __arm_vaddvq(int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u16)))\n" "uint32_t __arm_vaddvq_u16(uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u16)))\n" "uint32_t __arm_vaddvq(uint16x8_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u32)))\n" "uint32_t __arm_vaddvq_u32(uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u32)))\n" "uint32_t __arm_vaddvq(uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u8)))\n" "uint32_t __arm_vaddvq_u8(uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u8)))\n" "uint32_t __arm_vaddvq(uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s16)))\n" "int16x8_t __arm_vandq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s16)))\n" "int16x8_t __arm_vandq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s32)))\n" "int32x4_t __arm_vandq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s32)))\n" "int32x4_t __arm_vandq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s8)))\n" "int8x16_t __arm_vandq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s8)))\n" "int8x16_t __arm_vandq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u16)))\n" "uint16x8_t __arm_vandq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u16)))\n" "uint16x8_t __arm_vandq_m(uint16x8_t, uint16x8_t, 
uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u32)))\n" "uint32x4_t __arm_vandq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u32)))\n" "uint32x4_t __arm_vandq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u8)))\n" "uint8x16_t __arm_vandq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u8)))\n" "uint8x16_t __arm_vandq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_s16)))\n" "int16x8_t __arm_vandq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_s16)))\n" "int16x8_t __arm_vandq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_s32)))\n" "int32x4_t __arm_vandq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_s32)))\n" "int32x4_t __arm_vandq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_s8)))\n" "int8x16_t __arm_vandq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_s8)))\n" "int8x16_t __arm_vandq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_u16)))\n" "uint16x8_t __arm_vandq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_u16)))\n" "uint16x8_t __arm_vandq(uint16x8_t, 
uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_u32)))\n" "uint32x4_t __arm_vandq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_u32)))\n" "uint32x4_t __arm_vandq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_u8)))\n" "uint8x16_t __arm_vandq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_u8)))\n" "uint8x16_t __arm_vandq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s16)))\n" "int16x8_t __arm_vandq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s16)))\n" "int16x8_t __arm_vandq_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s32)))\n" "int32x4_t __arm_vandq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s32)))\n" "int32x4_t __arm_vandq_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s8)))\n" "int8x16_t __arm_vandq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s8)))\n" "int8x16_t __arm_vandq_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u16)))\n" "uint16x8_t __arm_vandq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u16)))\n" "uint16x8_t __arm_vandq_x(uint16x8_t, 
uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u32)))\n" "uint32x4_t __arm_vandq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u32)))\n" "uint32x4_t __arm_vandq_x(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u8)))\n" "uint8x16_t __arm_vandq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u8)))\n" "uint8x16_t __arm_vandq_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_s16)))\n" "int16x8_t __arm_vbicq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_s16)))\n" "int16x8_t __arm_vbicq_m_n(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_s32)))\n" "int32x4_t __arm_vbicq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_s32)))\n" "int32x4_t __arm_vbicq_m_n(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_u16)))\n" "uint16x8_t __arm_vbicq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_u16)))\n" "uint16x8_t __arm_vbicq_m_n(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_u32)))\n" "uint32x4_t __arm_vbicq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_u32)))\n" "uint32x4_t __arm_vbicq_m_n(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s16)))\n" "int16x8_t __arm_vbicq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s16)))\n" "int16x8_t __arm_vbicq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s32)))\n" "int32x4_t __arm_vbicq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s32)))\n" "int32x4_t __arm_vbicq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s8)))\n" "int8x16_t __arm_vbicq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s8)))\n" "int8x16_t __arm_vbicq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u16)))\n" "uint16x8_t __arm_vbicq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u16)))\n" "uint16x8_t __arm_vbicq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u32)))\n" "uint32x4_t __arm_vbicq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u32)))\n" "uint32x4_t __arm_vbicq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u8)))\n" "uint8x16_t __arm_vbicq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u8)))\n" "uint8x16_t __arm_vbicq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_s16)))\n" "int16x8_t __arm_vbicq_n_s16(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_s16)))\n" "int16x8_t __arm_vbicq(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_s32)))\n" "int32x4_t __arm_vbicq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_s32)))\n" "int32x4_t __arm_vbicq(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_u16)))\n" "uint16x8_t __arm_vbicq_n_u16(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_u16)))\n" "uint16x8_t __arm_vbicq(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_u32)))\n" "uint32x4_t __arm_vbicq_n_u32(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_u32)))\n" "uint32x4_t __arm_vbicq(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s16)))\n" "int16x8_t __arm_vbicq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s16)))\n" "int16x8_t __arm_vbicq(int16x8_t, int16x8_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s32)))\n" "int32x4_t __arm_vbicq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s32)))\n" "int32x4_t __arm_vbicq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s8)))\n" "int8x16_t __arm_vbicq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s8)))\n" "int8x16_t __arm_vbicq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u16)))\n" "uint16x8_t __arm_vbicq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u16)))\n" "uint16x8_t __arm_vbicq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u32)))\n" "uint32x4_t __arm_vbicq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u32)))\n" "uint32x4_t __arm_vbicq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u8)))\n" "uint8x16_t __arm_vbicq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u8)))\n" "uint8x16_t __arm_vbicq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s16)))\n" "int16x8_t __arm_vbicq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s16)))\n" "int16x8_t __arm_vbicq_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s32)))\n" "int32x4_t 
__arm_vbicq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s32)))\n" "int32x4_t __arm_vbicq_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s8)))\n" "int8x16_t __arm_vbicq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s8)))\n" "int8x16_t __arm_vbicq_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u16)))\n" "uint16x8_t __arm_vbicq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u16)))\n" "uint16x8_t __arm_vbicq_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u32)))\n" "uint32x4_t __arm_vbicq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u32)))\n" "uint32x4_t __arm_vbicq_x(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u8)))\n" "uint8x16_t __arm_vbicq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u8)))\n" "uint8x16_t __arm_vbicq_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s16)))\n" "int16x8_t __arm_vbrsrq_m_n_s16(int16x8_t, int16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s16)))\n" "int16x8_t __arm_vbrsrq_m(int16x8_t, int16x8_t, int32_t, mve_pred16_t);\n" 
"static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s32)))\n" "int32x4_t __arm_vbrsrq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s32)))\n" "int32x4_t __arm_vbrsrq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s8)))\n" "int8x16_t __arm_vbrsrq_m_n_s8(int8x16_t, int8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s8)))\n" "int8x16_t __arm_vbrsrq_m(int8x16_t, int8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u16)))\n" "uint16x8_t __arm_vbrsrq_m_n_u16(uint16x8_t, uint16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u16)))\n" "uint16x8_t __arm_vbrsrq_m(uint16x8_t, uint16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u32)))\n" "uint32x4_t __arm_vbrsrq_m_n_u32(uint32x4_t, uint32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u32)))\n" "uint32x4_t __arm_vbrsrq_m(uint32x4_t, uint32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u8)))\n" "uint8x16_t __arm_vbrsrq_m_n_u8(uint8x16_t, uint8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u8)))\n" "uint8x16_t __arm_vbrsrq_m(uint8x16_t, uint8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s16)))\n" "int16x8_t 
__arm_vbrsrq_n_s16(int16x8_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s16)))\n" "int16x8_t __arm_vbrsrq(int16x8_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s32)))\n" "int32x4_t __arm_vbrsrq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s32)))\n" "int32x4_t __arm_vbrsrq(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s8)))\n" "int8x16_t __arm_vbrsrq_n_s8(int8x16_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s8)))\n" "int8x16_t __arm_vbrsrq(int8x16_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u16)))\n" "uint16x8_t __arm_vbrsrq_n_u16(uint16x8_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u16)))\n" "uint16x8_t __arm_vbrsrq(uint16x8_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u32)))\n" "uint32x4_t __arm_vbrsrq_n_u32(uint32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u32)))\n" "uint32x4_t __arm_vbrsrq(uint32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u8)))\n" "uint8x16_t __arm_vbrsrq_n_u8(uint8x16_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u8)))\n" "uint8x16_t __arm_vbrsrq(uint8x16_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s16)))\n" "int16x8_t __arm_vbrsrq_x_n_s16(int16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s16)))\n" "int16x8_t __arm_vbrsrq_x(int16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s32)))\n" "int32x4_t __arm_vbrsrq_x_n_s32(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s32)))\n" "int32x4_t __arm_vbrsrq_x(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s8)))\n" "int8x16_t __arm_vbrsrq_x_n_s8(int8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s8)))\n" "int8x16_t __arm_vbrsrq_x(int8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u16)))\n" "uint16x8_t __arm_vbrsrq_x_n_u16(uint16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u16)))\n" "uint16x8_t __arm_vbrsrq_x(uint16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u32)))\n" "uint32x4_t __arm_vbrsrq_x_n_u32(uint32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u32)))\n" "uint32x4_t __arm_vbrsrq_x(uint32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u8)))\n" "uint8x16_t __arm_vbrsrq_x_n_u8(uint8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u8)))\n" "uint8x16_t __arm_vbrsrq_x(uint8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s16)))\n" "int16x8_t __arm_vcaddq_rot270_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s16)))\n" "int16x8_t __arm_vcaddq_rot270_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s32)))\n" "int32x4_t __arm_vcaddq_rot270_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s32)))\n" "int32x4_t __arm_vcaddq_rot270_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s8)))\n" "int8x16_t __arm_vcaddq_rot270_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s8)))\n" "int8x16_t __arm_vcaddq_rot270_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u16)))\n" "uint16x8_t __arm_vcaddq_rot270_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u16)))\n" "uint16x8_t __arm_vcaddq_rot270_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u32)))\n" "uint32x4_t __arm_vcaddq_rot270_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u32)))\n" "uint32x4_t __arm_vcaddq_rot270_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u8)))\n" "uint8x16_t __arm_vcaddq_rot270_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u8)))\n" "uint8x16_t __arm_vcaddq_rot270_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s16)))\n" "int16x8_t __arm_vcaddq_rot270_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s16)))\n" "int16x8_t __arm_vcaddq_rot270(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s32)))\n" "int32x4_t __arm_vcaddq_rot270_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s32)))\n" "int32x4_t __arm_vcaddq_rot270(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s8)))\n" "int8x16_t __arm_vcaddq_rot270_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s8)))\n" "int8x16_t __arm_vcaddq_rot270(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u16)))\n" "uint16x8_t __arm_vcaddq_rot270_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u16)))\n" "uint16x8_t __arm_vcaddq_rot270(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u32)))\n" "uint32x4_t __arm_vcaddq_rot270_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u32)))\n" "uint32x4_t __arm_vcaddq_rot270(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u8)))\n" "uint8x16_t __arm_vcaddq_rot270_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u8)))\n" "uint8x16_t __arm_vcaddq_rot270(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s16)))\n" "int16x8_t __arm_vcaddq_rot270_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s16)))\n" "int16x8_t __arm_vcaddq_rot270_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s32)))\n" "int32x4_t __arm_vcaddq_rot270_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s32)))\n" "int32x4_t __arm_vcaddq_rot270_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s8)))\n" "int8x16_t __arm_vcaddq_rot270_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s8)))\n" "int8x16_t __arm_vcaddq_rot270_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u16)))\n" "uint16x8_t __arm_vcaddq_rot270_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u16)))\n" "uint16x8_t __arm_vcaddq_rot270_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u32)))\n" "uint32x4_t __arm_vcaddq_rot270_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u32)))\n" "uint32x4_t __arm_vcaddq_rot270_x(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u8)))\n" "uint8x16_t __arm_vcaddq_rot270_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u8)))\n" "uint8x16_t __arm_vcaddq_rot270_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s16)))\n" "int16x8_t __arm_vcaddq_rot90_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s16)))\n" "int16x8_t __arm_vcaddq_rot90_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s32)))\n" "int32x4_t __arm_vcaddq_rot90_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s32)))\n" "int32x4_t __arm_vcaddq_rot90_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s8)))\n" "int8x16_t __arm_vcaddq_rot90_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s8)))\n" "int8x16_t __arm_vcaddq_rot90_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u16)))\n" "uint16x8_t __arm_vcaddq_rot90_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u16)))\n" "uint16x8_t __arm_vcaddq_rot90_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u32)))\n" "uint32x4_t __arm_vcaddq_rot90_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u32)))\n" "uint32x4_t __arm_vcaddq_rot90_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u8)))\n" "uint8x16_t __arm_vcaddq_rot90_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u8)))\n" "uint8x16_t __arm_vcaddq_rot90_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s16)))\n" "int16x8_t __arm_vcaddq_rot90_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s16)))\n" "int16x8_t __arm_vcaddq_rot90(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s32)))\n" "int32x4_t __arm_vcaddq_rot90_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s32)))\n" "int32x4_t __arm_vcaddq_rot90(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s8)))\n" "int8x16_t __arm_vcaddq_rot90_s8(int8x16_t, 
int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s8)))\n" "int8x16_t __arm_vcaddq_rot90(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u16)))\n" "uint16x8_t __arm_vcaddq_rot90_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u16)))\n" "uint16x8_t __arm_vcaddq_rot90(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u32)))\n" "uint32x4_t __arm_vcaddq_rot90_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u32)))\n" "uint32x4_t __arm_vcaddq_rot90(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u8)))\n" "uint8x16_t __arm_vcaddq_rot90_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u8)))\n" "uint8x16_t __arm_vcaddq_rot90(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s16)))\n" "int16x8_t __arm_vcaddq_rot90_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s16)))\n" "int16x8_t __arm_vcaddq_rot90_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s32)))\n" "int32x4_t __arm_vcaddq_rot90_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s32)))\n" "int32x4_t __arm_vcaddq_rot90_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s8)))\n" "int8x16_t __arm_vcaddq_rot90_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s8)))\n" "int8x16_t __arm_vcaddq_rot90_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u16)))\n" "uint16x8_t __arm_vcaddq_rot90_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u16)))\n" "uint16x8_t __arm_vcaddq_rot90_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u32)))\n" "uint32x4_t __arm_vcaddq_rot90_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u32)))\n" "uint32x4_t __arm_vcaddq_rot90_x(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u8)))\n" "uint8x16_t __arm_vcaddq_rot90_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u8)))\n" "uint8x16_t __arm_vcaddq_rot90_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s16)))\n" "int16x8_t __arm_vclsq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s16)))\n" "int16x8_t __arm_vclsq_m(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s32)))\n" "int32x4_t __arm_vclsq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s32)))\n" "int32x4_t __arm_vclsq_m(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s8)))\n" "int8x16_t __arm_vclsq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s8)))\n" "int8x16_t __arm_vclsq_m(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s16)))\n" "int16x8_t __arm_vclsq_s16(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s16)))\n" "int16x8_t __arm_vclsq(int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s32)))\n" "int32x4_t __arm_vclsq_s32(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s32)))\n" "int32x4_t __arm_vclsq(int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s8)))\n" "int8x16_t __arm_vclsq_s8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s8)))\n" "int8x16_t __arm_vclsq(int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s16)))\n" "int16x8_t __arm_vclsq_x_s16(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s16)))\n" "int16x8_t __arm_vclsq_x(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s32)))\n" "int32x4_t __arm_vclsq_x_s32(int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s32)))\n" "int32x4_t __arm_vclsq_x(int32x4_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s8)))\n" "int8x16_t __arm_vclsq_x_s8(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s8)))\n" "int8x16_t __arm_vclsq_x(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s16)))\n" "int16x8_t __arm_vclzq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s16)))\n" "int16x8_t __arm_vclzq_m(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s32)))\n" "int32x4_t __arm_vclzq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s32)))\n" "int32x4_t __arm_vclzq_m(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s8)))\n" "int8x16_t __arm_vclzq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s8)))\n" "int8x16_t __arm_vclzq_m(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u16)))\n" "uint16x8_t __arm_vclzq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u16)))\n" "uint16x8_t __arm_vclzq_m(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u32)))\n" "uint32x4_t __arm_vclzq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u32)))\n" 
"uint32x4_t __arm_vclzq_m(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u8)))\n" "uint8x16_t __arm_vclzq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u8)))\n" "uint8x16_t __arm_vclzq_m(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s16)))\n" "int16x8_t __arm_vclzq_s16(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s16)))\n" "int16x8_t __arm_vclzq(int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s32)))\n" "int32x4_t __arm_vclzq_s32(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s32)))\n" "int32x4_t __arm_vclzq(int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s8)))\n" "int8x16_t __arm_vclzq_s8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s8)))\n" "int8x16_t __arm_vclzq(int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u16)))\n" "uint16x8_t __arm_vclzq_u16(uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u16)))\n" "uint16x8_t __arm_vclzq(uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u32)))\n" "uint32x4_t __arm_vclzq_u32(uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u32)))\n" "uint32x4_t __arm_vclzq(uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u8)))\n" "uint8x16_t __arm_vclzq_u8(uint8x16_t);\n" "static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u8)))\n" "uint8x16_t __arm_vclzq(uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s16)))\n" "int16x8_t __arm_vclzq_x_s16(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s16)))\n" "int16x8_t __arm_vclzq_x(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s32)))\n" "int32x4_t __arm_vclzq_x_s32(int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s32)))\n" "int32x4_t __arm_vclzq_x(int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s8)))\n" "int8x16_t __arm_vclzq_x_s8(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s8)))\n" "int8x16_t __arm_vclzq_x(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u16)))\n" "uint16x8_t __arm_vclzq_x_u16(uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u16)))\n" "uint16x8_t __arm_vclzq_x(uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u32)))\n" "uint32x4_t __arm_vclzq_x_u32(uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u32)))\n" "uint32x4_t __arm_vclzq_x(uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u8)))\n" "uint8x16_t __arm_vclzq_x_u8(uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u8)))\n" "uint8x16_t __arm_vclzq_x(uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u16)))\n" "mve_pred16_t __arm_vcmpcsq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u16)))\n" "mve_pred16_t __arm_vcmpcsq_m(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u32)))\n" "mve_pred16_t __arm_vcmpcsq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u32)))\n" "mve_pred16_t __arm_vcmpcsq_m(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u8)))\n" "mve_pred16_t __arm_vcmpcsq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u8)))\n" "mve_pred16_t __arm_vcmpcsq_m(uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u16)))\n" "mve_pred16_t __arm_vcmpcsq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u16)))\n" "mve_pred16_t __arm_vcmpcsq_m(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u32)))\n" "mve_pred16_t __arm_vcmpcsq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u32)))\n" "mve_pred16_t __arm_vcmpcsq_m(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u8)))\n" "mve_pred16_t __arm_vcmpcsq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u8)))\n" "mve_pred16_t __arm_vcmpcsq_m(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u16)))\n" "mve_pred16_t __arm_vcmpcsq_n_u16(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u16)))\n" "mve_pred16_t __arm_vcmpcsq(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u32)))\n" "mve_pred16_t __arm_vcmpcsq_n_u32(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u32)))\n" "mve_pred16_t __arm_vcmpcsq(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u8)))\n" "mve_pred16_t __arm_vcmpcsq_n_u8(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u8)))\n" "mve_pred16_t __arm_vcmpcsq(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u16)))\n" "mve_pred16_t __arm_vcmpcsq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u16)))\n" "mve_pred16_t __arm_vcmpcsq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u32)))\n" "mve_pred16_t __arm_vcmpcsq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u32)))\n" "mve_pred16_t __arm_vcmpcsq(uint32x4_t, uint32x4_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u8)))\n" "mve_pred16_t __arm_vcmpcsq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u8)))\n" "mve_pred16_t __arm_vcmpcsq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s16)))\n" "mve_pred16_t __arm_vcmpeqq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s16)))\n" "mve_pred16_t __arm_vcmpeqq_m(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s32)))\n" "mve_pred16_t __arm_vcmpeqq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s32)))\n" "mve_pred16_t __arm_vcmpeqq_m(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s8)))\n" "mve_pred16_t __arm_vcmpeqq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s8)))\n" "mve_pred16_t __arm_vcmpeqq_m(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u16)))\n" "mve_pred16_t __arm_vcmpeqq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u16)))\n" "mve_pred16_t __arm_vcmpeqq_m(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u32)))\n" "mve_pred16_t __arm_vcmpeqq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u32)))\n" "mve_pred16_t __arm_vcmpeqq_m(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u8)))\n" "mve_pred16_t __arm_vcmpeqq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u8)))\n" "mve_pred16_t __arm_vcmpeqq_m(uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s16)))\n" "mve_pred16_t __arm_vcmpeqq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s16)))\n" "mve_pred16_t __arm_vcmpeqq_m(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s32)))\n" "mve_pred16_t __arm_vcmpeqq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s32)))\n" "mve_pred16_t __arm_vcmpeqq_m(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s8)))\n" "mve_pred16_t __arm_vcmpeqq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s8)))\n" "mve_pred16_t __arm_vcmpeqq_m(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u16)))\n" "mve_pred16_t __arm_vcmpeqq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u16)))\n" "mve_pred16_t __arm_vcmpeqq_m(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u32)))\n" "mve_pred16_t __arm_vcmpeqq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u32)))\n" "mve_pred16_t __arm_vcmpeqq_m(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u8)))\n" "mve_pred16_t __arm_vcmpeqq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u8)))\n" "mve_pred16_t __arm_vcmpeqq_m(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s16)))\n" "mve_pred16_t __arm_vcmpeqq_n_s16(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s16)))\n" "mve_pred16_t __arm_vcmpeqq(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s32)))\n" "mve_pred16_t __arm_vcmpeqq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s32)))\n" "mve_pred16_t __arm_vcmpeqq(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s8)))\n" "mve_pred16_t __arm_vcmpeqq_n_s8(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s8)))\n" "mve_pred16_t __arm_vcmpeqq(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u16)))\n" "mve_pred16_t __arm_vcmpeqq_n_u16(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u16)))\n" "mve_pred16_t __arm_vcmpeqq(uint16x8_t, uint16_t);\n" 
"static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u32)))\n" "mve_pred16_t __arm_vcmpeqq_n_u32(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u32)))\n" "mve_pred16_t __arm_vcmpeqq(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u8)))\n" "mve_pred16_t __arm_vcmpeqq_n_u8(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u8)))\n" "mve_pred16_t __arm_vcmpeqq(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s16)))\n" "mve_pred16_t __arm_vcmpeqq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s16)))\n" "mve_pred16_t __arm_vcmpeqq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s32)))\n" "mve_pred16_t __arm_vcmpeqq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s32)))\n" "mve_pred16_t __arm_vcmpeqq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s8)))\n" "mve_pred16_t __arm_vcmpeqq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s8)))\n" "mve_pred16_t __arm_vcmpeqq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u16)))\n" "mve_pred16_t __arm_vcmpeqq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u16)))\n" "mve_pred16_t __arm_vcmpeqq(uint16x8_t, uint16x8_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u32)))\n" "mve_pred16_t __arm_vcmpeqq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u32)))\n" "mve_pred16_t __arm_vcmpeqq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u8)))\n" "mve_pred16_t __arm_vcmpeqq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u8)))\n" "mve_pred16_t __arm_vcmpeqq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s16)))\n" "mve_pred16_t __arm_vcmpgeq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s16)))\n" "mve_pred16_t __arm_vcmpgeq_m(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s32)))\n" "mve_pred16_t __arm_vcmpgeq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s32)))\n" "mve_pred16_t __arm_vcmpgeq_m(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s8)))\n" "mve_pred16_t __arm_vcmpgeq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s8)))\n" "mve_pred16_t __arm_vcmpgeq_m(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s16)))\n" "mve_pred16_t __arm_vcmpgeq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s16)))\n" 
"mve_pred16_t __arm_vcmpgeq_m(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s32)))\n" "mve_pred16_t __arm_vcmpgeq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s32)))\n" "mve_pred16_t __arm_vcmpgeq_m(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s8)))\n" "mve_pred16_t __arm_vcmpgeq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s8)))\n" "mve_pred16_t __arm_vcmpgeq_m(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s16)))\n" "mve_pred16_t __arm_vcmpgeq_n_s16(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s16)))\n" "mve_pred16_t __arm_vcmpgeq(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s32)))\n" "mve_pred16_t __arm_vcmpgeq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s32)))\n" "mve_pred16_t __arm_vcmpgeq(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s8)))\n" "mve_pred16_t __arm_vcmpgeq_n_s8(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s8)))\n" "mve_pred16_t __arm_vcmpgeq(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s16)))\n" "mve_pred16_t __arm_vcmpgeq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s16)))\n" "mve_pred16_t __arm_vcmpgeq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s32)))\n" "mve_pred16_t __arm_vcmpgeq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s32)))\n" "mve_pred16_t __arm_vcmpgeq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s8)))\n" "mve_pred16_t __arm_vcmpgeq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s8)))\n" "mve_pred16_t __arm_vcmpgeq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s16)))\n" "mve_pred16_t __arm_vcmpgtq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s16)))\n" "mve_pred16_t __arm_vcmpgtq_m(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s32)))\n" "mve_pred16_t __arm_vcmpgtq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s32)))\n" "mve_pred16_t __arm_vcmpgtq_m(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s8)))\n" "mve_pred16_t __arm_vcmpgtq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s8)))\n" "mve_pred16_t __arm_vcmpgtq_m(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s16)))\n" "mve_pred16_t __arm_vcmpgtq_m_s16(int16x8_t, int16x8_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s16)))\n" "mve_pred16_t __arm_vcmpgtq_m(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s32)))\n" "mve_pred16_t __arm_vcmpgtq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s32)))\n" "mve_pred16_t __arm_vcmpgtq_m(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s8)))\n" "mve_pred16_t __arm_vcmpgtq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s8)))\n" "mve_pred16_t __arm_vcmpgtq_m(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s16)))\n" "mve_pred16_t __arm_vcmpgtq_n_s16(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s16)))\n" "mve_pred16_t __arm_vcmpgtq(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s32)))\n" "mve_pred16_t __arm_vcmpgtq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s32)))\n" "mve_pred16_t __arm_vcmpgtq(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s8)))\n" "mve_pred16_t __arm_vcmpgtq_n_s8(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s8)))\n" "mve_pred16_t __arm_vcmpgtq(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s16)))\n" "mve_pred16_t 
__arm_vcmpgtq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s16)))\n" "mve_pred16_t __arm_vcmpgtq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s32)))\n" "mve_pred16_t __arm_vcmpgtq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s32)))\n" "mve_pred16_t __arm_vcmpgtq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s8)))\n" "mve_pred16_t __arm_vcmpgtq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s8)))\n" "mve_pred16_t __arm_vcmpgtq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u16)))\n" "mve_pred16_t __arm_vcmphiq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u16)))\n" "mve_pred16_t __arm_vcmphiq_m(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u32)))\n" "mve_pred16_t __arm_vcmphiq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u32)))\n" "mve_pred16_t __arm_vcmphiq_m(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u8)))\n" "mve_pred16_t __arm_vcmphiq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u8)))\n" "mve_pred16_t __arm_vcmphiq_m(uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u16)))\n" "mve_pred16_t __arm_vcmphiq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u16)))\n" "mve_pred16_t __arm_vcmphiq_m(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u32)))\n" "mve_pred16_t __arm_vcmphiq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u32)))\n" "mve_pred16_t __arm_vcmphiq_m(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u8)))\n" "mve_pred16_t __arm_vcmphiq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u8)))\n" "mve_pred16_t __arm_vcmphiq_m(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u16)))\n" "mve_pred16_t __arm_vcmphiq_n_u16(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u16)))\n" "mve_pred16_t __arm_vcmphiq(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u32)))\n" "mve_pred16_t __arm_vcmphiq_n_u32(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u32)))\n" "mve_pred16_t __arm_vcmphiq(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u8)))\n" "mve_pred16_t __arm_vcmphiq_n_u8(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u8)))\n" 
"mve_pred16_t __arm_vcmphiq(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u16)))\n" "mve_pred16_t __arm_vcmphiq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u16)))\n" "mve_pred16_t __arm_vcmphiq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u32)))\n" "mve_pred16_t __arm_vcmphiq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u32)))\n" "mve_pred16_t __arm_vcmphiq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u8)))\n" "mve_pred16_t __arm_vcmphiq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u8)))\n" "mve_pred16_t __arm_vcmphiq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s16)))\n" "mve_pred16_t __arm_vcmpleq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s16)))\n" "mve_pred16_t __arm_vcmpleq_m(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s32)))\n" "mve_pred16_t __arm_vcmpleq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s32)))\n" "mve_pred16_t __arm_vcmpleq_m(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s8)))\n" "mve_pred16_t __arm_vcmpleq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s8)))\n" "mve_pred16_t __arm_vcmpleq_m(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s16)))\n" "mve_pred16_t __arm_vcmpleq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s16)))\n" "mve_pred16_t __arm_vcmpleq_m(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s32)))\n" "mve_pred16_t __arm_vcmpleq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s32)))\n" "mve_pred16_t __arm_vcmpleq_m(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s8)))\n" "mve_pred16_t __arm_vcmpleq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s8)))\n" "mve_pred16_t __arm_vcmpleq_m(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s16)))\n" "mve_pred16_t __arm_vcmpleq_n_s16(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s16)))\n" "mve_pred16_t __arm_vcmpleq(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s32)))\n" "mve_pred16_t __arm_vcmpleq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s32)))\n" "mve_pred16_t __arm_vcmpleq(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s8)))\n" "mve_pred16_t __arm_vcmpleq_n_s8(int8x16_t, 
int8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s8)))\n" "mve_pred16_t __arm_vcmpleq(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s16)))\n" "mve_pred16_t __arm_vcmpleq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s16)))\n" "mve_pred16_t __arm_vcmpleq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s32)))\n" "mve_pred16_t __arm_vcmpleq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s32)))\n" "mve_pred16_t __arm_vcmpleq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s8)))\n" "mve_pred16_t __arm_vcmpleq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s8)))\n" "mve_pred16_t __arm_vcmpleq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s16)))\n" "mve_pred16_t __arm_vcmpltq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s16)))\n" "mve_pred16_t __arm_vcmpltq_m(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s32)))\n" "mve_pred16_t __arm_vcmpltq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s32)))\n" "mve_pred16_t __arm_vcmpltq_m(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s8)))\n" "mve_pred16_t 
__arm_vcmpltq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s8)))\n" "mve_pred16_t __arm_vcmpltq_m(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s16)))\n" "mve_pred16_t __arm_vcmpltq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s16)))\n" "mve_pred16_t __arm_vcmpltq_m(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s32)))\n" "mve_pred16_t __arm_vcmpltq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s32)))\n" "mve_pred16_t __arm_vcmpltq_m(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s8)))\n" "mve_pred16_t __arm_vcmpltq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s8)))\n" "mve_pred16_t __arm_vcmpltq_m(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s16)))\n" "mve_pred16_t __arm_vcmpltq_n_s16(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s16)))\n" "mve_pred16_t __arm_vcmpltq(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s32)))\n" "mve_pred16_t __arm_vcmpltq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s32)))\n" "mve_pred16_t __arm_vcmpltq(int32x4_t, int32_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s8)))\n" "mve_pred16_t __arm_vcmpltq_n_s8(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s8)))\n" "mve_pred16_t __arm_vcmpltq(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s16)))\n" "mve_pred16_t __arm_vcmpltq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s16)))\n" "mve_pred16_t __arm_vcmpltq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s32)))\n" "mve_pred16_t __arm_vcmpltq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s32)))\n" "mve_pred16_t __arm_vcmpltq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s8)))\n" "mve_pred16_t __arm_vcmpltq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s8)))\n" "mve_pred16_t __arm_vcmpltq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s16)))\n" "mve_pred16_t __arm_vcmpneq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s16)))\n" "mve_pred16_t __arm_vcmpneq_m(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s32)))\n" "mve_pred16_t __arm_vcmpneq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s32)))\n" "mve_pred16_t __arm_vcmpneq_m(int32x4_t, int32_t, mve_pred16_t);\n" "static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s8)))\n" "mve_pred16_t __arm_vcmpneq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s8)))\n" "mve_pred16_t __arm_vcmpneq_m(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u16)))\n" "mve_pred16_t __arm_vcmpneq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u16)))\n" "mve_pred16_t __arm_vcmpneq_m(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u32)))\n" "mve_pred16_t __arm_vcmpneq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u32)))\n" "mve_pred16_t __arm_vcmpneq_m(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u8)))\n" "mve_pred16_t __arm_vcmpneq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u8)))\n" "mve_pred16_t __arm_vcmpneq_m(uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s16)))\n" "mve_pred16_t __arm_vcmpneq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s16)))\n" "mve_pred16_t __arm_vcmpneq_m(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s32)))\n" "mve_pred16_t __arm_vcmpneq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s32)))\n" "mve_pred16_t __arm_vcmpneq_m(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s8)))\n" "mve_pred16_t __arm_vcmpneq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s8)))\n" "mve_pred16_t __arm_vcmpneq_m(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u16)))\n" "mve_pred16_t __arm_vcmpneq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u16)))\n" "mve_pred16_t __arm_vcmpneq_m(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u32)))\n" "mve_pred16_t __arm_vcmpneq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u32)))\n" "mve_pred16_t __arm_vcmpneq_m(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u8)))\n" "mve_pred16_t __arm_vcmpneq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u8)))\n" "mve_pred16_t __arm_vcmpneq_m(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s16)))\n" "mve_pred16_t __arm_vcmpneq_n_s16(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s16)))\n" "mve_pred16_t __arm_vcmpneq(int16x8_t, int16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s32)))\n" "mve_pred16_t __arm_vcmpneq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s32)))\n" "mve_pred16_t __arm_vcmpneq(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s8)))\n" "mve_pred16_t __arm_vcmpneq_n_s8(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s8)))\n" "mve_pred16_t __arm_vcmpneq(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u16)))\n" "mve_pred16_t __arm_vcmpneq_n_u16(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u16)))\n" "mve_pred16_t __arm_vcmpneq(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u32)))\n" "mve_pred16_t __arm_vcmpneq_n_u32(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u32)))\n" "mve_pred16_t __arm_vcmpneq(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u8)))\n" "mve_pred16_t __arm_vcmpneq_n_u8(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u8)))\n" "mve_pred16_t __arm_vcmpneq(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s16)))\n" "mve_pred16_t __arm_vcmpneq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s16)))\n" "mve_pred16_t __arm_vcmpneq(int16x8_t, int16x8_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s32)))\n" "mve_pred16_t __arm_vcmpneq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s32)))\n" "mve_pred16_t __arm_vcmpneq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s8)))\n" "mve_pred16_t __arm_vcmpneq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s8)))\n" "mve_pred16_t __arm_vcmpneq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u16)))\n" "mve_pred16_t __arm_vcmpneq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u16)))\n" "mve_pred16_t __arm_vcmpneq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u32)))\n" "mve_pred16_t __arm_vcmpneq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u32)))\n" "mve_pred16_t __arm_vcmpneq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u8)))\n" "mve_pred16_t __arm_vcmpneq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u8)))\n" "mve_pred16_t __arm_vcmpneq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_s16)))\n" "int16x8_t __arm_vcreateq_s16(uint64_t, uint64_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_s32)))\n" "int32x4_t __arm_vcreateq_s32(uint64_t, uint64_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_s64)))\n" 
"int64x2_t __arm_vcreateq_s64(uint64_t, uint64_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_s8)))\n" "int8x16_t __arm_vcreateq_s8(uint64_t, uint64_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_u16)))\n" "uint16x8_t __arm_vcreateq_u16(uint64_t, uint64_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_u32)))\n" "uint32x4_t __arm_vcreateq_u32(uint64_t, uint64_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_u64)))\n" "uint64x2_t __arm_vcreateq_u64(uint64_t, uint64_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_u8)))\n" "uint8x16_t __arm_vcreateq_u8(uint64_t, uint64_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp16q)))\n" "mve_pred16_t __arm_vctp16q(uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp16q_m)))\n" "mve_pred16_t __arm_vctp16q_m(uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp32q)))\n" "mve_pred16_t __arm_vctp32q(uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp32q_m)))\n" "mve_pred16_t __arm_vctp32q_m(uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp64q)))\n" "mve_pred16_t __arm_vctp64q(uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp64q_m)))\n" "mve_pred16_t __arm_vctp64q_m(uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp8q)))\n" "mve_pred16_t __arm_vctp8q(uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp8q_m)))\n" "mve_pred16_t __arm_vctp8q_m(uint32_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u16)))\n" "uint16x8_t __arm_vddupq_m_n_u16(uint16x8_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u16)))\n" "uint16x8_t __arm_vddupq_m(uint16x8_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u32)))\n" "uint32x4_t __arm_vddupq_m_n_u32(uint32x4_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u32)))\n" "uint32x4_t __arm_vddupq_m(uint32x4_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u8)))\n" "uint8x16_t __arm_vddupq_m_n_u8(uint8x16_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u8)))\n" "uint8x16_t __arm_vddupq_m(uint8x16_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u16)))\n" "uint16x8_t __arm_vddupq_m_wb_u16(uint16x8_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u16)))\n" "uint16x8_t __arm_vddupq_m(uint16x8_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u32)))\n" "uint32x4_t __arm_vddupq_m_wb_u32(uint32x4_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u32)))\n" "uint32x4_t __arm_vddupq_m(uint32x4_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u8)))\n" "uint8x16_t __arm_vddupq_m_wb_u8(uint8x16_t, uint32_t *, int, 
mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u8)))\n" "uint8x16_t __arm_vddupq_m(uint8x16_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u16)))\n" "uint16x8_t __arm_vddupq_n_u16(uint32_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u16)))\n" "uint16x8_t __arm_vddupq_u16(uint32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u32)))\n" "uint32x4_t __arm_vddupq_n_u32(uint32_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u32)))\n" "uint32x4_t __arm_vddupq_u32(uint32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u8)))\n" "uint8x16_t __arm_vddupq_n_u8(uint32_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u8)))\n" "uint8x16_t __arm_vddupq_u8(uint32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u16)))\n" "uint16x8_t __arm_vddupq_wb_u16(uint32_t *, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u16)))\n" "uint16x8_t __arm_vddupq_u16(uint32_t *, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u32)))\n" "uint32x4_t __arm_vddupq_wb_u32(uint32_t *, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u32)))\n" "uint32x4_t __arm_vddupq_u32(uint32_t *, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u8)))\n" "uint8x16_t __arm_vddupq_wb_u8(uint32_t *, int);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u8)))\n" "uint8x16_t __arm_vddupq_u8(uint32_t *, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u16)))\n" "uint16x8_t __arm_vddupq_x_n_u16(uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u16)))\n" "uint16x8_t __arm_vddupq_x_u16(uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u32)))\n" "uint32x4_t __arm_vddupq_x_n_u32(uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u32)))\n" "uint32x4_t __arm_vddupq_x_u32(uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u8)))\n" "uint8x16_t __arm_vddupq_x_n_u8(uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u8)))\n" "uint8x16_t __arm_vddupq_x_u8(uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u16)))\n" "uint16x8_t __arm_vddupq_x_wb_u16(uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u16)))\n" "uint16x8_t __arm_vddupq_x_u16(uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u32)))\n" "uint32x4_t __arm_vddupq_x_wb_u32(uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u32)))\n" "uint32x4_t __arm_vddupq_x_u32(uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u8)))\n" "uint8x16_t __arm_vddupq_x_wb_u8(uint32_t *, 
int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u8)))\n" "uint8x16_t __arm_vddupq_x_u8(uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s16)))\n" "int16x8_t __arm_vdupq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s16)))\n" "int16x8_t __arm_vdupq_m(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s32)))\n" "int32x4_t __arm_vdupq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s32)))\n" "int32x4_t __arm_vdupq_m(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s8)))\n" "int8x16_t __arm_vdupq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s8)))\n" "int8x16_t __arm_vdupq_m(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u16)))\n" "uint16x8_t __arm_vdupq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u16)))\n" "uint16x8_t __arm_vdupq_m(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u32)))\n" "uint32x4_t __arm_vdupq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u32)))\n" "uint32x4_t __arm_vdupq_m(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u8)))\n" "uint8x16_t __arm_vdupq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u8)))\n" "uint8x16_t __arm_vdupq_m(uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_s16)))\n" "int16x8_t __arm_vdupq_n_s16(int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_s32)))\n" "int32x4_t __arm_vdupq_n_s32(int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_s8)))\n" "int8x16_t __arm_vdupq_n_s8(int8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_u16)))\n" "uint16x8_t __arm_vdupq_n_u16(uint16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_u32)))\n" "uint32x4_t __arm_vdupq_n_u32(uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_u8)))\n" "uint8x16_t __arm_vdupq_n_u8(uint8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_s16)))\n" "int16x8_t __arm_vdupq_x_n_s16(int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_s32)))\n" "int32x4_t __arm_vdupq_x_n_s32(int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_s8)))\n" "int8x16_t __arm_vdupq_x_n_s8(int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_u16)))\n" "uint16x8_t __arm_vdupq_x_n_u16(uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_u32)))\n" "uint32x4_t __arm_vdupq_x_n_u32(uint32_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_u8)))\n" "uint8x16_t __arm_vdupq_x_n_u8(uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u16)))\n" "uint16x8_t __arm_vdwdupq_m_n_u16(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u16)))\n" "uint16x8_t __arm_vdwdupq_m(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u32)))\n" "uint32x4_t __arm_vdwdupq_m_n_u32(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u32)))\n" "uint32x4_t __arm_vdwdupq_m(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u8)))\n" "uint8x16_t __arm_vdwdupq_m_n_u8(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u8)))\n" "uint8x16_t __arm_vdwdupq_m(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u16)))\n" "uint16x8_t __arm_vdwdupq_m_wb_u16(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u16)))\n" "uint16x8_t __arm_vdwdupq_m(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u32)))\n" "uint32x4_t __arm_vdwdupq_m_wb_u32(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u32)))\n" "uint32x4_t __arm_vdwdupq_m(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u8)))\n" "uint8x16_t __arm_vdwdupq_m_wb_u8(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u8)))\n" "uint8x16_t __arm_vdwdupq_m(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u16)))\n" "uint16x8_t __arm_vdwdupq_n_u16(uint32_t, uint32_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u16)))\n" "uint16x8_t __arm_vdwdupq_u16(uint32_t, uint32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u32)))\n" "uint32x4_t __arm_vdwdupq_n_u32(uint32_t, uint32_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u32)))\n" "uint32x4_t __arm_vdwdupq_u32(uint32_t, uint32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u8)))\n" "uint8x16_t __arm_vdwdupq_n_u8(uint32_t, uint32_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u8)))\n" "uint8x16_t __arm_vdwdupq_u8(uint32_t, uint32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u16)))\n" "uint16x8_t __arm_vdwdupq_wb_u16(uint32_t *, uint32_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u16)))\n" "uint16x8_t __arm_vdwdupq_u16(uint32_t *, uint32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u32)))\n" "uint32x4_t 
__arm_vdwdupq_wb_u32(uint32_t *, uint32_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u32)))\n" "uint32x4_t __arm_vdwdupq_u32(uint32_t *, uint32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u8)))\n" "uint8x16_t __arm_vdwdupq_wb_u8(uint32_t *, uint32_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u8)))\n" "uint8x16_t __arm_vdwdupq_u8(uint32_t *, uint32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u16)))\n" "uint16x8_t __arm_vdwdupq_x_n_u16(uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u16)))\n" "uint16x8_t __arm_vdwdupq_x_u16(uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u32)))\n" "uint32x4_t __arm_vdwdupq_x_n_u32(uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u32)))\n" "uint32x4_t __arm_vdwdupq_x_u32(uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u8)))\n" "uint8x16_t __arm_vdwdupq_x_n_u8(uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u8)))\n" "uint8x16_t __arm_vdwdupq_x_u8(uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u16)))\n" "uint16x8_t __arm_vdwdupq_x_wb_u16(uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u16)))\n" "uint16x8_t 
__arm_vdwdupq_x_u16(uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u32)))\n" "uint32x4_t __arm_vdwdupq_x_wb_u32(uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u32)))\n" "uint32x4_t __arm_vdwdupq_x_u32(uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u8)))\n" "uint8x16_t __arm_vdwdupq_x_wb_u8(uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u8)))\n" "uint8x16_t __arm_vdwdupq_x_u8(uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s16)))\n" "int16x8_t __arm_veorq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s16)))\n" "int16x8_t __arm_veorq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s32)))\n" "int32x4_t __arm_veorq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s32)))\n" "int32x4_t __arm_veorq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s8)))\n" "int8x16_t __arm_veorq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s8)))\n" "int8x16_t __arm_veorq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u16)))\n" "uint16x8_t __arm_veorq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u16)))\n" "uint16x8_t __arm_veorq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u32)))\n" "uint32x4_t __arm_veorq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u32)))\n" "uint32x4_t __arm_veorq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u8)))\n" "uint8x16_t __arm_veorq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u8)))\n" "uint8x16_t __arm_veorq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_s16)))\n" "int16x8_t __arm_veorq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_s16)))\n" "int16x8_t __arm_veorq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_s32)))\n" "int32x4_t __arm_veorq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_s32)))\n" "int32x4_t __arm_veorq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_s8)))\n" "int8x16_t __arm_veorq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_s8)))\n" "int8x16_t 
__arm_veorq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_u16)))\n" "uint16x8_t __arm_veorq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_u16)))\n" "uint16x8_t __arm_veorq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_u32)))\n" "uint32x4_t __arm_veorq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_u32)))\n" "uint32x4_t __arm_veorq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_u8)))\n" "uint8x16_t __arm_veorq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_u8)))\n" "uint8x16_t __arm_veorq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s16)))\n" "int16x8_t __arm_veorq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s16)))\n" "int16x8_t __arm_veorq_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s32)))\n" "int32x4_t __arm_veorq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s32)))\n" "int32x4_t __arm_veorq_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s8)))\n" "int8x16_t __arm_veorq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s8)))\n" "int8x16_t __arm_veorq_x(int8x16_t, int8x16_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u16)))\n" "uint16x8_t __arm_veorq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u16)))\n" "uint16x8_t __arm_veorq_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u32)))\n" "uint32x4_t __arm_veorq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u32)))\n" "uint32x4_t __arm_veorq_x(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u8)))\n" "uint8x16_t __arm_veorq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u8)))\n" "uint8x16_t __arm_veorq_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s16)))\n" "int16_t __arm_vgetq_lane_s16(int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s16)))\n" "int16_t __arm_vgetq_lane(int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s32)))\n" "int32_t __arm_vgetq_lane_s32(int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s32)))\n" "int32_t __arm_vgetq_lane(int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s64)))\n" "int64_t __arm_vgetq_lane_s64(int64x2_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s64)))\n" "int64_t __arm_vgetq_lane(int64x2_t, 
int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s8)))\n" "int8_t __arm_vgetq_lane_s8(int8x16_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s8)))\n" "int8_t __arm_vgetq_lane(int8x16_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u16)))\n" "uint16_t __arm_vgetq_lane_u16(uint16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u16)))\n" "uint16_t __arm_vgetq_lane(uint16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u32)))\n" "uint32_t __arm_vgetq_lane_u32(uint32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u32)))\n" "uint32_t __arm_vgetq_lane(uint32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u64)))\n" "uint64_t __arm_vgetq_lane_u64(uint64x2_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u64)))\n" "uint64_t __arm_vgetq_lane(uint64x2_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u8)))\n" "uint8_t __arm_vgetq_lane_u8(uint8x16_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u8)))\n" "uint8_t __arm_vgetq_lane(uint8x16_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s16)))\n" "int16x8_t __arm_vhaddq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s16)))\n" "int16x8_t __arm_vhaddq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s32)))\n" "int32x4_t __arm_vhaddq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s32)))\n" "int32x4_t __arm_vhaddq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s8)))\n" "int8x16_t __arm_vhaddq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s8)))\n" "int8x16_t __arm_vhaddq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u16)))\n" "uint16x8_t __arm_vhaddq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u16)))\n" "uint16x8_t __arm_vhaddq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u32)))\n" "uint32x4_t __arm_vhaddq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u32)))\n" "uint32x4_t __arm_vhaddq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u8)))\n" "uint8x16_t __arm_vhaddq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u8)))\n" "uint8x16_t __arm_vhaddq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s16)))\n" "int16x8_t __arm_vhaddq_m_s16(int16x8_t, 
int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s16)))\n" "int16x8_t __arm_vhaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s32)))\n" "int32x4_t __arm_vhaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s32)))\n" "int32x4_t __arm_vhaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s8)))\n" "int8x16_t __arm_vhaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s8)))\n" "int8x16_t __arm_vhaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u16)))\n" "uint16x8_t __arm_vhaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u16)))\n" "uint16x8_t __arm_vhaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u32)))\n" "uint32x4_t __arm_vhaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u32)))\n" "uint32x4_t __arm_vhaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u8)))\n" "uint8x16_t __arm_vhaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u8)))\n" "uint8x16_t __arm_vhaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s16)))\n" "int16x8_t __arm_vhaddq_n_s16(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s16)))\n" "int16x8_t __arm_vhaddq(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s32)))\n" "int32x4_t __arm_vhaddq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s32)))\n" "int32x4_t __arm_vhaddq(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s8)))\n" "int8x16_t __arm_vhaddq_n_s8(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s8)))\n" "int8x16_t __arm_vhaddq(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u16)))\n" "uint16x8_t __arm_vhaddq_n_u16(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u16)))\n" "uint16x8_t __arm_vhaddq(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u32)))\n" "uint32x4_t __arm_vhaddq_n_u32(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u32)))\n" "uint32x4_t __arm_vhaddq(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u8)))\n" "uint8x16_t __arm_vhaddq_n_u8(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u8)))\n" "uint8x16_t 
__arm_vhaddq(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s16)))\n" "int16x8_t __arm_vhaddq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s16)))\n" "int16x8_t __arm_vhaddq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s32)))\n" "int32x4_t __arm_vhaddq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s32)))\n" "int32x4_t __arm_vhaddq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s8)))\n" "int8x16_t __arm_vhaddq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s8)))\n" "int8x16_t __arm_vhaddq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u16)))\n" "uint16x8_t __arm_vhaddq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u16)))\n" "uint16x8_t __arm_vhaddq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u32)))\n" "uint32x4_t __arm_vhaddq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u32)))\n" "uint32x4_t __arm_vhaddq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u8)))\n" "uint8x16_t __arm_vhaddq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u8)))\n" "uint8x16_t __arm_vhaddq(uint8x16_t, uint8x16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s16)))\n" "int16x8_t __arm_vhaddq_x_n_s16(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s16)))\n" "int16x8_t __arm_vhaddq_x(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s32)))\n" "int32x4_t __arm_vhaddq_x_n_s32(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s32)))\n" "int32x4_t __arm_vhaddq_x(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s8)))\n" "int8x16_t __arm_vhaddq_x_n_s8(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s8)))\n" "int8x16_t __arm_vhaddq_x(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u16)))\n" "uint16x8_t __arm_vhaddq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u16)))\n" "uint16x8_t __arm_vhaddq_x(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u32)))\n" "uint32x4_t __arm_vhaddq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u32)))\n" "uint32x4_t __arm_vhaddq_x(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u8)))\n" "uint8x16_t __arm_vhaddq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u8)))\n" "uint8x16_t __arm_vhaddq_x(uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s16)))\n" "int16x8_t __arm_vhaddq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s16)))\n" "int16x8_t __arm_vhaddq_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s32)))\n" "int32x4_t __arm_vhaddq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s32)))\n" "int32x4_t __arm_vhaddq_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s8)))\n" "int8x16_t __arm_vhaddq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s8)))\n" "int8x16_t __arm_vhaddq_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u16)))\n" "uint16x8_t __arm_vhaddq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u16)))\n" "uint16x8_t __arm_vhaddq_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u32)))\n" "uint32x4_t __arm_vhaddq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u32)))\n" "uint32x4_t __arm_vhaddq_x(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u8)))\n" "uint8x16_t 
__arm_vhaddq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u8)))\n" "uint8x16_t __arm_vhaddq_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s16)))\n" "int16x8_t __arm_vhcaddq_rot270_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s16)))\n" "int16x8_t __arm_vhcaddq_rot270_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s32)))\n" "int32x4_t __arm_vhcaddq_rot270_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s32)))\n" "int32x4_t __arm_vhcaddq_rot270_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s8)))\n" "int8x16_t __arm_vhcaddq_rot270_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s8)))\n" "int8x16_t __arm_vhcaddq_rot270_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s16)))\n" "int16x8_t __arm_vhcaddq_rot270_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s16)))\n" "int16x8_t __arm_vhcaddq_rot270(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s32)))\n" "int32x4_t __arm_vhcaddq_rot270_s32(int32x4_t, int32x4_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s32)))\n" "int32x4_t __arm_vhcaddq_rot270(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s8)))\n" "int8x16_t __arm_vhcaddq_rot270_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s8)))\n" "int8x16_t __arm_vhcaddq_rot270(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s16)))\n" "int16x8_t __arm_vhcaddq_rot270_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s16)))\n" "int16x8_t __arm_vhcaddq_rot270_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s32)))\n" "int32x4_t __arm_vhcaddq_rot270_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s32)))\n" "int32x4_t __arm_vhcaddq_rot270_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s8)))\n" "int8x16_t __arm_vhcaddq_rot270_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s8)))\n" "int8x16_t __arm_vhcaddq_rot270_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s16)))\n" "int16x8_t __arm_vhcaddq_rot90_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s16)))\n" "int16x8_t __arm_vhcaddq_rot90_m(int16x8_t, 
int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s32)))\n" "int32x4_t __arm_vhcaddq_rot90_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s32)))\n" "int32x4_t __arm_vhcaddq_rot90_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s8)))\n" "int8x16_t __arm_vhcaddq_rot90_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s8)))\n" "int8x16_t __arm_vhcaddq_rot90_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s16)))\n" "int16x8_t __arm_vhcaddq_rot90_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s16)))\n" "int16x8_t __arm_vhcaddq_rot90(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s32)))\n" "int32x4_t __arm_vhcaddq_rot90_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s32)))\n" "int32x4_t __arm_vhcaddq_rot90(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s8)))\n" "int8x16_t __arm_vhcaddq_rot90_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s8)))\n" "int8x16_t __arm_vhcaddq_rot90(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s16)))\n" "int16x8_t 
__arm_vhcaddq_rot90_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s16)))\n" "int16x8_t __arm_vhcaddq_rot90_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s32)))\n" "int32x4_t __arm_vhcaddq_rot90_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s32)))\n" "int32x4_t __arm_vhcaddq_rot90_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s8)))\n" "int8x16_t __arm_vhcaddq_rot90_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s8)))\n" "int8x16_t __arm_vhcaddq_rot90_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s16)))\n" "int16x8_t __arm_vhsubq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s16)))\n" "int16x8_t __arm_vhsubq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s32)))\n" "int32x4_t __arm_vhsubq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s32)))\n" "int32x4_t __arm_vhsubq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s8)))\n" "int8x16_t __arm_vhsubq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s8)))\n" "int8x16_t __arm_vhsubq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u16)))\n" "uint16x8_t __arm_vhsubq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u16)))\n" "uint16x8_t __arm_vhsubq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u32)))\n" "uint32x4_t __arm_vhsubq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u32)))\n" "uint32x4_t __arm_vhsubq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u8)))\n" "uint8x16_t __arm_vhsubq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u8)))\n" "uint8x16_t __arm_vhsubq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s16)))\n" "int16x8_t __arm_vhsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s16)))\n" "int16x8_t __arm_vhsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s32)))\n" "int32x4_t __arm_vhsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s32)))\n" "int32x4_t __arm_vhsubq_m(int32x4_t, int32x4_t, int32x4_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s8)))\n" "int8x16_t __arm_vhsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s8)))\n" "int8x16_t __arm_vhsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u16)))\n" "uint16x8_t __arm_vhsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u16)))\n" "uint16x8_t __arm_vhsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u32)))\n" "uint32x4_t __arm_vhsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u32)))\n" "uint32x4_t __arm_vhsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u8)))\n" "uint8x16_t __arm_vhsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u8)))\n" "uint8x16_t __arm_vhsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s16)))\n" "int16x8_t __arm_vhsubq_n_s16(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s16)))\n" "int16x8_t __arm_vhsubq(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s32)))\n" "int32x4_t __arm_vhsubq_n_s32(int32x4_t, int32_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s32)))\n" "int32x4_t __arm_vhsubq(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s8)))\n" "int8x16_t __arm_vhsubq_n_s8(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s8)))\n" "int8x16_t __arm_vhsubq(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u16)))\n" "uint16x8_t __arm_vhsubq_n_u16(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u16)))\n" "uint16x8_t __arm_vhsubq(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u32)))\n" "uint32x4_t __arm_vhsubq_n_u32(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u32)))\n" "uint32x4_t __arm_vhsubq(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u8)))\n" "uint8x16_t __arm_vhsubq_n_u8(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u8)))\n" "uint8x16_t __arm_vhsubq(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s16)))\n" "int16x8_t __arm_vhsubq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s16)))\n" "int16x8_t __arm_vhsubq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s32)))\n" "int32x4_t __arm_vhsubq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s32)))\n" "int32x4_t 
__arm_vhsubq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s8)))\n" "int8x16_t __arm_vhsubq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s8)))\n" "int8x16_t __arm_vhsubq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u16)))\n" "uint16x8_t __arm_vhsubq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u16)))\n" "uint16x8_t __arm_vhsubq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u32)))\n" "uint32x4_t __arm_vhsubq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u32)))\n" "uint32x4_t __arm_vhsubq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u8)))\n" "uint8x16_t __arm_vhsubq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u8)))\n" "uint8x16_t __arm_vhsubq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s16)))\n" "int16x8_t __arm_vhsubq_x_n_s16(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s16)))\n" "int16x8_t __arm_vhsubq_x(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s32)))\n" "int32x4_t __arm_vhsubq_x_n_s32(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s32)))\n" "int32x4_t __arm_vhsubq_x(int32x4_t, int32_t, mve_pred16_t);\n" 
"static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s8)))\n" "int8x16_t __arm_vhsubq_x_n_s8(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s8)))\n" "int8x16_t __arm_vhsubq_x(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u16)))\n" "uint16x8_t __arm_vhsubq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u16)))\n" "uint16x8_t __arm_vhsubq_x(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u32)))\n" "uint32x4_t __arm_vhsubq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u32)))\n" "uint32x4_t __arm_vhsubq_x(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u8)))\n" "uint8x16_t __arm_vhsubq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u8)))\n" "uint8x16_t __arm_vhsubq_x(uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s16)))\n" "int16x8_t __arm_vhsubq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s16)))\n" "int16x8_t __arm_vhsubq_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s32)))\n" "int32x4_t __arm_vhsubq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s32)))\n" "int32x4_t __arm_vhsubq_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s8)))\n" "int8x16_t __arm_vhsubq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s8)))\n" "int8x16_t __arm_vhsubq_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u16)))\n" "uint16x8_t __arm_vhsubq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u16)))\n" "uint16x8_t __arm_vhsubq_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u32)))\n" "uint32x4_t __arm_vhsubq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u32)))\n" "uint32x4_t __arm_vhsubq_x(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u8)))\n" "uint8x16_t __arm_vhsubq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u8)))\n" "uint8x16_t __arm_vhsubq_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u16)))\n" "uint16x8_t __arm_vidupq_m_n_u16(uint16x8_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u16)))\n" "uint16x8_t __arm_vidupq_m(uint16x8_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u32)))\n" 
"uint32x4_t __arm_vidupq_m_n_u32(uint32x4_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u32)))\n" "uint32x4_t __arm_vidupq_m(uint32x4_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u8)))\n" "uint8x16_t __arm_vidupq_m_n_u8(uint8x16_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u8)))\n" "uint8x16_t __arm_vidupq_m(uint8x16_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u16)))\n" "uint16x8_t __arm_vidupq_m_wb_u16(uint16x8_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u16)))\n" "uint16x8_t __arm_vidupq_m(uint16x8_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u32)))\n" "uint32x4_t __arm_vidupq_m_wb_u32(uint32x4_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u32)))\n" "uint32x4_t __arm_vidupq_m(uint32x4_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u8)))\n" "uint8x16_t __arm_vidupq_m_wb_u8(uint8x16_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u8)))\n" "uint8x16_t __arm_vidupq_m(uint8x16_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u16)))\n" "uint16x8_t __arm_vidupq_n_u16(uint32_t, int);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u16)))\n" "uint16x8_t __arm_vidupq_u16(uint32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u32)))\n" "uint32x4_t __arm_vidupq_n_u32(uint32_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u32)))\n" "uint32x4_t __arm_vidupq_u32(uint32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u8)))\n" "uint8x16_t __arm_vidupq_n_u8(uint32_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u8)))\n" "uint8x16_t __arm_vidupq_u8(uint32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u16)))\n" "uint16x8_t __arm_vidupq_wb_u16(uint32_t *, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u16)))\n" "uint16x8_t __arm_vidupq_u16(uint32_t *, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u32)))\n" "uint32x4_t __arm_vidupq_wb_u32(uint32_t *, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u32)))\n" "uint32x4_t __arm_vidupq_u32(uint32_t *, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u8)))\n" "uint8x16_t __arm_vidupq_wb_u8(uint32_t *, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u8)))\n" "uint8x16_t __arm_vidupq_u8(uint32_t *, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u16)))\n" "uint16x8_t __arm_vidupq_x_n_u16(uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u16)))\n" "uint16x8_t __arm_vidupq_x_u16(uint32_t, 
int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u32)))\n" "uint32x4_t __arm_vidupq_x_n_u32(uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u32)))\n" "uint32x4_t __arm_vidupq_x_u32(uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u8)))\n" "uint8x16_t __arm_vidupq_x_n_u8(uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u8)))\n" "uint8x16_t __arm_vidupq_x_u8(uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u16)))\n" "uint16x8_t __arm_vidupq_x_wb_u16(uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u16)))\n" "uint16x8_t __arm_vidupq_x_u16(uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u32)))\n" "uint32x4_t __arm_vidupq_x_wb_u32(uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u32)))\n" "uint32x4_t __arm_vidupq_x_u32(uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u8)))\n" "uint8x16_t __arm_vidupq_x_wb_u8(uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u8)))\n" "uint8x16_t __arm_vidupq_x_u8(uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u16)))\n" "uint16x8_t __arm_viwdupq_m_n_u16(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u16)))\n" "uint16x8_t __arm_viwdupq_m(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u32)))\n" "uint32x4_t __arm_viwdupq_m_n_u32(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u32)))\n" "uint32x4_t __arm_viwdupq_m(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u8)))\n" "uint8x16_t __arm_viwdupq_m_n_u8(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u8)))\n" "uint8x16_t __arm_viwdupq_m(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u16)))\n" "uint16x8_t __arm_viwdupq_m_wb_u16(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u16)))\n" "uint16x8_t __arm_viwdupq_m(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u32)))\n" "uint32x4_t __arm_viwdupq_m_wb_u32(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u32)))\n" "uint32x4_t __arm_viwdupq_m(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u8)))\n" "uint8x16_t __arm_viwdupq_m_wb_u8(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u8)))\n" "uint8x16_t __arm_viwdupq_m(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u16)))\n" "uint16x8_t __arm_viwdupq_n_u16(uint32_t, uint32_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u16)))\n" "uint16x8_t __arm_viwdupq_u16(uint32_t, uint32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u32)))\n" "uint32x4_t __arm_viwdupq_n_u32(uint32_t, uint32_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u32)))\n" "uint32x4_t __arm_viwdupq_u32(uint32_t, uint32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u8)))\n" "uint8x16_t __arm_viwdupq_n_u8(uint32_t, uint32_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u8)))\n" "uint8x16_t __arm_viwdupq_u8(uint32_t, uint32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u16)))\n" "uint16x8_t __arm_viwdupq_wb_u16(uint32_t *, uint32_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u16)))\n" "uint16x8_t __arm_viwdupq_u16(uint32_t *, uint32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u32)))\n" "uint32x4_t __arm_viwdupq_wb_u32(uint32_t *, uint32_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u32)))\n" "uint32x4_t __arm_viwdupq_u32(uint32_t *, uint32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u8)))\n" "uint8x16_t __arm_viwdupq_wb_u8(uint32_t *, uint32_t, int);\n" "static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u8)))\n" "uint8x16_t __arm_viwdupq_u8(uint32_t *, uint32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u16)))\n" "uint16x8_t __arm_viwdupq_x_n_u16(uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u16)))\n" "uint16x8_t __arm_viwdupq_x_u16(uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u32)))\n" "uint32x4_t __arm_viwdupq_x_n_u32(uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u32)))\n" "uint32x4_t __arm_viwdupq_x_u32(uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u8)))\n" "uint8x16_t __arm_viwdupq_x_n_u8(uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u8)))\n" "uint8x16_t __arm_viwdupq_x_u8(uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u16)))\n" "uint16x8_t __arm_viwdupq_x_wb_u16(uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u16)))\n" "uint16x8_t __arm_viwdupq_x_u16(uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u32)))\n" "uint32x4_t __arm_viwdupq_x_wb_u32(uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u32)))\n" "uint32x4_t __arm_viwdupq_x_u32(uint32_t *, 
uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u8)))\n" "uint8x16_t __arm_viwdupq_x_wb_u8(uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u8)))\n" "uint8x16_t __arm_viwdupq_x_u8(uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s16)))\n" "int16x8_t __arm_vld1q_s16(const int16_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s16)))\n" "int16x8_t __arm_vld1q(const int16_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s32)))\n" "int32x4_t __arm_vld1q_s32(const int32_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s32)))\n" "int32x4_t __arm_vld1q(const int32_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s8)))\n" "int8x16_t __arm_vld1q_s8(const int8_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s8)))\n" "int8x16_t __arm_vld1q(const int8_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u16)))\n" "uint16x8_t __arm_vld1q_u16(const uint16_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u16)))\n" "uint16x8_t __arm_vld1q(const uint16_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u32)))\n" "uint32x4_t __arm_vld1q_u32(const uint32_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u32)))\n" "uint32x4_t __arm_vld1q(const uint32_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u8)))\n" 
"uint8x16_t __arm_vld1q_u8(const uint8_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u8)))\n" "uint8x16_t __arm_vld1q(const uint8_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s16)))\n" "int16x8_t __arm_vld1q_z_s16(const int16_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s16)))\n" "int16x8_t __arm_vld1q_z(const int16_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s32)))\n" "int32x4_t __arm_vld1q_z_s32(const int32_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s32)))\n" "int32x4_t __arm_vld1q_z(const int32_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s8)))\n" "int8x16_t __arm_vld1q_z_s8(const int8_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s8)))\n" "int8x16_t __arm_vld1q_z(const int8_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u16)))\n" "uint16x8_t __arm_vld1q_z_u16(const uint16_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u16)))\n" "uint16x8_t __arm_vld1q_z(const uint16_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u32)))\n" "uint32x4_t __arm_vld1q_z_u32(const uint32_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u32)))\n" "uint32x4_t __arm_vld1q_z(const uint32_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u8)))\n" "uint8x16_t 
__arm_vld1q_z_u8(const uint8_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u8)))\n" "uint8x16_t __arm_vld1q_z(const uint8_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s16)))\n" "int16x8x2_t __arm_vld2q_s16(const int16_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s16)))\n" "int16x8x2_t __arm_vld2q(const int16_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s32)))\n" "int32x4x2_t __arm_vld2q_s32(const int32_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s32)))\n" "int32x4x2_t __arm_vld2q(const int32_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s8)))\n" "int8x16x2_t __arm_vld2q_s8(const int8_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s8)))\n" "int8x16x2_t __arm_vld2q(const int8_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u16)))\n" "uint16x8x2_t __arm_vld2q_u16(const uint16_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u16)))\n" "uint16x8x2_t __arm_vld2q(const uint16_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u32)))\n" "uint32x4x2_t __arm_vld2q_u32(const uint32_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u32)))\n" "uint32x4x2_t __arm_vld2q(const uint32_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u8)))\n" "uint8x16x2_t __arm_vld2q_u8(const uint8_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u8)))\n" 
"uint8x16x2_t __arm_vld2q(const uint8_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s16)))\n" "int16x8x4_t __arm_vld4q_s16(const int16_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s16)))\n" "int16x8x4_t __arm_vld4q(const int16_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s32)))\n" "int32x4x4_t __arm_vld4q_s32(const int32_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s32)))\n" "int32x4x4_t __arm_vld4q(const int32_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s8)))\n" "int8x16x4_t __arm_vld4q_s8(const int8_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s8)))\n" "int8x16x4_t __arm_vld4q(const int8_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u16)))\n" "uint16x8x4_t __arm_vld4q_u16(const uint16_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u16)))\n" "uint16x8x4_t __arm_vld4q(const uint16_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u32)))\n" "uint32x4x4_t __arm_vld4q_u32(const uint32_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u32)))\n" "uint32x4x4_t __arm_vld4q(const uint32_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u8)))\n" "uint8x16x4_t __arm_vld4q_u8(const uint8_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u8)))\n" "uint8x16x4_t __arm_vld4q(const uint8_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s16)))\n" "int16x8_t 
__arm_vldrbq_gather_offset_s16(const int8_t *, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s16)))\n" "int16x8_t __arm_vldrbq_gather_offset(const int8_t *, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s32)))\n" "int32x4_t __arm_vldrbq_gather_offset_s32(const int8_t *, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s32)))\n" "int32x4_t __arm_vldrbq_gather_offset(const int8_t *, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s8)))\n" "int8x16_t __arm_vldrbq_gather_offset_s8(const int8_t *, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s8)))\n" "int8x16_t __arm_vldrbq_gather_offset(const int8_t *, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u16)))\n" "uint16x8_t __arm_vldrbq_gather_offset_u16(const uint8_t *, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u16)))\n" "uint16x8_t __arm_vldrbq_gather_offset(const uint8_t *, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u32)))\n" "uint32x4_t __arm_vldrbq_gather_offset_u32(const uint8_t *, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u32)))\n" "uint32x4_t __arm_vldrbq_gather_offset(const uint8_t *, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u8)))\n" "uint8x16_t __arm_vldrbq_gather_offset_u8(const uint8_t *, uint8x16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u8)))\n" "uint8x16_t __arm_vldrbq_gather_offset(const uint8_t *, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s16)))\n" "int16x8_t __arm_vldrbq_gather_offset_z_s16(const int8_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s16)))\n" "int16x8_t __arm_vldrbq_gather_offset_z(const int8_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s32)))\n" "int32x4_t __arm_vldrbq_gather_offset_z_s32(const int8_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s32)))\n" "int32x4_t __arm_vldrbq_gather_offset_z(const int8_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s8)))\n" "int8x16_t __arm_vldrbq_gather_offset_z_s8(const int8_t *, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s8)))\n" "int8x16_t __arm_vldrbq_gather_offset_z(const int8_t *, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u16)))\n" "uint16x8_t __arm_vldrbq_gather_offset_z_u16(const uint8_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u16)))\n" "uint16x8_t __arm_vldrbq_gather_offset_z(const uint8_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u32)))\n" "uint32x4_t 
__arm_vldrbq_gather_offset_z_u32(const uint8_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u32)))\n" "uint32x4_t __arm_vldrbq_gather_offset_z(const uint8_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u8)))\n" "uint8x16_t __arm_vldrbq_gather_offset_z_u8(const uint8_t *, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u8)))\n" "uint8x16_t __arm_vldrbq_gather_offset_z(const uint8_t *, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_s16)))\n" "int16x8_t __arm_vldrbq_s16(const int8_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_s32)))\n" "int32x4_t __arm_vldrbq_s32(const int8_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_s8)))\n" "int8x16_t __arm_vldrbq_s8(const int8_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_u16)))\n" "uint16x8_t __arm_vldrbq_u16(const uint8_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_u32)))\n" "uint32x4_t __arm_vldrbq_u32(const uint8_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_u8)))\n" "uint8x16_t __arm_vldrbq_u8(const uint8_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_s16)))\n" "int16x8_t __arm_vldrbq_z_s16(const int8_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_s32)))\n" "int32x4_t __arm_vldrbq_z_s32(const int8_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_s8)))\n" 
"int8x16_t __arm_vldrbq_z_s8(const int8_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_u16)))\n" "uint16x8_t __arm_vldrbq_z_u16(const uint8_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_u32)))\n" "uint32x4_t __arm_vldrbq_z_u32(const uint8_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_u8)))\n" "uint8x16_t __arm_vldrbq_z_u8(const uint8_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_s64)))\n" "int64x2_t __arm_vldrdq_gather_base_s64(uint64x2_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_u64)))\n" "uint64x2_t __arm_vldrdq_gather_base_u64(uint64x2_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_wb_s64)))\n" "int64x2_t __arm_vldrdq_gather_base_wb_s64(uint64x2_t *, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_wb_u64)))\n" "uint64x2_t __arm_vldrdq_gather_base_wb_u64(uint64x2_t *, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_wb_z_s64)))\n" "int64x2_t __arm_vldrdq_gather_base_wb_z_s64(uint64x2_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_wb_z_u64)))\n" "uint64x2_t __arm_vldrdq_gather_base_wb_z_u64(uint64x2_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_z_s64)))\n" "int64x2_t __arm_vldrdq_gather_base_z_s64(uint64x2_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_z_u64)))\n" "uint64x2_t __arm_vldrdq_gather_base_z_u64(uint64x2_t, int, 
mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_s64)))\n" "int64x2_t __arm_vldrdq_gather_offset_s64(const int64_t *, uint64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_s64)))\n" "int64x2_t __arm_vldrdq_gather_offset(const int64_t *, uint64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_u64)))\n" "uint64x2_t __arm_vldrdq_gather_offset_u64(const uint64_t *, uint64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_u64)))\n" "uint64x2_t __arm_vldrdq_gather_offset(const uint64_t *, uint64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_z_s64)))\n" "int64x2_t __arm_vldrdq_gather_offset_z_s64(const int64_t *, uint64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_z_s64)))\n" "int64x2_t __arm_vldrdq_gather_offset_z(const int64_t *, uint64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_z_u64)))\n" "uint64x2_t __arm_vldrdq_gather_offset_z_u64(const uint64_t *, uint64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_z_u64)))\n" "uint64x2_t __arm_vldrdq_gather_offset_z(const uint64_t *, uint64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_s64)))\n" "int64x2_t __arm_vldrdq_gather_shifted_offset_s64(const int64_t *, uint64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_s64)))\n" "int64x2_t 
__arm_vldrdq_gather_shifted_offset(const int64_t *, uint64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_u64)))\n" "uint64x2_t __arm_vldrdq_gather_shifted_offset_u64(const uint64_t *, uint64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_u64)))\n" "uint64x2_t __arm_vldrdq_gather_shifted_offset(const uint64_t *, uint64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_s64)))\n" "int64x2_t __arm_vldrdq_gather_shifted_offset_z_s64(const int64_t *, uint64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_s64)))\n" "int64x2_t __arm_vldrdq_gather_shifted_offset_z(const int64_t *, uint64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_u64)))\n" "uint64x2_t __arm_vldrdq_gather_shifted_offset_z_u64(const uint64_t *, uint64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_u64)))\n" "uint64x2_t __arm_vldrdq_gather_shifted_offset_z(const uint64_t *, uint64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_s16)))\n" "int16x8_t __arm_vldrhq_gather_offset_s16(const int16_t *, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_s16)))\n" "int16x8_t __arm_vldrhq_gather_offset(const int16_t *, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_s32)))\n" "int32x4_t __arm_vldrhq_gather_offset_s32(const int16_t *, uint32x4_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_s32)))\n" "int32x4_t __arm_vldrhq_gather_offset(const int16_t *, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_u16)))\n" "uint16x8_t __arm_vldrhq_gather_offset_u16(const uint16_t *, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_u16)))\n" "uint16x8_t __arm_vldrhq_gather_offset(const uint16_t *, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_u32)))\n" "uint32x4_t __arm_vldrhq_gather_offset_u32(const uint16_t *, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_u32)))\n" "uint32x4_t __arm_vldrhq_gather_offset(const uint16_t *, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s16)))\n" "int16x8_t __arm_vldrhq_gather_offset_z_s16(const int16_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s16)))\n" "int16x8_t __arm_vldrhq_gather_offset_z(const int16_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s32)))\n" "int32x4_t __arm_vldrhq_gather_offset_z_s32(const int16_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s32)))\n" "int32x4_t __arm_vldrhq_gather_offset_z(const int16_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u16)))\n" "uint16x8_t __arm_vldrhq_gather_offset_z_u16(const uint16_t *, uint16x8_t, mve_pred16_t);\n" "static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u16)))\n" "uint16x8_t __arm_vldrhq_gather_offset_z(const uint16_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u32)))\n" "uint32x4_t __arm_vldrhq_gather_offset_z_u32(const uint16_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u32)))\n" "uint32x4_t __arm_vldrhq_gather_offset_z(const uint16_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s16)))\n" "int16x8_t __arm_vldrhq_gather_shifted_offset_s16(const int16_t *, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s16)))\n" "int16x8_t __arm_vldrhq_gather_shifted_offset(const int16_t *, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s32)))\n" "int32x4_t __arm_vldrhq_gather_shifted_offset_s32(const int16_t *, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s32)))\n" "int32x4_t __arm_vldrhq_gather_shifted_offset(const int16_t *, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u16)))\n" "uint16x8_t __arm_vldrhq_gather_shifted_offset_u16(const uint16_t *, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u16)))\n" "uint16x8_t __arm_vldrhq_gather_shifted_offset(const uint16_t *, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u32)))\n" 
"uint32x4_t __arm_vldrhq_gather_shifted_offset_u32(const uint16_t *, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u32)))\n" "uint32x4_t __arm_vldrhq_gather_shifted_offset(const uint16_t *, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s16)))\n" "int16x8_t __arm_vldrhq_gather_shifted_offset_z_s16(const int16_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s16)))\n" "int16x8_t __arm_vldrhq_gather_shifted_offset_z(const int16_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s32)))\n" "int32x4_t __arm_vldrhq_gather_shifted_offset_z_s32(const int16_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s32)))\n" "int32x4_t __arm_vldrhq_gather_shifted_offset_z(const int16_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u16)))\n" "uint16x8_t __arm_vldrhq_gather_shifted_offset_z_u16(const uint16_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u16)))\n" "uint16x8_t __arm_vldrhq_gather_shifted_offset_z(const uint16_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u32)))\n" "uint32x4_t __arm_vldrhq_gather_shifted_offset_z_u32(const uint16_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u32)))\n" "uint32x4_t __arm_vldrhq_gather_shifted_offset_z(const uint16_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_s16)))\n" "int16x8_t __arm_vldrhq_s16(const int16_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_s32)))\n" "int32x4_t __arm_vldrhq_s32(const int16_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_u16)))\n" "uint16x8_t __arm_vldrhq_u16(const uint16_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_u32)))\n" "uint32x4_t __arm_vldrhq_u32(const uint16_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_s16)))\n" "int16x8_t __arm_vldrhq_z_s16(const int16_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_s32)))\n" "int32x4_t __arm_vldrhq_z_s32(const int16_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_u16)))\n" "uint16x8_t __arm_vldrhq_z_u16(const uint16_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_u32)))\n" "uint32x4_t __arm_vldrhq_z_u32(const uint16_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_s32)))\n" "int32x4_t __arm_vldrwq_gather_base_s32(uint32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_u32)))\n" "uint32x4_t __arm_vldrwq_gather_base_u32(uint32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_s32)))\n" "int32x4_t __arm_vldrwq_gather_base_wb_s32(uint32x4_t *, int);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_u32)))\n" "uint32x4_t __arm_vldrwq_gather_base_wb_u32(uint32x4_t *, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_s32)))\n" "int32x4_t __arm_vldrwq_gather_base_wb_z_s32(uint32x4_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_u32)))\n" "uint32x4_t __arm_vldrwq_gather_base_wb_z_u32(uint32x4_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_z_s32)))\n" "int32x4_t __arm_vldrwq_gather_base_z_s32(uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_z_u32)))\n" "uint32x4_t __arm_vldrwq_gather_base_z_u32(uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_s32)))\n" "int32x4_t __arm_vldrwq_gather_offset_s32(const int32_t *, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_s32)))\n" "int32x4_t __arm_vldrwq_gather_offset(const int32_t *, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_u32)))\n" "uint32x4_t __arm_vldrwq_gather_offset_u32(const uint32_t *, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_u32)))\n" "uint32x4_t __arm_vldrwq_gather_offset(const uint32_t *, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_s32)))\n" "int32x4_t __arm_vldrwq_gather_offset_z_s32(const int32_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_s32)))\n" "int32x4_t __arm_vldrwq_gather_offset_z(const int32_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_u32)))\n" "uint32x4_t __arm_vldrwq_gather_offset_z_u32(const uint32_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_u32)))\n" "uint32x4_t __arm_vldrwq_gather_offset_z(const uint32_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_s32)))\n" "int32x4_t __arm_vldrwq_gather_shifted_offset_s32(const int32_t *, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_s32)))\n" "int32x4_t __arm_vldrwq_gather_shifted_offset(const int32_t *, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_u32)))\n" "uint32x4_t __arm_vldrwq_gather_shifted_offset_u32(const uint32_t *, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_u32)))\n" "uint32x4_t __arm_vldrwq_gather_shifted_offset(const uint32_t *, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_s32)))\n" "int32x4_t __arm_vldrwq_gather_shifted_offset_z_s32(const int32_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_s32)))\n" "int32x4_t __arm_vldrwq_gather_shifted_offset_z(const int32_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_u32)))\n" "uint32x4_t 
__arm_vldrwq_gather_shifted_offset_z_u32(const uint32_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_u32)))\n" "uint32x4_t __arm_vldrwq_gather_shifted_offset_z(const uint32_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_s32)))\n" "int32x4_t __arm_vldrwq_s32(const int32_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_u32)))\n" "uint32x4_t __arm_vldrwq_u32(const uint32_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_z_s32)))\n" "int32x4_t __arm_vldrwq_z_s32(const int32_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_z_u32)))\n" "uint32x4_t __arm_vldrwq_z_u32(const uint32_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s16)))\n" "uint16x8_t __arm_vmaxaq_m_s16(uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s16)))\n" "uint16x8_t __arm_vmaxaq_m(uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s32)))\n" "uint32x4_t __arm_vmaxaq_m_s32(uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s32)))\n" "uint32x4_t __arm_vmaxaq_m(uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s8)))\n" "uint8x16_t __arm_vmaxaq_m_s8(uint8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s8)))\n" "uint8x16_t __arm_vmaxaq_m(uint8x16_t, int8x16_t, mve_pred16_t);\n" "static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s16)))\n" "uint16x8_t __arm_vmaxaq_s16(uint16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s16)))\n" "uint16x8_t __arm_vmaxaq(uint16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s32)))\n" "uint32x4_t __arm_vmaxaq_s32(uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s32)))\n" "uint32x4_t __arm_vmaxaq(uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s8)))\n" "uint8x16_t __arm_vmaxaq_s8(uint8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s8)))\n" "uint8x16_t __arm_vmaxaq(uint8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s16)))\n" "uint16_t __arm_vmaxavq_p_s16(uint16_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s16)))\n" "uint16_t __arm_vmaxavq_p(uint16_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s32)))\n" "uint32_t __arm_vmaxavq_p_s32(uint32_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s32)))\n" "uint32_t __arm_vmaxavq_p(uint32_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s8)))\n" "uint8_t __arm_vmaxavq_p_s8(uint8_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s8)))\n" "uint8_t __arm_vmaxavq_p(uint8_t, int8x16_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s16)))\n" "uint16_t __arm_vmaxavq_s16(uint16_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s16)))\n" "uint16_t __arm_vmaxavq(uint16_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s32)))\n" "uint32_t __arm_vmaxavq_s32(uint32_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s32)))\n" "uint32_t __arm_vmaxavq(uint32_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s8)))\n" "uint8_t __arm_vmaxavq_s8(uint8_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s8)))\n" "uint8_t __arm_vmaxavq(uint8_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s16)))\n" "int16x8_t __arm_vmaxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s16)))\n" "int16x8_t __arm_vmaxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s32)))\n" "int32x4_t __arm_vmaxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s32)))\n" "int32x4_t __arm_vmaxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s8)))\n" "int8x16_t __arm_vmaxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s8)))\n" "int8x16_t __arm_vmaxq_m(int8x16_t, int8x16_t, int8x16_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u16)))\n" "uint16x8_t __arm_vmaxq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u16)))\n" "uint16x8_t __arm_vmaxq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u32)))\n" "uint32x4_t __arm_vmaxq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u32)))\n" "uint32x4_t __arm_vmaxq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u8)))\n" "uint8x16_t __arm_vmaxq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u8)))\n" "uint8x16_t __arm_vmaxq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s16)))\n" "int16x8_t __arm_vmaxq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s16)))\n" "int16x8_t __arm_vmaxq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s32)))\n" "int32x4_t __arm_vmaxq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s32)))\n" "int32x4_t __arm_vmaxq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s8)))\n" "int8x16_t __arm_vmaxq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s8)))\n" "int8x16_t __arm_vmaxq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u16)))\n" "uint16x8_t __arm_vmaxq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u16)))\n" "uint16x8_t __arm_vmaxq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u32)))\n" "uint32x4_t __arm_vmaxq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u32)))\n" "uint32x4_t __arm_vmaxq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u8)))\n" "uint8x16_t __arm_vmaxq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u8)))\n" "uint8x16_t __arm_vmaxq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s16)))\n" "int16x8_t __arm_vmaxq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s16)))\n" "int16x8_t __arm_vmaxq_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s32)))\n" "int32x4_t __arm_vmaxq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s32)))\n" "int32x4_t __arm_vmaxq_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s8)))\n" "int8x16_t __arm_vmaxq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s8)))\n" "int8x16_t __arm_vmaxq_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u16)))\n" "uint16x8_t __arm_vmaxq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u16)))\n" "uint16x8_t __arm_vmaxq_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u32)))\n" "uint32x4_t __arm_vmaxq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u32)))\n" "uint32x4_t __arm_vmaxq_x(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u8)))\n" "uint8x16_t __arm_vmaxq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u8)))\n" "uint8x16_t __arm_vmaxq_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s16)))\n" "int16_t __arm_vmaxvq_p_s16(int16_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s16)))\n" "int16_t __arm_vmaxvq_p(int16_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s32)))\n" "int32_t __arm_vmaxvq_p_s32(int32_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s32)))\n" "int32_t __arm_vmaxvq_p(int32_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s8)))\n" "int8_t __arm_vmaxvq_p_s8(int8_t, int8x16_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s8)))\n" "int8_t __arm_vmaxvq_p(int8_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u16)))\n" "uint16_t __arm_vmaxvq_p_u16(uint16_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u16)))\n" "uint16_t __arm_vmaxvq_p(uint16_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u32)))\n" "uint32_t __arm_vmaxvq_p_u32(uint32_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u32)))\n" "uint32_t __arm_vmaxvq_p(uint32_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u8)))\n" "uint8_t __arm_vmaxvq_p_u8(uint8_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u8)))\n" "uint8_t __arm_vmaxvq_p(uint8_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s16)))\n" "int16_t __arm_vmaxvq_s16(int16_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s16)))\n" "int16_t __arm_vmaxvq(int16_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s32)))\n" "int32_t __arm_vmaxvq_s32(int32_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s32)))\n" "int32_t __arm_vmaxvq(int32_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s8)))\n" "int8_t __arm_vmaxvq_s8(int8_t, int8x16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s8)))\n" "int8_t __arm_vmaxvq(int8_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u16)))\n" "uint16_t __arm_vmaxvq_u16(uint16_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u16)))\n" "uint16_t __arm_vmaxvq(uint16_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u32)))\n" "uint32_t __arm_vmaxvq_u32(uint32_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u32)))\n" "uint32_t __arm_vmaxvq(uint32_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u8)))\n" "uint8_t __arm_vmaxvq_u8(uint8_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u8)))\n" "uint8_t __arm_vmaxvq(uint8_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s16)))\n" "uint16x8_t __arm_vminaq_m_s16(uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s16)))\n" "uint16x8_t __arm_vminaq_m(uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s32)))\n" "uint32x4_t __arm_vminaq_m_s32(uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s32)))\n" "uint32x4_t __arm_vminaq_m(uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s8)))\n" "uint8x16_t __arm_vminaq_m_s8(uint8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s8)))\n" "uint8x16_t __arm_vminaq_m(uint8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s16)))\n" "uint16x8_t __arm_vminaq_s16(uint16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s16)))\n" "uint16x8_t __arm_vminaq(uint16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s32)))\n" "uint32x4_t __arm_vminaq_s32(uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s32)))\n" "uint32x4_t __arm_vminaq(uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s8)))\n" "uint8x16_t __arm_vminaq_s8(uint8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s8)))\n" "uint8x16_t __arm_vminaq(uint8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s16)))\n" "uint16_t __arm_vminavq_p_s16(uint16_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s16)))\n" "uint16_t __arm_vminavq_p(uint16_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s32)))\n" "uint32_t __arm_vminavq_p_s32(uint32_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s32)))\n" "uint32_t __arm_vminavq_p(uint32_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s8)))\n" "uint8_t __arm_vminavq_p_s8(uint8_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s8)))\n" "uint8_t __arm_vminavq_p(uint8_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s16)))\n" "uint16_t __arm_vminavq_s16(uint16_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s16)))\n" "uint16_t __arm_vminavq(uint16_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s32)))\n" "uint32_t __arm_vminavq_s32(uint32_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s32)))\n" "uint32_t __arm_vminavq(uint32_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s8)))\n" "uint8_t __arm_vminavq_s8(uint8_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s8)))\n" "uint8_t __arm_vminavq(uint8_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s16)))\n" "int16x8_t __arm_vminq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s16)))\n" "int16x8_t __arm_vminq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s32)))\n" "int32x4_t __arm_vminq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s32)))\n" "int32x4_t __arm_vminq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s8)))\n" "int8x16_t __arm_vminq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s8)))\n" "int8x16_t __arm_vminq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u16)))\n" "uint16x8_t __arm_vminq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u16)))\n" "uint16x8_t __arm_vminq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u32)))\n" "uint32x4_t __arm_vminq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u32)))\n" "uint32x4_t __arm_vminq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u8)))\n" "uint8x16_t __arm_vminq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u8)))\n" "uint8x16_t __arm_vminq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_s16)))\n" "int16x8_t __arm_vminq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_s16)))\n" "int16x8_t __arm_vminq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_s32)))\n" "int32x4_t __arm_vminq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_s32)))\n" "int32x4_t __arm_vminq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_s8)))\n" 
"int8x16_t __arm_vminq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_s8)))\n" "int8x16_t __arm_vminq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_u16)))\n" "uint16x8_t __arm_vminq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_u16)))\n" "uint16x8_t __arm_vminq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_u32)))\n" "uint32x4_t __arm_vminq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_u32)))\n" "uint32x4_t __arm_vminq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_u8)))\n" "uint8x16_t __arm_vminq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_u8)))\n" "uint8x16_t __arm_vminq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s16)))\n" "int16x8_t __arm_vminq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s16)))\n" "int16x8_t __arm_vminq_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s32)))\n" "int32x4_t __arm_vminq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s32)))\n" "int32x4_t __arm_vminq_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s8)))\n" "int8x16_t __arm_vminq_x_s8(int8x16_t, int8x16_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s8)))\n" "int8x16_t __arm_vminq_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u16)))\n" "uint16x8_t __arm_vminq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u16)))\n" "uint16x8_t __arm_vminq_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u32)))\n" "uint32x4_t __arm_vminq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u32)))\n" "uint32x4_t __arm_vminq_x(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u8)))\n" "uint8x16_t __arm_vminq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u8)))\n" "uint8x16_t __arm_vminq_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s16)))\n" "int16_t __arm_vminvq_p_s16(int16_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s16)))\n" "int16_t __arm_vminvq_p(int16_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s32)))\n" "int32_t __arm_vminvq_p_s32(int32_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s32)))\n" "int32_t __arm_vminvq_p(int32_t, int32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s8)))\n" "int8_t __arm_vminvq_p_s8(int8_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s8)))\n" "int8_t __arm_vminvq_p(int8_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u16)))\n" "uint16_t __arm_vminvq_p_u16(uint16_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u16)))\n" "uint16_t __arm_vminvq_p(uint16_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u32)))\n" "uint32_t __arm_vminvq_p_u32(uint32_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u32)))\n" "uint32_t __arm_vminvq_p(uint32_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u8)))\n" "uint8_t __arm_vminvq_p_u8(uint8_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u8)))\n" "uint8_t __arm_vminvq_p(uint8_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s16)))\n" "int16_t __arm_vminvq_s16(int16_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s16)))\n" "int16_t __arm_vminvq(int16_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s32)))\n" "int32_t __arm_vminvq_s32(int32_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s32)))\n" "int32_t __arm_vminvq(int32_t, int32x4_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s8)))\n" "int8_t __arm_vminvq_s8(int8_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s8)))\n" "int8_t __arm_vminvq(int8_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u16)))\n" "uint16_t __arm_vminvq_u16(uint16_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u16)))\n" "uint16_t __arm_vminvq(uint16_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u32)))\n" "uint32_t __arm_vminvq_u32(uint32_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u32)))\n" "uint32_t __arm_vminvq(uint32_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u8)))\n" "uint8_t __arm_vminvq_u8(uint8_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u8)))\n" "uint8_t __arm_vminvq(uint8_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s16)))\n" "int32_t __arm_vmladavaq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s16)))\n" "int32_t __arm_vmladavaq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s32)))\n" "int32_t __arm_vmladavaq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s32)))\n" "int32_t __arm_vmladavaq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s8)))\n" "int32_t __arm_vmladavaq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s8)))\n" "int32_t __arm_vmladavaq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u16)))\n" "uint32_t __arm_vmladavaq_p_u16(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u16)))\n" "uint32_t __arm_vmladavaq_p(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u32)))\n" "uint32_t __arm_vmladavaq_p_u32(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u32)))\n" "uint32_t __arm_vmladavaq_p(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u8)))\n" "uint32_t __arm_vmladavaq_p_u8(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u8)))\n" "uint32_t __arm_vmladavaq_p(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s16)))\n" "int32_t __arm_vmladavaq_s16(int32_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s16)))\n" "int32_t __arm_vmladavaq(int32_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s32)))\n" "int32_t __arm_vmladavaq_s32(int32_t, int32x4_t, int32x4_t);\n" 
"static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s32)))\n" "int32_t __arm_vmladavaq(int32_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s8)))\n" "int32_t __arm_vmladavaq_s8(int32_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s8)))\n" "int32_t __arm_vmladavaq(int32_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u16)))\n" "uint32_t __arm_vmladavaq_u16(uint32_t, uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u16)))\n" "uint32_t __arm_vmladavaq(uint32_t, uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u32)))\n" "uint32_t __arm_vmladavaq_u32(uint32_t, uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u32)))\n" "uint32_t __arm_vmladavaq(uint32_t, uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u8)))\n" "uint32_t __arm_vmladavaq_u8(uint32_t, uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u8)))\n" "uint32_t __arm_vmladavaq(uint32_t, uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s16)))\n" "int32_t __arm_vmladavaxq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s16)))\n" "int32_t __arm_vmladavaxq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s32)))\n" "int32_t __arm_vmladavaxq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s32)))\n" "int32_t __arm_vmladavaxq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s8)))\n" "int32_t __arm_vmladavaxq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s8)))\n" "int32_t __arm_vmladavaxq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s16)))\n" "int32_t __arm_vmladavaxq_s16(int32_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s16)))\n" "int32_t __arm_vmladavaxq(int32_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s32)))\n" "int32_t __arm_vmladavaxq_s32(int32_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s32)))\n" "int32_t __arm_vmladavaxq(int32_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s8)))\n" "int32_t __arm_vmladavaxq_s8(int32_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s8)))\n" "int32_t __arm_vmladavaxq(int32_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s16)))\n" "int32_t __arm_vmladavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s16)))\n" "int32_t __arm_vmladavq_p(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s32)))\n" "int32_t __arm_vmladavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s32)))\n" "int32_t __arm_vmladavq_p(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s8)))\n" "int32_t __arm_vmladavq_p_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s8)))\n" "int32_t __arm_vmladavq_p(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u16)))\n" "uint32_t __arm_vmladavq_p_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u16)))\n" "uint32_t __arm_vmladavq_p(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u32)))\n" "uint32_t __arm_vmladavq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u32)))\n" "uint32_t __arm_vmladavq_p(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u8)))\n" "uint32_t __arm_vmladavq_p_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u8)))\n" "uint32_t __arm_vmladavq_p(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s16)))\n" 
"int32_t __arm_vmladavq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s16)))\n" "int32_t __arm_vmladavq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s32)))\n" "int32_t __arm_vmladavq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s32)))\n" "int32_t __arm_vmladavq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s8)))\n" "int32_t __arm_vmladavq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s8)))\n" "int32_t __arm_vmladavq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u16)))\n" "uint32_t __arm_vmladavq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u16)))\n" "uint32_t __arm_vmladavq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u32)))\n" "uint32_t __arm_vmladavq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u32)))\n" "uint32_t __arm_vmladavq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u8)))\n" "uint32_t __arm_vmladavq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u8)))\n" "uint32_t __arm_vmladavq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s16)))\n" "int32_t __arm_vmladavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s16)))\n" "int32_t __arm_vmladavxq_p(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s32)))\n" "int32_t __arm_vmladavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s32)))\n" "int32_t __arm_vmladavxq_p(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s8)))\n" "int32_t __arm_vmladavxq_p_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s8)))\n" "int32_t __arm_vmladavxq_p(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s16)))\n" "int32_t __arm_vmladavxq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s16)))\n" "int32_t __arm_vmladavxq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s32)))\n" "int32_t __arm_vmladavxq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s32)))\n" "int32_t __arm_vmladavxq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s8)))\n" "int32_t __arm_vmladavxq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s8)))\n" "int32_t __arm_vmladavxq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_s16)))\n" "int64_t __arm_vmlaldavaq_p_s16(int64_t, int16x8_t, 
int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_s16)))\n" "int64_t __arm_vmlaldavaq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_s32)))\n" "int64_t __arm_vmlaldavaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_s32)))\n" "int64_t __arm_vmlaldavaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_u16)))\n" "uint64_t __arm_vmlaldavaq_p_u16(uint64_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_u16)))\n" "uint64_t __arm_vmlaldavaq_p(uint64_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_u32)))\n" "uint64_t __arm_vmlaldavaq_p_u32(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_u32)))\n" "uint64_t __arm_vmlaldavaq_p(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_s16)))\n" "int64_t __arm_vmlaldavaq_s16(int64_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_s16)))\n" "int64_t __arm_vmlaldavaq(int64_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_s32)))\n" "int64_t __arm_vmlaldavaq_s32(int64_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_s32)))\n" 
"int64_t __arm_vmlaldavaq(int64_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_u16)))\n" "uint64_t __arm_vmlaldavaq_u16(uint64_t, uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_u16)))\n" "uint64_t __arm_vmlaldavaq(uint64_t, uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_u32)))\n" "uint64_t __arm_vmlaldavaq_u32(uint64_t, uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_u32)))\n" "uint64_t __arm_vmlaldavaq(uint64_t, uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_p_s16)))\n" "int64_t __arm_vmlaldavaxq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_p_s16)))\n" "int64_t __arm_vmlaldavaxq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_p_s32)))\n" "int64_t __arm_vmlaldavaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_p_s32)))\n" "int64_t __arm_vmlaldavaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_s16)))\n" "int64_t __arm_vmlaldavaxq_s16(int64_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_s16)))\n" "int64_t __arm_vmlaldavaxq(int64_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_s32)))\n" "int64_t 
__arm_vmlaldavaxq_s32(int64_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_s32)))\n" "int64_t __arm_vmlaldavaxq(int64_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_s16)))\n" "int64_t __arm_vmlaldavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_s16)))\n" "int64_t __arm_vmlaldavq_p(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_s32)))\n" "int64_t __arm_vmlaldavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_s32)))\n" "int64_t __arm_vmlaldavq_p(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_u16)))\n" "uint64_t __arm_vmlaldavq_p_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_u16)))\n" "uint64_t __arm_vmlaldavq_p(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_u32)))\n" "uint64_t __arm_vmlaldavq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_u32)))\n" "uint64_t __arm_vmlaldavq_p(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_s16)))\n" "int64_t __arm_vmlaldavq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_s16)))\n" "int64_t __arm_vmlaldavq(int16x8_t, int16x8_t);\n" 
"static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_s32)))\n" "int64_t __arm_vmlaldavq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_s32)))\n" "int64_t __arm_vmlaldavq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_u16)))\n" "uint64_t __arm_vmlaldavq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_u16)))\n" "uint64_t __arm_vmlaldavq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_u32)))\n" "uint64_t __arm_vmlaldavq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_u32)))\n" "uint64_t __arm_vmlaldavq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_p_s16)))\n" "int64_t __arm_vmlaldavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_p_s16)))\n" "int64_t __arm_vmlaldavxq_p(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_p_s32)))\n" "int64_t __arm_vmlaldavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_p_s32)))\n" "int64_t __arm_vmlaldavxq_p(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_s16)))\n" "int64_t __arm_vmlaldavxq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_s16)))\n" "int64_t 
__arm_vmlaldavxq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_s32)))\n" "int64_t __arm_vmlaldavxq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_s32)))\n" "int64_t __arm_vmlaldavxq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s16)))\n" "int16x8_t __arm_vmlaq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s16)))\n" "int16x8_t __arm_vmlaq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s32)))\n" "int32x4_t __arm_vmlaq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s32)))\n" "int32x4_t __arm_vmlaq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s8)))\n" "int8x16_t __arm_vmlaq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s8)))\n" "int8x16_t __arm_vmlaq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u16)))\n" "uint16x8_t __arm_vmlaq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u16)))\n" "uint16x8_t __arm_vmlaq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u32)))\n" "uint32x4_t __arm_vmlaq_m_n_u32(uint32x4_t, uint32x4_t, 
uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u32)))\n" "uint32x4_t __arm_vmlaq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u8)))\n" "uint8x16_t __arm_vmlaq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u8)))\n" "uint8x16_t __arm_vmlaq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s16)))\n" "int16x8_t __arm_vmlaq_n_s16(int16x8_t, int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s16)))\n" "int16x8_t __arm_vmlaq(int16x8_t, int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s32)))\n" "int32x4_t __arm_vmlaq_n_s32(int32x4_t, int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s32)))\n" "int32x4_t __arm_vmlaq(int32x4_t, int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s8)))\n" "int8x16_t __arm_vmlaq_n_s8(int8x16_t, int8x16_t, int8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s8)))\n" "int8x16_t __arm_vmlaq(int8x16_t, int8x16_t, int8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u16)))\n" "uint16x8_t __arm_vmlaq_n_u16(uint16x8_t, uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u16)))\n" "uint16x8_t __arm_vmlaq(uint16x8_t, uint16x8_t, uint16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u32)))\n" "uint32x4_t __arm_vmlaq_n_u32(uint32x4_t, uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u32)))\n" "uint32x4_t __arm_vmlaq(uint32x4_t, uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u8)))\n" "uint8x16_t __arm_vmlaq_n_u8(uint8x16_t, uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u8)))\n" "uint8x16_t __arm_vmlaq(uint8x16_t, uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s16)))\n" "int16x8_t __arm_vmlasq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s16)))\n" "int16x8_t __arm_vmlasq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s32)))\n" "int32x4_t __arm_vmlasq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s32)))\n" "int32x4_t __arm_vmlasq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s8)))\n" "int8x16_t __arm_vmlasq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s8)))\n" "int8x16_t __arm_vmlasq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u16)))\n" "uint16x8_t __arm_vmlasq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u16)))\n" "uint16x8_t __arm_vmlasq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u32)))\n" "uint32x4_t __arm_vmlasq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u32)))\n" "uint32x4_t __arm_vmlasq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u8)))\n" "uint8x16_t __arm_vmlasq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u8)))\n" "uint8x16_t __arm_vmlasq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s16)))\n" "int16x8_t __arm_vmlasq_n_s16(int16x8_t, int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s16)))\n" "int16x8_t __arm_vmlasq(int16x8_t, int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s32)))\n" "int32x4_t __arm_vmlasq_n_s32(int32x4_t, int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s32)))\n" "int32x4_t __arm_vmlasq(int32x4_t, int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s8)))\n" "int8x16_t __arm_vmlasq_n_s8(int8x16_t, int8x16_t, int8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s8)))\n" "int8x16_t __arm_vmlasq(int8x16_t, int8x16_t, int8_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u16)))\n" "uint16x8_t __arm_vmlasq_n_u16(uint16x8_t, uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u16)))\n" "uint16x8_t __arm_vmlasq(uint16x8_t, uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u32)))\n" "uint32x4_t __arm_vmlasq_n_u32(uint32x4_t, uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u32)))\n" "uint32x4_t __arm_vmlasq(uint32x4_t, uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u8)))\n" "uint8x16_t __arm_vmlasq_n_u8(uint8x16_t, uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u8)))\n" "uint8x16_t __arm_vmlasq(uint8x16_t, uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s16)))\n" "int32_t __arm_vmlsdavaq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s16)))\n" "int32_t __arm_vmlsdavaq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s32)))\n" "int32_t __arm_vmlsdavaq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s32)))\n" "int32_t __arm_vmlsdavaq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s8)))\n" "int32_t __arm_vmlsdavaq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s8)))\n" "int32_t __arm_vmlsdavaq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s16)))\n" "int32_t __arm_vmlsdavaq_s16(int32_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s16)))\n" "int32_t __arm_vmlsdavaq(int32_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s32)))\n" "int32_t __arm_vmlsdavaq_s32(int32_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s32)))\n" "int32_t __arm_vmlsdavaq(int32_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s8)))\n" "int32_t __arm_vmlsdavaq_s8(int32_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s8)))\n" "int32_t __arm_vmlsdavaq(int32_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s16)))\n" "int32_t __arm_vmlsdavaxq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s16)))\n" "int32_t __arm_vmlsdavaxq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s32)))\n" "int32_t __arm_vmlsdavaxq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s32)))\n" "int32_t __arm_vmlsdavaxq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s8)))\n" "int32_t __arm_vmlsdavaxq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s8)))\n" "int32_t __arm_vmlsdavaxq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s16)))\n" "int32_t __arm_vmlsdavaxq_s16(int32_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s16)))\n" "int32_t __arm_vmlsdavaxq(int32_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s32)))\n" "int32_t __arm_vmlsdavaxq_s32(int32_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s32)))\n" "int32_t __arm_vmlsdavaxq(int32_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s8)))\n" "int32_t __arm_vmlsdavaxq_s8(int32_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s8)))\n" "int32_t __arm_vmlsdavaxq(int32_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s16)))\n" "int32_t __arm_vmlsdavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s16)))\n" "int32_t __arm_vmlsdavq_p(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s32)))\n" "int32_t __arm_vmlsdavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s32)))\n" "int32_t __arm_vmlsdavq_p(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s8)))\n" "int32_t __arm_vmlsdavq_p_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s8)))\n" "int32_t __arm_vmlsdavq_p(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s16)))\n" "int32_t __arm_vmlsdavq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s16)))\n" "int32_t __arm_vmlsdavq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s32)))\n" "int32_t __arm_vmlsdavq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s32)))\n" "int32_t __arm_vmlsdavq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s8)))\n" "int32_t __arm_vmlsdavq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s8)))\n" "int32_t __arm_vmlsdavq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s16)))\n" "int32_t __arm_vmlsdavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s16)))\n" "int32_t __arm_vmlsdavxq_p(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s32)))\n" "int32_t __arm_vmlsdavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s32)))\n" "int32_t __arm_vmlsdavxq_p(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s8)))\n" "int32_t __arm_vmlsdavxq_p_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s8)))\n" "int32_t __arm_vmlsdavxq_p(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s16)))\n" "int32_t __arm_vmlsdavxq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s16)))\n" "int32_t __arm_vmlsdavxq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s32)))\n" "int32_t __arm_vmlsdavxq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s32)))\n" "int32_t __arm_vmlsdavxq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s8)))\n" "int32_t __arm_vmlsdavxq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s8)))\n" "int32_t __arm_vmlsdavxq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_p_s16)))\n" "int64_t __arm_vmlsldavaq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_p_s16)))\n" "int64_t __arm_vmlsldavaq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_p_s32)))\n" "int64_t __arm_vmlsldavaq_p_s32(int64_t, 
int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_p_s32)))\n" "int64_t __arm_vmlsldavaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_s16)))\n" "int64_t __arm_vmlsldavaq_s16(int64_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_s16)))\n" "int64_t __arm_vmlsldavaq(int64_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_s32)))\n" "int64_t __arm_vmlsldavaq_s32(int64_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_s32)))\n" "int64_t __arm_vmlsldavaq(int64_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_p_s16)))\n" "int64_t __arm_vmlsldavaxq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_p_s16)))\n" "int64_t __arm_vmlsldavaxq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_p_s32)))\n" "int64_t __arm_vmlsldavaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_p_s32)))\n" "int64_t __arm_vmlsldavaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_s16)))\n" "int64_t __arm_vmlsldavaxq_s16(int64_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_s16)))\n" "int64_t 
__arm_vmlsldavaxq(int64_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_s32)))\n" "int64_t __arm_vmlsldavaxq_s32(int64_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_s32)))\n" "int64_t __arm_vmlsldavaxq(int64_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_p_s16)))\n" "int64_t __arm_vmlsldavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_p_s16)))\n" "int64_t __arm_vmlsldavq_p(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_p_s32)))\n" "int64_t __arm_vmlsldavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_p_s32)))\n" "int64_t __arm_vmlsldavq_p(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_s16)))\n" "int64_t __arm_vmlsldavq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_s16)))\n" "int64_t __arm_vmlsldavq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_s32)))\n" "int64_t __arm_vmlsldavq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_s32)))\n" "int64_t __arm_vmlsldavq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_p_s16)))\n" "int64_t __arm_vmlsldavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_p_s16)))\n" "int64_t __arm_vmlsldavxq_p(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_p_s32)))\n" "int64_t __arm_vmlsldavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_p_s32)))\n" "int64_t __arm_vmlsldavxq_p(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_s16)))\n" "int64_t __arm_vmlsldavxq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_s16)))\n" "int64_t __arm_vmlsldavxq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_s32)))\n" "int64_t __arm_vmlsldavxq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_s32)))\n" "int64_t __arm_vmlsldavxq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_s16)))\n" "int32x4_t __arm_vmovlbq_m_s16(int32x4_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_s16)))\n" "int32x4_t __arm_vmovlbq_m(int32x4_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_s8)))\n" "int16x8_t __arm_vmovlbq_m_s8(int16x8_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_s8)))\n" "int16x8_t __arm_vmovlbq_m(int16x8_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_u16)))\n" "uint32x4_t __arm_vmovlbq_m_u16(uint32x4_t, uint16x8_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_u16)))\n" "uint32x4_t __arm_vmovlbq_m(uint32x4_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_u8)))\n" "uint16x8_t __arm_vmovlbq_m_u8(uint16x8_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_u8)))\n" "uint16x8_t __arm_vmovlbq_m(uint16x8_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_s16)))\n" "int32x4_t __arm_vmovlbq_s16(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_s16)))\n" "int32x4_t __arm_vmovlbq(int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_s8)))\n" "int16x8_t __arm_vmovlbq_s8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_s8)))\n" "int16x8_t __arm_vmovlbq(int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_u16)))\n" "uint32x4_t __arm_vmovlbq_u16(uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_u16)))\n" "uint32x4_t __arm_vmovlbq(uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_u8)))\n" "uint16x8_t __arm_vmovlbq_u8(uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_u8)))\n" "uint16x8_t __arm_vmovlbq(uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_s16)))\n" "int32x4_t __arm_vmovlbq_x_s16(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_s16)))\n" "int32x4_t __arm_vmovlbq_x(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_s8)))\n" "int16x8_t __arm_vmovlbq_x_s8(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_s8)))\n" "int16x8_t __arm_vmovlbq_x(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_u16)))\n" "uint32x4_t __arm_vmovlbq_x_u16(uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_u16)))\n" "uint32x4_t __arm_vmovlbq_x(uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_u8)))\n" "uint16x8_t __arm_vmovlbq_x_u8(uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_u8)))\n" "uint16x8_t __arm_vmovlbq_x(uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_s16)))\n" "int32x4_t __arm_vmovltq_m_s16(int32x4_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_s16)))\n" "int32x4_t __arm_vmovltq_m(int32x4_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_s8)))\n" "int16x8_t __arm_vmovltq_m_s8(int16x8_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_s8)))\n" "int16x8_t __arm_vmovltq_m(int16x8_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_u16)))\n" "uint32x4_t __arm_vmovltq_m_u16(uint32x4_t, uint16x8_t, mve_pred16_t);\n" "static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_u16)))\n" "uint32x4_t __arm_vmovltq_m(uint32x4_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_u8)))\n" "uint16x8_t __arm_vmovltq_m_u8(uint16x8_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_u8)))\n" "uint16x8_t __arm_vmovltq_m(uint16x8_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_s16)))\n" "int32x4_t __arm_vmovltq_s16(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_s16)))\n" "int32x4_t __arm_vmovltq(int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_s8)))\n" "int16x8_t __arm_vmovltq_s8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_s8)))\n" "int16x8_t __arm_vmovltq(int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_u16)))\n" "uint32x4_t __arm_vmovltq_u16(uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_u16)))\n" "uint32x4_t __arm_vmovltq(uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_u8)))\n" "uint16x8_t __arm_vmovltq_u8(uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_u8)))\n" "uint16x8_t __arm_vmovltq(uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_s16)))\n" "int32x4_t __arm_vmovltq_x_s16(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_s16)))\n" "int32x4_t 
__arm_vmovltq_x(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_s8)))\n" "int16x8_t __arm_vmovltq_x_s8(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_s8)))\n" "int16x8_t __arm_vmovltq_x(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_u16)))\n" "uint32x4_t __arm_vmovltq_x_u16(uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_u16)))\n" "uint32x4_t __arm_vmovltq_x(uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_u8)))\n" "uint16x8_t __arm_vmovltq_x_u8(uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_u8)))\n" "uint16x8_t __arm_vmovltq_x(uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_s16)))\n" "int8x16_t __arm_vmovnbq_m_s16(int8x16_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_s16)))\n" "int8x16_t __arm_vmovnbq_m(int8x16_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_s32)))\n" "int16x8_t __arm_vmovnbq_m_s32(int16x8_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_s32)))\n" "int16x8_t __arm_vmovnbq_m(int16x8_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_u16)))\n" "uint8x16_t __arm_vmovnbq_m_u16(uint8x16_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_u16)))\n" "uint8x16_t __arm_vmovnbq_m(uint8x16_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_u32)))\n" "uint16x8_t __arm_vmovnbq_m_u32(uint16x8_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_u32)))\n" "uint16x8_t __arm_vmovnbq_m(uint16x8_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_s16)))\n" "int8x16_t __arm_vmovnbq_s16(int8x16_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_s16)))\n" "int8x16_t __arm_vmovnbq(int8x16_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_s32)))\n" "int16x8_t __arm_vmovnbq_s32(int16x8_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_s32)))\n" "int16x8_t __arm_vmovnbq(int16x8_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_u16)))\n" "uint8x16_t __arm_vmovnbq_u16(uint8x16_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_u16)))\n" "uint8x16_t __arm_vmovnbq(uint8x16_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_u32)))\n" "uint16x8_t __arm_vmovnbq_u32(uint16x8_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_u32)))\n" "uint16x8_t __arm_vmovnbq(uint16x8_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_s16)))\n" "int8x16_t __arm_vmovntq_m_s16(int8x16_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_s16)))\n" "int8x16_t __arm_vmovntq_m(int8x16_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_s32)))\n" "int16x8_t __arm_vmovntq_m_s32(int16x8_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_s32)))\n" "int16x8_t __arm_vmovntq_m(int16x8_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_u16)))\n" "uint8x16_t __arm_vmovntq_m_u16(uint8x16_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_u16)))\n" "uint8x16_t __arm_vmovntq_m(uint8x16_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_u32)))\n" "uint16x8_t __arm_vmovntq_m_u32(uint16x8_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_u32)))\n" "uint16x8_t __arm_vmovntq_m(uint16x8_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_s16)))\n" "int8x16_t __arm_vmovntq_s16(int8x16_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_s16)))\n" "int8x16_t __arm_vmovntq(int8x16_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_s32)))\n" "int16x8_t __arm_vmovntq_s32(int16x8_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_s32)))\n" "int16x8_t __arm_vmovntq(int16x8_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_u16)))\n" "uint8x16_t __arm_vmovntq_u16(uint8x16_t, uint16x8_t);\n" "static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_u16)))\n" "uint8x16_t __arm_vmovntq(uint8x16_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_u32)))\n" "uint16x8_t __arm_vmovntq_u32(uint16x8_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_u32)))\n" "uint16x8_t __arm_vmovntq(uint16x8_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s16)))\n" "int16x8_t __arm_vmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s16)))\n" "int16x8_t __arm_vmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s32)))\n" "int32x4_t __arm_vmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s32)))\n" "int32x4_t __arm_vmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s8)))\n" "int8x16_t __arm_vmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s8)))\n" "int8x16_t __arm_vmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u16)))\n" "uint16x8_t __arm_vmulhq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u16)))\n" "uint16x8_t __arm_vmulhq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u32)))\n" "uint32x4_t __arm_vmulhq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u32)))\n" "uint32x4_t __arm_vmulhq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u8)))\n" "uint8x16_t __arm_vmulhq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u8)))\n" "uint8x16_t __arm_vmulhq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s16)))\n" "int16x8_t __arm_vmulhq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s16)))\n" "int16x8_t __arm_vmulhq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s32)))\n" "int32x4_t __arm_vmulhq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s32)))\n" "int32x4_t __arm_vmulhq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s8)))\n" "int8x16_t __arm_vmulhq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s8)))\n" "int8x16_t __arm_vmulhq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u16)))\n" "uint16x8_t __arm_vmulhq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u16)))\n" "uint16x8_t __arm_vmulhq(uint16x8_t, uint16x8_t);\n" "static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u32)))\n" "uint32x4_t __arm_vmulhq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u32)))\n" "uint32x4_t __arm_vmulhq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u8)))\n" "uint8x16_t __arm_vmulhq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u8)))\n" "uint8x16_t __arm_vmulhq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s16)))\n" "int16x8_t __arm_vmulhq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s16)))\n" "int16x8_t __arm_vmulhq_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s32)))\n" "int32x4_t __arm_vmulhq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s32)))\n" "int32x4_t __arm_vmulhq_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s8)))\n" "int8x16_t __arm_vmulhq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s8)))\n" "int8x16_t __arm_vmulhq_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u16)))\n" "uint16x8_t __arm_vmulhq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u16)))\n" "uint16x8_t __arm_vmulhq_x(uint16x8_t, 
uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u32)))\n" "uint32x4_t __arm_vmulhq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u32)))\n" "uint32x4_t __arm_vmulhq_x(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u8)))\n" "uint8x16_t __arm_vmulhq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u8)))\n" "uint8x16_t __arm_vmulhq_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s16)))\n" "int32x4_t __arm_vmullbq_int_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s16)))\n" "int32x4_t __arm_vmullbq_int_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s32)))\n" "int64x2_t __arm_vmullbq_int_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s32)))\n" "int64x2_t __arm_vmullbq_int_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s8)))\n" "int16x8_t __arm_vmullbq_int_m_s8(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s8)))\n" "int16x8_t __arm_vmullbq_int_m(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u16)))\n" "uint32x4_t __arm_vmullbq_int_m_u16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u16)))\n" "uint32x4_t __arm_vmullbq_int_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u32)))\n" "uint64x2_t __arm_vmullbq_int_m_u32(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u32)))\n" "uint64x2_t __arm_vmullbq_int_m(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u8)))\n" "uint16x8_t __arm_vmullbq_int_m_u8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u8)))\n" "uint16x8_t __arm_vmullbq_int_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s16)))\n" "int32x4_t __arm_vmullbq_int_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s16)))\n" "int32x4_t __arm_vmullbq_int(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s32)))\n" "int64x2_t __arm_vmullbq_int_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s32)))\n" "int64x2_t __arm_vmullbq_int(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s8)))\n" "int16x8_t __arm_vmullbq_int_s8(int8x16_t, int8x16_t);\n" 
"static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s8)))\n" "int16x8_t __arm_vmullbq_int(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u16)))\n" "uint32x4_t __arm_vmullbq_int_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u16)))\n" "uint32x4_t __arm_vmullbq_int(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u32)))\n" "uint64x2_t __arm_vmullbq_int_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u32)))\n" "uint64x2_t __arm_vmullbq_int(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u8)))\n" "uint16x8_t __arm_vmullbq_int_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u8)))\n" "uint16x8_t __arm_vmullbq_int(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s16)))\n" "int32x4_t __arm_vmullbq_int_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s16)))\n" "int32x4_t __arm_vmullbq_int_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s32)))\n" "int64x2_t __arm_vmullbq_int_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s32)))\n" "int64x2_t __arm_vmullbq_int_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s8)))\n" "int16x8_t __arm_vmullbq_int_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s8)))\n" "int16x8_t __arm_vmullbq_int_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u16)))\n" "uint32x4_t __arm_vmullbq_int_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u16)))\n" "uint32x4_t __arm_vmullbq_int_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u32)))\n" "uint64x2_t __arm_vmullbq_int_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u32)))\n" "uint64x2_t __arm_vmullbq_int_x(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u8)))\n" "uint16x8_t __arm_vmullbq_int_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u8)))\n" "uint16x8_t __arm_vmullbq_int_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_m_p16)))\n" "uint32x4_t __arm_vmullbq_poly_m_p16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_m_p16)))\n" "uint32x4_t __arm_vmullbq_poly_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_m_p8)))\n" "uint16x8_t 
__arm_vmullbq_poly_m_p8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_m_p8)))\n" "uint16x8_t __arm_vmullbq_poly_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_p16)))\n" "uint32x4_t __arm_vmullbq_poly_p16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_p16)))\n" "uint32x4_t __arm_vmullbq_poly(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_p8)))\n" "uint16x8_t __arm_vmullbq_poly_p8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_p8)))\n" "uint16x8_t __arm_vmullbq_poly(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_x_p16)))\n" "uint32x4_t __arm_vmullbq_poly_x_p16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_x_p16)))\n" "uint32x4_t __arm_vmullbq_poly_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_x_p8)))\n" "uint16x8_t __arm_vmullbq_poly_x_p8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_x_p8)))\n" "uint16x8_t __arm_vmullbq_poly_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s16)))\n" "int32x4_t __arm_vmulltq_int_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s16)))\n" "int32x4_t __arm_vmulltq_int_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s32)))\n" "int64x2_t __arm_vmulltq_int_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s32)))\n" "int64x2_t __arm_vmulltq_int_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s8)))\n" "int16x8_t __arm_vmulltq_int_m_s8(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s8)))\n" "int16x8_t __arm_vmulltq_int_m(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u16)))\n" "uint32x4_t __arm_vmulltq_int_m_u16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u16)))\n" "uint32x4_t __arm_vmulltq_int_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u32)))\n" "uint64x2_t __arm_vmulltq_int_m_u32(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u32)))\n" "uint64x2_t __arm_vmulltq_int_m(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u8)))\n" "uint16x8_t __arm_vmulltq_int_m_u8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u8)))\n" "uint16x8_t __arm_vmulltq_int_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s16)))\n" "int32x4_t __arm_vmulltq_int_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s16)))\n" "int32x4_t __arm_vmulltq_int(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s32)))\n" "int64x2_t __arm_vmulltq_int_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s32)))\n" "int64x2_t __arm_vmulltq_int(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s8)))\n" "int16x8_t __arm_vmulltq_int_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s8)))\n" "int16x8_t __arm_vmulltq_int(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u16)))\n" "uint32x4_t __arm_vmulltq_int_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u16)))\n" "uint32x4_t __arm_vmulltq_int(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u32)))\n" "uint64x2_t __arm_vmulltq_int_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u32)))\n" "uint64x2_t __arm_vmulltq_int(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u8)))\n" "uint16x8_t __arm_vmulltq_int_u8(uint8x16_t, uint8x16_t);\n" "static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u8)))\n" "uint16x8_t __arm_vmulltq_int(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s16)))\n" "int32x4_t __arm_vmulltq_int_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s16)))\n" "int32x4_t __arm_vmulltq_int_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s32)))\n" "int64x2_t __arm_vmulltq_int_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s32)))\n" "int64x2_t __arm_vmulltq_int_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s8)))\n" "int16x8_t __arm_vmulltq_int_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s8)))\n" "int16x8_t __arm_vmulltq_int_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u16)))\n" "uint32x4_t __arm_vmulltq_int_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u16)))\n" "uint32x4_t __arm_vmulltq_int_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u32)))\n" "uint64x2_t __arm_vmulltq_int_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u32)))\n" "uint64x2_t __arm_vmulltq_int_x(uint32x4_t, uint32x4_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u8)))\n" "uint16x8_t __arm_vmulltq_int_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u8)))\n" "uint16x8_t __arm_vmulltq_int_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_m_p16)))\n" "uint32x4_t __arm_vmulltq_poly_m_p16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_m_p16)))\n" "uint32x4_t __arm_vmulltq_poly_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_m_p8)))\n" "uint16x8_t __arm_vmulltq_poly_m_p8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_m_p8)))\n" "uint16x8_t __arm_vmulltq_poly_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_p16)))\n" "uint32x4_t __arm_vmulltq_poly_p16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_p16)))\n" "uint32x4_t __arm_vmulltq_poly(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_p8)))\n" "uint16x8_t __arm_vmulltq_poly_p8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_p8)))\n" "uint16x8_t __arm_vmulltq_poly(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_x_p16)))\n" "uint32x4_t 
__arm_vmulltq_poly_x_p16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_x_p16)))\n" "uint32x4_t __arm_vmulltq_poly_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_x_p8)))\n" "uint16x8_t __arm_vmulltq_poly_x_p8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_x_p8)))\n" "uint16x8_t __arm_vmulltq_poly_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s16)))\n" "int16x8_t __arm_vmulq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s16)))\n" "int16x8_t __arm_vmulq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s32)))\n" "int32x4_t __arm_vmulq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s32)))\n" "int32x4_t __arm_vmulq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s8)))\n" "int8x16_t __arm_vmulq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s8)))\n" "int8x16_t __arm_vmulq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u16)))\n" "uint16x8_t __arm_vmulq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u16)))\n" "uint16x8_t __arm_vmulq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u32)))\n" "uint32x4_t __arm_vmulq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u32)))\n" "uint32x4_t __arm_vmulq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u8)))\n" "uint8x16_t __arm_vmulq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u8)))\n" "uint8x16_t __arm_vmulq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s16)))\n" "int16x8_t __arm_vmulq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s16)))\n" "int16x8_t __arm_vmulq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s32)))\n" "int32x4_t __arm_vmulq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s32)))\n" "int32x4_t __arm_vmulq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s8)))\n" "int8x16_t __arm_vmulq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s8)))\n" "int8x16_t __arm_vmulq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u16)))\n" "uint16x8_t __arm_vmulq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u16)))\n" "uint16x8_t __arm_vmulq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u32)))\n" "uint32x4_t __arm_vmulq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u32)))\n" "uint32x4_t __arm_vmulq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u8)))\n" "uint8x16_t __arm_vmulq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u8)))\n" "uint8x16_t __arm_vmulq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s16)))\n" "int16x8_t __arm_vmulq_n_s16(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s16)))\n" "int16x8_t __arm_vmulq(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s32)))\n" "int32x4_t __arm_vmulq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s32)))\n" "int32x4_t __arm_vmulq(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s8)))\n" "int8x16_t __arm_vmulq_n_s8(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s8)))\n" 
"int8x16_t __arm_vmulq(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u16)))\n" "uint16x8_t __arm_vmulq_n_u16(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u16)))\n" "uint16x8_t __arm_vmulq(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u32)))\n" "uint32x4_t __arm_vmulq_n_u32(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u32)))\n" "uint32x4_t __arm_vmulq(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u8)))\n" "uint8x16_t __arm_vmulq_n_u8(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u8)))\n" "uint8x16_t __arm_vmulq(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s16)))\n" "int16x8_t __arm_vmulq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s16)))\n" "int16x8_t __arm_vmulq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s32)))\n" "int32x4_t __arm_vmulq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s32)))\n" "int32x4_t __arm_vmulq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s8)))\n" "int8x16_t __arm_vmulq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s8)))\n" "int8x16_t __arm_vmulq(int8x16_t, int8x16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u16)))\n" "uint16x8_t __arm_vmulq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u16)))\n" "uint16x8_t __arm_vmulq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u32)))\n" "uint32x4_t __arm_vmulq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u32)))\n" "uint32x4_t __arm_vmulq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u8)))\n" "uint8x16_t __arm_vmulq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u8)))\n" "uint8x16_t __arm_vmulq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s16)))\n" "int16x8_t __arm_vmulq_x_n_s16(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s16)))\n" "int16x8_t __arm_vmulq_x(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s32)))\n" "int32x4_t __arm_vmulq_x_n_s32(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s32)))\n" "int32x4_t __arm_vmulq_x(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s8)))\n" "int8x16_t __arm_vmulq_x_n_s8(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s8)))\n" "int8x16_t __arm_vmulq_x(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u16)))\n" "uint16x8_t __arm_vmulq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u16)))\n" "uint16x8_t __arm_vmulq_x(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u32)))\n" "uint32x4_t __arm_vmulq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u32)))\n" "uint32x4_t __arm_vmulq_x(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u8)))\n" "uint8x16_t __arm_vmulq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u8)))\n" "uint8x16_t __arm_vmulq_x(uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s16)))\n" "int16x8_t __arm_vmulq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s16)))\n" "int16x8_t __arm_vmulq_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s32)))\n" "int32x4_t __arm_vmulq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s32)))\n" "int32x4_t __arm_vmulq_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s8)))\n" "int8x16_t __arm_vmulq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s8)))\n" 
"int8x16_t __arm_vmulq_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u16)))\n" "uint16x8_t __arm_vmulq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u16)))\n" "uint16x8_t __arm_vmulq_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u32)))\n" "uint32x4_t __arm_vmulq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u32)))\n" "uint32x4_t __arm_vmulq_x(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u8)))\n" "uint8x16_t __arm_vmulq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u8)))\n" "uint8x16_t __arm_vmulq_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_s16)))\n" "int16x8_t __arm_vmvnq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_s16)))\n" "int16x8_t __arm_vmvnq_m(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_s32)))\n" "int32x4_t __arm_vmvnq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_s32)))\n" "int32x4_t __arm_vmvnq_m(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_u16)))\n" "uint16x8_t __arm_vmvnq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_u16)))\n" "uint16x8_t __arm_vmvnq_m(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_u32)))\n" "uint32x4_t __arm_vmvnq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_u32)))\n" "uint32x4_t __arm_vmvnq_m(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s16)))\n" "int16x8_t __arm_vmvnq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s16)))\n" "int16x8_t __arm_vmvnq_m(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s32)))\n" "int32x4_t __arm_vmvnq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s32)))\n" "int32x4_t __arm_vmvnq_m(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s8)))\n" "int8x16_t __arm_vmvnq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s8)))\n" "int8x16_t __arm_vmvnq_m(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u16)))\n" "uint16x8_t __arm_vmvnq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u16)))\n" "uint16x8_t __arm_vmvnq_m(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u32)))\n" 
"uint32x4_t __arm_vmvnq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u32)))\n" "uint32x4_t __arm_vmvnq_m(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u8)))\n" "uint8x16_t __arm_vmvnq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u8)))\n" "uint8x16_t __arm_vmvnq_m(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_n_s16)))\n" "int16x8_t __arm_vmvnq_n_s16(int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_n_s32)))\n" "int32x4_t __arm_vmvnq_n_s32(int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_n_u16)))\n" "uint16x8_t __arm_vmvnq_n_u16(uint16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_n_u32)))\n" "uint32x4_t __arm_vmvnq_n_u32(uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s16)))\n" "int16x8_t __arm_vmvnq_s16(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s16)))\n" "int16x8_t __arm_vmvnq(int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s32)))\n" "int32x4_t __arm_vmvnq_s32(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s32)))\n" "int32x4_t __arm_vmvnq(int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s8)))\n" "int8x16_t __arm_vmvnq_s8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s8)))\n" "int8x16_t 
__arm_vmvnq(int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u16)))\n" "uint16x8_t __arm_vmvnq_u16(uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u16)))\n" "uint16x8_t __arm_vmvnq(uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u32)))\n" "uint32x4_t __arm_vmvnq_u32(uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u32)))\n" "uint32x4_t __arm_vmvnq(uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u8)))\n" "uint8x16_t __arm_vmvnq_u8(uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u8)))\n" "uint8x16_t __arm_vmvnq(uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_n_s16)))\n" "int16x8_t __arm_vmvnq_x_n_s16(int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_n_s32)))\n" "int32x4_t __arm_vmvnq_x_n_s32(int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_n_u16)))\n" "uint16x8_t __arm_vmvnq_x_n_u16(uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_n_u32)))\n" "uint32x4_t __arm_vmvnq_x_n_u32(uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s16)))\n" "int16x8_t __arm_vmvnq_x_s16(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s16)))\n" "int16x8_t __arm_vmvnq_x(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s32)))\n" "int32x4_t __arm_vmvnq_x_s32(int32x4_t, mve_pred16_t);\n" 
"static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s32)))\n" "int32x4_t __arm_vmvnq_x(int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s8)))\n" "int8x16_t __arm_vmvnq_x_s8(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s8)))\n" "int8x16_t __arm_vmvnq_x(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u16)))\n" "uint16x8_t __arm_vmvnq_x_u16(uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u16)))\n" "uint16x8_t __arm_vmvnq_x(uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u32)))\n" "uint32x4_t __arm_vmvnq_x_u32(uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u32)))\n" "uint32x4_t __arm_vmvnq_x(uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u8)))\n" "uint8x16_t __arm_vmvnq_x_u8(uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u8)))\n" "uint8x16_t __arm_vmvnq_x(uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s16)))\n" "int16x8_t __arm_vnegq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s16)))\n" "int16x8_t __arm_vnegq_m(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s32)))\n" "int32x4_t __arm_vnegq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s32)))\n" "int32x4_t __arm_vnegq_m(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s8)))\n" "int8x16_t __arm_vnegq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s8)))\n" "int8x16_t __arm_vnegq_m(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s16)))\n" "int16x8_t __arm_vnegq_s16(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s16)))\n" "int16x8_t __arm_vnegq(int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s32)))\n" "int32x4_t __arm_vnegq_s32(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s32)))\n" "int32x4_t __arm_vnegq(int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s8)))\n" "int8x16_t __arm_vnegq_s8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s8)))\n" "int8x16_t __arm_vnegq(int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s16)))\n" "int16x8_t __arm_vnegq_x_s16(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s16)))\n" "int16x8_t __arm_vnegq_x(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s32)))\n" "int32x4_t __arm_vnegq_x_s32(int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s32)))\n" "int32x4_t __arm_vnegq_x(int32x4_t, mve_pred16_t);\n" 
"static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s8)))\n" "int8x16_t __arm_vnegq_x_s8(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s8)))\n" "int8x16_t __arm_vnegq_x(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s16)))\n" "int16x8_t __arm_vornq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s16)))\n" "int16x8_t __arm_vornq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s32)))\n" "int32x4_t __arm_vornq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s32)))\n" "int32x4_t __arm_vornq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s8)))\n" "int8x16_t __arm_vornq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s8)))\n" "int8x16_t __arm_vornq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u16)))\n" "uint16x8_t __arm_vornq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u16)))\n" "uint16x8_t __arm_vornq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u32)))\n" "uint32x4_t __arm_vornq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u32)))\n" "uint32x4_t __arm_vornq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u8)))\n" "uint8x16_t __arm_vornq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u8)))\n" "uint8x16_t __arm_vornq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_s16)))\n" "int16x8_t __arm_vornq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_s16)))\n" "int16x8_t __arm_vornq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_s32)))\n" "int32x4_t __arm_vornq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_s32)))\n" "int32x4_t __arm_vornq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_s8)))\n" "int8x16_t __arm_vornq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_s8)))\n" "int8x16_t __arm_vornq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_u16)))\n" "uint16x8_t __arm_vornq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_u16)))\n" "uint16x8_t __arm_vornq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_u32)))\n" "uint32x4_t __arm_vornq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vornq_u32)))\n" "uint32x4_t __arm_vornq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_u8)))\n" "uint8x16_t __arm_vornq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_u8)))\n" "uint8x16_t __arm_vornq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s16)))\n" "int16x8_t __arm_vornq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s16)))\n" "int16x8_t __arm_vornq_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s32)))\n" "int32x4_t __arm_vornq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s32)))\n" "int32x4_t __arm_vornq_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s8)))\n" "int8x16_t __arm_vornq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s8)))\n" "int8x16_t __arm_vornq_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u16)))\n" "uint16x8_t __arm_vornq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u16)))\n" "uint16x8_t __arm_vornq_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u32)))\n" "uint32x4_t __arm_vornq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u32)))\n" "uint32x4_t __arm_vornq_x(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u8)))\n" "uint8x16_t __arm_vornq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u8)))\n" "uint8x16_t __arm_vornq_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_s16)))\n" "int16x8_t __arm_vorrq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_s16)))\n" "int16x8_t __arm_vorrq_m_n(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_s32)))\n" "int32x4_t __arm_vorrq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_s32)))\n" "int32x4_t __arm_vorrq_m_n(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_u16)))\n" "uint16x8_t __arm_vorrq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_u16)))\n" "uint16x8_t __arm_vorrq_m_n(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_u32)))\n" "uint32x4_t __arm_vorrq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_u32)))\n" "uint32x4_t __arm_vorrq_m_n(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s16)))\n" "int16x8_t __arm_vorrq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s16)))\n" "int16x8_t __arm_vorrq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s32)))\n" "int32x4_t __arm_vorrq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s32)))\n" "int32x4_t __arm_vorrq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s8)))\n" "int8x16_t __arm_vorrq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s8)))\n" "int8x16_t __arm_vorrq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u16)))\n" "uint16x8_t __arm_vorrq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u16)))\n" "uint16x8_t __arm_vorrq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u32)))\n" "uint32x4_t __arm_vorrq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u32)))\n" "uint32x4_t __arm_vorrq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u8)))\n" "uint8x16_t __arm_vorrq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u8)))\n" "uint8x16_t __arm_vorrq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_s16)))\n" "int16x8_t __arm_vorrq_n_s16(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_s16)))\n" "int16x8_t __arm_vorrq(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_s32)))\n" "int32x4_t __arm_vorrq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_s32)))\n" "int32x4_t __arm_vorrq(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_u16)))\n" "uint16x8_t __arm_vorrq_n_u16(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_u16)))\n" "uint16x8_t __arm_vorrq(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_u32)))\n" "uint32x4_t __arm_vorrq_n_u32(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_u32)))\n" "uint32x4_t __arm_vorrq(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s16)))\n" "int16x8_t __arm_vorrq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s16)))\n" "int16x8_t __arm_vorrq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s32)))\n" "int32x4_t __arm_vorrq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s32)))\n" "int32x4_t __arm_vorrq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s8)))\n" "int8x16_t __arm_vorrq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s8)))\n" "int8x16_t __arm_vorrq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u16)))\n" "uint16x8_t __arm_vorrq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u16)))\n" "uint16x8_t __arm_vorrq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u32)))\n" "uint32x4_t __arm_vorrq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u32)))\n" "uint32x4_t __arm_vorrq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u8)))\n" "uint8x16_t __arm_vorrq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u8)))\n" "uint8x16_t __arm_vorrq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s16)))\n" "int16x8_t __arm_vorrq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s16)))\n" "int16x8_t __arm_vorrq_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s32)))\n" "int32x4_t __arm_vorrq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s32)))\n" "int32x4_t 
__arm_vorrq_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s8)))\n" "int8x16_t __arm_vorrq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s8)))\n" "int8x16_t __arm_vorrq_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u16)))\n" "uint16x8_t __arm_vorrq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u16)))\n" "uint16x8_t __arm_vorrq_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u32)))\n" "uint32x4_t __arm_vorrq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u32)))\n" "uint32x4_t __arm_vorrq_x(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u8)))\n" "uint8x16_t __arm_vorrq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u8)))\n" "uint8x16_t __arm_vorrq_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpnot)))\n" "mve_pred16_t __arm_vpnot(mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s16)))\n" "int16x8_t __arm_vpselq_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s16)))\n" "int16x8_t __arm_vpselq(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s32)))\n" "int32x4_t __arm_vpselq_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s32)))\n" "int32x4_t __arm_vpselq(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s64)))\n" "int64x2_t __arm_vpselq_s64(int64x2_t, int64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s64)))\n" "int64x2_t __arm_vpselq(int64x2_t, int64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s8)))\n" "int8x16_t __arm_vpselq_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s8)))\n" "int8x16_t __arm_vpselq(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u16)))\n" "uint16x8_t __arm_vpselq_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u16)))\n" "uint16x8_t __arm_vpselq(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u32)))\n" "uint32x4_t __arm_vpselq_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u32)))\n" "uint32x4_t __arm_vpselq(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u64)))\n" "uint64x2_t __arm_vpselq_u64(uint64x2_t, uint64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u64)))\n" "uint64x2_t 
__arm_vpselq(uint64x2_t, uint64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u8)))\n" "uint8x16_t __arm_vpselq_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u8)))\n" "uint8x16_t __arm_vpselq(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s16)))\n" "int16x8_t __arm_vqabsq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s16)))\n" "int16x8_t __arm_vqabsq_m(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s32)))\n" "int32x4_t __arm_vqabsq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s32)))\n" "int32x4_t __arm_vqabsq_m(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s8)))\n" "int8x16_t __arm_vqabsq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s8)))\n" "int8x16_t __arm_vqabsq_m(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s16)))\n" "int16x8_t __arm_vqabsq_s16(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s16)))\n" "int16x8_t __arm_vqabsq(int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s32)))\n" "int32x4_t __arm_vqabsq_s32(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s32)))\n" "int32x4_t 
__arm_vqabsq(int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s8)))\n" "int8x16_t __arm_vqabsq_s8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s8)))\n" "int8x16_t __arm_vqabsq(int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s16)))\n" "int16x8_t __arm_vqaddq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s16)))\n" "int16x8_t __arm_vqaddq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s32)))\n" "int32x4_t __arm_vqaddq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s32)))\n" "int32x4_t __arm_vqaddq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s8)))\n" "int8x16_t __arm_vqaddq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s8)))\n" "int8x16_t __arm_vqaddq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u16)))\n" "uint16x8_t __arm_vqaddq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u16)))\n" "uint16x8_t __arm_vqaddq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u32)))\n" "uint32x4_t __arm_vqaddq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);\n" "static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u32)))\n" "uint32x4_t __arm_vqaddq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u8)))\n" "uint8x16_t __arm_vqaddq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u8)))\n" "uint8x16_t __arm_vqaddq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s16)))\n" "int16x8_t __arm_vqaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s16)))\n" "int16x8_t __arm_vqaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s32)))\n" "int32x4_t __arm_vqaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s32)))\n" "int32x4_t __arm_vqaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s8)))\n" "int8x16_t __arm_vqaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s8)))\n" "int8x16_t __arm_vqaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u16)))\n" "uint16x8_t __arm_vqaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u16)))\n" "uint16x8_t 
__arm_vqaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u32)))\n" "uint32x4_t __arm_vqaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u32)))\n" "uint32x4_t __arm_vqaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u8)))\n" "uint8x16_t __arm_vqaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u8)))\n" "uint8x16_t __arm_vqaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s16)))\n" "int16x8_t __arm_vqaddq_n_s16(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s16)))\n" "int16x8_t __arm_vqaddq(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s32)))\n" "int32x4_t __arm_vqaddq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s32)))\n" "int32x4_t __arm_vqaddq(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s8)))\n" "int8x16_t __arm_vqaddq_n_s8(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s8)))\n" "int8x16_t __arm_vqaddq(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u16)))\n" "uint16x8_t __arm_vqaddq_n_u16(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u16)))\n" "uint16x8_t __arm_vqaddq(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u32)))\n" "uint32x4_t __arm_vqaddq_n_u32(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u32)))\n" "uint32x4_t __arm_vqaddq(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u8)))\n" "uint8x16_t __arm_vqaddq_n_u8(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u8)))\n" "uint8x16_t __arm_vqaddq(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s16)))\n" "int16x8_t __arm_vqaddq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s16)))\n" "int16x8_t __arm_vqaddq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s32)))\n" "int32x4_t __arm_vqaddq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s32)))\n" "int32x4_t __arm_vqaddq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s8)))\n" "int8x16_t __arm_vqaddq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s8)))\n" "int8x16_t __arm_vqaddq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u16)))\n" "uint16x8_t __arm_vqaddq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u16)))\n" "uint16x8_t __arm_vqaddq(uint16x8_t, uint16x8_t);\n" 
"static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u32)))\n" "uint32x4_t __arm_vqaddq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u32)))\n" "uint32x4_t __arm_vqaddq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u8)))\n" "uint8x16_t __arm_vqaddq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u8)))\n" "uint8x16_t __arm_vqaddq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s16)))\n" "int16x8_t __arm_vqdmladhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s16)))\n" "int16x8_t __arm_vqdmladhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s32)))\n" "int32x4_t __arm_vqdmladhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s32)))\n" "int32x4_t __arm_vqdmladhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s8)))\n" "int8x16_t __arm_vqdmladhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s8)))\n" "int8x16_t __arm_vqdmladhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s16)))\n" "int16x8_t __arm_vqdmladhq_s16(int16x8_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s16)))\n" "int16x8_t __arm_vqdmladhq(int16x8_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s32)))\n" "int32x4_t __arm_vqdmladhq_s32(int32x4_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s32)))\n" "int32x4_t __arm_vqdmladhq(int32x4_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s8)))\n" "int8x16_t __arm_vqdmladhq_s8(int8x16_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s8)))\n" "int8x16_t __arm_vqdmladhq(int8x16_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s16)))\n" "int16x8_t __arm_vqdmladhxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s16)))\n" "int16x8_t __arm_vqdmladhxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s32)))\n" "int32x4_t __arm_vqdmladhxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s32)))\n" "int32x4_t __arm_vqdmladhxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s8)))\n" "int8x16_t __arm_vqdmladhxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s8)))\n" "int8x16_t __arm_vqdmladhxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s16)))\n" "int16x8_t __arm_vqdmladhxq_s16(int16x8_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s16)))\n" "int16x8_t __arm_vqdmladhxq(int16x8_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s32)))\n" "int32x4_t __arm_vqdmladhxq_s32(int32x4_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s32)))\n" "int32x4_t __arm_vqdmladhxq(int32x4_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s8)))\n" "int8x16_t __arm_vqdmladhxq_s8(int8x16_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s8)))\n" "int8x16_t __arm_vqdmladhxq(int8x16_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s16)))\n" "int16x8_t __arm_vqdmlahq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s16)))\n" "int16x8_t __arm_vqdmlahq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s32)))\n" "int32x4_t __arm_vqdmlahq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s32)))\n" "int32x4_t __arm_vqdmlahq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s8)))\n" "int8x16_t __arm_vqdmlahq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s8)))\n" "int8x16_t __arm_vqdmlahq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s16)))\n" "int16x8_t __arm_vqdmlahq_n_s16(int16x8_t, int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s16)))\n" "int16x8_t __arm_vqdmlahq(int16x8_t, int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s32)))\n" "int32x4_t __arm_vqdmlahq_n_s32(int32x4_t, int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s32)))\n" "int32x4_t __arm_vqdmlahq(int32x4_t, int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s8)))\n" "int8x16_t __arm_vqdmlahq_n_s8(int8x16_t, int8x16_t, int8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s8)))\n" "int8x16_t __arm_vqdmlahq(int8x16_t, int8x16_t, int8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s16)))\n" "int16x8_t __arm_vqdmlashq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s16)))\n" "int16x8_t __arm_vqdmlashq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s32)))\n" "int32x4_t __arm_vqdmlashq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s32)))\n" "int32x4_t __arm_vqdmlashq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s8)))\n" "int8x16_t __arm_vqdmlashq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s8)))\n" "int8x16_t __arm_vqdmlashq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s16)))\n" "int16x8_t __arm_vqdmlashq_n_s16(int16x8_t, int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s16)))\n" "int16x8_t __arm_vqdmlashq(int16x8_t, int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s32)))\n" "int32x4_t __arm_vqdmlashq_n_s32(int32x4_t, int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s32)))\n" "int32x4_t __arm_vqdmlashq(int32x4_t, int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s8)))\n" "int8x16_t __arm_vqdmlashq_n_s8(int8x16_t, int8x16_t, int8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s8)))\n" "int8x16_t __arm_vqdmlashq(int8x16_t, int8x16_t, int8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s16)))\n" "int16x8_t __arm_vqdmlsdhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s16)))\n" "int16x8_t __arm_vqdmlsdhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s32)))\n" "int32x4_t __arm_vqdmlsdhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s32)))\n" "int32x4_t __arm_vqdmlsdhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s8)))\n" "int8x16_t __arm_vqdmlsdhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s8)))\n" "int8x16_t __arm_vqdmlsdhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s16)))\n" "int16x8_t __arm_vqdmlsdhq_s16(int16x8_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s16)))\n" "int16x8_t __arm_vqdmlsdhq(int16x8_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s32)))\n" "int32x4_t __arm_vqdmlsdhq_s32(int32x4_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s32)))\n" "int32x4_t __arm_vqdmlsdhq(int32x4_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s8)))\n" "int8x16_t __arm_vqdmlsdhq_s8(int8x16_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s8)))\n" "int8x16_t __arm_vqdmlsdhq(int8x16_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s16)))\n" "int16x8_t __arm_vqdmlsdhxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s16)))\n" "int16x8_t __arm_vqdmlsdhxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s32)))\n" "int32x4_t __arm_vqdmlsdhxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s32)))\n" "int32x4_t __arm_vqdmlsdhxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s8)))\n" "int8x16_t __arm_vqdmlsdhxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s8)))\n" "int8x16_t __arm_vqdmlsdhxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s16)))\n" "int16x8_t __arm_vqdmlsdhxq_s16(int16x8_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s16)))\n" "int16x8_t __arm_vqdmlsdhxq(int16x8_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s32)))\n" "int32x4_t __arm_vqdmlsdhxq_s32(int32x4_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s32)))\n" "int32x4_t __arm_vqdmlsdhxq(int32x4_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s8)))\n" "int8x16_t __arm_vqdmlsdhxq_s8(int8x16_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s8)))\n" "int8x16_t __arm_vqdmlsdhxq(int8x16_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s16)))\n" "int16x8_t __arm_vqdmulhq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s16)))\n" "int16x8_t __arm_vqdmulhq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s32)))\n" "int32x4_t __arm_vqdmulhq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s32)))\n" "int32x4_t __arm_vqdmulhq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s8)))\n" "int8x16_t __arm_vqdmulhq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s8)))\n" "int8x16_t __arm_vqdmulhq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s16)))\n" "int16x8_t __arm_vqdmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s16)))\n" "int16x8_t __arm_vqdmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s32)))\n" "int32x4_t __arm_vqdmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s32)))\n" "int32x4_t __arm_vqdmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s8)))\n" "int8x16_t __arm_vqdmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s8)))\n" "int8x16_t 
__arm_vqdmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s16)))\n" "int16x8_t __arm_vqdmulhq_n_s16(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s16)))\n" "int16x8_t __arm_vqdmulhq(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s32)))\n" "int32x4_t __arm_vqdmulhq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s32)))\n" "int32x4_t __arm_vqdmulhq(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s8)))\n" "int8x16_t __arm_vqdmulhq_n_s8(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s8)))\n" "int8x16_t __arm_vqdmulhq(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s16)))\n" "int16x8_t __arm_vqdmulhq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s16)))\n" "int16x8_t __arm_vqdmulhq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s32)))\n" "int32x4_t __arm_vqdmulhq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s32)))\n" "int32x4_t __arm_vqdmulhq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s8)))\n" "int8x16_t __arm_vqdmulhq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s8)))\n" "int8x16_t __arm_vqdmulhq(int8x16_t, int8x16_t);\n" "static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_n_s16)))\n" "int32x4_t __arm_vqdmullbq_m_n_s16(int32x4_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_n_s16)))\n" "int32x4_t __arm_vqdmullbq_m(int32x4_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_n_s32)))\n" "int64x2_t __arm_vqdmullbq_m_n_s32(int64x2_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_n_s32)))\n" "int64x2_t __arm_vqdmullbq_m(int64x2_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_s16)))\n" "int32x4_t __arm_vqdmullbq_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_s16)))\n" "int32x4_t __arm_vqdmullbq_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_s32)))\n" "int64x2_t __arm_vqdmullbq_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_s32)))\n" "int64x2_t __arm_vqdmullbq_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_n_s16)))\n" "int32x4_t __arm_vqdmullbq_n_s16(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_n_s16)))\n" "int32x4_t __arm_vqdmullbq(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_n_s32)))\n" "int64x2_t __arm_vqdmullbq_n_s32(int32x4_t, 
int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_n_s32)))\n" "int64x2_t __arm_vqdmullbq(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_s16)))\n" "int32x4_t __arm_vqdmullbq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_s16)))\n" "int32x4_t __arm_vqdmullbq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_s32)))\n" "int64x2_t __arm_vqdmullbq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_s32)))\n" "int64x2_t __arm_vqdmullbq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_n_s16)))\n" "int32x4_t __arm_vqdmulltq_m_n_s16(int32x4_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_n_s16)))\n" "int32x4_t __arm_vqdmulltq_m(int32x4_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_n_s32)))\n" "int64x2_t __arm_vqdmulltq_m_n_s32(int64x2_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_n_s32)))\n" "int64x2_t __arm_vqdmulltq_m(int64x2_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_s16)))\n" "int32x4_t __arm_vqdmulltq_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_s16)))\n" "int32x4_t __arm_vqdmulltq_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_s32)))\n" "int64x2_t __arm_vqdmulltq_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_s32)))\n" "int64x2_t __arm_vqdmulltq_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_n_s16)))\n" "int32x4_t __arm_vqdmulltq_n_s16(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_n_s16)))\n" "int32x4_t __arm_vqdmulltq(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_n_s32)))\n" "int64x2_t __arm_vqdmulltq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_n_s32)))\n" "int64x2_t __arm_vqdmulltq(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_s16)))\n" "int32x4_t __arm_vqdmulltq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_s16)))\n" "int32x4_t __arm_vqdmulltq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_s32)))\n" "int64x2_t __arm_vqdmulltq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_s32)))\n" "int64x2_t __arm_vqdmulltq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_s16)))\n" "int8x16_t __arm_vqmovnbq_m_s16(int8x16_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_s16)))\n" "int8x16_t __arm_vqmovnbq_m(int8x16_t, 
int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_s32)))\n" "int16x8_t __arm_vqmovnbq_m_s32(int16x8_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_s32)))\n" "int16x8_t __arm_vqmovnbq_m(int16x8_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_u16)))\n" "uint8x16_t __arm_vqmovnbq_m_u16(uint8x16_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_u16)))\n" "uint8x16_t __arm_vqmovnbq_m(uint8x16_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_u32)))\n" "uint16x8_t __arm_vqmovnbq_m_u32(uint16x8_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_u32)))\n" "uint16x8_t __arm_vqmovnbq_m(uint16x8_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_s16)))\n" "int8x16_t __arm_vqmovnbq_s16(int8x16_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_s16)))\n" "int8x16_t __arm_vqmovnbq(int8x16_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_s32)))\n" "int16x8_t __arm_vqmovnbq_s32(int16x8_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_s32)))\n" "int16x8_t __arm_vqmovnbq(int16x8_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_u16)))\n" "uint8x16_t __arm_vqmovnbq_u16(uint8x16_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_u16)))\n" "uint8x16_t __arm_vqmovnbq(uint8x16_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_u32)))\n" "uint16x8_t __arm_vqmovnbq_u32(uint16x8_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_u32)))\n" "uint16x8_t __arm_vqmovnbq(uint16x8_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_s16)))\n" "int8x16_t __arm_vqmovntq_m_s16(int8x16_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_s16)))\n" "int8x16_t __arm_vqmovntq_m(int8x16_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_s32)))\n" "int16x8_t __arm_vqmovntq_m_s32(int16x8_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_s32)))\n" "int16x8_t __arm_vqmovntq_m(int16x8_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_u16)))\n" "uint8x16_t __arm_vqmovntq_m_u16(uint8x16_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_u16)))\n" "uint8x16_t __arm_vqmovntq_m(uint8x16_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_u32)))\n" "uint16x8_t __arm_vqmovntq_m_u32(uint16x8_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_u32)))\n" "uint16x8_t __arm_vqmovntq_m(uint16x8_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_s16)))\n" "int8x16_t 
__arm_vqmovntq_s16(int8x16_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_s16)))\n" "int8x16_t __arm_vqmovntq(int8x16_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_s32)))\n" "int16x8_t __arm_vqmovntq_s32(int16x8_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_s32)))\n" "int16x8_t __arm_vqmovntq(int16x8_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_u16)))\n" "uint8x16_t __arm_vqmovntq_u16(uint8x16_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_u16)))\n" "uint8x16_t __arm_vqmovntq(uint8x16_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_u32)))\n" "uint16x8_t __arm_vqmovntq_u32(uint16x8_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_u32)))\n" "uint16x8_t __arm_vqmovntq(uint16x8_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_m_s16)))\n" "uint8x16_t __arm_vqmovunbq_m_s16(uint8x16_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_m_s16)))\n" "uint8x16_t __arm_vqmovunbq_m(uint8x16_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_m_s32)))\n" "uint16x8_t __arm_vqmovunbq_m_s32(uint16x8_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_m_s32)))\n" "uint16x8_t __arm_vqmovunbq_m(uint16x8_t, int32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_s16)))\n" "uint8x16_t __arm_vqmovunbq_s16(uint8x16_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_s16)))\n" "uint8x16_t __arm_vqmovunbq(uint8x16_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_s32)))\n" "uint16x8_t __arm_vqmovunbq_s32(uint16x8_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_s32)))\n" "uint16x8_t __arm_vqmovunbq(uint16x8_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_m_s16)))\n" "uint8x16_t __arm_vqmovuntq_m_s16(uint8x16_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_m_s16)))\n" "uint8x16_t __arm_vqmovuntq_m(uint8x16_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_m_s32)))\n" "uint16x8_t __arm_vqmovuntq_m_s32(uint16x8_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_m_s32)))\n" "uint16x8_t __arm_vqmovuntq_m(uint16x8_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_s16)))\n" "uint8x16_t __arm_vqmovuntq_s16(uint8x16_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_s16)))\n" "uint8x16_t __arm_vqmovuntq(uint8x16_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_s32)))\n" "uint16x8_t __arm_vqmovuntq_s32(uint16x8_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_s32)))\n" "uint16x8_t 
__arm_vqmovuntq(uint16x8_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s16)))\n" "int16x8_t __arm_vqnegq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s16)))\n" "int16x8_t __arm_vqnegq_m(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s32)))\n" "int32x4_t __arm_vqnegq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s32)))\n" "int32x4_t __arm_vqnegq_m(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s8)))\n" "int8x16_t __arm_vqnegq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s8)))\n" "int8x16_t __arm_vqnegq_m(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s16)))\n" "int16x8_t __arm_vqnegq_s16(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s16)))\n" "int16x8_t __arm_vqnegq(int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s32)))\n" "int32x4_t __arm_vqnegq_s32(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s32)))\n" "int32x4_t __arm_vqnegq(int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s8)))\n" "int8x16_t __arm_vqnegq_s8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s8)))\n" "int8x16_t __arm_vqnegq(int8x16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s16)))\n" "int16x8_t __arm_vqrdmladhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s16)))\n" "int16x8_t __arm_vqrdmladhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s32)))\n" "int32x4_t __arm_vqrdmladhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s32)))\n" "int32x4_t __arm_vqrdmladhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s8)))\n" "int8x16_t __arm_vqrdmladhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s8)))\n" "int8x16_t __arm_vqrdmladhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s16)))\n" "int16x8_t __arm_vqrdmladhq_s16(int16x8_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s16)))\n" "int16x8_t __arm_vqrdmladhq(int16x8_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s32)))\n" "int32x4_t __arm_vqrdmladhq_s32(int32x4_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s32)))\n" "int32x4_t __arm_vqrdmladhq(int32x4_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s8)))\n" "int8x16_t __arm_vqrdmladhq_s8(int8x16_t, int8x16_t, 
int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s8)))\n" "int8x16_t __arm_vqrdmladhq(int8x16_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s16)))\n" "int16x8_t __arm_vqrdmladhxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s16)))\n" "int16x8_t __arm_vqrdmladhxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s32)))\n" "int32x4_t __arm_vqrdmladhxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s32)))\n" "int32x4_t __arm_vqrdmladhxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s8)))\n" "int8x16_t __arm_vqrdmladhxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s8)))\n" "int8x16_t __arm_vqrdmladhxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s16)))\n" "int16x8_t __arm_vqrdmladhxq_s16(int16x8_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s16)))\n" "int16x8_t __arm_vqrdmladhxq(int16x8_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s32)))\n" "int32x4_t __arm_vqrdmladhxq_s32(int32x4_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s32)))\n" "int32x4_t __arm_vqrdmladhxq(int32x4_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s8)))\n" "int8x16_t __arm_vqrdmladhxq_s8(int8x16_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s8)))\n" "int8x16_t __arm_vqrdmladhxq(int8x16_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s16)))\n" "int16x8_t __arm_vqrdmlahq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s16)))\n" "int16x8_t __arm_vqrdmlahq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s32)))\n" "int32x4_t __arm_vqrdmlahq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s32)))\n" "int32x4_t __arm_vqrdmlahq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s8)))\n" "int8x16_t __arm_vqrdmlahq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s8)))\n" "int8x16_t __arm_vqrdmlahq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s16)))\n" "int16x8_t __arm_vqrdmlahq_n_s16(int16x8_t, int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s16)))\n" "int16x8_t __arm_vqrdmlahq(int16x8_t, int16x8_t, int16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s32)))\n" "int32x4_t __arm_vqrdmlahq_n_s32(int32x4_t, int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s32)))\n" "int32x4_t __arm_vqrdmlahq(int32x4_t, int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s8)))\n" "int8x16_t __arm_vqrdmlahq_n_s8(int8x16_t, int8x16_t, int8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s8)))\n" "int8x16_t __arm_vqrdmlahq(int8x16_t, int8x16_t, int8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s16)))\n" "int16x8_t __arm_vqrdmlashq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s16)))\n" "int16x8_t __arm_vqrdmlashq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s32)))\n" "int32x4_t __arm_vqrdmlashq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s32)))\n" "int32x4_t __arm_vqrdmlashq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s8)))\n" "int8x16_t __arm_vqrdmlashq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s8)))\n" "int8x16_t __arm_vqrdmlashq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s16)))\n" "int16x8_t __arm_vqrdmlashq_n_s16(int16x8_t, int16x8_t, 
int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s16)))\n" "int16x8_t __arm_vqrdmlashq(int16x8_t, int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s32)))\n" "int32x4_t __arm_vqrdmlashq_n_s32(int32x4_t, int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s32)))\n" "int32x4_t __arm_vqrdmlashq(int32x4_t, int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s8)))\n" "int8x16_t __arm_vqrdmlashq_n_s8(int8x16_t, int8x16_t, int8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s8)))\n" "int8x16_t __arm_vqrdmlashq(int8x16_t, int8x16_t, int8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s16)))\n" "int16x8_t __arm_vqrdmlsdhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s16)))\n" "int16x8_t __arm_vqrdmlsdhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s32)))\n" "int32x4_t __arm_vqrdmlsdhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s32)))\n" "int32x4_t __arm_vqrdmlsdhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s8)))\n" "int8x16_t __arm_vqrdmlsdhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s8)))\n" "int8x16_t 
__arm_vqrdmlsdhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s16)))\n" "int16x8_t __arm_vqrdmlsdhq_s16(int16x8_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s16)))\n" "int16x8_t __arm_vqrdmlsdhq(int16x8_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s32)))\n" "int32x4_t __arm_vqrdmlsdhq_s32(int32x4_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s32)))\n" "int32x4_t __arm_vqrdmlsdhq(int32x4_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s8)))\n" "int8x16_t __arm_vqrdmlsdhq_s8(int8x16_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s8)))\n" "int8x16_t __arm_vqrdmlsdhq(int8x16_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s16)))\n" "int16x8_t __arm_vqrdmlsdhxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s16)))\n" "int16x8_t __arm_vqrdmlsdhxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s32)))\n" "int32x4_t __arm_vqrdmlsdhxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s32)))\n" "int32x4_t __arm_vqrdmlsdhxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s8)))\n" 
"int8x16_t __arm_vqrdmlsdhxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s8)))\n" "int8x16_t __arm_vqrdmlsdhxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s16)))\n" "int16x8_t __arm_vqrdmlsdhxq_s16(int16x8_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s16)))\n" "int16x8_t __arm_vqrdmlsdhxq(int16x8_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s32)))\n" "int32x4_t __arm_vqrdmlsdhxq_s32(int32x4_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s32)))\n" "int32x4_t __arm_vqrdmlsdhxq(int32x4_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s8)))\n" "int8x16_t __arm_vqrdmlsdhxq_s8(int8x16_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s8)))\n" "int8x16_t __arm_vqrdmlsdhxq(int8x16_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s16)))\n" "int16x8_t __arm_vqrdmulhq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s16)))\n" "int16x8_t __arm_vqrdmulhq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s32)))\n" "int32x4_t __arm_vqrdmulhq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s32)))\n" "int32x4_t __arm_vqrdmulhq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s8)))\n" "int8x16_t __arm_vqrdmulhq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s8)))\n" "int8x16_t __arm_vqrdmulhq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s16)))\n" "int16x8_t __arm_vqrdmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s16)))\n" "int16x8_t __arm_vqrdmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s32)))\n" "int32x4_t __arm_vqrdmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s32)))\n" "int32x4_t __arm_vqrdmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s8)))\n" "int8x16_t __arm_vqrdmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s8)))\n" "int8x16_t __arm_vqrdmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s16)))\n" "int16x8_t __arm_vqrdmulhq_n_s16(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s16)))\n" "int16x8_t __arm_vqrdmulhq(int16x8_t, int16_t);\n" 
"static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s32)))\n" "int32x4_t __arm_vqrdmulhq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s32)))\n" "int32x4_t __arm_vqrdmulhq(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s8)))\n" "int8x16_t __arm_vqrdmulhq_n_s8(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s8)))\n" "int8x16_t __arm_vqrdmulhq(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s16)))\n" "int16x8_t __arm_vqrdmulhq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s16)))\n" "int16x8_t __arm_vqrdmulhq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s32)))\n" "int32x4_t __arm_vqrdmulhq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s32)))\n" "int32x4_t __arm_vqrdmulhq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s8)))\n" "int8x16_t __arm_vqrdmulhq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s8)))\n" "int8x16_t __arm_vqrdmulhq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s16)))\n" "int16x8_t __arm_vqrshlq_m_n_s16(int16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s16)))\n" "int16x8_t __arm_vqrshlq_m_n(int16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s32)))\n" "int32x4_t __arm_vqrshlq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s32)))\n" "int32x4_t __arm_vqrshlq_m_n(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s8)))\n" "int8x16_t __arm_vqrshlq_m_n_s8(int8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s8)))\n" "int8x16_t __arm_vqrshlq_m_n(int8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u16)))\n" "uint16x8_t __arm_vqrshlq_m_n_u16(uint16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u16)))\n" "uint16x8_t __arm_vqrshlq_m_n(uint16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u32)))\n" "uint32x4_t __arm_vqrshlq_m_n_u32(uint32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u32)))\n" "uint32x4_t __arm_vqrshlq_m_n(uint32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u8)))\n" "uint8x16_t __arm_vqrshlq_m_n_u8(uint8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u8)))\n" "uint8x16_t __arm_vqrshlq_m_n(uint8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s16)))\n" "int16x8_t __arm_vqrshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s16)))\n" "int16x8_t __arm_vqrshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s32)))\n" "int32x4_t __arm_vqrshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s32)))\n" "int32x4_t __arm_vqrshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s8)))\n" "int8x16_t __arm_vqrshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s8)))\n" "int8x16_t __arm_vqrshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u16)))\n" "uint16x8_t __arm_vqrshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u16)))\n" "uint16x8_t __arm_vqrshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u32)))\n" "uint32x4_t __arm_vqrshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u32)))\n" "uint32x4_t __arm_vqrshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u8)))\n" "uint8x16_t __arm_vqrshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u8)))\n" "uint8x16_t __arm_vqrshlq_m(uint8x16_t, uint8x16_t, 
int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s16)))\n" "int16x8_t __arm_vqrshlq_n_s16(int16x8_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s16)))\n" "int16x8_t __arm_vqrshlq(int16x8_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s32)))\n" "int32x4_t __arm_vqrshlq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s32)))\n" "int32x4_t __arm_vqrshlq(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s8)))\n" "int8x16_t __arm_vqrshlq_n_s8(int8x16_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s8)))\n" "int8x16_t __arm_vqrshlq(int8x16_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u16)))\n" "uint16x8_t __arm_vqrshlq_n_u16(uint16x8_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u16)))\n" "uint16x8_t __arm_vqrshlq(uint16x8_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u32)))\n" "uint32x4_t __arm_vqrshlq_n_u32(uint32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u32)))\n" "uint32x4_t __arm_vqrshlq(uint32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u8)))\n" "uint8x16_t __arm_vqrshlq_n_u8(uint8x16_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u8)))\n" "uint8x16_t __arm_vqrshlq(uint8x16_t, int32_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s16)))\n" "int16x8_t __arm_vqrshlq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s16)))\n" "int16x8_t __arm_vqrshlq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s32)))\n" "int32x4_t __arm_vqrshlq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s32)))\n" "int32x4_t __arm_vqrshlq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s8)))\n" "int8x16_t __arm_vqrshlq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s8)))\n" "int8x16_t __arm_vqrshlq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u16)))\n" "uint16x8_t __arm_vqrshlq_u16(uint16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u16)))\n" "uint16x8_t __arm_vqrshlq(uint16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u32)))\n" "uint32x4_t __arm_vqrshlq_u32(uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u32)))\n" "uint32x4_t __arm_vqrshlq(uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u8)))\n" "uint8x16_t __arm_vqrshlq_u8(uint8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u8)))\n" "uint8x16_t __arm_vqrshlq(uint8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_s16)))\n" "int8x16_t 
__arm_vqrshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_s16)))\n" "int8x16_t __arm_vqrshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_s32)))\n" "int16x8_t __arm_vqrshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_s32)))\n" "int16x8_t __arm_vqrshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_u16)))\n" "uint8x16_t __arm_vqrshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_u16)))\n" "uint8x16_t __arm_vqrshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_u32)))\n" "uint16x8_t __arm_vqrshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_u32)))\n" "uint16x8_t __arm_vqrshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_s16)))\n" "int8x16_t __arm_vqrshrnbq_n_s16(int8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_s16)))\n" "int8x16_t __arm_vqrshrnbq(int8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_s32)))\n" "int16x8_t __arm_vqrshrnbq_n_s32(int16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_s32)))\n" "int16x8_t __arm_vqrshrnbq(int16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_u16)))\n" "uint8x16_t __arm_vqrshrnbq_n_u16(uint8x16_t, uint16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_u16)))\n" "uint8x16_t __arm_vqrshrnbq(uint8x16_t, uint16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_u32)))\n" "uint16x8_t __arm_vqrshrnbq_n_u32(uint16x8_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_u32)))\n" "uint16x8_t __arm_vqrshrnbq(uint16x8_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_s16)))\n" "int8x16_t __arm_vqrshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_s16)))\n" "int8x16_t __arm_vqrshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_s32)))\n" "int16x8_t __arm_vqrshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_s32)))\n" "int16x8_t __arm_vqrshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_u16)))\n" "uint8x16_t __arm_vqrshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_u16)))\n" "uint8x16_t __arm_vqrshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_u32)))\n" "uint16x8_t __arm_vqrshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_u32)))\n" "uint16x8_t __arm_vqrshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_s16)))\n" "int8x16_t __arm_vqrshrntq_n_s16(int8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_s16)))\n" "int8x16_t __arm_vqrshrntq(int8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_s32)))\n" "int16x8_t __arm_vqrshrntq_n_s32(int16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_s32)))\n" "int16x8_t __arm_vqrshrntq(int16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_u16)))\n" "uint8x16_t __arm_vqrshrntq_n_u16(uint8x16_t, uint16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_u16)))\n" "uint8x16_t __arm_vqrshrntq(uint8x16_t, uint16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_u32)))\n" "uint16x8_t __arm_vqrshrntq_n_u32(uint16x8_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_u32)))\n" "uint16x8_t __arm_vqrshrntq(uint16x8_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_m_n_s16)))\n" "uint8x16_t __arm_vqrshrunbq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_m_n_s16)))\n" "uint8x16_t __arm_vqrshrunbq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_m_n_s32)))\n" "uint16x8_t __arm_vqrshrunbq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_m_n_s32)))\n" "uint16x8_t __arm_vqrshrunbq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_n_s16)))\n" "uint8x16_t __arm_vqrshrunbq_n_s16(uint8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_n_s16)))\n" "uint8x16_t __arm_vqrshrunbq(uint8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_n_s32)))\n" "uint16x8_t __arm_vqrshrunbq_n_s32(uint16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_n_s32)))\n" "uint16x8_t __arm_vqrshrunbq(uint16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_m_n_s16)))\n" "uint8x16_t __arm_vqrshruntq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_m_n_s16)))\n" "uint8x16_t __arm_vqrshruntq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_m_n_s32)))\n" "uint16x8_t __arm_vqrshruntq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_m_n_s32)))\n" "uint16x8_t __arm_vqrshruntq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);\n" 
"static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_n_s16)))\n" "uint8x16_t __arm_vqrshruntq_n_s16(uint8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_n_s16)))\n" "uint8x16_t __arm_vqrshruntq(uint8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_n_s32)))\n" "uint16x8_t __arm_vqrshruntq_n_s32(uint16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_n_s32)))\n" "uint16x8_t __arm_vqrshruntq(uint16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s16)))\n" "int16x8_t __arm_vqshlq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s16)))\n" "int16x8_t __arm_vqshlq_m_n(int16x8_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s32)))\n" "int32x4_t __arm_vqshlq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s32)))\n" "int32x4_t __arm_vqshlq_m_n(int32x4_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s8)))\n" "int8x16_t __arm_vqshlq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s8)))\n" "int8x16_t __arm_vqshlq_m_n(int8x16_t, int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u16)))\n" "uint16x8_t __arm_vqshlq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u16)))\n" "uint16x8_t __arm_vqshlq_m_n(uint16x8_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u32)))\n" "uint32x4_t __arm_vqshlq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u32)))\n" "uint32x4_t __arm_vqshlq_m_n(uint32x4_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u8)))\n" "uint8x16_t __arm_vqshlq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u8)))\n" "uint8x16_t __arm_vqshlq_m_n(uint8x16_t, uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s16)))\n" "int16x8_t __arm_vqshlq_m_r_s16(int16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s16)))\n" "int16x8_t __arm_vqshlq_m_r(int16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s32)))\n" "int32x4_t __arm_vqshlq_m_r_s32(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s32)))\n" "int32x4_t __arm_vqshlq_m_r(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s8)))\n" "int8x16_t __arm_vqshlq_m_r_s8(int8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s8)))\n" "int8x16_t __arm_vqshlq_m_r(int8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u16)))\n" "uint16x8_t __arm_vqshlq_m_r_u16(uint16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u16)))\n" "uint16x8_t __arm_vqshlq_m_r(uint16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u32)))\n" "uint32x4_t __arm_vqshlq_m_r_u32(uint32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u32)))\n" "uint32x4_t __arm_vqshlq_m_r(uint32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u8)))\n" "uint8x16_t __arm_vqshlq_m_r_u8(uint8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u8)))\n" "uint8x16_t __arm_vqshlq_m_r(uint8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s16)))\n" "int16x8_t __arm_vqshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s16)))\n" "int16x8_t __arm_vqshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s32)))\n" "int32x4_t __arm_vqshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s32)))\n" "int32x4_t __arm_vqshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s8)))\n" "int8x16_t __arm_vqshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s8)))\n" "int8x16_t __arm_vqshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u16)))\n" "uint16x8_t __arm_vqshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u16)))\n" "uint16x8_t __arm_vqshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u32)))\n" "uint32x4_t __arm_vqshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u32)))\n" "uint32x4_t __arm_vqshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u8)))\n" "uint8x16_t __arm_vqshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u8)))\n" "uint8x16_t __arm_vqshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s16)))\n" "int16x8_t __arm_vqshlq_n_s16(int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s16)))\n" "int16x8_t __arm_vqshlq_n(int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s32)))\n" "int32x4_t __arm_vqshlq_n_s32(int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s32)))\n" "int32x4_t __arm_vqshlq_n(int32x4_t, int);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s8)))\n" "int8x16_t __arm_vqshlq_n_s8(int8x16_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s8)))\n" "int8x16_t __arm_vqshlq_n(int8x16_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u16)))\n" "uint16x8_t __arm_vqshlq_n_u16(uint16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u16)))\n" "uint16x8_t __arm_vqshlq_n(uint16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u32)))\n" "uint32x4_t __arm_vqshlq_n_u32(uint32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u32)))\n" "uint32x4_t __arm_vqshlq_n(uint32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u8)))\n" "uint8x16_t __arm_vqshlq_n_u8(uint8x16_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u8)))\n" "uint8x16_t __arm_vqshlq_n(uint8x16_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s16)))\n" "int16x8_t __arm_vqshlq_r_s16(int16x8_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s16)))\n" "int16x8_t __arm_vqshlq_r(int16x8_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s32)))\n" "int32x4_t __arm_vqshlq_r_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s32)))\n" "int32x4_t __arm_vqshlq_r(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s8)))\n" "int8x16_t __arm_vqshlq_r_s8(int8x16_t, int32_t);\n" 
"static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s8)))\n" "int8x16_t __arm_vqshlq_r(int8x16_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u16)))\n" "uint16x8_t __arm_vqshlq_r_u16(uint16x8_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u16)))\n" "uint16x8_t __arm_vqshlq_r(uint16x8_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u32)))\n" "uint32x4_t __arm_vqshlq_r_u32(uint32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u32)))\n" "uint32x4_t __arm_vqshlq_r(uint32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u8)))\n" "uint8x16_t __arm_vqshlq_r_u8(uint8x16_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u8)))\n" "uint8x16_t __arm_vqshlq_r(uint8x16_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s16)))\n" "int16x8_t __arm_vqshlq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s16)))\n" "int16x8_t __arm_vqshlq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s32)))\n" "int32x4_t __arm_vqshlq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s32)))\n" "int32x4_t __arm_vqshlq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s8)))\n" "int8x16_t __arm_vqshlq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s8)))\n" "int8x16_t __arm_vqshlq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u16)))\n" "uint16x8_t __arm_vqshlq_u16(uint16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u16)))\n" "uint16x8_t __arm_vqshlq(uint16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u32)))\n" "uint32x4_t __arm_vqshlq_u32(uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u32)))\n" "uint32x4_t __arm_vqshlq(uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u8)))\n" "uint8x16_t __arm_vqshlq_u8(uint8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u8)))\n" "uint8x16_t __arm_vqshlq(uint8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s16)))\n" "uint16x8_t __arm_vqshluq_m_n_s16(uint16x8_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s16)))\n" "uint16x8_t __arm_vqshluq_m(uint16x8_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s32)))\n" "uint32x4_t __arm_vqshluq_m_n_s32(uint32x4_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s32)))\n" "uint32x4_t __arm_vqshluq_m(uint32x4_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s8)))\n" "uint8x16_t __arm_vqshluq_m_n_s8(uint8x16_t, int8x16_t, int, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s8)))\n" "uint8x16_t __arm_vqshluq_m(uint8x16_t, int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s16)))\n" "uint16x8_t __arm_vqshluq_n_s16(int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s16)))\n" "uint16x8_t __arm_vqshluq(int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s32)))\n" "uint32x4_t __arm_vqshluq_n_s32(int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s32)))\n" "uint32x4_t __arm_vqshluq(int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s8)))\n" "uint8x16_t __arm_vqshluq_n_s8(int8x16_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s8)))\n" "uint8x16_t __arm_vqshluq(int8x16_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_s16)))\n" "int8x16_t __arm_vqshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_s16)))\n" "int8x16_t __arm_vqshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_s32)))\n" "int16x8_t __arm_vqshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_s32)))\n" "int16x8_t __arm_vqshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_u16)))\n" "uint8x16_t __arm_vqshrnbq_m_n_u16(uint8x16_t, 
uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_u16)))\n" "uint8x16_t __arm_vqshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_u32)))\n" "uint16x8_t __arm_vqshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_u32)))\n" "uint16x8_t __arm_vqshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_s16)))\n" "int8x16_t __arm_vqshrnbq_n_s16(int8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_s16)))\n" "int8x16_t __arm_vqshrnbq(int8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_s32)))\n" "int16x8_t __arm_vqshrnbq_n_s32(int16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_s32)))\n" "int16x8_t __arm_vqshrnbq(int16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_u16)))\n" "uint8x16_t __arm_vqshrnbq_n_u16(uint8x16_t, uint16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_u16)))\n" "uint8x16_t __arm_vqshrnbq(uint8x16_t, uint16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_u32)))\n" "uint16x8_t __arm_vqshrnbq_n_u32(uint16x8_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_u32)))\n" "uint16x8_t __arm_vqshrnbq(uint16x8_t, uint32x4_t, int);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_s16)))\n" "int8x16_t __arm_vqshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_s16)))\n" "int8x16_t __arm_vqshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_s32)))\n" "int16x8_t __arm_vqshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_s32)))\n" "int16x8_t __arm_vqshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_u16)))\n" "uint8x16_t __arm_vqshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_u16)))\n" "uint8x16_t __arm_vqshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_u32)))\n" "uint16x8_t __arm_vqshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_u32)))\n" "uint16x8_t __arm_vqshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_s16)))\n" "int8x16_t __arm_vqshrntq_n_s16(int8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_s16)))\n" "int8x16_t __arm_vqshrntq(int8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_s32)))\n" "int16x8_t __arm_vqshrntq_n_s32(int16x8_t, int32x4_t, int);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_s32)))\n" "int16x8_t __arm_vqshrntq(int16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_u16)))\n" "uint8x16_t __arm_vqshrntq_n_u16(uint8x16_t, uint16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_u16)))\n" "uint8x16_t __arm_vqshrntq(uint8x16_t, uint16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_u32)))\n" "uint16x8_t __arm_vqshrntq_n_u32(uint16x8_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_u32)))\n" "uint16x8_t __arm_vqshrntq(uint16x8_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_m_n_s16)))\n" "uint8x16_t __arm_vqshrunbq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_m_n_s16)))\n" "uint8x16_t __arm_vqshrunbq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_m_n_s32)))\n" "uint16x8_t __arm_vqshrunbq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_m_n_s32)))\n" "uint16x8_t __arm_vqshrunbq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_n_s16)))\n" "uint8x16_t __arm_vqshrunbq_n_s16(uint8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_n_s16)))\n" "uint8x16_t __arm_vqshrunbq(uint8x16_t, int16x8_t, int);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_n_s32)))\n" "uint16x8_t __arm_vqshrunbq_n_s32(uint16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_n_s32)))\n" "uint16x8_t __arm_vqshrunbq(uint16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_m_n_s16)))\n" "uint8x16_t __arm_vqshruntq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_m_n_s16)))\n" "uint8x16_t __arm_vqshruntq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_m_n_s32)))\n" "uint16x8_t __arm_vqshruntq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_m_n_s32)))\n" "uint16x8_t __arm_vqshruntq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_n_s16)))\n" "uint8x16_t __arm_vqshruntq_n_s16(uint8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_n_s16)))\n" "uint8x16_t __arm_vqshruntq(uint8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_n_s32)))\n" "uint16x8_t __arm_vqshruntq_n_s32(uint16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_n_s32)))\n" "uint16x8_t __arm_vqshruntq(uint16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s16)))\n" "int16x8_t __arm_vqsubq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s16)))\n" "int16x8_t __arm_vqsubq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s32)))\n" "int32x4_t __arm_vqsubq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s32)))\n" "int32x4_t __arm_vqsubq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s8)))\n" "int8x16_t __arm_vqsubq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s8)))\n" "int8x16_t __arm_vqsubq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u16)))\n" "uint16x8_t __arm_vqsubq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u16)))\n" "uint16x8_t __arm_vqsubq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u32)))\n" "uint32x4_t __arm_vqsubq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u32)))\n" "uint32x4_t __arm_vqsubq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u8)))\n" "uint8x16_t __arm_vqsubq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u8)))\n" "uint8x16_t 
__arm_vqsubq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s16)))\n" "int16x8_t __arm_vqsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s16)))\n" "int16x8_t __arm_vqsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s32)))\n" "int32x4_t __arm_vqsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s32)))\n" "int32x4_t __arm_vqsubq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s8)))\n" "int8x16_t __arm_vqsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s8)))\n" "int8x16_t __arm_vqsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u16)))\n" "uint16x8_t __arm_vqsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u16)))\n" "uint16x8_t __arm_vqsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u32)))\n" "uint32x4_t __arm_vqsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u32)))\n" "uint32x4_t __arm_vqsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u8)))\n" "uint8x16_t __arm_vqsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u8)))\n" "uint8x16_t __arm_vqsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s16)))\n" "int16x8_t __arm_vqsubq_n_s16(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s16)))\n" "int16x8_t __arm_vqsubq(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s32)))\n" "int32x4_t __arm_vqsubq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s32)))\n" "int32x4_t __arm_vqsubq(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s8)))\n" "int8x16_t __arm_vqsubq_n_s8(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s8)))\n" "int8x16_t __arm_vqsubq(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u16)))\n" "uint16x8_t __arm_vqsubq_n_u16(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u16)))\n" "uint16x8_t __arm_vqsubq(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u32)))\n" "uint32x4_t __arm_vqsubq_n_u32(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u32)))\n" "uint32x4_t __arm_vqsubq(uint32x4_t, uint32_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u8)))\n" "uint8x16_t __arm_vqsubq_n_u8(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u8)))\n" "uint8x16_t __arm_vqsubq(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s16)))\n" "int16x8_t __arm_vqsubq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s16)))\n" "int16x8_t __arm_vqsubq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s32)))\n" "int32x4_t __arm_vqsubq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s32)))\n" "int32x4_t __arm_vqsubq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s8)))\n" "int8x16_t __arm_vqsubq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s8)))\n" "int8x16_t __arm_vqsubq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u16)))\n" "uint16x8_t __arm_vqsubq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u16)))\n" "uint16x8_t __arm_vqsubq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u32)))\n" "uint32x4_t __arm_vqsubq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u32)))\n" "uint32x4_t __arm_vqsubq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u8)))\n" "uint8x16_t __arm_vqsubq_u8(uint8x16_t, 
uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u8)))\n" "uint8x16_t __arm_vqsubq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s32)))\n" "int16x8_t __arm_vreinterpretq_s16_s32(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s32)))\n" "int16x8_t __arm_vreinterpretq_s16(int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s64)))\n" "int16x8_t __arm_vreinterpretq_s16_s64(int64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s64)))\n" "int16x8_t __arm_vreinterpretq_s16(int64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s8)))\n" "int16x8_t __arm_vreinterpretq_s16_s8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s8)))\n" "int16x8_t __arm_vreinterpretq_s16(int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u16)))\n" "int16x8_t __arm_vreinterpretq_s16_u16(uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u16)))\n" "int16x8_t __arm_vreinterpretq_s16(uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u32)))\n" "int16x8_t __arm_vreinterpretq_s16_u32(uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u32)))\n" "int16x8_t __arm_vreinterpretq_s16(uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u64)))\n" "int16x8_t 
__arm_vreinterpretq_s16_u64(uint64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u64)))\n" "int16x8_t __arm_vreinterpretq_s16(uint64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u8)))\n" "int16x8_t __arm_vreinterpretq_s16_u8(uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u8)))\n" "int16x8_t __arm_vreinterpretq_s16(uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s16)))\n" "int32x4_t __arm_vreinterpretq_s32_s16(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s16)))\n" "int32x4_t __arm_vreinterpretq_s32(int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s64)))\n" "int32x4_t __arm_vreinterpretq_s32_s64(int64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s64)))\n" "int32x4_t __arm_vreinterpretq_s32(int64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s8)))\n" "int32x4_t __arm_vreinterpretq_s32_s8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s8)))\n" "int32x4_t __arm_vreinterpretq_s32(int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u16)))\n" "int32x4_t __arm_vreinterpretq_s32_u16(uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u16)))\n" "int32x4_t __arm_vreinterpretq_s32(uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u32)))\n" 
"int32x4_t __arm_vreinterpretq_s32_u32(uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u32)))\n" "int32x4_t __arm_vreinterpretq_s32(uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u64)))\n" "int32x4_t __arm_vreinterpretq_s32_u64(uint64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u64)))\n" "int32x4_t __arm_vreinterpretq_s32(uint64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u8)))\n" "int32x4_t __arm_vreinterpretq_s32_u8(uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u8)))\n" "int32x4_t __arm_vreinterpretq_s32(uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s16)))\n" "int64x2_t __arm_vreinterpretq_s64_s16(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s16)))\n" "int64x2_t __arm_vreinterpretq_s64(int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s32)))\n" "int64x2_t __arm_vreinterpretq_s64_s32(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s32)))\n" "int64x2_t __arm_vreinterpretq_s64(int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s8)))\n" "int64x2_t __arm_vreinterpretq_s64_s8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s8)))\n" "int64x2_t __arm_vreinterpretq_s64(int8x16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u16)))\n" "int64x2_t __arm_vreinterpretq_s64_u16(uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u16)))\n" "int64x2_t __arm_vreinterpretq_s64(uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u32)))\n" "int64x2_t __arm_vreinterpretq_s64_u32(uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u32)))\n" "int64x2_t __arm_vreinterpretq_s64(uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u64)))\n" "int64x2_t __arm_vreinterpretq_s64_u64(uint64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u64)))\n" "int64x2_t __arm_vreinterpretq_s64(uint64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u8)))\n" "int64x2_t __arm_vreinterpretq_s64_u8(uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u8)))\n" "int64x2_t __arm_vreinterpretq_s64(uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s16)))\n" "int8x16_t __arm_vreinterpretq_s8_s16(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s16)))\n" "int8x16_t __arm_vreinterpretq_s8(int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s32)))\n" "int8x16_t __arm_vreinterpretq_s8_s32(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s32)))\n" "int8x16_t __arm_vreinterpretq_s8(int32x4_t);\n" "static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s64)))\n" "int8x16_t __arm_vreinterpretq_s8_s64(int64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s64)))\n" "int8x16_t __arm_vreinterpretq_s8(int64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u16)))\n" "int8x16_t __arm_vreinterpretq_s8_u16(uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u16)))\n" "int8x16_t __arm_vreinterpretq_s8(uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u32)))\n" "int8x16_t __arm_vreinterpretq_s8_u32(uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u32)))\n" "int8x16_t __arm_vreinterpretq_s8(uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u64)))\n" "int8x16_t __arm_vreinterpretq_s8_u64(uint64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u64)))\n" "int8x16_t __arm_vreinterpretq_s8(uint64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u8)))\n" "int8x16_t __arm_vreinterpretq_s8_u8(uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u8)))\n" "int8x16_t __arm_vreinterpretq_s8(uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s16)))\n" "uint16x8_t __arm_vreinterpretq_u16_s16(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s16)))\n" "uint16x8_t __arm_vreinterpretq_u16(int16x8_t);\n" "static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s32)))\n" "uint16x8_t __arm_vreinterpretq_u16_s32(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s32)))\n" "uint16x8_t __arm_vreinterpretq_u16(int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s64)))\n" "uint16x8_t __arm_vreinterpretq_u16_s64(int64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s64)))\n" "uint16x8_t __arm_vreinterpretq_u16(int64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s8)))\n" "uint16x8_t __arm_vreinterpretq_u16_s8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s8)))\n" "uint16x8_t __arm_vreinterpretq_u16(int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u32)))\n" "uint16x8_t __arm_vreinterpretq_u16_u32(uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u32)))\n" "uint16x8_t __arm_vreinterpretq_u16(uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u64)))\n" "uint16x8_t __arm_vreinterpretq_u16_u64(uint64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u64)))\n" "uint16x8_t __arm_vreinterpretq_u16(uint64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u8)))\n" "uint16x8_t __arm_vreinterpretq_u16_u8(uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u8)))\n" "uint16x8_t 
__arm_vreinterpretq_u16(uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s16)))\n" "uint32x4_t __arm_vreinterpretq_u32_s16(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s16)))\n" "uint32x4_t __arm_vreinterpretq_u32(int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s32)))\n" "uint32x4_t __arm_vreinterpretq_u32_s32(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s32)))\n" "uint32x4_t __arm_vreinterpretq_u32(int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s64)))\n" "uint32x4_t __arm_vreinterpretq_u32_s64(int64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s64)))\n" "uint32x4_t __arm_vreinterpretq_u32(int64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s8)))\n" "uint32x4_t __arm_vreinterpretq_u32_s8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s8)))\n" "uint32x4_t __arm_vreinterpretq_u32(int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u16)))\n" "uint32x4_t __arm_vreinterpretq_u32_u16(uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u16)))\n" "uint32x4_t __arm_vreinterpretq_u32(uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u64)))\n" "uint32x4_t __arm_vreinterpretq_u32_u64(uint64x2_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u64)))\n" "uint32x4_t __arm_vreinterpretq_u32(uint64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u8)))\n" "uint32x4_t __arm_vreinterpretq_u32_u8(uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u8)))\n" "uint32x4_t __arm_vreinterpretq_u32(uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s16)))\n" "uint64x2_t __arm_vreinterpretq_u64_s16(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s16)))\n" "uint64x2_t __arm_vreinterpretq_u64(int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s32)))\n" "uint64x2_t __arm_vreinterpretq_u64_s32(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s32)))\n" "uint64x2_t __arm_vreinterpretq_u64(int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s64)))\n" "uint64x2_t __arm_vreinterpretq_u64_s64(int64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s64)))\n" "uint64x2_t __arm_vreinterpretq_u64(int64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s8)))\n" "uint64x2_t __arm_vreinterpretq_u64_s8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s8)))\n" "uint64x2_t __arm_vreinterpretq_u64(int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u16)))\n" "uint64x2_t __arm_vreinterpretq_u64_u16(uint16x8_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u16)))\n" "uint64x2_t __arm_vreinterpretq_u64(uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u32)))\n" "uint64x2_t __arm_vreinterpretq_u64_u32(uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u32)))\n" "uint64x2_t __arm_vreinterpretq_u64(uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u8)))\n" "uint64x2_t __arm_vreinterpretq_u64_u8(uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u8)))\n" "uint64x2_t __arm_vreinterpretq_u64(uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s16)))\n" "uint8x16_t __arm_vreinterpretq_u8_s16(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s16)))\n" "uint8x16_t __arm_vreinterpretq_u8(int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s32)))\n" "uint8x16_t __arm_vreinterpretq_u8_s32(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s32)))\n" "uint8x16_t __arm_vreinterpretq_u8(int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s64)))\n" "uint8x16_t __arm_vreinterpretq_u8_s64(int64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s64)))\n" "uint8x16_t __arm_vreinterpretq_u8(int64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s8)))\n" "uint8x16_t __arm_vreinterpretq_u8_s8(int8x16_t);\n" "static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s8)))\n" "uint8x16_t __arm_vreinterpretq_u8(int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u16)))\n" "uint8x16_t __arm_vreinterpretq_u8_u16(uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u16)))\n" "uint8x16_t __arm_vreinterpretq_u8(uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u32)))\n" "uint8x16_t __arm_vreinterpretq_u8_u32(uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u32)))\n" "uint8x16_t __arm_vreinterpretq_u8(uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u64)))\n" "uint8x16_t __arm_vreinterpretq_u8_u64(uint64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u64)))\n" "uint8x16_t __arm_vreinterpretq_u8(uint64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_m_s8)))\n" "int8x16_t __arm_vrev16q_m_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_m_s8)))\n" "int8x16_t __arm_vrev16q_m(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_m_u8)))\n" "uint8x16_t __arm_vrev16q_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_m_u8)))\n" "uint8x16_t __arm_vrev16q_m(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_s8)))\n" "int8x16_t 
__arm_vrev16q_s8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_s8)))\n" "int8x16_t __arm_vrev16q(int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_u8)))\n" "uint8x16_t __arm_vrev16q_u8(uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_u8)))\n" "uint8x16_t __arm_vrev16q(uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_x_s8)))\n" "int8x16_t __arm_vrev16q_x_s8(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_x_s8)))\n" "int8x16_t __arm_vrev16q_x(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_x_u8)))\n" "uint8x16_t __arm_vrev16q_x_u8(uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_x_u8)))\n" "uint8x16_t __arm_vrev16q_x(uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_s16)))\n" "int16x8_t __arm_vrev32q_m_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_s16)))\n" "int16x8_t __arm_vrev32q_m(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_s8)))\n" "int8x16_t __arm_vrev32q_m_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_s8)))\n" "int8x16_t __arm_vrev32q_m(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_u16)))\n" "uint16x8_t __arm_vrev32q_m_u16(uint16x8_t, uint16x8_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_u16)))\n" "uint16x8_t __arm_vrev32q_m(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_u8)))\n" "uint8x16_t __arm_vrev32q_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_u8)))\n" "uint8x16_t __arm_vrev32q_m(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_s16)))\n" "int16x8_t __arm_vrev32q_s16(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_s16)))\n" "int16x8_t __arm_vrev32q(int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_s8)))\n" "int8x16_t __arm_vrev32q_s8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_s8)))\n" "int8x16_t __arm_vrev32q(int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_u16)))\n" "uint16x8_t __arm_vrev32q_u16(uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_u16)))\n" "uint16x8_t __arm_vrev32q(uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_u8)))\n" "uint8x16_t __arm_vrev32q_u8(uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_u8)))\n" "uint8x16_t __arm_vrev32q(uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_s16)))\n" "int16x8_t __arm_vrev32q_x_s16(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_s16)))\n" "int16x8_t __arm_vrev32q_x(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_s8)))\n" "int8x16_t __arm_vrev32q_x_s8(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_s8)))\n" "int8x16_t __arm_vrev32q_x(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_u16)))\n" "uint16x8_t __arm_vrev32q_x_u16(uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_u16)))\n" "uint16x8_t __arm_vrev32q_x(uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_u8)))\n" "uint8x16_t __arm_vrev32q_x_u8(uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_u8)))\n" "uint8x16_t __arm_vrev32q_x(uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s16)))\n" "int16x8_t __arm_vrev64q_m_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s16)))\n" "int16x8_t __arm_vrev64q_m(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s32)))\n" "int32x4_t __arm_vrev64q_m_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s32)))\n" "int32x4_t __arm_vrev64q_m(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s8)))\n" "int8x16_t __arm_vrev64q_m_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s8)))\n" "int8x16_t __arm_vrev64q_m(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u16)))\n" "uint16x8_t __arm_vrev64q_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u16)))\n" "uint16x8_t __arm_vrev64q_m(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u32)))\n" "uint32x4_t __arm_vrev64q_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u32)))\n" "uint32x4_t __arm_vrev64q_m(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u8)))\n" "uint8x16_t __arm_vrev64q_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u8)))\n" "uint8x16_t __arm_vrev64q_m(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s16)))\n" "int16x8_t __arm_vrev64q_s16(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s16)))\n" "int16x8_t __arm_vrev64q(int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s32)))\n" "int32x4_t __arm_vrev64q_s32(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s32)))\n" "int32x4_t __arm_vrev64q(int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s8)))\n" "int8x16_t __arm_vrev64q_s8(int8x16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s8)))\n" "int8x16_t __arm_vrev64q(int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u16)))\n" "uint16x8_t __arm_vrev64q_u16(uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u16)))\n" "uint16x8_t __arm_vrev64q(uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u32)))\n" "uint32x4_t __arm_vrev64q_u32(uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u32)))\n" "uint32x4_t __arm_vrev64q(uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u8)))\n" "uint8x16_t __arm_vrev64q_u8(uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u8)))\n" "uint8x16_t __arm_vrev64q(uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s16)))\n" "int16x8_t __arm_vrev64q_x_s16(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s16)))\n" "int16x8_t __arm_vrev64q_x(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s32)))\n" "int32x4_t __arm_vrev64q_x_s32(int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s32)))\n" "int32x4_t __arm_vrev64q_x(int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s8)))\n" "int8x16_t __arm_vrev64q_x_s8(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s8)))\n" "int8x16_t __arm_vrev64q_x(int8x16_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u16)))\n" "uint16x8_t __arm_vrev64q_x_u16(uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u16)))\n" "uint16x8_t __arm_vrev64q_x(uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u32)))\n" "uint32x4_t __arm_vrev64q_x_u32(uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u32)))\n" "uint32x4_t __arm_vrev64q_x(uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u8)))\n" "uint8x16_t __arm_vrev64q_x_u8(uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u8)))\n" "uint8x16_t __arm_vrev64q_x(uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s16)))\n" "int16x8_t __arm_vrhaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s16)))\n" "int16x8_t __arm_vrhaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s32)))\n" "int32x4_t __arm_vrhaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s32)))\n" "int32x4_t __arm_vrhaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s8)))\n" "int8x16_t __arm_vrhaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s8)))\n" "int8x16_t __arm_vrhaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u16)))\n" "uint16x8_t __arm_vrhaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u16)))\n" "uint16x8_t __arm_vrhaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u32)))\n" "uint32x4_t __arm_vrhaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u32)))\n" "uint32x4_t __arm_vrhaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u8)))\n" "uint8x16_t __arm_vrhaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u8)))\n" "uint8x16_t __arm_vrhaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s16)))\n" "int16x8_t __arm_vrhaddq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s16)))\n" "int16x8_t __arm_vrhaddq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s32)))\n" "int32x4_t __arm_vrhaddq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s32)))\n" "int32x4_t __arm_vrhaddq(int32x4_t, int32x4_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s8)))\n" "int8x16_t __arm_vrhaddq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s8)))\n" "int8x16_t __arm_vrhaddq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u16)))\n" "uint16x8_t __arm_vrhaddq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u16)))\n" "uint16x8_t __arm_vrhaddq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u32)))\n" "uint32x4_t __arm_vrhaddq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u32)))\n" "uint32x4_t __arm_vrhaddq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u8)))\n" "uint8x16_t __arm_vrhaddq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u8)))\n" "uint8x16_t __arm_vrhaddq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s16)))\n" "int16x8_t __arm_vrhaddq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s16)))\n" "int16x8_t __arm_vrhaddq_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s32)))\n" "int32x4_t __arm_vrhaddq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s32)))\n" "int32x4_t __arm_vrhaddq_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s8)))\n" "int8x16_t __arm_vrhaddq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s8)))\n" "int8x16_t __arm_vrhaddq_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u16)))\n" "uint16x8_t __arm_vrhaddq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u16)))\n" "uint16x8_t __arm_vrhaddq_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u32)))\n" "uint32x4_t __arm_vrhaddq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u32)))\n" "uint32x4_t __arm_vrhaddq_x(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u8)))\n" "uint8x16_t __arm_vrhaddq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u8)))\n" "uint8x16_t __arm_vrhaddq_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_p_s32)))\n" "int64_t __arm_vrmlaldavhaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_p_s32)))\n" "int64_t __arm_vrmlaldavhaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_p_u32)))\n" "uint64_t __arm_vrmlaldavhaq_p_u32(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_p_u32)))\n" "uint64_t __arm_vrmlaldavhaq_p(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_s32)))\n" "int64_t __arm_vrmlaldavhaq_s32(int64_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_s32)))\n" "int64_t __arm_vrmlaldavhaq(int64_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_u32)))\n" "uint64_t __arm_vrmlaldavhaq_u32(uint64_t, uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_u32)))\n" "uint64_t __arm_vrmlaldavhaq(uint64_t, uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaxq_p_s32)))\n" "int64_t __arm_vrmlaldavhaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaxq_p_s32)))\n" "int64_t __arm_vrmlaldavhaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaxq_s32)))\n" "int64_t __arm_vrmlaldavhaxq_s32(int64_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaxq_s32)))\n" "int64_t __arm_vrmlaldavhaxq(int64_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_p_s32)))\n" "int64_t __arm_vrmlaldavhq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_p_s32)))\n" "int64_t __arm_vrmlaldavhq_p(int32x4_t, int32x4_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_p_u32)))\n" "uint64_t __arm_vrmlaldavhq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_p_u32)))\n" "uint64_t __arm_vrmlaldavhq_p(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_s32)))\n" "int64_t __arm_vrmlaldavhq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_s32)))\n" "int64_t __arm_vrmlaldavhq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_u32)))\n" "uint64_t __arm_vrmlaldavhq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_u32)))\n" "uint64_t __arm_vrmlaldavhq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhxq_p_s32)))\n" "int64_t __arm_vrmlaldavhxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhxq_p_s32)))\n" "int64_t __arm_vrmlaldavhxq_p(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhxq_s32)))\n" "int64_t __arm_vrmlaldavhxq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhxq_s32)))\n" "int64_t __arm_vrmlaldavhxq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaq_p_s32)))\n" "int64_t __arm_vrmlsldavhaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaq_p_s32)))\n" "int64_t __arm_vrmlsldavhaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaq_s32)))\n" "int64_t __arm_vrmlsldavhaq_s32(int64_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaq_s32)))\n" "int64_t __arm_vrmlsldavhaq(int64_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaxq_p_s32)))\n" "int64_t __arm_vrmlsldavhaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaxq_p_s32)))\n" "int64_t __arm_vrmlsldavhaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaxq_s32)))\n" "int64_t __arm_vrmlsldavhaxq_s32(int64_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaxq_s32)))\n" "int64_t __arm_vrmlsldavhaxq(int64_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhq_p_s32)))\n" "int64_t __arm_vrmlsldavhq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhq_p_s32)))\n" "int64_t __arm_vrmlsldavhq_p(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhq_s32)))\n" "int64_t __arm_vrmlsldavhq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhq_s32)))\n" "int64_t __arm_vrmlsldavhq(int32x4_t, int32x4_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhxq_p_s32)))\n" "int64_t __arm_vrmlsldavhxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhxq_p_s32)))\n" "int64_t __arm_vrmlsldavhxq_p(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhxq_s32)))\n" "int64_t __arm_vrmlsldavhxq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhxq_s32)))\n" "int64_t __arm_vrmlsldavhxq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s16)))\n" "int16x8_t __arm_vrmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s16)))\n" "int16x8_t __arm_vrmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s32)))\n" "int32x4_t __arm_vrmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s32)))\n" "int32x4_t __arm_vrmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s8)))\n" "int8x16_t __arm_vrmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s8)))\n" "int8x16_t __arm_vrmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u16)))\n" "uint16x8_t __arm_vrmulhq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u16)))\n" "uint16x8_t __arm_vrmulhq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u32)))\n" "uint32x4_t __arm_vrmulhq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u32)))\n" "uint32x4_t __arm_vrmulhq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u8)))\n" "uint8x16_t __arm_vrmulhq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u8)))\n" "uint8x16_t __arm_vrmulhq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s16)))\n" "int16x8_t __arm_vrmulhq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s16)))\n" "int16x8_t __arm_vrmulhq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s32)))\n" "int32x4_t __arm_vrmulhq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s32)))\n" "int32x4_t __arm_vrmulhq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s8)))\n" "int8x16_t __arm_vrmulhq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s8)))\n" "int8x16_t __arm_vrmulhq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u16)))\n" 
"uint16x8_t __arm_vrmulhq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u16)))\n" "uint16x8_t __arm_vrmulhq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u32)))\n" "uint32x4_t __arm_vrmulhq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u32)))\n" "uint32x4_t __arm_vrmulhq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u8)))\n" "uint8x16_t __arm_vrmulhq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u8)))\n" "uint8x16_t __arm_vrmulhq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s16)))\n" "int16x8_t __arm_vrmulhq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s16)))\n" "int16x8_t __arm_vrmulhq_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s32)))\n" "int32x4_t __arm_vrmulhq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s32)))\n" "int32x4_t __arm_vrmulhq_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s8)))\n" "int8x16_t __arm_vrmulhq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s8)))\n" "int8x16_t __arm_vrmulhq_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u16)))\n" "uint16x8_t __arm_vrmulhq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u16)))\n" "uint16x8_t __arm_vrmulhq_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u32)))\n" "uint32x4_t __arm_vrmulhq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u32)))\n" "uint32x4_t __arm_vrmulhq_x(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u8)))\n" "uint8x16_t __arm_vrmulhq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u8)))\n" "uint8x16_t __arm_vrmulhq_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s16)))\n" "int16x8_t __arm_vrshlq_m_n_s16(int16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s16)))\n" "int16x8_t __arm_vrshlq_m_n(int16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s32)))\n" "int32x4_t __arm_vrshlq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s32)))\n" "int32x4_t __arm_vrshlq_m_n(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s8)))\n" "int8x16_t __arm_vrshlq_m_n_s8(int8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s8)))\n" "int8x16_t __arm_vrshlq_m_n(int8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u16)))\n" "uint16x8_t __arm_vrshlq_m_n_u16(uint16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u16)))\n" "uint16x8_t __arm_vrshlq_m_n(uint16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u32)))\n" "uint32x4_t __arm_vrshlq_m_n_u32(uint32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u32)))\n" "uint32x4_t __arm_vrshlq_m_n(uint32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u8)))\n" "uint8x16_t __arm_vrshlq_m_n_u8(uint8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u8)))\n" "uint8x16_t __arm_vrshlq_m_n(uint8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s16)))\n" "int16x8_t __arm_vrshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s16)))\n" "int16x8_t __arm_vrshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s32)))\n" "int32x4_t __arm_vrshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s32)))\n" "int32x4_t __arm_vrshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s8)))\n" "int8x16_t __arm_vrshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s8)))\n" "int8x16_t __arm_vrshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u16)))\n" "uint16x8_t __arm_vrshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u16)))\n" "uint16x8_t __arm_vrshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u32)))\n" "uint32x4_t __arm_vrshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u32)))\n" "uint32x4_t __arm_vrshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u8)))\n" "uint8x16_t __arm_vrshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u8)))\n" "uint8x16_t __arm_vrshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s16)))\n" "int16x8_t __arm_vrshlq_n_s16(int16x8_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s16)))\n" "int16x8_t __arm_vrshlq(int16x8_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s32)))\n" "int32x4_t __arm_vrshlq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s32)))\n" "int32x4_t __arm_vrshlq(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s8)))\n" "int8x16_t __arm_vrshlq_n_s8(int8x16_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s8)))\n" "int8x16_t __arm_vrshlq(int8x16_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u16)))\n" "uint16x8_t __arm_vrshlq_n_u16(uint16x8_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u16)))\n" "uint16x8_t __arm_vrshlq(uint16x8_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u32)))\n" "uint32x4_t __arm_vrshlq_n_u32(uint32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u32)))\n" "uint32x4_t __arm_vrshlq(uint32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u8)))\n" "uint8x16_t __arm_vrshlq_n_u8(uint8x16_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u8)))\n" "uint8x16_t __arm_vrshlq(uint8x16_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s16)))\n" "int16x8_t __arm_vrshlq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s16)))\n" "int16x8_t __arm_vrshlq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s32)))\n" "int32x4_t __arm_vrshlq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s32)))\n" "int32x4_t __arm_vrshlq(int32x4_t, int32x4_t);\n" 
"static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s8)))\n" "int8x16_t __arm_vrshlq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s8)))\n" "int8x16_t __arm_vrshlq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u16)))\n" "uint16x8_t __arm_vrshlq_u16(uint16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u16)))\n" "uint16x8_t __arm_vrshlq(uint16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u32)))\n" "uint32x4_t __arm_vrshlq_u32(uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u32)))\n" "uint32x4_t __arm_vrshlq(uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u8)))\n" "uint8x16_t __arm_vrshlq_u8(uint8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u8)))\n" "uint8x16_t __arm_vrshlq(uint8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s16)))\n" "int16x8_t __arm_vrshlq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s16)))\n" "int16x8_t __arm_vrshlq_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s32)))\n" "int32x4_t __arm_vrshlq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s32)))\n" "int32x4_t __arm_vrshlq_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s8)))\n" "int8x16_t __arm_vrshlq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s8)))\n" "int8x16_t __arm_vrshlq_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u16)))\n" "uint16x8_t __arm_vrshlq_x_u16(uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u16)))\n" "uint16x8_t __arm_vrshlq_x(uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u32)))\n" "uint32x4_t __arm_vrshlq_x_u32(uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u32)))\n" "uint32x4_t __arm_vrshlq_x(uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u8)))\n" "uint8x16_t __arm_vrshlq_x_u8(uint8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u8)))\n" "uint8x16_t __arm_vrshlq_x(uint8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_s16)))\n" "int8x16_t __arm_vrshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_s16)))\n" "int8x16_t __arm_vrshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_s32)))\n" "int16x8_t __arm_vrshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_s32)))\n" "int16x8_t __arm_vrshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_u16)))\n" "uint8x16_t __arm_vrshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_u16)))\n" "uint8x16_t __arm_vrshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_u32)))\n" "uint16x8_t __arm_vrshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_u32)))\n" "uint16x8_t __arm_vrshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_s16)))\n" "int8x16_t __arm_vrshrnbq_n_s16(int8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_s16)))\n" "int8x16_t __arm_vrshrnbq(int8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_s32)))\n" "int16x8_t __arm_vrshrnbq_n_s32(int16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_s32)))\n" "int16x8_t __arm_vrshrnbq(int16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_u16)))\n" "uint8x16_t __arm_vrshrnbq_n_u16(uint8x16_t, uint16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_u16)))\n" "uint8x16_t __arm_vrshrnbq(uint8x16_t, uint16x8_t, int);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_u32)))\n" "uint16x8_t __arm_vrshrnbq_n_u32(uint16x8_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_u32)))\n" "uint16x8_t __arm_vrshrnbq(uint16x8_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_s16)))\n" "int8x16_t __arm_vrshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_s16)))\n" "int8x16_t __arm_vrshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_s32)))\n" "int16x8_t __arm_vrshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_s32)))\n" "int16x8_t __arm_vrshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_u16)))\n" "uint8x16_t __arm_vrshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_u16)))\n" "uint8x16_t __arm_vrshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_u32)))\n" "uint16x8_t __arm_vrshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_u32)))\n" "uint16x8_t __arm_vrshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_s16)))\n" "int8x16_t __arm_vrshrntq_n_s16(int8x16_t, int16x8_t, int);\n" "static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_s16)))\n" "int8x16_t __arm_vrshrntq(int8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_s32)))\n" "int16x8_t __arm_vrshrntq_n_s32(int16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_s32)))\n" "int16x8_t __arm_vrshrntq(int16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_u16)))\n" "uint8x16_t __arm_vrshrntq_n_u16(uint8x16_t, uint16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_u16)))\n" "uint8x16_t __arm_vrshrntq(uint8x16_t, uint16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_u32)))\n" "uint16x8_t __arm_vrshrntq_n_u32(uint16x8_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_u32)))\n" "uint16x8_t __arm_vrshrntq(uint16x8_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s16)))\n" "int16x8_t __arm_vrshrq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s16)))\n" "int16x8_t __arm_vrshrq_m(int16x8_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s32)))\n" "int32x4_t __arm_vrshrq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s32)))\n" "int32x4_t __arm_vrshrq_m(int32x4_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s8)))\n" "int8x16_t __arm_vrshrq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s8)))\n" "int8x16_t __arm_vrshrq_m(int8x16_t, int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u16)))\n" "uint16x8_t __arm_vrshrq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u16)))\n" "uint16x8_t __arm_vrshrq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u32)))\n" "uint32x4_t __arm_vrshrq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u32)))\n" "uint32x4_t __arm_vrshrq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u8)))\n" "uint8x16_t __arm_vrshrq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u8)))\n" "uint8x16_t __arm_vrshrq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s16)))\n" "int16x8_t __arm_vrshrq_n_s16(int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s16)))\n" "int16x8_t __arm_vrshrq(int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s32)))\n" "int32x4_t __arm_vrshrq_n_s32(int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s32)))\n" "int32x4_t __arm_vrshrq(int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s8)))\n" "int8x16_t __arm_vrshrq_n_s8(int8x16_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s8)))\n" "int8x16_t __arm_vrshrq(int8x16_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u16)))\n" "uint16x8_t __arm_vrshrq_n_u16(uint16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u16)))\n" "uint16x8_t __arm_vrshrq(uint16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u32)))\n" "uint32x4_t __arm_vrshrq_n_u32(uint32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u32)))\n" "uint32x4_t __arm_vrshrq(uint32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u8)))\n" "uint8x16_t __arm_vrshrq_n_u8(uint8x16_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u8)))\n" "uint8x16_t __arm_vrshrq(uint8x16_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s16)))\n" "int16x8_t __arm_vrshrq_x_n_s16(int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s16)))\n" "int16x8_t __arm_vrshrq_x(int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s32)))\n" "int32x4_t __arm_vrshrq_x_n_s32(int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s32)))\n" "int32x4_t __arm_vrshrq_x(int32x4_t, 
int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s8)))\n" "int8x16_t __arm_vrshrq_x_n_s8(int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s8)))\n" "int8x16_t __arm_vrshrq_x(int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u16)))\n" "uint16x8_t __arm_vrshrq_x_n_u16(uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u16)))\n" "uint16x8_t __arm_vrshrq_x(uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u32)))\n" "uint32x4_t __arm_vrshrq_x_n_u32(uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u32)))\n" "uint32x4_t __arm_vrshrq_x(uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u8)))\n" "uint8x16_t __arm_vrshrq_x_n_u8(uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u8)))\n" "uint8x16_t __arm_vrshrq_x(uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_m_s32)))\n" "int32x4_t __arm_vsbciq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_m_s32)))\n" "int32x4_t __arm_vsbciq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_m_u32)))\n" "uint32x4_t __arm_vsbciq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);\n" "static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_m_u32)))\n" "uint32x4_t __arm_vsbciq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_s32)))\n" "int32x4_t __arm_vsbciq_s32(int32x4_t, int32x4_t, unsigned *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_s32)))\n" "int32x4_t __arm_vsbciq(int32x4_t, int32x4_t, unsigned *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_u32)))\n" "uint32x4_t __arm_vsbciq_u32(uint32x4_t, uint32x4_t, unsigned *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_u32)))\n" "uint32x4_t __arm_vsbciq(uint32x4_t, uint32x4_t, unsigned *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_m_s32)))\n" "int32x4_t __arm_vsbcq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_m_s32)))\n" "int32x4_t __arm_vsbcq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_m_u32)))\n" "uint32x4_t __arm_vsbcq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_m_u32)))\n" "uint32x4_t __arm_vsbcq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_s32)))\n" "int32x4_t __arm_vsbcq_s32(int32x4_t, int32x4_t, unsigned *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_s32)))\n" "int32x4_t __arm_vsbcq(int32x4_t, int32x4_t, unsigned *);\n" 
"static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_u32)))\n" "uint32x4_t __arm_vsbcq_u32(uint32x4_t, uint32x4_t, unsigned *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_u32)))\n" "uint32x4_t __arm_vsbcq(uint32x4_t, uint32x4_t, unsigned *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s16)))\n" "int16x8_t __arm_vsetq_lane_s16(int16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s16)))\n" "int16x8_t __arm_vsetq_lane(int16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s32)))\n" "int32x4_t __arm_vsetq_lane_s32(int32_t, int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s32)))\n" "int32x4_t __arm_vsetq_lane(int32_t, int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s64)))\n" "int64x2_t __arm_vsetq_lane_s64(int64_t, int64x2_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s64)))\n" "int64x2_t __arm_vsetq_lane(int64_t, int64x2_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s8)))\n" "int8x16_t __arm_vsetq_lane_s8(int8_t, int8x16_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s8)))\n" "int8x16_t __arm_vsetq_lane(int8_t, int8x16_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u16)))\n" "uint16x8_t __arm_vsetq_lane_u16(uint16_t, uint16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u16)))\n" "uint16x8_t __arm_vsetq_lane(uint16_t, uint16x8_t, 
int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u32)))\n" "uint32x4_t __arm_vsetq_lane_u32(uint32_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u32)))\n" "uint32x4_t __arm_vsetq_lane(uint32_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u64)))\n" "uint64x2_t __arm_vsetq_lane_u64(uint64_t, uint64x2_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u64)))\n" "uint64x2_t __arm_vsetq_lane(uint64_t, uint64x2_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u8)))\n" "uint8x16_t __arm_vsetq_lane_u8(uint8_t, uint8x16_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u8)))\n" "uint8x16_t __arm_vsetq_lane(uint8_t, uint8x16_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s16)))\n" "int16x8_t __arm_vshlcq_m_s16(int16x8_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s16)))\n" "int16x8_t __arm_vshlcq_m(int16x8_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s32)))\n" "int32x4_t __arm_vshlcq_m_s32(int32x4_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s32)))\n" "int32x4_t __arm_vshlcq_m(int32x4_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s8)))\n" "int8x16_t __arm_vshlcq_m_s8(int8x16_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s8)))\n" "int8x16_t __arm_vshlcq_m(int8x16_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u16)))\n" "uint16x8_t __arm_vshlcq_m_u16(uint16x8_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u16)))\n" "uint16x8_t __arm_vshlcq_m(uint16x8_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u32)))\n" "uint32x4_t __arm_vshlcq_m_u32(uint32x4_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u32)))\n" "uint32x4_t __arm_vshlcq_m(uint32x4_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u8)))\n" "uint8x16_t __arm_vshlcq_m_u8(uint8x16_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u8)))\n" "uint8x16_t __arm_vshlcq_m(uint8x16_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s16)))\n" "int16x8_t __arm_vshlcq_s16(int16x8_t, uint32_t *, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s16)))\n" "int16x8_t __arm_vshlcq(int16x8_t, uint32_t *, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s32)))\n" "int32x4_t __arm_vshlcq_s32(int32x4_t, uint32_t *, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s32)))\n" "int32x4_t __arm_vshlcq(int32x4_t, uint32_t *, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s8)))\n" "int8x16_t 
__arm_vshlcq_s8(int8x16_t, uint32_t *, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s8)))\n" "int8x16_t __arm_vshlcq(int8x16_t, uint32_t *, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u16)))\n" "uint16x8_t __arm_vshlcq_u16(uint16x8_t, uint32_t *, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u16)))\n" "uint16x8_t __arm_vshlcq(uint16x8_t, uint32_t *, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u32)))\n" "uint32x4_t __arm_vshlcq_u32(uint32x4_t, uint32_t *, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u32)))\n" "uint32x4_t __arm_vshlcq(uint32x4_t, uint32_t *, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u8)))\n" "uint8x16_t __arm_vshlcq_u8(uint8x16_t, uint32_t *, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u8)))\n" "uint8x16_t __arm_vshlcq(uint8x16_t, uint32_t *, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_s16)))\n" "int32x4_t __arm_vshllbq_m_n_s16(int32x4_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_s16)))\n" "int32x4_t __arm_vshllbq_m(int32x4_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_s8)))\n" "int16x8_t __arm_vshllbq_m_n_s8(int16x8_t, int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_s8)))\n" "int16x8_t __arm_vshllbq_m(int16x8_t, int8x16_t, int, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_u16)))\n" "uint32x4_t __arm_vshllbq_m_n_u16(uint32x4_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_u16)))\n" "uint32x4_t __arm_vshllbq_m(uint32x4_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_u8)))\n" "uint16x8_t __arm_vshllbq_m_n_u8(uint16x8_t, uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_u8)))\n" "uint16x8_t __arm_vshllbq_m(uint16x8_t, uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_s16)))\n" "int32x4_t __arm_vshllbq_n_s16(int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_s16)))\n" "int32x4_t __arm_vshllbq(int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_s8)))\n" "int16x8_t __arm_vshllbq_n_s8(int8x16_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_s8)))\n" "int16x8_t __arm_vshllbq(int8x16_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_u16)))\n" "uint32x4_t __arm_vshllbq_n_u16(uint16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_u16)))\n" "uint32x4_t __arm_vshllbq(uint16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_u8)))\n" "uint16x8_t __arm_vshllbq_n_u8(uint8x16_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_u8)))\n" "uint16x8_t __arm_vshllbq(uint8x16_t, int);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_s16)))\n" "int32x4_t __arm_vshllbq_x_n_s16(int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_s16)))\n" "int32x4_t __arm_vshllbq_x(int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_s8)))\n" "int16x8_t __arm_vshllbq_x_n_s8(int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_s8)))\n" "int16x8_t __arm_vshllbq_x(int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_u16)))\n" "uint32x4_t __arm_vshllbq_x_n_u16(uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_u16)))\n" "uint32x4_t __arm_vshllbq_x(uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_u8)))\n" "uint16x8_t __arm_vshllbq_x_n_u8(uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_u8)))\n" "uint16x8_t __arm_vshllbq_x(uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_s16)))\n" "int32x4_t __arm_vshlltq_m_n_s16(int32x4_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_s16)))\n" "int32x4_t __arm_vshlltq_m(int32x4_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_s8)))\n" "int16x8_t __arm_vshlltq_m_n_s8(int16x8_t, int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_s8)))\n" "int16x8_t __arm_vshlltq_m(int16x8_t, int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_u16)))\n" "uint32x4_t __arm_vshlltq_m_n_u16(uint32x4_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_u16)))\n" "uint32x4_t __arm_vshlltq_m(uint32x4_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_u8)))\n" "uint16x8_t __arm_vshlltq_m_n_u8(uint16x8_t, uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_u8)))\n" "uint16x8_t __arm_vshlltq_m(uint16x8_t, uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_s16)))\n" "int32x4_t __arm_vshlltq_n_s16(int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_s16)))\n" "int32x4_t __arm_vshlltq(int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_s8)))\n" "int16x8_t __arm_vshlltq_n_s8(int8x16_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_s8)))\n" "int16x8_t __arm_vshlltq(int8x16_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_u16)))\n" "uint32x4_t __arm_vshlltq_n_u16(uint16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_u16)))\n" "uint32x4_t __arm_vshlltq(uint16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_u8)))\n" "uint16x8_t __arm_vshlltq_n_u8(uint8x16_t, int);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_u8)))\n" "uint16x8_t __arm_vshlltq(uint8x16_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_s16)))\n" "int32x4_t __arm_vshlltq_x_n_s16(int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_s16)))\n" "int32x4_t __arm_vshlltq_x(int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_s8)))\n" "int16x8_t __arm_vshlltq_x_n_s8(int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_s8)))\n" "int16x8_t __arm_vshlltq_x(int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_u16)))\n" "uint32x4_t __arm_vshlltq_x_n_u16(uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_u16)))\n" "uint32x4_t __arm_vshlltq_x(uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_u8)))\n" "uint16x8_t __arm_vshlltq_x_n_u8(uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_u8)))\n" "uint16x8_t __arm_vshlltq_x(uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s16)))\n" "int16x8_t __arm_vshlq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s16)))\n" "int16x8_t __arm_vshlq_m_n(int16x8_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s32)))\n" "int32x4_t 
__arm_vshlq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s32)))\n" "int32x4_t __arm_vshlq_m_n(int32x4_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s8)))\n" "int8x16_t __arm_vshlq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s8)))\n" "int8x16_t __arm_vshlq_m_n(int8x16_t, int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u16)))\n" "uint16x8_t __arm_vshlq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u16)))\n" "uint16x8_t __arm_vshlq_m_n(uint16x8_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u32)))\n" "uint32x4_t __arm_vshlq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u32)))\n" "uint32x4_t __arm_vshlq_m_n(uint32x4_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u8)))\n" "uint8x16_t __arm_vshlq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u8)))\n" "uint8x16_t __arm_vshlq_m_n(uint8x16_t, uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s16)))\n" "int16x8_t __arm_vshlq_m_r_s16(int16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s16)))\n" 
"int16x8_t __arm_vshlq_m_r(int16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s32)))\n" "int32x4_t __arm_vshlq_m_r_s32(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s32)))\n" "int32x4_t __arm_vshlq_m_r(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s8)))\n" "int8x16_t __arm_vshlq_m_r_s8(int8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s8)))\n" "int8x16_t __arm_vshlq_m_r(int8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u16)))\n" "uint16x8_t __arm_vshlq_m_r_u16(uint16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u16)))\n" "uint16x8_t __arm_vshlq_m_r(uint16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u32)))\n" "uint32x4_t __arm_vshlq_m_r_u32(uint32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u32)))\n" "uint32x4_t __arm_vshlq_m_r(uint32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u8)))\n" "uint8x16_t __arm_vshlq_m_r_u8(uint8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u8)))\n" "uint8x16_t __arm_vshlq_m_r(uint8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s16)))\n" "int16x8_t __arm_vshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s16)))\n" "int16x8_t __arm_vshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s32)))\n" "int32x4_t __arm_vshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s32)))\n" "int32x4_t __arm_vshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s8)))\n" "int8x16_t __arm_vshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s8)))\n" "int8x16_t __arm_vshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u16)))\n" "uint16x8_t __arm_vshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u16)))\n" "uint16x8_t __arm_vshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u32)))\n" "uint32x4_t __arm_vshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u32)))\n" "uint32x4_t __arm_vshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u8)))\n" "uint8x16_t __arm_vshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u8)))\n" "uint8x16_t __arm_vshlq_m(uint8x16_t, uint8x16_t, 
int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s16)))\n" "int16x8_t __arm_vshlq_n_s16(int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s16)))\n" "int16x8_t __arm_vshlq_n(int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s32)))\n" "int32x4_t __arm_vshlq_n_s32(int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s32)))\n" "int32x4_t __arm_vshlq_n(int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s8)))\n" "int8x16_t __arm_vshlq_n_s8(int8x16_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s8)))\n" "int8x16_t __arm_vshlq_n(int8x16_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u16)))\n" "uint16x8_t __arm_vshlq_n_u16(uint16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u16)))\n" "uint16x8_t __arm_vshlq_n(uint16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u32)))\n" "uint32x4_t __arm_vshlq_n_u32(uint32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u32)))\n" "uint32x4_t __arm_vshlq_n(uint32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u8)))\n" "uint8x16_t __arm_vshlq_n_u8(uint8x16_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u8)))\n" "uint8x16_t __arm_vshlq_n(uint8x16_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s16)))\n" "int16x8_t __arm_vshlq_r_s16(int16x8_t, 
int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s16)))\n" "int16x8_t __arm_vshlq_r(int16x8_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s32)))\n" "int32x4_t __arm_vshlq_r_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s32)))\n" "int32x4_t __arm_vshlq_r(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s8)))\n" "int8x16_t __arm_vshlq_r_s8(int8x16_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s8)))\n" "int8x16_t __arm_vshlq_r(int8x16_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u16)))\n" "uint16x8_t __arm_vshlq_r_u16(uint16x8_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u16)))\n" "uint16x8_t __arm_vshlq_r(uint16x8_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u32)))\n" "uint32x4_t __arm_vshlq_r_u32(uint32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u32)))\n" "uint32x4_t __arm_vshlq_r(uint32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u8)))\n" "uint8x16_t __arm_vshlq_r_u8(uint8x16_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u8)))\n" "uint8x16_t __arm_vshlq_r(uint8x16_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s16)))\n" "int16x8_t __arm_vshlq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s16)))\n" "int16x8_t __arm_vshlq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s32)))\n" "int32x4_t __arm_vshlq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s32)))\n" "int32x4_t __arm_vshlq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s8)))\n" "int8x16_t __arm_vshlq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s8)))\n" "int8x16_t __arm_vshlq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u16)))\n" "uint16x8_t __arm_vshlq_u16(uint16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u16)))\n" "uint16x8_t __arm_vshlq(uint16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u32)))\n" "uint32x4_t __arm_vshlq_u32(uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u32)))\n" "uint32x4_t __arm_vshlq(uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u8)))\n" "uint8x16_t __arm_vshlq_u8(uint8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u8)))\n" "uint8x16_t __arm_vshlq(uint8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s16)))\n" "int16x8_t __arm_vshlq_x_n_s16(int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s16)))\n" "int16x8_t __arm_vshlq_x_n(int16x8_t, int, mve_pred16_t);\n" 
"static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s32)))\n" "int32x4_t __arm_vshlq_x_n_s32(int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s32)))\n" "int32x4_t __arm_vshlq_x_n(int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s8)))\n" "int8x16_t __arm_vshlq_x_n_s8(int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s8)))\n" "int8x16_t __arm_vshlq_x_n(int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u16)))\n" "uint16x8_t __arm_vshlq_x_n_u16(uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u16)))\n" "uint16x8_t __arm_vshlq_x_n(uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u32)))\n" "uint32x4_t __arm_vshlq_x_n_u32(uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u32)))\n" "uint32x4_t __arm_vshlq_x_n(uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u8)))\n" "uint8x16_t __arm_vshlq_x_n_u8(uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u8)))\n" "uint8x16_t __arm_vshlq_x_n(uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s16)))\n" "int16x8_t __arm_vshlq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s16)))\n" "int16x8_t 
__arm_vshlq_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s32)))\n" "int32x4_t __arm_vshlq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s32)))\n" "int32x4_t __arm_vshlq_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s8)))\n" "int8x16_t __arm_vshlq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s8)))\n" "int8x16_t __arm_vshlq_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u16)))\n" "uint16x8_t __arm_vshlq_x_u16(uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u16)))\n" "uint16x8_t __arm_vshlq_x(uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u32)))\n" "uint32x4_t __arm_vshlq_x_u32(uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u32)))\n" "uint32x4_t __arm_vshlq_x(uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u8)))\n" "uint8x16_t __arm_vshlq_x_u8(uint8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u8)))\n" "uint8x16_t __arm_vshlq_x(uint8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_s16)))\n" "int8x16_t __arm_vshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_s16)))\n" "int8x16_t __arm_vshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_s32)))\n" "int16x8_t __arm_vshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_s32)))\n" "int16x8_t __arm_vshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_u16)))\n" "uint8x16_t __arm_vshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_u16)))\n" "uint8x16_t __arm_vshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_u32)))\n" "uint16x8_t __arm_vshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_u32)))\n" "uint16x8_t __arm_vshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_s16)))\n" "int8x16_t __arm_vshrnbq_n_s16(int8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_s16)))\n" "int8x16_t __arm_vshrnbq(int8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_s32)))\n" "int16x8_t __arm_vshrnbq_n_s32(int16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_s32)))\n" "int16x8_t __arm_vshrnbq(int16x8_t, int32x4_t, int);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_u16)))\n" "uint8x16_t __arm_vshrnbq_n_u16(uint8x16_t, uint16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_u16)))\n" "uint8x16_t __arm_vshrnbq(uint8x16_t, uint16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_u32)))\n" "uint16x8_t __arm_vshrnbq_n_u32(uint16x8_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_u32)))\n" "uint16x8_t __arm_vshrnbq(uint16x8_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_s16)))\n" "int8x16_t __arm_vshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_s16)))\n" "int8x16_t __arm_vshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_s32)))\n" "int16x8_t __arm_vshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_s32)))\n" "int16x8_t __arm_vshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_u16)))\n" "uint8x16_t __arm_vshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_u16)))\n" "uint8x16_t __arm_vshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_u32)))\n" "uint16x8_t __arm_vshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_u32)))\n" "uint16x8_t __arm_vshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_s16)))\n" "int8x16_t __arm_vshrntq_n_s16(int8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_s16)))\n" "int8x16_t __arm_vshrntq(int8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_s32)))\n" "int16x8_t __arm_vshrntq_n_s32(int16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_s32)))\n" "int16x8_t __arm_vshrntq(int16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_u16)))\n" "uint8x16_t __arm_vshrntq_n_u16(uint8x16_t, uint16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_u16)))\n" "uint8x16_t __arm_vshrntq(uint8x16_t, uint16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_u32)))\n" "uint16x8_t __arm_vshrntq_n_u32(uint16x8_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_u32)))\n" "uint16x8_t __arm_vshrntq(uint16x8_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s16)))\n" "int16x8_t __arm_vshrq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s16)))\n" "int16x8_t __arm_vshrq_m(int16x8_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s32)))\n" "int32x4_t __arm_vshrq_m_n_s32(int32x4_t, int32x4_t, int, 
mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s32)))\n" "int32x4_t __arm_vshrq_m(int32x4_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s8)))\n" "int8x16_t __arm_vshrq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s8)))\n" "int8x16_t __arm_vshrq_m(int8x16_t, int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u16)))\n" "uint16x8_t __arm_vshrq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u16)))\n" "uint16x8_t __arm_vshrq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u32)))\n" "uint32x4_t __arm_vshrq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u32)))\n" "uint32x4_t __arm_vshrq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u8)))\n" "uint8x16_t __arm_vshrq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u8)))\n" "uint8x16_t __arm_vshrq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s16)))\n" "int16x8_t __arm_vshrq_n_s16(int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s16)))\n" "int16x8_t __arm_vshrq(int16x8_t, int);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s32)))\n" "int32x4_t __arm_vshrq_n_s32(int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s32)))\n" "int32x4_t __arm_vshrq(int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s8)))\n" "int8x16_t __arm_vshrq_n_s8(int8x16_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s8)))\n" "int8x16_t __arm_vshrq(int8x16_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u16)))\n" "uint16x8_t __arm_vshrq_n_u16(uint16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u16)))\n" "uint16x8_t __arm_vshrq(uint16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u32)))\n" "uint32x4_t __arm_vshrq_n_u32(uint32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u32)))\n" "uint32x4_t __arm_vshrq(uint32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u8)))\n" "uint8x16_t __arm_vshrq_n_u8(uint8x16_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u8)))\n" "uint8x16_t __arm_vshrq(uint8x16_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s16)))\n" "int16x8_t __arm_vshrq_x_n_s16(int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s16)))\n" "int16x8_t __arm_vshrq_x(int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s32)))\n" "int32x4_t __arm_vshrq_x_n_s32(int32x4_t, int, mve_pred16_t);\n" 
"static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s32)))\n" "int32x4_t __arm_vshrq_x(int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s8)))\n" "int8x16_t __arm_vshrq_x_n_s8(int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s8)))\n" "int8x16_t __arm_vshrq_x(int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u16)))\n" "uint16x8_t __arm_vshrq_x_n_u16(uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u16)))\n" "uint16x8_t __arm_vshrq_x(uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u32)))\n" "uint32x4_t __arm_vshrq_x_n_u32(uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u32)))\n" "uint32x4_t __arm_vshrq_x(uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u8)))\n" "uint8x16_t __arm_vshrq_x_n_u8(uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u8)))\n" "uint8x16_t __arm_vshrq_x(uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s16)))\n" "int16x8_t __arm_vsliq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s16)))\n" "int16x8_t __arm_vsliq_m(int16x8_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s32)))\n" 
"int32x4_t __arm_vsliq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s32)))\n" "int32x4_t __arm_vsliq_m(int32x4_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s8)))\n" "int8x16_t __arm_vsliq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s8)))\n" "int8x16_t __arm_vsliq_m(int8x16_t, int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u16)))\n" "uint16x8_t __arm_vsliq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u16)))\n" "uint16x8_t __arm_vsliq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u32)))\n" "uint32x4_t __arm_vsliq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u32)))\n" "uint32x4_t __arm_vsliq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u8)))\n" "uint8x16_t __arm_vsliq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u8)))\n" "uint8x16_t __arm_vsliq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s16)))\n" "int16x8_t __arm_vsliq_n_s16(int16x8_t, int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s16)))\n" "int16x8_t 
__arm_vsliq(int16x8_t, int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s32)))\n" "int32x4_t __arm_vsliq_n_s32(int32x4_t, int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s32)))\n" "int32x4_t __arm_vsliq(int32x4_t, int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s8)))\n" "int8x16_t __arm_vsliq_n_s8(int8x16_t, int8x16_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s8)))\n" "int8x16_t __arm_vsliq(int8x16_t, int8x16_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u16)))\n" "uint16x8_t __arm_vsliq_n_u16(uint16x8_t, uint16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u16)))\n" "uint16x8_t __arm_vsliq(uint16x8_t, uint16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u32)))\n" "uint32x4_t __arm_vsliq_n_u32(uint32x4_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u32)))\n" "uint32x4_t __arm_vsliq(uint32x4_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u8)))\n" "uint8x16_t __arm_vsliq_n_u8(uint8x16_t, uint8x16_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u8)))\n" "uint8x16_t __arm_vsliq(uint8x16_t, uint8x16_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s16)))\n" "int16x8_t __arm_vsriq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s16)))\n" "int16x8_t 
__arm_vsriq_m(int16x8_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s32)))\n" "int32x4_t __arm_vsriq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s32)))\n" "int32x4_t __arm_vsriq_m(int32x4_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s8)))\n" "int8x16_t __arm_vsriq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s8)))\n" "int8x16_t __arm_vsriq_m(int8x16_t, int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u16)))\n" "uint16x8_t __arm_vsriq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u16)))\n" "uint16x8_t __arm_vsriq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u32)))\n" "uint32x4_t __arm_vsriq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u32)))\n" "uint32x4_t __arm_vsriq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u8)))\n" "uint8x16_t __arm_vsriq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u8)))\n" "uint8x16_t __arm_vsriq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s16)))\n" "int16x8_t 
__arm_vsriq_n_s16(int16x8_t, int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s16)))\n" "int16x8_t __arm_vsriq(int16x8_t, int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s32)))\n" "int32x4_t __arm_vsriq_n_s32(int32x4_t, int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s32)))\n" "int32x4_t __arm_vsriq(int32x4_t, int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s8)))\n" "int8x16_t __arm_vsriq_n_s8(int8x16_t, int8x16_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s8)))\n" "int8x16_t __arm_vsriq(int8x16_t, int8x16_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u16)))\n" "uint16x8_t __arm_vsriq_n_u16(uint16x8_t, uint16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u16)))\n" "uint16x8_t __arm_vsriq(uint16x8_t, uint16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u32)))\n" "uint32x4_t __arm_vsriq_n_u32(uint32x4_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u32)))\n" "uint32x4_t __arm_vsriq(uint32x4_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u8)))\n" "uint8x16_t __arm_vsriq_n_u8(uint8x16_t, uint8x16_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u8)))\n" "uint8x16_t __arm_vsriq(uint8x16_t, uint8x16_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s16)))\n" "void __arm_vst1q_p_s16(int16_t *, int16x8_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s16)))\n" "void __arm_vst1q_p(int16_t *, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s32)))\n" "void __arm_vst1q_p_s32(int32_t *, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s32)))\n" "void __arm_vst1q_p(int32_t *, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s8)))\n" "void __arm_vst1q_p_s8(int8_t *, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s8)))\n" "void __arm_vst1q_p(int8_t *, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u16)))\n" "void __arm_vst1q_p_u16(uint16_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u16)))\n" "void __arm_vst1q_p(uint16_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u32)))\n" "void __arm_vst1q_p_u32(uint32_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u32)))\n" "void __arm_vst1q_p(uint32_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u8)))\n" "void __arm_vst1q_p_u8(uint8_t *, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u8)))\n" "void __arm_vst1q_p(uint8_t *, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s16)))\n" "void __arm_vst1q_s16(int16_t *, 
int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s16)))\n" "void __arm_vst1q(int16_t *, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s32)))\n" "void __arm_vst1q_s32(int32_t *, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s32)))\n" "void __arm_vst1q(int32_t *, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s8)))\n" "void __arm_vst1q_s8(int8_t *, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s8)))\n" "void __arm_vst1q(int8_t *, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u16)))\n" "void __arm_vst1q_u16(uint16_t *, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u16)))\n" "void __arm_vst1q(uint16_t *, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u32)))\n" "void __arm_vst1q_u32(uint32_t *, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u32)))\n" "void __arm_vst1q(uint32_t *, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u8)))\n" "void __arm_vst1q_u8(uint8_t *, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u8)))\n" "void __arm_vst1q(uint8_t *, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s16)))\n" "void __arm_vst2q_s16(int16_t *, int16x8x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s16)))\n" "void __arm_vst2q(int16_t *, int16x8x2_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s32)))\n" "void __arm_vst2q_s32(int32_t *, int32x4x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s32)))\n" "void __arm_vst2q(int32_t *, int32x4x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s8)))\n" "void __arm_vst2q_s8(int8_t *, int8x16x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s8)))\n" "void __arm_vst2q(int8_t *, int8x16x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u16)))\n" "void __arm_vst2q_u16(uint16_t *, uint16x8x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u16)))\n" "void __arm_vst2q(uint16_t *, uint16x8x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u32)))\n" "void __arm_vst2q_u32(uint32_t *, uint32x4x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u32)))\n" "void __arm_vst2q(uint32_t *, uint32x4x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u8)))\n" "void __arm_vst2q_u8(uint8_t *, uint8x16x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u8)))\n" "void __arm_vst2q(uint8_t *, uint8x16x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s16)))\n" "void __arm_vst4q_s16(int16_t *, int16x8x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s16)))\n" "void __arm_vst4q(int16_t *, int16x8x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s32)))\n" "void __arm_vst4q_s32(int32_t *, int32x4x4_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s32)))\n" "void __arm_vst4q(int32_t *, int32x4x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s8)))\n" "void __arm_vst4q_s8(int8_t *, int8x16x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s8)))\n" "void __arm_vst4q(int8_t *, int8x16x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u16)))\n" "void __arm_vst4q_u16(uint16_t *, uint16x8x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u16)))\n" "void __arm_vst4q(uint16_t *, uint16x8x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u32)))\n" "void __arm_vst4q_u32(uint32_t *, uint32x4x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u32)))\n" "void __arm_vst4q(uint32_t *, uint32x4x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u8)))\n" "void __arm_vst4q_u8(uint8_t *, uint8x16x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u8)))\n" "void __arm_vst4q(uint8_t *, uint8x16x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s16)))\n" "void __arm_vstrbq_p_s16(int8_t *, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s16)))\n" "void __arm_vstrbq_p(int8_t *, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s32)))\n" "void __arm_vstrbq_p_s32(int8_t *, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s32)))\n" "void __arm_vstrbq_p(int8_t *, int32x4_t, mve_pred16_t);\n" "static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s8)))\n" "void __arm_vstrbq_p_s8(int8_t *, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s8)))\n" "void __arm_vstrbq_p(int8_t *, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u16)))\n" "void __arm_vstrbq_p_u16(uint8_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u16)))\n" "void __arm_vstrbq_p(uint8_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u32)))\n" "void __arm_vstrbq_p_u32(uint8_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u32)))\n" "void __arm_vstrbq_p(uint8_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u8)))\n" "void __arm_vstrbq_p_u8(uint8_t *, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u8)))\n" "void __arm_vstrbq_p(uint8_t *, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s16)))\n" "void __arm_vstrbq_s16(int8_t *, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s16)))\n" "void __arm_vstrbq(int8_t *, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s32)))\n" "void __arm_vstrbq_s32(int8_t *, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s32)))\n" "void __arm_vstrbq(int8_t *, int32x4_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s8)))\n" "void __arm_vstrbq_s8(int8_t *, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s8)))\n" "void __arm_vstrbq(int8_t *, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s16)))\n" "void __arm_vstrbq_scatter_offset_p_s16(int8_t *, uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s16)))\n" "void __arm_vstrbq_scatter_offset_p(int8_t *, uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s32)))\n" "void __arm_vstrbq_scatter_offset_p_s32(int8_t *, uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s32)))\n" "void __arm_vstrbq_scatter_offset_p(int8_t *, uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s8)))\n" "void __arm_vstrbq_scatter_offset_p_s8(int8_t *, uint8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s8)))\n" "void __arm_vstrbq_scatter_offset_p(int8_t *, uint8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u16)))\n" "void __arm_vstrbq_scatter_offset_p_u16(uint8_t *, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u16)))\n" "void __arm_vstrbq_scatter_offset_p(uint8_t *, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u32)))\n" "void __arm_vstrbq_scatter_offset_p_u32(uint8_t *, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u32)))\n" "void __arm_vstrbq_scatter_offset_p(uint8_t *, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u8)))\n" "void __arm_vstrbq_scatter_offset_p_u8(uint8_t *, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u8)))\n" "void __arm_vstrbq_scatter_offset_p(uint8_t *, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s16)))\n" "void __arm_vstrbq_scatter_offset_s16(int8_t *, uint16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s16)))\n" "void __arm_vstrbq_scatter_offset(int8_t *, uint16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s32)))\n" "void __arm_vstrbq_scatter_offset_s32(int8_t *, uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s32)))\n" "void __arm_vstrbq_scatter_offset(int8_t *, uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s8)))\n" "void __arm_vstrbq_scatter_offset_s8(int8_t *, uint8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s8)))\n" "void __arm_vstrbq_scatter_offset(int8_t *, uint8x16_t, int8x16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u16)))\n" "void __arm_vstrbq_scatter_offset_u16(uint8_t *, uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u16)))\n" "void __arm_vstrbq_scatter_offset(uint8_t *, uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u32)))\n" "void __arm_vstrbq_scatter_offset_u32(uint8_t *, uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u32)))\n" "void __arm_vstrbq_scatter_offset(uint8_t *, uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u8)))\n" "void __arm_vstrbq_scatter_offset_u8(uint8_t *, uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u8)))\n" "void __arm_vstrbq_scatter_offset(uint8_t *, uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u16)))\n" "void __arm_vstrbq_u16(uint8_t *, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u16)))\n" "void __arm_vstrbq(uint8_t *, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u32)))\n" "void __arm_vstrbq_u32(uint8_t *, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u32)))\n" "void __arm_vstrbq(uint8_t *, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u8)))\n" "void __arm_vstrbq_u8(uint8_t *, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u8)))\n" "void __arm_vstrbq(uint8_t *, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_p_s64)))\n" "void __arm_vstrdq_scatter_base_p_s64(uint64x2_t, int, int64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_p_s64)))\n" "void __arm_vstrdq_scatter_base_p(uint64x2_t, int, int64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_p_u64)))\n" "void __arm_vstrdq_scatter_base_p_u64(uint64x2_t, int, uint64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_p_u64)))\n" "void __arm_vstrdq_scatter_base_p(uint64x2_t, int, uint64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_s64)))\n" "void __arm_vstrdq_scatter_base_s64(uint64x2_t, int, int64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_s64)))\n" "void __arm_vstrdq_scatter_base(uint64x2_t, int, int64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_u64)))\n" "void __arm_vstrdq_scatter_base_u64(uint64x2_t, int, uint64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_u64)))\n" "void __arm_vstrdq_scatter_base(uint64x2_t, int, uint64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_s64)))\n" "void __arm_vstrdq_scatter_base_wb_p_s64(uint64x2_t *, int, int64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_s64)))\n" "void 
__arm_vstrdq_scatter_base_wb_p(uint64x2_t *, int, int64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_u64)))\n" "void __arm_vstrdq_scatter_base_wb_p_u64(uint64x2_t *, int, uint64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_u64)))\n" "void __arm_vstrdq_scatter_base_wb_p(uint64x2_t *, int, uint64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_s64)))\n" "void __arm_vstrdq_scatter_base_wb_s64(uint64x2_t *, int, int64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_s64)))\n" "void __arm_vstrdq_scatter_base_wb(uint64x2_t *, int, int64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_u64)))\n" "void __arm_vstrdq_scatter_base_wb_u64(uint64x2_t *, int, uint64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_u64)))\n" "void __arm_vstrdq_scatter_base_wb(uint64x2_t *, int, uint64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_s64)))\n" "void __arm_vstrdq_scatter_offset_p_s64(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_s64)))\n" "void __arm_vstrdq_scatter_offset_p(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_u64)))\n" "void __arm_vstrdq_scatter_offset_p_u64(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_u64)))\n" "void __arm_vstrdq_scatter_offset_p(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_s64)))\n" "void __arm_vstrdq_scatter_offset_s64(int64_t *, uint64x2_t, int64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_s64)))\n" "void __arm_vstrdq_scatter_offset(int64_t *, uint64x2_t, int64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_u64)))\n" "void __arm_vstrdq_scatter_offset_u64(uint64_t *, uint64x2_t, uint64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_u64)))\n" "void __arm_vstrdq_scatter_offset(uint64_t *, uint64x2_t, uint64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_s64)))\n" "void __arm_vstrdq_scatter_shifted_offset_p_s64(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_s64)))\n" "void __arm_vstrdq_scatter_shifted_offset_p(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_u64)))\n" "void __arm_vstrdq_scatter_shifted_offset_p_u64(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_u64)))\n" "void __arm_vstrdq_scatter_shifted_offset_p(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_s64)))\n" "void 
__arm_vstrdq_scatter_shifted_offset_s64(int64_t *, uint64x2_t, int64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_s64)))\n" "void __arm_vstrdq_scatter_shifted_offset(int64_t *, uint64x2_t, int64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_u64)))\n" "void __arm_vstrdq_scatter_shifted_offset_u64(uint64_t *, uint64x2_t, uint64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_u64)))\n" "void __arm_vstrdq_scatter_shifted_offset(uint64_t *, uint64x2_t, uint64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_s16)))\n" "void __arm_vstrhq_p_s16(int16_t *, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_s16)))\n" "void __arm_vstrhq_p(int16_t *, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_s32)))\n" "void __arm_vstrhq_p_s32(int16_t *, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_s32)))\n" "void __arm_vstrhq_p(int16_t *, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_u16)))\n" "void __arm_vstrhq_p_u16(uint16_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_u16)))\n" "void __arm_vstrhq_p(uint16_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_u32)))\n" "void __arm_vstrhq_p_u32(uint16_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_u32)))\n" "void __arm_vstrhq_p(uint16_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_s16)))\n" "void __arm_vstrhq_s16(int16_t *, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_s16)))\n" "void __arm_vstrhq(int16_t *, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_s32)))\n" "void __arm_vstrhq_s32(int16_t *, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_s32)))\n" "void __arm_vstrhq(int16_t *, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s16)))\n" "void __arm_vstrhq_scatter_offset_p_s16(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s16)))\n" "void __arm_vstrhq_scatter_offset_p(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s32)))\n" "void __arm_vstrhq_scatter_offset_p_s32(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s32)))\n" "void __arm_vstrhq_scatter_offset_p(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u16)))\n" "void __arm_vstrhq_scatter_offset_p_u16(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u16)))\n" "void __arm_vstrhq_scatter_offset_p(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);\n" 
"static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u32)))\n" "void __arm_vstrhq_scatter_offset_p_u32(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u32)))\n" "void __arm_vstrhq_scatter_offset_p(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_s16)))\n" "void __arm_vstrhq_scatter_offset_s16(int16_t *, uint16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_s16)))\n" "void __arm_vstrhq_scatter_offset(int16_t *, uint16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_s32)))\n" "void __arm_vstrhq_scatter_offset_s32(int16_t *, uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_s32)))\n" "void __arm_vstrhq_scatter_offset(int16_t *, uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_u16)))\n" "void __arm_vstrhq_scatter_offset_u16(uint16_t *, uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_u16)))\n" "void __arm_vstrhq_scatter_offset(uint16_t *, uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_u32)))\n" "void __arm_vstrhq_scatter_offset_u32(uint16_t *, uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_u32)))\n" "void __arm_vstrhq_scatter_offset(uint16_t *, uint32x4_t, uint32x4_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s16)))\n" "void __arm_vstrhq_scatter_shifted_offset_p_s16(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s16)))\n" "void __arm_vstrhq_scatter_shifted_offset_p(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s32)))\n" "void __arm_vstrhq_scatter_shifted_offset_p_s32(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s32)))\n" "void __arm_vstrhq_scatter_shifted_offset_p(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u16)))\n" "void __arm_vstrhq_scatter_shifted_offset_p_u16(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u16)))\n" "void __arm_vstrhq_scatter_shifted_offset_p(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u32)))\n" "void __arm_vstrhq_scatter_shifted_offset_p_u32(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u32)))\n" "void __arm_vstrhq_scatter_shifted_offset_p(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s16)))\n" "void __arm_vstrhq_scatter_shifted_offset_s16(int16_t *, uint16x8_t, 
int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s16)))\n" "void __arm_vstrhq_scatter_shifted_offset(int16_t *, uint16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s32)))\n" "void __arm_vstrhq_scatter_shifted_offset_s32(int16_t *, uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s32)))\n" "void __arm_vstrhq_scatter_shifted_offset(int16_t *, uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u16)))\n" "void __arm_vstrhq_scatter_shifted_offset_u16(uint16_t *, uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u16)))\n" "void __arm_vstrhq_scatter_shifted_offset(uint16_t *, uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u32)))\n" "void __arm_vstrhq_scatter_shifted_offset_u32(uint16_t *, uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u32)))\n" "void __arm_vstrhq_scatter_shifted_offset(uint16_t *, uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_u16)))\n" "void __arm_vstrhq_u16(uint16_t *, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_u16)))\n" "void __arm_vstrhq(uint16_t *, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_u32)))\n" "void __arm_vstrhq_u32(uint16_t *, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_u32)))\n" "void __arm_vstrhq(uint16_t *, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_s32)))\n" "void __arm_vstrwq_p_s32(int32_t *, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_s32)))\n" "void __arm_vstrwq_p(int32_t *, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_u32)))\n" "void __arm_vstrwq_p_u32(uint32_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_u32)))\n" "void __arm_vstrwq_p(uint32_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_s32)))\n" "void __arm_vstrwq_s32(int32_t *, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_s32)))\n" "void __arm_vstrwq(int32_t *, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_s32)))\n" "void __arm_vstrwq_scatter_base_p_s32(uint32x4_t, int, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_s32)))\n" "void __arm_vstrwq_scatter_base_p(uint32x4_t, int, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_u32)))\n" "void __arm_vstrwq_scatter_base_p_u32(uint32x4_t, int, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_u32)))\n" "void __arm_vstrwq_scatter_base_p(uint32x4_t, int, uint32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_s32)))\n" "void __arm_vstrwq_scatter_base_s32(uint32x4_t, int, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_s32)))\n" "void __arm_vstrwq_scatter_base(uint32x4_t, int, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_u32)))\n" "void __arm_vstrwq_scatter_base_u32(uint32x4_t, int, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_u32)))\n" "void __arm_vstrwq_scatter_base(uint32x4_t, int, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_s32)))\n" "void __arm_vstrwq_scatter_base_wb_p_s32(uint32x4_t *, int, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_s32)))\n" "void __arm_vstrwq_scatter_base_wb_p(uint32x4_t *, int, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_u32)))\n" "void __arm_vstrwq_scatter_base_wb_p_u32(uint32x4_t *, int, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_u32)))\n" "void __arm_vstrwq_scatter_base_wb_p(uint32x4_t *, int, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_s32)))\n" "void __arm_vstrwq_scatter_base_wb_s32(uint32x4_t *, int, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_s32)))\n" "void __arm_vstrwq_scatter_base_wb(uint32x4_t *, int, int32x4_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_u32)))\n" "void __arm_vstrwq_scatter_base_wb_u32(uint32x4_t *, int, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_u32)))\n" "void __arm_vstrwq_scatter_base_wb(uint32x4_t *, int, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_s32)))\n" "void __arm_vstrwq_scatter_offset_p_s32(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_s32)))\n" "void __arm_vstrwq_scatter_offset_p(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_u32)))\n" "void __arm_vstrwq_scatter_offset_p_u32(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_u32)))\n" "void __arm_vstrwq_scatter_offset_p(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_s32)))\n" "void __arm_vstrwq_scatter_offset_s32(int32_t *, uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_s32)))\n" "void __arm_vstrwq_scatter_offset(int32_t *, uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_u32)))\n" "void __arm_vstrwq_scatter_offset_u32(uint32_t *, uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_u32)))\n" "void __arm_vstrwq_scatter_offset(uint32_t *, uint32x4_t, uint32x4_t);\n" "static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_s32)))\n" "void __arm_vstrwq_scatter_shifted_offset_p_s32(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_s32)))\n" "void __arm_vstrwq_scatter_shifted_offset_p(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_u32)))\n" "void __arm_vstrwq_scatter_shifted_offset_p_u32(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_u32)))\n" "void __arm_vstrwq_scatter_shifted_offset_p(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_s32)))\n" "void __arm_vstrwq_scatter_shifted_offset_s32(int32_t *, uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_s32)))\n" "void __arm_vstrwq_scatter_shifted_offset(int32_t *, uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_u32)))\n" "void __arm_vstrwq_scatter_shifted_offset_u32(uint32_t *, uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_u32)))\n" "void __arm_vstrwq_scatter_shifted_offset(uint32_t *, uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_u32)))\n" "void __arm_vstrwq_u32(uint32_t *, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_u32)))\n" "void __arm_vstrwq(uint32_t *, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s16)))\n" "int16x8_t __arm_vsubq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s16)))\n" "int16x8_t __arm_vsubq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s32)))\n" "int32x4_t __arm_vsubq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s32)))\n" "int32x4_t __arm_vsubq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s8)))\n" "int8x16_t __arm_vsubq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s8)))\n" "int8x16_t __arm_vsubq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u16)))\n" "uint16x8_t __arm_vsubq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u16)))\n" "uint16x8_t __arm_vsubq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u32)))\n" "uint32x4_t __arm_vsubq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u32)))\n" "uint32x4_t __arm_vsubq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u8)))\n" "uint8x16_t __arm_vsubq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u8)))\n" "uint8x16_t __arm_vsubq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s16)))\n" "int16x8_t __arm_vsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s16)))\n" "int16x8_t __arm_vsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s32)))\n" "int32x4_t __arm_vsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s32)))\n" "int32x4_t __arm_vsubq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s8)))\n" "int8x16_t __arm_vsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s8)))\n" "int8x16_t __arm_vsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u16)))\n" "uint16x8_t __arm_vsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u16)))\n" "uint16x8_t __arm_vsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u32)))\n" "uint32x4_t __arm_vsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u32)))\n" "uint32x4_t __arm_vsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u8)))\n" "uint8x16_t __arm_vsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u8)))\n" "uint8x16_t __arm_vsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s16)))\n" "int16x8_t __arm_vsubq_n_s16(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s16)))\n" "int16x8_t __arm_vsubq(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s32)))\n" "int32x4_t __arm_vsubq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s32)))\n" "int32x4_t __arm_vsubq(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s8)))\n" "int8x16_t __arm_vsubq_n_s8(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s8)))\n" "int8x16_t __arm_vsubq(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u16)))\n" "uint16x8_t __arm_vsubq_n_u16(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u16)))\n" "uint16x8_t __arm_vsubq(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u32)))\n" "uint32x4_t __arm_vsubq_n_u32(uint32x4_t, uint32_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u32)))\n" "uint32x4_t __arm_vsubq(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u8)))\n" "uint8x16_t __arm_vsubq_n_u8(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u8)))\n" "uint8x16_t __arm_vsubq(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s16)))\n" "int16x8_t __arm_vsubq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s16)))\n" "int16x8_t __arm_vsubq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s32)))\n" "int32x4_t __arm_vsubq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s32)))\n" "int32x4_t __arm_vsubq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s8)))\n" "int8x16_t __arm_vsubq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s8)))\n" "int8x16_t __arm_vsubq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u16)))\n" "uint16x8_t __arm_vsubq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u16)))\n" "uint16x8_t __arm_vsubq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u32)))\n" "uint32x4_t __arm_vsubq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u32)))\n" "uint32x4_t __arm_vsubq(uint32x4_t, 
uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u8)))\n" "uint8x16_t __arm_vsubq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u8)))\n" "uint8x16_t __arm_vsubq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s16)))\n" "int16x8_t __arm_vsubq_x_n_s16(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s16)))\n" "int16x8_t __arm_vsubq_x(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s32)))\n" "int32x4_t __arm_vsubq_x_n_s32(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s32)))\n" "int32x4_t __arm_vsubq_x(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s8)))\n" "int8x16_t __arm_vsubq_x_n_s8(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s8)))\n" "int8x16_t __arm_vsubq_x(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u16)))\n" "uint16x8_t __arm_vsubq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u16)))\n" "uint16x8_t __arm_vsubq_x(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u32)))\n" "uint32x4_t __arm_vsubq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u32)))\n" 
"uint32x4_t __arm_vsubq_x(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u8)))\n" "uint8x16_t __arm_vsubq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u8)))\n" "uint8x16_t __arm_vsubq_x(uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s16)))\n" "int16x8_t __arm_vsubq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s16)))\n" "int16x8_t __arm_vsubq_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s32)))\n" "int32x4_t __arm_vsubq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s32)))\n" "int32x4_t __arm_vsubq_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s8)))\n" "int8x16_t __arm_vsubq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s8)))\n" "int8x16_t __arm_vsubq_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u16)))\n" "uint16x8_t __arm_vsubq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u16)))\n" "uint16x8_t __arm_vsubq_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u32)))\n" "uint32x4_t __arm_vsubq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u32)))\n" "uint32x4_t __arm_vsubq_x(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u8)))\n" "uint8x16_t __arm_vsubq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u8)))\n" "uint8x16_t __arm_vsubq_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s16)))\n" "int16x8_t __arm_vuninitializedq(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s32)))\n" "int32x4_t __arm_vuninitializedq(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s64)))\n" "int64x2_t __arm_vuninitializedq(int64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s8)))\n" "int8x16_t __arm_vuninitializedq(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u16)))\n" "uint16x8_t __arm_vuninitializedq(uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u32)))\n" "uint32x4_t __arm_vuninitializedq(uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u64)))\n" "uint64x2_t __arm_vuninitializedq(uint64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u8)))\n" "uint8x16_t __arm_vuninitializedq(uint8x16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_s16)))\n" "int16x8_t __arm_vuninitializedq_s16();\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_s32)))\n" "int32x4_t __arm_vuninitializedq_s32();\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_s64)))\n" "int64x2_t __arm_vuninitializedq_s64();\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_s8)))\n" "int8x16_t __arm_vuninitializedq_s8();\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_u16)))\n" "uint16x8_t __arm_vuninitializedq_u16();\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_u32)))\n" "uint32x4_t __arm_vuninitializedq_u32();\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_u64)))\n" "uint64x2_t __arm_vuninitializedq_u64();\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_u8)))\n" "uint8x16_t __arm_vuninitializedq_u8();\n" "\n" "#if (__ARM_FEATURE_MVE & 2)\n" "\n" "typedef __fp16 float16_t;\n" "typedef float float32_t;\n" "typedef __attribute__((__neon_vector_type__(8), __clang_arm_mve_strict_polymorphism)) float16_t float16x8_t;\n" "typedef struct { float16x8_t val[2]; } float16x8x2_t;\n" "typedef struct { float16x8_t val[4]; } float16x8x4_t;\n" "typedef __attribute__((__neon_vector_type__(4), __clang_arm_mve_strict_polymorphism)) float32_t float32x4_t;\n" "typedef struct { float32x4_t val[2]; } float32x4x2_t;\n" "typedef struct { float32x4_t val[4]; } float32x4x4_t;\n" "\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_f16)))\n" "float16x8_t __arm_vabdq_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_f16)))\n" "float16x8_t 
__arm_vabdq(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_f32)))\n" "float32x4_t __arm_vabdq_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_f32)))\n" "float32x4_t __arm_vabdq(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_f16)))\n" "float16x8_t __arm_vabdq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_f16)))\n" "float16x8_t __arm_vabdq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_f32)))\n" "float32x4_t __arm_vabdq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_f32)))\n" "float32x4_t __arm_vabdq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_f16)))\n" "float16x8_t __arm_vabdq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_f16)))\n" "float16x8_t __arm_vabdq_x(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_f32)))\n" "float32x4_t __arm_vabdq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_f32)))\n" "float32x4_t __arm_vabdq_x(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_f16)))\n" "float16x8_t __arm_vabsq_f16(float16x8_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_f16)))\n" "float16x8_t __arm_vabsq(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_f32)))\n" "float32x4_t __arm_vabsq_f32(float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_f32)))\n" "float32x4_t __arm_vabsq(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_f16)))\n" "float16x8_t __arm_vabsq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_f16)))\n" "float16x8_t __arm_vabsq_m(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_f32)))\n" "float32x4_t __arm_vabsq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_f32)))\n" "float32x4_t __arm_vabsq_m(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_f16)))\n" "float16x8_t __arm_vabsq_x_f16(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_f16)))\n" "float16x8_t __arm_vabsq_x(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_f32)))\n" "float32x4_t __arm_vabsq_x_f32(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_f32)))\n" "float32x4_t __arm_vabsq_x(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_f16)))\n" "float16x8_t __arm_vaddq_f16(float16x8_t, float16x8_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_f16)))\n" "float16x8_t __arm_vaddq(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_f32)))\n" "float32x4_t __arm_vaddq_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_f32)))\n" "float32x4_t __arm_vaddq(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_f16)))\n" "float16x8_t __arm_vaddq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_f16)))\n" "float16x8_t __arm_vaddq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_f32)))\n" "float32x4_t __arm_vaddq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_f32)))\n" "float32x4_t __arm_vaddq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_f16)))\n" "float16x8_t __arm_vaddq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_f16)))\n" "float16x8_t __arm_vaddq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_f32)))\n" "float32x4_t __arm_vaddq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_f32)))\n" "float32x4_t __arm_vaddq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t);\n" 
"static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_f16)))\n" "float16x8_t __arm_vaddq_n_f16(float16x8_t, float16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_f16)))\n" "float16x8_t __arm_vaddq(float16x8_t, float16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_f32)))\n" "float32x4_t __arm_vaddq_n_f32(float32x4_t, float32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_f32)))\n" "float32x4_t __arm_vaddq(float32x4_t, float32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_f16)))\n" "float16x8_t __arm_vaddq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_f16)))\n" "float16x8_t __arm_vaddq_x(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_f32)))\n" "float32x4_t __arm_vaddq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_f32)))\n" "float32x4_t __arm_vaddq_x(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_f16)))\n" "float16x8_t __arm_vaddq_x_n_f16(float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_f16)))\n" "float16x8_t __arm_vaddq_x(float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_f32)))\n" "float32x4_t __arm_vaddq_x_n_f32(float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_f32)))\n" "float32x4_t __arm_vaddq_x(float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_f16)))\n" "float16x8_t __arm_vandq_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_f16)))\n" "float16x8_t __arm_vandq(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_f32)))\n" "float32x4_t __arm_vandq_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_f32)))\n" "float32x4_t __arm_vandq(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_f16)))\n" "float16x8_t __arm_vandq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_f16)))\n" "float16x8_t __arm_vandq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_f32)))\n" "float32x4_t __arm_vandq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_f32)))\n" "float32x4_t __arm_vandq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_f16)))\n" "float16x8_t __arm_vandq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_f16)))\n" "float16x8_t __arm_vandq_x(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_f32)))\n" 
"float32x4_t __arm_vandq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_f32)))\n" "float32x4_t __arm_vandq_x(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_f16)))\n" "float16x8_t __arm_vbicq_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_f16)))\n" "float16x8_t __arm_vbicq(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_f32)))\n" "float32x4_t __arm_vbicq_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_f32)))\n" "float32x4_t __arm_vbicq(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_f16)))\n" "float16x8_t __arm_vbicq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_f16)))\n" "float16x8_t __arm_vbicq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_f32)))\n" "float32x4_t __arm_vbicq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_f32)))\n" "float32x4_t __arm_vbicq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_f16)))\n" "float16x8_t __arm_vbicq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_f16)))\n" "float16x8_t __arm_vbicq_x(float16x8_t, 
float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_f32)))\n" "float32x4_t __arm_vbicq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_f32)))\n" "float32x4_t __arm_vbicq_x(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_f16)))\n" "float16x8_t __arm_vbrsrq_m_n_f16(float16x8_t, float16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_f16)))\n" "float16x8_t __arm_vbrsrq_m(float16x8_t, float16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_f32)))\n" "float32x4_t __arm_vbrsrq_m_n_f32(float32x4_t, float32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_f32)))\n" "float32x4_t __arm_vbrsrq_m(float32x4_t, float32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_f16)))\n" "float16x8_t __arm_vbrsrq_n_f16(float16x8_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_f16)))\n" "float16x8_t __arm_vbrsrq(float16x8_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_f32)))\n" "float32x4_t __arm_vbrsrq_n_f32(float32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_f32)))\n" "float32x4_t __arm_vbrsrq(float32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_f16)))\n" "float16x8_t __arm_vbrsrq_x_n_f16(float16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_f16)))\n" "float16x8_t __arm_vbrsrq_x(float16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_f32)))\n" "float32x4_t __arm_vbrsrq_x_n_f32(float32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_f32)))\n" "float32x4_t __arm_vbrsrq_x(float32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_f16)))\n" "float16x8_t __arm_vcaddq_rot270_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_f16)))\n" "float16x8_t __arm_vcaddq_rot270(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_f32)))\n" "float32x4_t __arm_vcaddq_rot270_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_f32)))\n" "float32x4_t __arm_vcaddq_rot270(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_f16)))\n" "float16x8_t __arm_vcaddq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_f16)))\n" "float16x8_t __arm_vcaddq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_f32)))\n" "float32x4_t __arm_vcaddq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_f32)))\n" "float32x4_t 
__arm_vcaddq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_f16)))\n" "float16x8_t __arm_vcaddq_rot270_x_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_f16)))\n" "float16x8_t __arm_vcaddq_rot270_x(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_f32)))\n" "float32x4_t __arm_vcaddq_rot270_x_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_f32)))\n" "float32x4_t __arm_vcaddq_rot270_x(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_f16)))\n" "float16x8_t __arm_vcaddq_rot90_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_f16)))\n" "float16x8_t __arm_vcaddq_rot90(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_f32)))\n" "float32x4_t __arm_vcaddq_rot90_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_f32)))\n" "float32x4_t __arm_vcaddq_rot90(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_f16)))\n" "float16x8_t __arm_vcaddq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_f16)))\n" "float16x8_t __arm_vcaddq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_f32)))\n" "float32x4_t __arm_vcaddq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_f32)))\n" "float32x4_t __arm_vcaddq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_f16)))\n" "float16x8_t __arm_vcaddq_rot90_x_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_f16)))\n" "float16x8_t __arm_vcaddq_rot90_x(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_f32)))\n" "float32x4_t __arm_vcaddq_rot90_x_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_f32)))\n" "float32x4_t __arm_vcaddq_rot90_x(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_f16)))\n" "float16x8_t __arm_vcmlaq_f16(float16x8_t, float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_f16)))\n" "float16x8_t __arm_vcmlaq(float16x8_t, float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_f32)))\n" "float32x4_t __arm_vcmlaq_f32(float32x4_t, float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_f32)))\n" "float32x4_t __arm_vcmlaq(float32x4_t, float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_m_f16)))\n" "float16x8_t 
__arm_vcmlaq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_m_f16)))\n" "float16x8_t __arm_vcmlaq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_m_f32)))\n" "float32x4_t __arm_vcmlaq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_m_f32)))\n" "float32x4_t __arm_vcmlaq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_f16)))\n" "float16x8_t __arm_vcmlaq_rot180_f16(float16x8_t, float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_f16)))\n" "float16x8_t __arm_vcmlaq_rot180(float16x8_t, float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_f32)))\n" "float32x4_t __arm_vcmlaq_rot180_f32(float32x4_t, float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_f32)))\n" "float32x4_t __arm_vcmlaq_rot180(float32x4_t, float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_m_f16)))\n" "float16x8_t __arm_vcmlaq_rot180_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_m_f16)))\n" "float16x8_t __arm_vcmlaq_rot180_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_m_f32)))\n" "float32x4_t __arm_vcmlaq_rot180_m_f32(float32x4_t, 
float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_m_f32)))\n" "float32x4_t __arm_vcmlaq_rot180_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_f16)))\n" "float16x8_t __arm_vcmlaq_rot270_f16(float16x8_t, float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_f16)))\n" "float16x8_t __arm_vcmlaq_rot270(float16x8_t, float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_f32)))\n" "float32x4_t __arm_vcmlaq_rot270_f32(float32x4_t, float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_f32)))\n" "float32x4_t __arm_vcmlaq_rot270(float32x4_t, float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_m_f16)))\n" "float16x8_t __arm_vcmlaq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_m_f16)))\n" "float16x8_t __arm_vcmlaq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_m_f32)))\n" "float32x4_t __arm_vcmlaq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_m_f32)))\n" "float32x4_t __arm_vcmlaq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_f16)))\n" "float16x8_t __arm_vcmlaq_rot90_f16(float16x8_t, 
float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_f16)))\n" "float16x8_t __arm_vcmlaq_rot90(float16x8_t, float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_f32)))\n" "float32x4_t __arm_vcmlaq_rot90_f32(float32x4_t, float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_f32)))\n" "float32x4_t __arm_vcmlaq_rot90(float32x4_t, float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_m_f16)))\n" "float16x8_t __arm_vcmlaq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_m_f16)))\n" "float16x8_t __arm_vcmlaq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_m_f32)))\n" "float32x4_t __arm_vcmlaq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_m_f32)))\n" "float32x4_t __arm_vcmlaq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_f16)))\n" "mve_pred16_t __arm_vcmpeqq_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_f16)))\n" "mve_pred16_t __arm_vcmpeqq(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_f32)))\n" "mve_pred16_t __arm_vcmpeqq_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_f32)))\n" "mve_pred16_t __arm_vcmpeqq(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_f16)))\n" "mve_pred16_t __arm_vcmpeqq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_f16)))\n" "mve_pred16_t __arm_vcmpeqq_m(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_f32)))\n" "mve_pred16_t __arm_vcmpeqq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_f32)))\n" "mve_pred16_t __arm_vcmpeqq_m(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_f16)))\n" "mve_pred16_t __arm_vcmpeqq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_f16)))\n" "mve_pred16_t __arm_vcmpeqq_m(float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_f32)))\n" "mve_pred16_t __arm_vcmpeqq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_f32)))\n" "mve_pred16_t __arm_vcmpeqq_m(float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_f16)))\n" "mve_pred16_t __arm_vcmpeqq_n_f16(float16x8_t, float16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_f16)))\n" "mve_pred16_t __arm_vcmpeqq(float16x8_t, float16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_f32)))\n" "mve_pred16_t __arm_vcmpeqq_n_f32(float32x4_t, float32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_f32)))\n" "mve_pred16_t __arm_vcmpeqq(float32x4_t, float32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_f16)))\n" "mve_pred16_t __arm_vcmpgeq_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_f16)))\n" "mve_pred16_t __arm_vcmpgeq(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_f32)))\n" "mve_pred16_t __arm_vcmpgeq_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_f32)))\n" "mve_pred16_t __arm_vcmpgeq(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_f16)))\n" "mve_pred16_t __arm_vcmpgeq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_f16)))\n" "mve_pred16_t __arm_vcmpgeq_m(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_f32)))\n" "mve_pred16_t __arm_vcmpgeq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_f32)))\n" "mve_pred16_t __arm_vcmpgeq_m(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_f16)))\n" "mve_pred16_t __arm_vcmpgeq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_f16)))\n" "mve_pred16_t __arm_vcmpgeq_m(float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_f32)))\n" "mve_pred16_t __arm_vcmpgeq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_f32)))\n" "mve_pred16_t __arm_vcmpgeq_m(float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_f16)))\n" "mve_pred16_t __arm_vcmpgeq_n_f16(float16x8_t, float16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_f16)))\n" "mve_pred16_t __arm_vcmpgeq(float16x8_t, float16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_f32)))\n" "mve_pred16_t __arm_vcmpgeq_n_f32(float32x4_t, float32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_f32)))\n" "mve_pred16_t __arm_vcmpgeq(float32x4_t, float32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_f16)))\n" "mve_pred16_t __arm_vcmpgtq_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_f16)))\n" "mve_pred16_t __arm_vcmpgtq(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_f32)))\n" "mve_pred16_t __arm_vcmpgtq_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_f32)))\n" "mve_pred16_t __arm_vcmpgtq(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_f16)))\n" "mve_pred16_t __arm_vcmpgtq_m_f16(float16x8_t, float16x8_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_f16)))\n" "mve_pred16_t __arm_vcmpgtq_m(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_f32)))\n" "mve_pred16_t __arm_vcmpgtq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_f32)))\n" "mve_pred16_t __arm_vcmpgtq_m(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_f16)))\n" "mve_pred16_t __arm_vcmpgtq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_f16)))\n" "mve_pred16_t __arm_vcmpgtq_m(float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_f32)))\n" "mve_pred16_t __arm_vcmpgtq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_f32)))\n" "mve_pred16_t __arm_vcmpgtq_m(float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_f16)))\n" "mve_pred16_t __arm_vcmpgtq_n_f16(float16x8_t, float16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_f16)))\n" "mve_pred16_t __arm_vcmpgtq(float16x8_t, float16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_f32)))\n" "mve_pred16_t __arm_vcmpgtq_n_f32(float32x4_t, float32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_f32)))\n" "mve_pred16_t __arm_vcmpgtq(float32x4_t, float32_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_f16)))\n" "mve_pred16_t __arm_vcmpleq_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_f16)))\n" "mve_pred16_t __arm_vcmpleq(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_f32)))\n" "mve_pred16_t __arm_vcmpleq_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_f32)))\n" "mve_pred16_t __arm_vcmpleq(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_f16)))\n" "mve_pred16_t __arm_vcmpleq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_f16)))\n" "mve_pred16_t __arm_vcmpleq_m(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_f32)))\n" "mve_pred16_t __arm_vcmpleq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_f32)))\n" "mve_pred16_t __arm_vcmpleq_m(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_f16)))\n" "mve_pred16_t __arm_vcmpleq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_f16)))\n" "mve_pred16_t __arm_vcmpleq_m(float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_f32)))\n" "mve_pred16_t __arm_vcmpleq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_f32)))\n" "mve_pred16_t __arm_vcmpleq_m(float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_f16)))\n" "mve_pred16_t __arm_vcmpleq_n_f16(float16x8_t, float16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_f16)))\n" "mve_pred16_t __arm_vcmpleq(float16x8_t, float16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_f32)))\n" "mve_pred16_t __arm_vcmpleq_n_f32(float32x4_t, float32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_f32)))\n" "mve_pred16_t __arm_vcmpleq(float32x4_t, float32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_f16)))\n" "mve_pred16_t __arm_vcmpltq_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_f16)))\n" "mve_pred16_t __arm_vcmpltq(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_f32)))\n" "mve_pred16_t __arm_vcmpltq_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_f32)))\n" "mve_pred16_t __arm_vcmpltq(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_f16)))\n" "mve_pred16_t __arm_vcmpltq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_f16)))\n" "mve_pred16_t __arm_vcmpltq_m(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_f32)))\n" "mve_pred16_t __arm_vcmpltq_m_f32(float32x4_t, float32x4_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_f32)))\n" "mve_pred16_t __arm_vcmpltq_m(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_f16)))\n" "mve_pred16_t __arm_vcmpltq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_f16)))\n" "mve_pred16_t __arm_vcmpltq_m(float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_f32)))\n" "mve_pred16_t __arm_vcmpltq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_f32)))\n" "mve_pred16_t __arm_vcmpltq_m(float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_f16)))\n" "mve_pred16_t __arm_vcmpltq_n_f16(float16x8_t, float16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_f16)))\n" "mve_pred16_t __arm_vcmpltq(float16x8_t, float16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_f32)))\n" "mve_pred16_t __arm_vcmpltq_n_f32(float32x4_t, float32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_f32)))\n" "mve_pred16_t __arm_vcmpltq(float32x4_t, float32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_f16)))\n" "mve_pred16_t __arm_vcmpneq_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_f16)))\n" "mve_pred16_t __arm_vcmpneq(float16x8_t, float16x8_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_f32)))\n" "mve_pred16_t __arm_vcmpneq_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_f32)))\n" "mve_pred16_t __arm_vcmpneq(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_f16)))\n" "mve_pred16_t __arm_vcmpneq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_f16)))\n" "mve_pred16_t __arm_vcmpneq_m(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_f32)))\n" "mve_pred16_t __arm_vcmpneq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_f32)))\n" "mve_pred16_t __arm_vcmpneq_m(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_f16)))\n" "mve_pred16_t __arm_vcmpneq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_f16)))\n" "mve_pred16_t __arm_vcmpneq_m(float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_f32)))\n" "mve_pred16_t __arm_vcmpneq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_f32)))\n" "mve_pred16_t __arm_vcmpneq_m(float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_f16)))\n" "mve_pred16_t __arm_vcmpneq_n_f16(float16x8_t, float16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_f16)))\n" "mve_pred16_t __arm_vcmpneq(float16x8_t, float16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_f32)))\n" "mve_pred16_t __arm_vcmpneq_n_f32(float32x4_t, float32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_f32)))\n" "mve_pred16_t __arm_vcmpneq(float32x4_t, float32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_f16)))\n" "float16x8_t __arm_vcmulq_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_f16)))\n" "float16x8_t __arm_vcmulq(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_f32)))\n" "float32x4_t __arm_vcmulq_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_f32)))\n" "float32x4_t __arm_vcmulq(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_m_f16)))\n" "float16x8_t __arm_vcmulq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_m_f16)))\n" "float16x8_t __arm_vcmulq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_m_f32)))\n" "float32x4_t __arm_vcmulq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_m_f32)))\n" "float32x4_t __arm_vcmulq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_f16)))\n" "float16x8_t 
__arm_vcmulq_rot180_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_f16)))\n" "float16x8_t __arm_vcmulq_rot180(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_f32)))\n" "float32x4_t __arm_vcmulq_rot180_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_f32)))\n" "float32x4_t __arm_vcmulq_rot180(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_m_f16)))\n" "float16x8_t __arm_vcmulq_rot180_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_m_f16)))\n" "float16x8_t __arm_vcmulq_rot180_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_m_f32)))\n" "float32x4_t __arm_vcmulq_rot180_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_m_f32)))\n" "float32x4_t __arm_vcmulq_rot180_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_x_f16)))\n" "float16x8_t __arm_vcmulq_rot180_x_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_x_f16)))\n" "float16x8_t __arm_vcmulq_rot180_x(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_x_f32)))\n" "float32x4_t __arm_vcmulq_rot180_x_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_x_f32)))\n" "float32x4_t __arm_vcmulq_rot180_x(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_f16)))\n" "float16x8_t __arm_vcmulq_rot270_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_f16)))\n" "float16x8_t __arm_vcmulq_rot270(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_f32)))\n" "float32x4_t __arm_vcmulq_rot270_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_f32)))\n" "float32x4_t __arm_vcmulq_rot270(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_m_f16)))\n" "float16x8_t __arm_vcmulq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_m_f16)))\n" "float16x8_t __arm_vcmulq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_m_f32)))\n" "float32x4_t __arm_vcmulq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_m_f32)))\n" "float32x4_t __arm_vcmulq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_x_f16)))\n" "float16x8_t __arm_vcmulq_rot270_x_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_x_f16)))\n" "float16x8_t __arm_vcmulq_rot270_x(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_x_f32)))\n" "float32x4_t __arm_vcmulq_rot270_x_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_x_f32)))\n" "float32x4_t __arm_vcmulq_rot270_x(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_f16)))\n" "float16x8_t __arm_vcmulq_rot90_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_f16)))\n" "float16x8_t __arm_vcmulq_rot90(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_f32)))\n" "float32x4_t __arm_vcmulq_rot90_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_f32)))\n" "float32x4_t __arm_vcmulq_rot90(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_m_f16)))\n" "float16x8_t __arm_vcmulq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_m_f16)))\n" "float16x8_t __arm_vcmulq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_m_f32)))\n" "float32x4_t __arm_vcmulq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_m_f32)))\n" "float32x4_t 
__arm_vcmulq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_x_f16)))\n" "float16x8_t __arm_vcmulq_rot90_x_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_x_f16)))\n" "float16x8_t __arm_vcmulq_rot90_x(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_x_f32)))\n" "float32x4_t __arm_vcmulq_rot90_x_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_x_f32)))\n" "float32x4_t __arm_vcmulq_rot90_x(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_x_f16)))\n" "float16x8_t __arm_vcmulq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_x_f16)))\n" "float16x8_t __arm_vcmulq_x(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_x_f32)))\n" "float32x4_t __arm_vcmulq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_x_f32)))\n" "float32x4_t __arm_vcmulq_x(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_f16)))\n" "float16x8_t __arm_vcreateq_f16(uint64_t, uint64_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_f32)))\n" "float32x4_t __arm_vcreateq_f32(uint64_t, uint64_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_s16_f16)))\n" "int16x8_t 
__arm_vcvtaq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_s16_f16)))\n" "int16x8_t __arm_vcvtaq_m(int16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_s32_f32)))\n" "int32x4_t __arm_vcvtaq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_s32_f32)))\n" "int32x4_t __arm_vcvtaq_m(int32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_u16_f16)))\n" "uint16x8_t __arm_vcvtaq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_u16_f16)))\n" "uint16x8_t __arm_vcvtaq_m(uint16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_u32_f32)))\n" "uint32x4_t __arm_vcvtaq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_u32_f32)))\n" "uint32x4_t __arm_vcvtaq_m(uint32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_s16_f16)))\n" "int16x8_t __arm_vcvtaq_s16_f16(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_s32_f32)))\n" "int32x4_t __arm_vcvtaq_s32_f32(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_u16_f16)))\n" "uint16x8_t __arm_vcvtaq_u16_f16(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_u32_f32)))\n" "uint32x4_t __arm_vcvtaq_u32_f32(float32x4_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_x_s16_f16)))\n" "int16x8_t __arm_vcvtaq_x_s16_f16(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_x_s32_f32)))\n" "int32x4_t __arm_vcvtaq_x_s32_f32(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_x_u16_f16)))\n" "uint16x8_t __arm_vcvtaq_x_u16_f16(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_x_u32_f32)))\n" "uint32x4_t __arm_vcvtaq_x_u32_f32(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_f16_f32)))\n" "float16x8_t __arm_vcvtbq_f16_f32(float16x8_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_f32_f16)))\n" "float32x4_t __arm_vcvtbq_f32_f16(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_m_f16_f32)))\n" "float16x8_t __arm_vcvtbq_m_f16_f32(float16x8_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_m_f32_f16)))\n" "float32x4_t __arm_vcvtbq_m_f32_f16(float32x4_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_x_f32_f16)))\n" "float32x4_t __arm_vcvtbq_x_f32_f16(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_s16_f16)))\n" "int16x8_t __arm_vcvtmq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_s16_f16)))\n" "int16x8_t __arm_vcvtmq_m(int16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_s32_f32)))\n" "int32x4_t __arm_vcvtmq_m_s32_f32(int32x4_t, 
float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_s32_f32)))\n" "int32x4_t __arm_vcvtmq_m(int32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_u16_f16)))\n" "uint16x8_t __arm_vcvtmq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_u16_f16)))\n" "uint16x8_t __arm_vcvtmq_m(uint16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_u32_f32)))\n" "uint32x4_t __arm_vcvtmq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_u32_f32)))\n" "uint32x4_t __arm_vcvtmq_m(uint32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_s16_f16)))\n" "int16x8_t __arm_vcvtmq_s16_f16(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_s32_f32)))\n" "int32x4_t __arm_vcvtmq_s32_f32(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_u16_f16)))\n" "uint16x8_t __arm_vcvtmq_u16_f16(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_u32_f32)))\n" "uint32x4_t __arm_vcvtmq_u32_f32(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_x_s16_f16)))\n" "int16x8_t __arm_vcvtmq_x_s16_f16(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_x_s32_f32)))\n" "int32x4_t __arm_vcvtmq_x_s32_f32(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_x_u16_f16)))\n" "uint16x8_t 
__arm_vcvtmq_x_u16_f16(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_x_u32_f32)))\n" "uint32x4_t __arm_vcvtmq_x_u32_f32(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_s16_f16)))\n" "int16x8_t __arm_vcvtnq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_s16_f16)))\n" "int16x8_t __arm_vcvtnq_m(int16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_s32_f32)))\n" "int32x4_t __arm_vcvtnq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_s32_f32)))\n" "int32x4_t __arm_vcvtnq_m(int32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_u16_f16)))\n" "uint16x8_t __arm_vcvtnq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_u16_f16)))\n" "uint16x8_t __arm_vcvtnq_m(uint16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_u32_f32)))\n" "uint32x4_t __arm_vcvtnq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_u32_f32)))\n" "uint32x4_t __arm_vcvtnq_m(uint32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_s16_f16)))\n" "int16x8_t __arm_vcvtnq_s16_f16(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_s32_f32)))\n" "int32x4_t __arm_vcvtnq_s32_f32(float32x4_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_u16_f16)))\n" "uint16x8_t __arm_vcvtnq_u16_f16(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_u32_f32)))\n" "uint32x4_t __arm_vcvtnq_u32_f32(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_x_s16_f16)))\n" "int16x8_t __arm_vcvtnq_x_s16_f16(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_x_s32_f32)))\n" "int32x4_t __arm_vcvtnq_x_s32_f32(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_x_u16_f16)))\n" "uint16x8_t __arm_vcvtnq_x_u16_f16(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_x_u32_f32)))\n" "uint32x4_t __arm_vcvtnq_x_u32_f32(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_s16_f16)))\n" "int16x8_t __arm_vcvtpq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_s16_f16)))\n" "int16x8_t __arm_vcvtpq_m(int16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_s32_f32)))\n" "int32x4_t __arm_vcvtpq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_s32_f32)))\n" "int32x4_t __arm_vcvtpq_m(int32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_u16_f16)))\n" "uint16x8_t __arm_vcvtpq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_u16_f16)))\n" "uint16x8_t 
__arm_vcvtpq_m(uint16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_u32_f32)))\n" "uint32x4_t __arm_vcvtpq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_u32_f32)))\n" "uint32x4_t __arm_vcvtpq_m(uint32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_s16_f16)))\n" "int16x8_t __arm_vcvtpq_s16_f16(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_s32_f32)))\n" "int32x4_t __arm_vcvtpq_s32_f32(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_u16_f16)))\n" "uint16x8_t __arm_vcvtpq_u16_f16(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_u32_f32)))\n" "uint32x4_t __arm_vcvtpq_u32_f32(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_x_s16_f16)))\n" "int16x8_t __arm_vcvtpq_x_s16_f16(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_x_s32_f32)))\n" "int32x4_t __arm_vcvtpq_x_s32_f32(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_x_u16_f16)))\n" "uint16x8_t __arm_vcvtpq_x_u16_f16(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_x_u32_f32)))\n" "uint32x4_t __arm_vcvtpq_x_u32_f32(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f16_s16)))\n" "float16x8_t __arm_vcvtq_f16_s16(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f16_s16)))\n" "float16x8_t __arm_vcvtq(int16x8_t);\n" "static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f16_u16)))\n" "float16x8_t __arm_vcvtq_f16_u16(uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f16_u16)))\n" "float16x8_t __arm_vcvtq(uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f32_s32)))\n" "float32x4_t __arm_vcvtq_f32_s32(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f32_s32)))\n" "float32x4_t __arm_vcvtq(int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f32_u32)))\n" "float32x4_t __arm_vcvtq_f32_u32(uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f32_u32)))\n" "float32x4_t __arm_vcvtq(uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f16_s16)))\n" "float16x8_t __arm_vcvtq_m_f16_s16(float16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f16_s16)))\n" "float16x8_t __arm_vcvtq_m(float16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f16_u16)))\n" "float16x8_t __arm_vcvtq_m_f16_u16(float16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f16_u16)))\n" "float16x8_t __arm_vcvtq_m(float16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f32_s32)))\n" "float32x4_t __arm_vcvtq_m_f32_s32(float32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f32_s32)))\n" "float32x4_t __arm_vcvtq_m(float32x4_t, int32x4_t, mve_pred16_t);\n" 
"static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f32_u32)))\n" "float32x4_t __arm_vcvtq_m_f32_u32(float32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f32_u32)))\n" "float32x4_t __arm_vcvtq_m(float32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f16_s16)))\n" "float16x8_t __arm_vcvtq_m_n_f16_s16(float16x8_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f16_s16)))\n" "float16x8_t __arm_vcvtq_m_n(float16x8_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f16_u16)))\n" "float16x8_t __arm_vcvtq_m_n_f16_u16(float16x8_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f16_u16)))\n" "float16x8_t __arm_vcvtq_m_n(float16x8_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f32_s32)))\n" "float32x4_t __arm_vcvtq_m_n_f32_s32(float32x4_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f32_s32)))\n" "float32x4_t __arm_vcvtq_m_n(float32x4_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f32_u32)))\n" "float32x4_t __arm_vcvtq_m_n_f32_u32(float32x4_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f32_u32)))\n" "float32x4_t __arm_vcvtq_m_n(float32x4_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_s16_f16)))\n" 
"int16x8_t __arm_vcvtq_m_n_s16_f16(int16x8_t, float16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_s16_f16)))\n" "int16x8_t __arm_vcvtq_m_n(int16x8_t, float16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_s32_f32)))\n" "int32x4_t __arm_vcvtq_m_n_s32_f32(int32x4_t, float32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_s32_f32)))\n" "int32x4_t __arm_vcvtq_m_n(int32x4_t, float32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_u16_f16)))\n" "uint16x8_t __arm_vcvtq_m_n_u16_f16(uint16x8_t, float16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_u16_f16)))\n" "uint16x8_t __arm_vcvtq_m_n(uint16x8_t, float16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_u32_f32)))\n" "uint32x4_t __arm_vcvtq_m_n_u32_f32(uint32x4_t, float32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_u32_f32)))\n" "uint32x4_t __arm_vcvtq_m_n(uint32x4_t, float32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_s16_f16)))\n" "int16x8_t __arm_vcvtq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_s16_f16)))\n" "int16x8_t __arm_vcvtq_m(int16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_s32_f32)))\n" "int32x4_t __arm_vcvtq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_s32_f32)))\n" "int32x4_t __arm_vcvtq_m(int32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_u16_f16)))\n" "uint16x8_t __arm_vcvtq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_u16_f16)))\n" "uint16x8_t __arm_vcvtq_m(uint16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_u32_f32)))\n" "uint32x4_t __arm_vcvtq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_u32_f32)))\n" "uint32x4_t __arm_vcvtq_m(uint32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f16_s16)))\n" "float16x8_t __arm_vcvtq_n_f16_s16(int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f16_s16)))\n" "float16x8_t __arm_vcvtq_n(int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f16_u16)))\n" "float16x8_t __arm_vcvtq_n_f16_u16(uint16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f16_u16)))\n" "float16x8_t __arm_vcvtq_n(uint16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f32_s32)))\n" "float32x4_t __arm_vcvtq_n_f32_s32(int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f32_s32)))\n" "float32x4_t __arm_vcvtq_n(int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f32_u32)))\n" "float32x4_t 
__arm_vcvtq_n_f32_u32(uint32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f32_u32)))\n" "float32x4_t __arm_vcvtq_n(uint32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_s16_f16)))\n" "int16x8_t __arm_vcvtq_n_s16_f16(float16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_s32_f32)))\n" "int32x4_t __arm_vcvtq_n_s32_f32(float32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_u16_f16)))\n" "uint16x8_t __arm_vcvtq_n_u16_f16(float16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_u32_f32)))\n" "uint32x4_t __arm_vcvtq_n_u32_f32(float32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_s16_f16)))\n" "int16x8_t __arm_vcvtq_s16_f16(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_s32_f32)))\n" "int32x4_t __arm_vcvtq_s32_f32(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_u16_f16)))\n" "uint16x8_t __arm_vcvtq_u16_f16(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_u32_f32)))\n" "uint32x4_t __arm_vcvtq_u32_f32(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f16_s16)))\n" "float16x8_t __arm_vcvtq_x_f16_s16(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f16_s16)))\n" "float16x8_t __arm_vcvtq_x(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f16_u16)))\n" "float16x8_t __arm_vcvtq_x_f16_u16(uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f16_u16)))\n" "float16x8_t __arm_vcvtq_x(uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f32_s32)))\n" "float32x4_t __arm_vcvtq_x_f32_s32(int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f32_s32)))\n" "float32x4_t __arm_vcvtq_x(int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f32_u32)))\n" "float32x4_t __arm_vcvtq_x_f32_u32(uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f32_u32)))\n" "float32x4_t __arm_vcvtq_x(uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f16_s16)))\n" "float16x8_t __arm_vcvtq_x_n_f16_s16(int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f16_s16)))\n" "float16x8_t __arm_vcvtq_x_n(int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f16_u16)))\n" "float16x8_t __arm_vcvtq_x_n_f16_u16(uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f16_u16)))\n" "float16x8_t __arm_vcvtq_x_n(uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f32_s32)))\n" "float32x4_t __arm_vcvtq_x_n_f32_s32(int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f32_s32)))\n" "float32x4_t __arm_vcvtq_x_n(int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f32_u32)))\n" "float32x4_t 
__arm_vcvtq_x_n_f32_u32(uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f32_u32)))\n" "float32x4_t __arm_vcvtq_x_n(uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_s16_f16)))\n" "int16x8_t __arm_vcvtq_x_n_s16_f16(float16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_s32_f32)))\n" "int32x4_t __arm_vcvtq_x_n_s32_f32(float32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_u16_f16)))\n" "uint16x8_t __arm_vcvtq_x_n_u16_f16(float16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_u32_f32)))\n" "uint32x4_t __arm_vcvtq_x_n_u32_f32(float32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_s16_f16)))\n" "int16x8_t __arm_vcvtq_x_s16_f16(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_s32_f32)))\n" "int32x4_t __arm_vcvtq_x_s32_f32(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_u16_f16)))\n" "uint16x8_t __arm_vcvtq_x_u16_f16(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_u32_f32)))\n" "uint32x4_t __arm_vcvtq_x_u32_f32(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_f16_f32)))\n" "float16x8_t __arm_vcvttq_f16_f32(float16x8_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_f32_f16)))\n" "float32x4_t __arm_vcvttq_f32_f16(float16x8_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_m_f16_f32)))\n" "float16x8_t __arm_vcvttq_m_f16_f32(float16x8_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_m_f32_f16)))\n" "float32x4_t __arm_vcvttq_m_f32_f16(float32x4_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_x_f32_f16)))\n" "float32x4_t __arm_vcvttq_x_f32_f16(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_f16)))\n" "float16x8_t __arm_vdupq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_f16)))\n" "float16x8_t __arm_vdupq_m(float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_f32)))\n" "float32x4_t __arm_vdupq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_f32)))\n" "float32x4_t __arm_vdupq_m(float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_f16)))\n" "float16x8_t __arm_vdupq_n_f16(float16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_f32)))\n" "float32x4_t __arm_vdupq_n_f32(float32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_f16)))\n" "float16x8_t __arm_vdupq_x_n_f16(float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_f32)))\n" "float32x4_t __arm_vdupq_x_n_f32(float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_f16)))\n" "float16x8_t __arm_veorq_f16(float16x8_t, float16x8_t);\n" "static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_f16)))\n" "float16x8_t __arm_veorq(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_f32)))\n" "float32x4_t __arm_veorq_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_f32)))\n" "float32x4_t __arm_veorq(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_f16)))\n" "float16x8_t __arm_veorq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_f16)))\n" "float16x8_t __arm_veorq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_f32)))\n" "float32x4_t __arm_veorq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_f32)))\n" "float32x4_t __arm_veorq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_f16)))\n" "float16x8_t __arm_veorq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_f16)))\n" "float16x8_t __arm_veorq_x(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_f32)))\n" "float32x4_t __arm_veorq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_f32)))\n" "float32x4_t __arm_veorq_x(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_f16)))\n" "float16x8_t __arm_vfmaq_f16(float16x8_t, float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_f16)))\n" "float16x8_t __arm_vfmaq(float16x8_t, float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_f32)))\n" "float32x4_t __arm_vfmaq_f32(float32x4_t, float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_f32)))\n" "float32x4_t __arm_vfmaq(float32x4_t, float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_f16)))\n" "float16x8_t __arm_vfmaq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_f16)))\n" "float16x8_t __arm_vfmaq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_f32)))\n" "float32x4_t __arm_vfmaq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_f32)))\n" "float32x4_t __arm_vfmaq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_n_f16)))\n" "float16x8_t __arm_vfmaq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_n_f16)))\n" "float16x8_t __arm_vfmaq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_n_f32)))\n" "float32x4_t __arm_vfmaq_m_n_f32(float32x4_t, float32x4_t, float32_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_n_f32)))\n" "float32x4_t __arm_vfmaq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_n_f16)))\n" "float16x8_t __arm_vfmaq_n_f16(float16x8_t, float16x8_t, float16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_n_f16)))\n" "float16x8_t __arm_vfmaq(float16x8_t, float16x8_t, float16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_n_f32)))\n" "float32x4_t __arm_vfmaq_n_f32(float32x4_t, float32x4_t, float32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_n_f32)))\n" "float32x4_t __arm_vfmaq(float32x4_t, float32x4_t, float32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_m_n_f16)))\n" "float16x8_t __arm_vfmasq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_m_n_f16)))\n" "float16x8_t __arm_vfmasq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_m_n_f32)))\n" "float32x4_t __arm_vfmasq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_m_n_f32)))\n" "float32x4_t __arm_vfmasq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_n_f16)))\n" "float16x8_t __arm_vfmasq_n_f16(float16x8_t, float16x8_t, float16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_n_f16)))\n" "float16x8_t 
__arm_vfmasq(float16x8_t, float16x8_t, float16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_n_f32)))\n" "float32x4_t __arm_vfmasq_n_f32(float32x4_t, float32x4_t, float32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_n_f32)))\n" "float32x4_t __arm_vfmasq(float32x4_t, float32x4_t, float32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_f16)))\n" "float16x8_t __arm_vfmsq_f16(float16x8_t, float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_f16)))\n" "float16x8_t __arm_vfmsq(float16x8_t, float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_f32)))\n" "float32x4_t __arm_vfmsq_f32(float32x4_t, float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_f32)))\n" "float32x4_t __arm_vfmsq(float32x4_t, float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_m_f16)))\n" "float16x8_t __arm_vfmsq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_m_f16)))\n" "float16x8_t __arm_vfmsq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_m_f32)))\n" "float32x4_t __arm_vfmsq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_m_f32)))\n" "float32x4_t __arm_vfmsq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_f16)))\n" "float16_t 
__arm_vgetq_lane_f16(float16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_f16)))\n" "float16_t __arm_vgetq_lane(float16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_f32)))\n" "float32_t __arm_vgetq_lane_f32(float32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_f32)))\n" "float32_t __arm_vgetq_lane(float32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_f16)))\n" "float16x8_t __arm_vld1q_f16(const float16_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_f16)))\n" "float16x8_t __arm_vld1q(const float16_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_f32)))\n" "float32x4_t __arm_vld1q_f32(const float32_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_f32)))\n" "float32x4_t __arm_vld1q(const float32_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_f16)))\n" "float16x8_t __arm_vld1q_z_f16(const float16_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_f16)))\n" "float16x8_t __arm_vld1q_z(const float16_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_f32)))\n" "float32x4_t __arm_vld1q_z_f32(const float32_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_f32)))\n" "float32x4_t __arm_vld1q_z(const float32_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_f16)))\n" "float16x8x2_t __arm_vld2q_f16(const float16_t *);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_f16)))\n" "float16x8x2_t __arm_vld2q(const float16_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_f32)))\n" "float32x4x2_t __arm_vld2q_f32(const float32_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_f32)))\n" "float32x4x2_t __arm_vld2q(const float32_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_f16)))\n" "float16x8x4_t __arm_vld4q_f16(const float16_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_f16)))\n" "float16x8x4_t __arm_vld4q(const float16_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_f32)))\n" "float32x4x4_t __arm_vld4q_f32(const float32_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_f32)))\n" "float32x4x4_t __arm_vld4q(const float32_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_f16)))\n" "float16x8_t __arm_vldrhq_f16(const float16_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_f16)))\n" "float16x8_t __arm_vldrhq_gather_offset_f16(const float16_t *, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_f16)))\n" "float16x8_t __arm_vldrhq_gather_offset(const float16_t *, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_f16)))\n" "float16x8_t __arm_vldrhq_gather_offset_z_f16(const float16_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_f16)))\n" "float16x8_t __arm_vldrhq_gather_offset_z(const 
float16_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_f16)))\n" "float16x8_t __arm_vldrhq_gather_shifted_offset_f16(const float16_t *, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_f16)))\n" "float16x8_t __arm_vldrhq_gather_shifted_offset(const float16_t *, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_f16)))\n" "float16x8_t __arm_vldrhq_gather_shifted_offset_z_f16(const float16_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_f16)))\n" "float16x8_t __arm_vldrhq_gather_shifted_offset_z(const float16_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_f16)))\n" "float16x8_t __arm_vldrhq_z_f16(const float16_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_f32)))\n" "float32x4_t __arm_vldrwq_f32(const float32_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_f32)))\n" "float32x4_t __arm_vldrwq_gather_base_f32(uint32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_f32)))\n" "float32x4_t __arm_vldrwq_gather_base_wb_f32(uint32x4_t *, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_f32)))\n" "float32x4_t __arm_vldrwq_gather_base_wb_z_f32(uint32x4_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_z_f32)))\n" "float32x4_t __arm_vldrwq_gather_base_z_f32(uint32x4_t, int, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_f32)))\n" "float32x4_t __arm_vldrwq_gather_offset_f32(const float32_t *, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_f32)))\n" "float32x4_t __arm_vldrwq_gather_offset(const float32_t *, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_f32)))\n" "float32x4_t __arm_vldrwq_gather_offset_z_f32(const float32_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_f32)))\n" "float32x4_t __arm_vldrwq_gather_offset_z(const float32_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_f32)))\n" "float32x4_t __arm_vldrwq_gather_shifted_offset_f32(const float32_t *, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_f32)))\n" "float32x4_t __arm_vldrwq_gather_shifted_offset(const float32_t *, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_f32)))\n" "float32x4_t __arm_vldrwq_gather_shifted_offset_z_f32(const float32_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_f32)))\n" "float32x4_t __arm_vldrwq_gather_shifted_offset_z(const float32_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_z_f32)))\n" "float32x4_t __arm_vldrwq_z_f32(const float32_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_f16)))\n" "float16x8_t __arm_vmaxnmaq_f16(float16x8_t, 
float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_f16)))\n" "float16x8_t __arm_vmaxnmaq(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_f32)))\n" "float32x4_t __arm_vmaxnmaq_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_f32)))\n" "float32x4_t __arm_vmaxnmaq(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_m_f16)))\n" "float16x8_t __arm_vmaxnmaq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_m_f16)))\n" "float16x8_t __arm_vmaxnmaq_m(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_m_f32)))\n" "float32x4_t __arm_vmaxnmaq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_m_f32)))\n" "float32x4_t __arm_vmaxnmaq_m(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_f16)))\n" "float16_t __arm_vmaxnmavq_f16(float16_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_f16)))\n" "float16_t __arm_vmaxnmavq(float16_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_f32)))\n" "float32_t __arm_vmaxnmavq_f32(float32_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_f32)))\n" "float32_t __arm_vmaxnmavq(float32_t, float32x4_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_p_f16)))\n" "float16_t __arm_vmaxnmavq_p_f16(float16_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_p_f16)))\n" "float16_t __arm_vmaxnmavq_p(float16_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_p_f32)))\n" "float32_t __arm_vmaxnmavq_p_f32(float32_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_p_f32)))\n" "float32_t __arm_vmaxnmavq_p(float32_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_f16)))\n" "float16x8_t __arm_vmaxnmq_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_f16)))\n" "float16x8_t __arm_vmaxnmq(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_f32)))\n" "float32x4_t __arm_vmaxnmq_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_f32)))\n" "float32x4_t __arm_vmaxnmq(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_m_f16)))\n" "float16x8_t __arm_vmaxnmq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_m_f16)))\n" "float16x8_t __arm_vmaxnmq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_m_f32)))\n" "float32x4_t __arm_vmaxnmq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_m_f32)))\n" "float32x4_t __arm_vmaxnmq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_x_f16)))\n" "float16x8_t __arm_vmaxnmq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_x_f16)))\n" "float16x8_t __arm_vmaxnmq_x(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_x_f32)))\n" "float32x4_t __arm_vmaxnmq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_x_f32)))\n" "float32x4_t __arm_vmaxnmq_x(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_f16)))\n" "float16_t __arm_vmaxnmvq_f16(float16_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_f16)))\n" "float16_t __arm_vmaxnmvq(float16_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_f32)))\n" "float32_t __arm_vmaxnmvq_f32(float32_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_f32)))\n" "float32_t __arm_vmaxnmvq(float32_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_p_f16)))\n" "float16_t __arm_vmaxnmvq_p_f16(float16_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_p_f16)))\n" "float16_t __arm_vmaxnmvq_p(float16_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_p_f32)))\n" "float32_t 
__arm_vmaxnmvq_p_f32(float32_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_p_f32)))\n" "float32_t __arm_vmaxnmvq_p(float32_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_f16)))\n" "float16x8_t __arm_vminnmaq_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_f16)))\n" "float16x8_t __arm_vminnmaq(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_f32)))\n" "float32x4_t __arm_vminnmaq_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_f32)))\n" "float32x4_t __arm_vminnmaq(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_m_f16)))\n" "float16x8_t __arm_vminnmaq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_m_f16)))\n" "float16x8_t __arm_vminnmaq_m(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_m_f32)))\n" "float32x4_t __arm_vminnmaq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_m_f32)))\n" "float32x4_t __arm_vminnmaq_m(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_f16)))\n" "float16_t __arm_vminnmavq_f16(float16_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_f16)))\n" "float16_t __arm_vminnmavq(float16_t, float16x8_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_f32)))\n" "float32_t __arm_vminnmavq_f32(float32_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_f32)))\n" "float32_t __arm_vminnmavq(float32_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_p_f16)))\n" "float16_t __arm_vminnmavq_p_f16(float16_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_p_f16)))\n" "float16_t __arm_vminnmavq_p(float16_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_p_f32)))\n" "float32_t __arm_vminnmavq_p_f32(float32_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_p_f32)))\n" "float32_t __arm_vminnmavq_p(float32_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_f16)))\n" "float16x8_t __arm_vminnmq_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_f16)))\n" "float16x8_t __arm_vminnmq(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_f32)))\n" "float32x4_t __arm_vminnmq_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_f32)))\n" "float32x4_t __arm_vminnmq(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_m_f16)))\n" "float16x8_t __arm_vminnmq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_m_f16)))\n" 
"float16x8_t __arm_vminnmq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_m_f32)))\n" "float32x4_t __arm_vminnmq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_m_f32)))\n" "float32x4_t __arm_vminnmq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_x_f16)))\n" "float16x8_t __arm_vminnmq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_x_f16)))\n" "float16x8_t __arm_vminnmq_x(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_x_f32)))\n" "float32x4_t __arm_vminnmq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_x_f32)))\n" "float32x4_t __arm_vminnmq_x(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_f16)))\n" "float16_t __arm_vminnmvq_f16(float16_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_f16)))\n" "float16_t __arm_vminnmvq(float16_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_f32)))\n" "float32_t __arm_vminnmvq_f32(float32_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_f32)))\n" "float32_t __arm_vminnmvq(float32_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_p_f16)))\n" "float16_t __arm_vminnmvq_p_f16(float16_t, 
float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_p_f16)))\n" "float16_t __arm_vminnmvq_p(float16_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_p_f32)))\n" "float32_t __arm_vminnmvq_p_f32(float32_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_p_f32)))\n" "float32_t __arm_vminnmvq_p(float32_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_f16)))\n" "float16x8_t __arm_vmulq_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_f16)))\n" "float16x8_t __arm_vmulq(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_f32)))\n" "float32x4_t __arm_vmulq_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_f32)))\n" "float32x4_t __arm_vmulq(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_f16)))\n" "float16x8_t __arm_vmulq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_f16)))\n" "float16x8_t __arm_vmulq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_f32)))\n" "float32x4_t __arm_vmulq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_f32)))\n" "float32x4_t __arm_vmulq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_f16)))\n" "float16x8_t __arm_vmulq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_f16)))\n" "float16x8_t __arm_vmulq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_f32)))\n" "float32x4_t __arm_vmulq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_f32)))\n" "float32x4_t __arm_vmulq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_f16)))\n" "float16x8_t __arm_vmulq_n_f16(float16x8_t, float16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_f16)))\n" "float16x8_t __arm_vmulq(float16x8_t, float16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_f32)))\n" "float32x4_t __arm_vmulq_n_f32(float32x4_t, float32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_f32)))\n" "float32x4_t __arm_vmulq(float32x4_t, float32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_f16)))\n" "float16x8_t __arm_vmulq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_f16)))\n" "float16x8_t __arm_vmulq_x(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_f32)))\n" "float32x4_t __arm_vmulq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_f32)))\n" "float32x4_t __arm_vmulq_x(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_f16)))\n" "float16x8_t __arm_vmulq_x_n_f16(float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_f16)))\n" "float16x8_t __arm_vmulq_x(float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_f32)))\n" "float32x4_t __arm_vmulq_x_n_f32(float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_f32)))\n" "float32x4_t __arm_vmulq_x(float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_f16)))\n" "float16x8_t __arm_vnegq_f16(float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_f16)))\n" "float16x8_t __arm_vnegq(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_f32)))\n" "float32x4_t __arm_vnegq_f32(float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_f32)))\n" "float32x4_t __arm_vnegq(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_f16)))\n" "float16x8_t __arm_vnegq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_f16)))\n" "float16x8_t __arm_vnegq_m(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_f32)))\n" "float32x4_t __arm_vnegq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_f32)))\n" "float32x4_t __arm_vnegq_m(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_f16)))\n" "float16x8_t __arm_vnegq_x_f16(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_f16)))\n" "float16x8_t __arm_vnegq_x(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_f32)))\n" "float32x4_t __arm_vnegq_x_f32(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_f32)))\n" "float32x4_t __arm_vnegq_x(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_f16)))\n" "float16x8_t __arm_vornq_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_f16)))\n" "float16x8_t __arm_vornq(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_f32)))\n" "float32x4_t __arm_vornq_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_f32)))\n" "float32x4_t __arm_vornq(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_f16)))\n" "float16x8_t __arm_vornq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_f16)))\n" "float16x8_t __arm_vornq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_f32)))\n" "float32x4_t __arm_vornq_m_f32(float32x4_t, 
float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_f32)))\n" "float32x4_t __arm_vornq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_f16)))\n" "float16x8_t __arm_vornq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_f16)))\n" "float16x8_t __arm_vornq_x(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_f32)))\n" "float32x4_t __arm_vornq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_f32)))\n" "float32x4_t __arm_vornq_x(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_f16)))\n" "float16x8_t __arm_vorrq_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_f16)))\n" "float16x8_t __arm_vorrq(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_f32)))\n" "float32x4_t __arm_vorrq_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_f32)))\n" "float32x4_t __arm_vorrq(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_f16)))\n" "float16x8_t __arm_vorrq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_f16)))\n" "float16x8_t __arm_vorrq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_f32)))\n" "float32x4_t __arm_vorrq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_f32)))\n" "float32x4_t __arm_vorrq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_f16)))\n" "float16x8_t __arm_vorrq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_f16)))\n" "float16x8_t __arm_vorrq_x(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_f32)))\n" "float32x4_t __arm_vorrq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_f32)))\n" "float32x4_t __arm_vorrq_x(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_f16)))\n" "float16x8_t __arm_vpselq_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_f16)))\n" "float16x8_t __arm_vpselq(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_f32)))\n" "float32x4_t __arm_vpselq_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_f32)))\n" "float32x4_t __arm_vpselq(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_f32)))\n" "float16x8_t __arm_vreinterpretq_f16_f32(float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_f32)))\n" "float16x8_t __arm_vreinterpretq_f16(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s16)))\n" "float16x8_t __arm_vreinterpretq_f16_s16(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s16)))\n" "float16x8_t __arm_vreinterpretq_f16(int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s32)))\n" "float16x8_t __arm_vreinterpretq_f16_s32(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s32)))\n" "float16x8_t __arm_vreinterpretq_f16(int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s64)))\n" "float16x8_t __arm_vreinterpretq_f16_s64(int64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s64)))\n" "float16x8_t __arm_vreinterpretq_f16(int64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s8)))\n" "float16x8_t __arm_vreinterpretq_f16_s8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s8)))\n" "float16x8_t __arm_vreinterpretq_f16(int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u16)))\n" "float16x8_t __arm_vreinterpretq_f16_u16(uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u16)))\n" "float16x8_t __arm_vreinterpretq_f16(uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u32)))\n" "float16x8_t __arm_vreinterpretq_f16_u32(uint32x4_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u32)))\n" "float16x8_t __arm_vreinterpretq_f16(uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u64)))\n" "float16x8_t __arm_vreinterpretq_f16_u64(uint64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u64)))\n" "float16x8_t __arm_vreinterpretq_f16(uint64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u8)))\n" "float16x8_t __arm_vreinterpretq_f16_u8(uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u8)))\n" "float16x8_t __arm_vreinterpretq_f16(uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_f16)))\n" "float32x4_t __arm_vreinterpretq_f32_f16(float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_f16)))\n" "float32x4_t __arm_vreinterpretq_f32(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s16)))\n" "float32x4_t __arm_vreinterpretq_f32_s16(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s16)))\n" "float32x4_t __arm_vreinterpretq_f32(int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s32)))\n" "float32x4_t __arm_vreinterpretq_f32_s32(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s32)))\n" "float32x4_t __arm_vreinterpretq_f32(int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s64)))\n" "float32x4_t 
__arm_vreinterpretq_f32_s64(int64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s64)))\n" "float32x4_t __arm_vreinterpretq_f32(int64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s8)))\n" "float32x4_t __arm_vreinterpretq_f32_s8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s8)))\n" "float32x4_t __arm_vreinterpretq_f32(int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u16)))\n" "float32x4_t __arm_vreinterpretq_f32_u16(uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u16)))\n" "float32x4_t __arm_vreinterpretq_f32(uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u32)))\n" "float32x4_t __arm_vreinterpretq_f32_u32(uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u32)))\n" "float32x4_t __arm_vreinterpretq_f32(uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u64)))\n" "float32x4_t __arm_vreinterpretq_f32_u64(uint64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u64)))\n" "float32x4_t __arm_vreinterpretq_f32(uint64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u8)))\n" "float32x4_t __arm_vreinterpretq_f32_u8(uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u8)))\n" "float32x4_t __arm_vreinterpretq_f32(uint8x16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_f16)))\n" "int16x8_t __arm_vreinterpretq_s16_f16(float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_f16)))\n" "int16x8_t __arm_vreinterpretq_s16(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_f32)))\n" "int16x8_t __arm_vreinterpretq_s16_f32(float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_f32)))\n" "int16x8_t __arm_vreinterpretq_s16(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_f16)))\n" "int32x4_t __arm_vreinterpretq_s32_f16(float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_f16)))\n" "int32x4_t __arm_vreinterpretq_s32(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_f32)))\n" "int32x4_t __arm_vreinterpretq_s32_f32(float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_f32)))\n" "int32x4_t __arm_vreinterpretq_s32(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_f16)))\n" "int64x2_t __arm_vreinterpretq_s64_f16(float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_f16)))\n" "int64x2_t __arm_vreinterpretq_s64(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_f32)))\n" "int64x2_t __arm_vreinterpretq_s64_f32(float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_f32)))\n" "int64x2_t 
__arm_vreinterpretq_s64(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_f16)))\n" "int8x16_t __arm_vreinterpretq_s8_f16(float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_f16)))\n" "int8x16_t __arm_vreinterpretq_s8(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_f32)))\n" "int8x16_t __arm_vreinterpretq_s8_f32(float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_f32)))\n" "int8x16_t __arm_vreinterpretq_s8(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_f16)))\n" "uint16x8_t __arm_vreinterpretq_u16_f16(float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_f16)))\n" "uint16x8_t __arm_vreinterpretq_u16(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_f32)))\n" "uint16x8_t __arm_vreinterpretq_u16_f32(float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_f32)))\n" "uint16x8_t __arm_vreinterpretq_u16(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_f16)))\n" "uint32x4_t __arm_vreinterpretq_u32_f16(float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_f16)))\n" "uint32x4_t __arm_vreinterpretq_u32(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_f32)))\n" "uint32x4_t __arm_vreinterpretq_u32_f32(float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_f32)))\n" "uint32x4_t __arm_vreinterpretq_u32(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_f16)))\n" "uint64x2_t __arm_vreinterpretq_u64_f16(float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_f16)))\n" "uint64x2_t __arm_vreinterpretq_u64(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_f32)))\n" "uint64x2_t __arm_vreinterpretq_u64_f32(float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_f32)))\n" "uint64x2_t __arm_vreinterpretq_u64(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f16)))\n" "uint8x16_t __arm_vreinterpretq_u8_f16(float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f16)))\n" "uint8x16_t __arm_vreinterpretq_u8(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f32)))\n" "uint8x16_t __arm_vreinterpretq_u8_f32(float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f32)))\n" "uint8x16_t __arm_vreinterpretq_u8(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_f16)))\n" "float16x8_t __arm_vrev32q_f16(float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_f16)))\n" "float16x8_t __arm_vrev32q(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_f16)))\n" "float16x8_t __arm_vrev32q_m_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_f16)))\n" "float16x8_t __arm_vrev32q_m(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_f16)))\n" "float16x8_t __arm_vrev32q_x_f16(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_f16)))\n" "float16x8_t __arm_vrev32q_x(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_f16)))\n" "float16x8_t __arm_vrev64q_f16(float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_f16)))\n" "float16x8_t __arm_vrev64q(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_f32)))\n" "float32x4_t __arm_vrev64q_f32(float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_f32)))\n" "float32x4_t __arm_vrev64q(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_f16)))\n" "float16x8_t __arm_vrev64q_m_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_f16)))\n" "float16x8_t __arm_vrev64q_m(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_f32)))\n" "float32x4_t __arm_vrev64q_m_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_f32)))\n" "float32x4_t __arm_vrev64q_m(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_f16)))\n" "float16x8_t __arm_vrev64q_x_f16(float16x8_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_f16)))\n" "float16x8_t __arm_vrev64q_x(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_f32)))\n" "float32x4_t __arm_vrev64q_x_f32(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_f32)))\n" "float32x4_t __arm_vrev64q_x(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_f16)))\n" "float16x8_t __arm_vrndaq_f16(float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_f16)))\n" "float16x8_t __arm_vrndaq(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_f32)))\n" "float32x4_t __arm_vrndaq_f32(float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_f32)))\n" "float32x4_t __arm_vrndaq(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_m_f16)))\n" "float16x8_t __arm_vrndaq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_m_f16)))\n" "float16x8_t __arm_vrndaq_m(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_m_f32)))\n" "float32x4_t __arm_vrndaq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_m_f32)))\n" "float32x4_t __arm_vrndaq_m(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_x_f16)))\n" "float16x8_t __arm_vrndaq_x_f16(float16x8_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_x_f16)))\n" "float16x8_t __arm_vrndaq_x(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_x_f32)))\n" "float32x4_t __arm_vrndaq_x_f32(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_x_f32)))\n" "float32x4_t __arm_vrndaq_x(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_f16)))\n" "float16x8_t __arm_vrndmq_f16(float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_f16)))\n" "float16x8_t __arm_vrndmq(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_f32)))\n" "float32x4_t __arm_vrndmq_f32(float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_f32)))\n" "float32x4_t __arm_vrndmq(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_m_f16)))\n" "float16x8_t __arm_vrndmq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_m_f16)))\n" "float16x8_t __arm_vrndmq_m(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_m_f32)))\n" "float32x4_t __arm_vrndmq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_m_f32)))\n" "float32x4_t __arm_vrndmq_m(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_x_f16)))\n" "float16x8_t __arm_vrndmq_x_f16(float16x8_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_x_f16)))\n" "float16x8_t __arm_vrndmq_x(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_x_f32)))\n" "float32x4_t __arm_vrndmq_x_f32(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_x_f32)))\n" "float32x4_t __arm_vrndmq_x(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_f16)))\n" "float16x8_t __arm_vrndnq_f16(float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_f16)))\n" "float16x8_t __arm_vrndnq(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_f32)))\n" "float32x4_t __arm_vrndnq_f32(float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_f32)))\n" "float32x4_t __arm_vrndnq(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_m_f16)))\n" "float16x8_t __arm_vrndnq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_m_f16)))\n" "float16x8_t __arm_vrndnq_m(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_m_f32)))\n" "float32x4_t __arm_vrndnq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_m_f32)))\n" "float32x4_t __arm_vrndnq_m(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_x_f16)))\n" "float16x8_t __arm_vrndnq_x_f16(float16x8_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_x_f16)))\n" "float16x8_t __arm_vrndnq_x(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_x_f32)))\n" "float32x4_t __arm_vrndnq_x_f32(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_x_f32)))\n" "float32x4_t __arm_vrndnq_x(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_f16)))\n" "float16x8_t __arm_vrndpq_f16(float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_f16)))\n" "float16x8_t __arm_vrndpq(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_f32)))\n" "float32x4_t __arm_vrndpq_f32(float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_f32)))\n" "float32x4_t __arm_vrndpq(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_m_f16)))\n" "float16x8_t __arm_vrndpq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_m_f16)))\n" "float16x8_t __arm_vrndpq_m(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_m_f32)))\n" "float32x4_t __arm_vrndpq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_m_f32)))\n" "float32x4_t __arm_vrndpq_m(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_x_f16)))\n" "float16x8_t __arm_vrndpq_x_f16(float16x8_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_x_f16)))\n" "float16x8_t __arm_vrndpq_x(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_x_f32)))\n" "float32x4_t __arm_vrndpq_x_f32(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_x_f32)))\n" "float32x4_t __arm_vrndpq_x(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_f16)))\n" "float16x8_t __arm_vrndq_f16(float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_f16)))\n" "float16x8_t __arm_vrndq(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_f32)))\n" "float32x4_t __arm_vrndq_f32(float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_f32)))\n" "float32x4_t __arm_vrndq(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_m_f16)))\n" "float16x8_t __arm_vrndq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_m_f16)))\n" "float16x8_t __arm_vrndq_m(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_m_f32)))\n" "float32x4_t __arm_vrndq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_m_f32)))\n" "float32x4_t __arm_vrndq_m(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_x_f16)))\n" "float16x8_t __arm_vrndq_x_f16(float16x8_t, mve_pred16_t);\n" "static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_x_f16)))\n" "float16x8_t __arm_vrndq_x(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_x_f32)))\n" "float32x4_t __arm_vrndq_x_f32(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_x_f32)))\n" "float32x4_t __arm_vrndq_x(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_f16)))\n" "float16x8_t __arm_vrndxq_f16(float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_f16)))\n" "float16x8_t __arm_vrndxq(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_f32)))\n" "float32x4_t __arm_vrndxq_f32(float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_f32)))\n" "float32x4_t __arm_vrndxq(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_m_f16)))\n" "float16x8_t __arm_vrndxq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_m_f16)))\n" "float16x8_t __arm_vrndxq_m(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_m_f32)))\n" "float32x4_t __arm_vrndxq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_m_f32)))\n" "float32x4_t __arm_vrndxq_m(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_x_f16)))\n" "float16x8_t __arm_vrndxq_x_f16(float16x8_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_x_f16)))\n" "float16x8_t __arm_vrndxq_x(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_x_f32)))\n" "float32x4_t __arm_vrndxq_x_f32(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_x_f32)))\n" "float32x4_t __arm_vrndxq_x(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_f16)))\n" "float16x8_t __arm_vsetq_lane_f16(float16_t, float16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_f16)))\n" "float16x8_t __arm_vsetq_lane(float16_t, float16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_f32)))\n" "float32x4_t __arm_vsetq_lane_f32(float32_t, float32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_f32)))\n" "float32x4_t __arm_vsetq_lane(float32_t, float32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_f16)))\n" "void __arm_vst1q_f16(float16_t *, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_f16)))\n" "void __arm_vst1q(float16_t *, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_f32)))\n" "void __arm_vst1q_f32(float32_t *, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_f32)))\n" "void __arm_vst1q(float32_t *, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_f16)))\n" "void __arm_vst1q_p_f16(float16_t *, float16x8_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_f16)))\n" "void __arm_vst1q_p(float16_t *, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_f32)))\n" "void __arm_vst1q_p_f32(float32_t *, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_f32)))\n" "void __arm_vst1q_p(float32_t *, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_f16)))\n" "void __arm_vst2q_f16(float16_t *, float16x8x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_f16)))\n" "void __arm_vst2q(float16_t *, float16x8x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_f32)))\n" "void __arm_vst2q_f32(float32_t *, float32x4x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_f32)))\n" "void __arm_vst2q(float32_t *, float32x4x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_f16)))\n" "void __arm_vst4q_f16(float16_t *, float16x8x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_f16)))\n" "void __arm_vst4q(float16_t *, float16x8x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_f32)))\n" "void __arm_vst4q_f32(float32_t *, float32x4x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_f32)))\n" "void __arm_vst4q(float32_t *, float32x4x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_f16)))\n" "void __arm_vstrhq_f16(float16_t *, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_f16)))\n" "void __arm_vstrhq(float16_t *, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_f16)))\n" "void __arm_vstrhq_p_f16(float16_t *, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_f16)))\n" "void __arm_vstrhq_p(float16_t *, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_f16)))\n" "void __arm_vstrhq_scatter_offset_f16(float16_t *, uint16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_f16)))\n" "void __arm_vstrhq_scatter_offset(float16_t *, uint16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_f16)))\n" "void __arm_vstrhq_scatter_offset_p_f16(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_f16)))\n" "void __arm_vstrhq_scatter_offset_p(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_f16)))\n" "void __arm_vstrhq_scatter_shifted_offset_f16(float16_t *, uint16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_f16)))\n" "void __arm_vstrhq_scatter_shifted_offset(float16_t *, uint16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_f16)))\n" "void __arm_vstrhq_scatter_shifted_offset_p_f16(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_f16)))\n" "void __arm_vstrhq_scatter_shifted_offset_p(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_f32)))\n" "void __arm_vstrwq_f32(float32_t *, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_f32)))\n" "void __arm_vstrwq(float32_t *, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_f32)))\n" "void __arm_vstrwq_p_f32(float32_t *, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_f32)))\n" "void __arm_vstrwq_p(float32_t *, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_f32)))\n" "void __arm_vstrwq_scatter_base_f32(uint32x4_t, int, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_f32)))\n" "void __arm_vstrwq_scatter_base(uint32x4_t, int, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_f32)))\n" "void __arm_vstrwq_scatter_base_p_f32(uint32x4_t, int, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_f32)))\n" "void __arm_vstrwq_scatter_base_p(uint32x4_t, int, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_f32)))\n" "void __arm_vstrwq_scatter_base_wb_f32(uint32x4_t *, int, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_f32)))\n" "void __arm_vstrwq_scatter_base_wb(uint32x4_t *, int, 
float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_f32)))\n" "void __arm_vstrwq_scatter_base_wb_p_f32(uint32x4_t *, int, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_f32)))\n" "void __arm_vstrwq_scatter_base_wb_p(uint32x4_t *, int, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_f32)))\n" "void __arm_vstrwq_scatter_offset_f32(float32_t *, uint32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_f32)))\n" "void __arm_vstrwq_scatter_offset(float32_t *, uint32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_f32)))\n" "void __arm_vstrwq_scatter_offset_p_f32(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_f32)))\n" "void __arm_vstrwq_scatter_offset_p(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_f32)))\n" "void __arm_vstrwq_scatter_shifted_offset_f32(float32_t *, uint32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_f32)))\n" "void __arm_vstrwq_scatter_shifted_offset(float32_t *, uint32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_f32)))\n" "void __arm_vstrwq_scatter_shifted_offset_p_f32(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_f32)))\n" "void __arm_vstrwq_scatter_shifted_offset_p(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_f16)))\n" "float16x8_t __arm_vsubq_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_f16)))\n" "float16x8_t __arm_vsubq(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_f32)))\n" "float32x4_t __arm_vsubq_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_f32)))\n" "float32x4_t __arm_vsubq(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_f16)))\n" "float16x8_t __arm_vsubq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_f16)))\n" "float16x8_t __arm_vsubq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_f32)))\n" "float32x4_t __arm_vsubq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_f32)))\n" "float32x4_t __arm_vsubq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_f16)))\n" "float16x8_t __arm_vsubq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_f16)))\n" "float16x8_t __arm_vsubq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_f32)))\n" "float32x4_t __arm_vsubq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_f32)))\n" "float32x4_t __arm_vsubq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_f16)))\n" "float16x8_t __arm_vsubq_n_f16(float16x8_t, float16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_f16)))\n" "float16x8_t __arm_vsubq(float16x8_t, float16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_f32)))\n" "float32x4_t __arm_vsubq_n_f32(float32x4_t, float32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_f32)))\n" "float32x4_t __arm_vsubq(float32x4_t, float32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_f16)))\n" "float16x8_t __arm_vsubq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_f16)))\n" "float16x8_t __arm_vsubq_x(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_f32)))\n" "float32x4_t __arm_vsubq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_f32)))\n" "float32x4_t __arm_vsubq_x(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_f16)))\n" "float16x8_t __arm_vsubq_x_n_f16(float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_f16)))\n" "float16x8_t __arm_vsubq_x(float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_f32)))\n" "float32x4_t __arm_vsubq_x_n_f32(float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_f32)))\n" "float32x4_t __arm_vsubq_x(float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_f16)))\n" "float16x8_t __arm_vuninitializedq_f16();\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_f32)))\n" "float32x4_t __arm_vuninitializedq_f32();\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_f16)))\n" "float16x8_t __arm_vuninitializedq(float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_f32)))\n" "float32x4_t __arm_vuninitializedq(float32x4_t);\n" "\n" "#endif /* (__ARM_FEATURE_MVE & 2) */\n" "\n" "#if (!defined __ARM_MVE_PRESERVE_USER_NAMESPACE)\n" "\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_asrl)))\n" "int64_t asrl(int64_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_lsll)))\n" "uint64_t lsll(uint64_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqrshr)))\n" "int32_t sqrshr(int32_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqrshrl)))\n" "int64_t sqrshrl(int64_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqrshrl_sat48)))\n" "int64_t sqrshrl_sat48(int64_t, int32_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqshl)))\n" "int32_t sqshl(int32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqshll)))\n" "int64_t sqshll(int64_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_srshr)))\n" "int32_t srshr(int32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_srshrl)))\n" "int64_t srshrl(int64_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqrshl)))\n" "uint32_t uqrshl(uint32_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqrshll)))\n" "uint64_t uqrshll(uint64_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqrshll_sat48)))\n" "uint64_t uqrshll_sat48(uint64_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqshl)))\n" "uint32_t uqshl(uint32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqshll)))\n" "uint64_t uqshll(uint64_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_urshr)))\n" "uint32_t urshr(uint32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_urshrl)))\n" "uint64_t urshrl(uint64_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s16)))\n" "uint32_t vabavq_p_s16(uint32_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s16)))\n" "uint32_t vabavq_p(uint32_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s32)))\n" "uint32_t vabavq_p_s32(uint32_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s32)))\n" "uint32_t vabavq_p(uint32_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s8)))\n" "uint32_t vabavq_p_s8(uint32_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s8)))\n" "uint32_t vabavq_p(uint32_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u16)))\n" "uint32_t vabavq_p_u16(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u16)))\n" "uint32_t vabavq_p(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u32)))\n" "uint32_t vabavq_p_u32(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u32)))\n" "uint32_t vabavq_p(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u8)))\n" "uint32_t vabavq_p_u8(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u8)))\n" "uint32_t vabavq_p(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s16)))\n" "uint32_t vabavq_s16(uint32_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s16)))\n" "uint32_t vabavq(uint32_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s32)))\n" "uint32_t vabavq_s32(uint32_t, 
int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s32)))\n" "uint32_t vabavq(uint32_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s8)))\n" "uint32_t vabavq_s8(uint32_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s8)))\n" "uint32_t vabavq(uint32_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u16)))\n" "uint32_t vabavq_u16(uint32_t, uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u16)))\n" "uint32_t vabavq(uint32_t, uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u32)))\n" "uint32_t vabavq_u32(uint32_t, uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u32)))\n" "uint32_t vabavq(uint32_t, uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u8)))\n" "uint32_t vabavq_u8(uint32_t, uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u8)))\n" "uint32_t vabavq(uint32_t, uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s16)))\n" "int16x8_t vabdq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s16)))\n" "int16x8_t vabdq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s32)))\n" "int32x4_t vabdq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" 
"static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s32)))\n" "int32x4_t vabdq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s8)))\n" "int8x16_t vabdq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s8)))\n" "int8x16_t vabdq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u16)))\n" "uint16x8_t vabdq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u16)))\n" "uint16x8_t vabdq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u32)))\n" "uint32x4_t vabdq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u32)))\n" "uint32x4_t vabdq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u8)))\n" "uint8x16_t vabdq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u8)))\n" "uint8x16_t vabdq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s16)))\n" "int16x8_t vabdq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s16)))\n" "int16x8_t vabdq(int16x8_t, int16x8_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s32)))\n" "int32x4_t vabdq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s32)))\n" "int32x4_t vabdq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s8)))\n" "int8x16_t vabdq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s8)))\n" "int8x16_t vabdq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u16)))\n" "uint16x8_t vabdq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u16)))\n" "uint16x8_t vabdq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u32)))\n" "uint32x4_t vabdq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u32)))\n" "uint32x4_t vabdq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u8)))\n" "uint8x16_t vabdq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u8)))\n" "uint8x16_t vabdq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s16)))\n" "int16x8_t vabdq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s16)))\n" "int16x8_t vabdq_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s32)))\n" "int32x4_t vabdq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s32)))\n" "int32x4_t vabdq_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s8)))\n" "int8x16_t vabdq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s8)))\n" "int8x16_t vabdq_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u16)))\n" "uint16x8_t vabdq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u16)))\n" "uint16x8_t vabdq_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u32)))\n" "uint32x4_t vabdq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u32)))\n" "uint32x4_t vabdq_x(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u8)))\n" "uint8x16_t vabdq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u8)))\n" "uint8x16_t vabdq_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s16)))\n" "int16x8_t vabsq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s16)))\n" "int16x8_t vabsq_m(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s32)))\n" "int32x4_t vabsq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s32)))\n" "int32x4_t vabsq_m(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s8)))\n" "int8x16_t vabsq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s8)))\n" "int8x16_t vabsq_m(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s16)))\n" "int16x8_t vabsq_s16(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s16)))\n" "int16x8_t vabsq(int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s32)))\n" "int32x4_t vabsq_s32(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s32)))\n" "int32x4_t vabsq(int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s8)))\n" "int8x16_t vabsq_s8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s8)))\n" "int8x16_t vabsq(int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s16)))\n" "int16x8_t vabsq_x_s16(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s16)))\n" "int16x8_t vabsq_x(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s32)))\n" "int32x4_t vabsq_x_s32(int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s32)))\n" "int32x4_t vabsq_x(int32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s8)))\n" "int8x16_t vabsq_x_s8(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s8)))\n" "int8x16_t vabsq_x(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadciq_m_s32)))\n" "int32x4_t vadciq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadciq_m_s32)))\n" "int32x4_t vadciq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadciq_m_u32)))\n" "uint32x4_t vadciq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadciq_m_u32)))\n" "uint32x4_t vadciq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadciq_s32)))\n" "int32x4_t vadciq_s32(int32x4_t, int32x4_t, unsigned *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadciq_s32)))\n" "int32x4_t vadciq(int32x4_t, int32x4_t, unsigned *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadciq_u32)))\n" "uint32x4_t vadciq_u32(uint32x4_t, uint32x4_t, unsigned *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadciq_u32)))\n" "uint32x4_t vadciq(uint32x4_t, uint32x4_t, unsigned *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadcq_m_s32)))\n" "int32x4_t vadcq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vadcq_m_s32)))\n" "int32x4_t vadcq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadcq_m_u32)))\n" "uint32x4_t vadcq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadcq_m_u32)))\n" "uint32x4_t vadcq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadcq_s32)))\n" "int32x4_t vadcq_s32(int32x4_t, int32x4_t, unsigned *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadcq_s32)))\n" "int32x4_t vadcq(int32x4_t, int32x4_t, unsigned *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadcq_u32)))\n" "uint32x4_t vadcq_u32(uint32x4_t, uint32x4_t, unsigned *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadcq_u32)))\n" "uint32x4_t vadcq(uint32x4_t, uint32x4_t, unsigned *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_p_s32)))\n" "int64_t vaddlvaq_p_s32(int64_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_p_s32)))\n" "int64_t vaddlvaq_p(int64_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_p_u32)))\n" "uint64_t vaddlvaq_p_u32(uint64_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_p_u32)))\n" "uint64_t vaddlvaq_p(uint64_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_s32)))\n" "int64_t vaddlvaq_s32(int64_t, int32x4_t);\n" 
"static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_s32)))\n" "int64_t vaddlvaq(int64_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_u32)))\n" "uint64_t vaddlvaq_u32(uint64_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_u32)))\n" "uint64_t vaddlvaq(uint64_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_p_s32)))\n" "int64_t vaddlvq_p_s32(int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_p_s32)))\n" "int64_t vaddlvq_p(int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_p_u32)))\n" "uint64_t vaddlvq_p_u32(uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_p_u32)))\n" "uint64_t vaddlvq_p(uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_s32)))\n" "int64_t vaddlvq_s32(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_s32)))\n" "int64_t vaddlvq(int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_u32)))\n" "uint64_t vaddlvq_u32(uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_u32)))\n" "uint64_t vaddlvq(uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s16)))\n" "int16x8_t vaddq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s16)))\n" "int16x8_t vaddq_m(int16x8_t, int16x8_t, int16_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s32)))\n" "int32x4_t vaddq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s32)))\n" "int32x4_t vaddq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s8)))\n" "int8x16_t vaddq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s8)))\n" "int8x16_t vaddq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u16)))\n" "uint16x8_t vaddq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u16)))\n" "uint16x8_t vaddq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u32)))\n" "uint32x4_t vaddq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u32)))\n" "uint32x4_t vaddq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u8)))\n" "uint8x16_t vaddq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u8)))\n" "uint8x16_t vaddq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s16)))\n" "int16x8_t vaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s16)))\n" "int16x8_t vaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s32)))\n" "int32x4_t vaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s32)))\n" "int32x4_t vaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s8)))\n" "int8x16_t vaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s8)))\n" "int8x16_t vaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u16)))\n" "uint16x8_t vaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u16)))\n" "uint16x8_t vaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u32)))\n" "uint32x4_t vaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u32)))\n" "uint32x4_t vaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u8)))\n" "uint8x16_t vaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u8)))\n" "uint8x16_t vaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s16)))\n" "int16x8_t vaddq_n_s16(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s16)))\n" "int16x8_t vaddq(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s32)))\n" "int32x4_t vaddq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s32)))\n" "int32x4_t vaddq(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s8)))\n" "int8x16_t vaddq_n_s8(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s8)))\n" "int8x16_t vaddq(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u16)))\n" "uint16x8_t vaddq_n_u16(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u16)))\n" "uint16x8_t vaddq(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u32)))\n" "uint32x4_t vaddq_n_u32(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u32)))\n" "uint32x4_t vaddq(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u8)))\n" "uint8x16_t vaddq_n_u8(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u8)))\n" "uint8x16_t vaddq(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s16)))\n" "int16x8_t vaddq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s16)))\n" "int16x8_t vaddq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s32)))\n" "int32x4_t vaddq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s32)))\n" "int32x4_t vaddq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s8)))\n" "int8x16_t vaddq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s8)))\n" "int8x16_t vaddq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u16)))\n" "uint16x8_t vaddq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u16)))\n" "uint16x8_t vaddq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u32)))\n" "uint32x4_t vaddq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u32)))\n" "uint32x4_t vaddq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u8)))\n" "uint8x16_t vaddq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u8)))\n" "uint8x16_t vaddq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s16)))\n" "int16x8_t vaddq_x_n_s16(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s16)))\n" "int16x8_t vaddq_x(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s32)))\n" "int32x4_t vaddq_x_n_s32(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s32)))\n" "int32x4_t vaddq_x(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s8)))\n" "int8x16_t vaddq_x_n_s8(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s8)))\n" "int8x16_t vaddq_x(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u16)))\n" "uint16x8_t vaddq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u16)))\n" "uint16x8_t vaddq_x(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u32)))\n" "uint32x4_t vaddq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u32)))\n" "uint32x4_t vaddq_x(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u8)))\n" "uint8x16_t vaddq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u8)))\n" "uint8x16_t vaddq_x(uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s16)))\n" "int16x8_t vaddq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s16)))\n" "int16x8_t vaddq_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s32)))\n" "int32x4_t vaddq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s32)))\n" "int32x4_t vaddq_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s8)))\n" "int8x16_t vaddq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s8)))\n" "int8x16_t vaddq_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u16)))\n" "uint16x8_t vaddq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u16)))\n" "uint16x8_t vaddq_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u32)))\n" "uint32x4_t vaddq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u32)))\n" "uint32x4_t vaddq_x(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u8)))\n" "uint8x16_t vaddq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u8)))\n" "uint8x16_t vaddq_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s16)))\n" "int32_t vaddvaq_p_s16(int32_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s16)))\n" "int32_t vaddvaq_p(int32_t, int16x8_t, mve_pred16_t);\n" 
"static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s32)))\n" "int32_t vaddvaq_p_s32(int32_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s32)))\n" "int32_t vaddvaq_p(int32_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s8)))\n" "int32_t vaddvaq_p_s8(int32_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s8)))\n" "int32_t vaddvaq_p(int32_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u16)))\n" "uint32_t vaddvaq_p_u16(uint32_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u16)))\n" "uint32_t vaddvaq_p(uint32_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u32)))\n" "uint32_t vaddvaq_p_u32(uint32_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u32)))\n" "uint32_t vaddvaq_p(uint32_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u8)))\n" "uint32_t vaddvaq_p_u8(uint32_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u8)))\n" "uint32_t vaddvaq_p(uint32_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s16)))\n" "int32_t vaddvaq_s16(int32_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s16)))\n" "int32_t vaddvaq(int32_t, int16x8_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s32)))\n" "int32_t vaddvaq_s32(int32_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s32)))\n" "int32_t vaddvaq(int32_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s8)))\n" "int32_t vaddvaq_s8(int32_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s8)))\n" "int32_t vaddvaq(int32_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u16)))\n" "uint32_t vaddvaq_u16(uint32_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u16)))\n" "uint32_t vaddvaq(uint32_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u32)))\n" "uint32_t vaddvaq_u32(uint32_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u32)))\n" "uint32_t vaddvaq(uint32_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u8)))\n" "uint32_t vaddvaq_u8(uint32_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u8)))\n" "uint32_t vaddvaq(uint32_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s16)))\n" "int32_t vaddvq_p_s16(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s16)))\n" "int32_t vaddvq_p(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s32)))\n" "int32_t vaddvq_p_s32(int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s32)))\n" "int32_t vaddvq_p(int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s8)))\n" "int32_t vaddvq_p_s8(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s8)))\n" "int32_t vaddvq_p(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u16)))\n" "uint32_t vaddvq_p_u16(uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u16)))\n" "uint32_t vaddvq_p(uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u32)))\n" "uint32_t vaddvq_p_u32(uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u32)))\n" "uint32_t vaddvq_p(uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u8)))\n" "uint32_t vaddvq_p_u8(uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u8)))\n" "uint32_t vaddvq_p(uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s16)))\n" "int32_t vaddvq_s16(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s16)))\n" "int32_t vaddvq(int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s32)))\n" "int32_t vaddvq_s32(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s32)))\n" "int32_t vaddvq(int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s8)))\n" 
"int32_t vaddvq_s8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s8)))\n" "int32_t vaddvq(int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u16)))\n" "uint32_t vaddvq_u16(uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u16)))\n" "uint32_t vaddvq(uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u32)))\n" "uint32_t vaddvq_u32(uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u32)))\n" "uint32_t vaddvq(uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u8)))\n" "uint32_t vaddvq_u8(uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u8)))\n" "uint32_t vaddvq(uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s16)))\n" "int16x8_t vandq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s16)))\n" "int16x8_t vandq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s32)))\n" "int32x4_t vandq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s32)))\n" "int32x4_t vandq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s8)))\n" "int8x16_t vandq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s8)))\n" 
"int8x16_t vandq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u16)))\n" "uint16x8_t vandq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u16)))\n" "uint16x8_t vandq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u32)))\n" "uint32x4_t vandq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u32)))\n" "uint32x4_t vandq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u8)))\n" "uint8x16_t vandq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u8)))\n" "uint8x16_t vandq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_s16)))\n" "int16x8_t vandq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_s16)))\n" "int16x8_t vandq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_s32)))\n" "int32x4_t vandq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_s32)))\n" "int32x4_t vandq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_s8)))\n" "int8x16_t vandq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_s8)))\n" 
"int8x16_t vandq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_u16)))\n" "uint16x8_t vandq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_u16)))\n" "uint16x8_t vandq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_u32)))\n" "uint32x4_t vandq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_u32)))\n" "uint32x4_t vandq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_u8)))\n" "uint8x16_t vandq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_u8)))\n" "uint8x16_t vandq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s16)))\n" "int16x8_t vandq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s16)))\n" "int16x8_t vandq_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s32)))\n" "int32x4_t vandq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s32)))\n" "int32x4_t vandq_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s8)))\n" "int8x16_t vandq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s8)))\n" "int8x16_t vandq_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u16)))\n" "uint16x8_t vandq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u16)))\n" "uint16x8_t vandq_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u32)))\n" "uint32x4_t vandq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u32)))\n" "uint32x4_t vandq_x(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u8)))\n" "uint8x16_t vandq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u8)))\n" "uint8x16_t vandq_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_s16)))\n" "int16x8_t vbicq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_s16)))\n" "int16x8_t vbicq_m_n(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_s32)))\n" "int32x4_t vbicq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_s32)))\n" "int32x4_t vbicq_m_n(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_u16)))\n" "uint16x8_t vbicq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_u16)))\n" "uint16x8_t vbicq_m_n(uint16x8_t, uint16_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_u32)))\n" "uint32x4_t vbicq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_u32)))\n" "uint32x4_t vbicq_m_n(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s16)))\n" "int16x8_t vbicq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s16)))\n" "int16x8_t vbicq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s32)))\n" "int32x4_t vbicq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s32)))\n" "int32x4_t vbicq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s8)))\n" "int8x16_t vbicq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s8)))\n" "int8x16_t vbicq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u16)))\n" "uint16x8_t vbicq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u16)))\n" "uint16x8_t vbicq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u32)))\n" "uint32x4_t vbicq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u32)))\n" "uint32x4_t vbicq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u8)))\n" "uint8x16_t vbicq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u8)))\n" "uint8x16_t vbicq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_s16)))\n" "int16x8_t vbicq_n_s16(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_s16)))\n" "int16x8_t vbicq(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_s32)))\n" "int32x4_t vbicq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_s32)))\n" "int32x4_t vbicq(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_u16)))\n" "uint16x8_t vbicq_n_u16(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_u16)))\n" "uint16x8_t vbicq(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_u32)))\n" "uint32x4_t vbicq_n_u32(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_u32)))\n" "uint32x4_t vbicq(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s16)))\n" "int16x8_t vbicq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s16)))\n" "int16x8_t 
vbicq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s32)))\n" "int32x4_t vbicq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s32)))\n" "int32x4_t vbicq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s8)))\n" "int8x16_t vbicq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s8)))\n" "int8x16_t vbicq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u16)))\n" "uint16x8_t vbicq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u16)))\n" "uint16x8_t vbicq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u32)))\n" "uint32x4_t vbicq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u32)))\n" "uint32x4_t vbicq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u8)))\n" "uint8x16_t vbicq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u8)))\n" "uint8x16_t vbicq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s16)))\n" "int16x8_t vbicq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s16)))\n" "int16x8_t vbicq_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s32)))\n" "int32x4_t vbicq_x_s32(int32x4_t, int32x4_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s32)))\n" "int32x4_t vbicq_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s8)))\n" "int8x16_t vbicq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s8)))\n" "int8x16_t vbicq_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u16)))\n" "uint16x8_t vbicq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u16)))\n" "uint16x8_t vbicq_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u32)))\n" "uint32x4_t vbicq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u32)))\n" "uint32x4_t vbicq_x(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u8)))\n" "uint8x16_t vbicq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u8)))\n" "uint8x16_t vbicq_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s16)))\n" "int16x8_t vbrsrq_m_n_s16(int16x8_t, int16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s16)))\n" "int16x8_t vbrsrq_m(int16x8_t, int16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s32)))\n" 
"int32x4_t vbrsrq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s32)))\n" "int32x4_t vbrsrq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s8)))\n" "int8x16_t vbrsrq_m_n_s8(int8x16_t, int8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s8)))\n" "int8x16_t vbrsrq_m(int8x16_t, int8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u16)))\n" "uint16x8_t vbrsrq_m_n_u16(uint16x8_t, uint16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u16)))\n" "uint16x8_t vbrsrq_m(uint16x8_t, uint16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u32)))\n" "uint32x4_t vbrsrq_m_n_u32(uint32x4_t, uint32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u32)))\n" "uint32x4_t vbrsrq_m(uint32x4_t, uint32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u8)))\n" "uint8x16_t vbrsrq_m_n_u8(uint8x16_t, uint8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u8)))\n" "uint8x16_t vbrsrq_m(uint8x16_t, uint8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s16)))\n" "int16x8_t vbrsrq_n_s16(int16x8_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s16)))\n" "int16x8_t 
vbrsrq(int16x8_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s32)))\n" "int32x4_t vbrsrq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s32)))\n" "int32x4_t vbrsrq(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s8)))\n" "int8x16_t vbrsrq_n_s8(int8x16_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s8)))\n" "int8x16_t vbrsrq(int8x16_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u16)))\n" "uint16x8_t vbrsrq_n_u16(uint16x8_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u16)))\n" "uint16x8_t vbrsrq(uint16x8_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u32)))\n" "uint32x4_t vbrsrq_n_u32(uint32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u32)))\n" "uint32x4_t vbrsrq(uint32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u8)))\n" "uint8x16_t vbrsrq_n_u8(uint8x16_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u8)))\n" "uint8x16_t vbrsrq(uint8x16_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s16)))\n" "int16x8_t vbrsrq_x_n_s16(int16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s16)))\n" "int16x8_t vbrsrq_x(int16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s32)))\n" "int32x4_t 
vbrsrq_x_n_s32(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s32)))\n" "int32x4_t vbrsrq_x(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s8)))\n" "int8x16_t vbrsrq_x_n_s8(int8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s8)))\n" "int8x16_t vbrsrq_x(int8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u16)))\n" "uint16x8_t vbrsrq_x_n_u16(uint16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u16)))\n" "uint16x8_t vbrsrq_x(uint16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u32)))\n" "uint32x4_t vbrsrq_x_n_u32(uint32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u32)))\n" "uint32x4_t vbrsrq_x(uint32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u8)))\n" "uint8x16_t vbrsrq_x_n_u8(uint8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u8)))\n" "uint8x16_t vbrsrq_x(uint8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s16)))\n" "int16x8_t vcaddq_rot270_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s16)))\n" "int16x8_t vcaddq_rot270_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s32)))\n" "int32x4_t vcaddq_rot270_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s32)))\n" "int32x4_t vcaddq_rot270_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s8)))\n" "int8x16_t vcaddq_rot270_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s8)))\n" "int8x16_t vcaddq_rot270_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u16)))\n" "uint16x8_t vcaddq_rot270_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u16)))\n" "uint16x8_t vcaddq_rot270_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u32)))\n" "uint32x4_t vcaddq_rot270_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u32)))\n" "uint32x4_t vcaddq_rot270_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u8)))\n" "uint8x16_t vcaddq_rot270_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u8)))\n" "uint8x16_t vcaddq_rot270_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s16)))\n" "int16x8_t vcaddq_rot270_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s16)))\n" "int16x8_t vcaddq_rot270(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s32)))\n" "int32x4_t vcaddq_rot270_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s32)))\n" "int32x4_t vcaddq_rot270(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s8)))\n" "int8x16_t vcaddq_rot270_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s8)))\n" "int8x16_t vcaddq_rot270(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u16)))\n" "uint16x8_t vcaddq_rot270_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u16)))\n" "uint16x8_t vcaddq_rot270(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u32)))\n" "uint32x4_t vcaddq_rot270_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u32)))\n" "uint32x4_t vcaddq_rot270(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u8)))\n" "uint8x16_t vcaddq_rot270_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u8)))\n" "uint8x16_t vcaddq_rot270(uint8x16_t, uint8x16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s16)))\n" "int16x8_t vcaddq_rot270_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s16)))\n" "int16x8_t vcaddq_rot270_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s32)))\n" "int32x4_t vcaddq_rot270_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s32)))\n" "int32x4_t vcaddq_rot270_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s8)))\n" "int8x16_t vcaddq_rot270_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s8)))\n" "int8x16_t vcaddq_rot270_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u16)))\n" "uint16x8_t vcaddq_rot270_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u16)))\n" "uint16x8_t vcaddq_rot270_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u32)))\n" "uint32x4_t vcaddq_rot270_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u32)))\n" "uint32x4_t vcaddq_rot270_x(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u8)))\n" "uint8x16_t vcaddq_rot270_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u8)))\n" "uint8x16_t vcaddq_rot270_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s16)))\n" "int16x8_t vcaddq_rot90_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s16)))\n" "int16x8_t vcaddq_rot90_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s32)))\n" "int32x4_t vcaddq_rot90_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s32)))\n" "int32x4_t vcaddq_rot90_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s8)))\n" "int8x16_t vcaddq_rot90_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s8)))\n" "int8x16_t vcaddq_rot90_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u16)))\n" "uint16x8_t vcaddq_rot90_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u16)))\n" "uint16x8_t vcaddq_rot90_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u32)))\n" "uint32x4_t vcaddq_rot90_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u32)))\n" "uint32x4_t vcaddq_rot90_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u8)))\n" "uint8x16_t vcaddq_rot90_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u8)))\n" "uint8x16_t vcaddq_rot90_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s16)))\n" "int16x8_t vcaddq_rot90_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s16)))\n" "int16x8_t vcaddq_rot90(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s32)))\n" "int32x4_t vcaddq_rot90_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s32)))\n" "int32x4_t vcaddq_rot90(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s8)))\n" "int8x16_t vcaddq_rot90_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s8)))\n" "int8x16_t vcaddq_rot90(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u16)))\n" "uint16x8_t vcaddq_rot90_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u16)))\n" "uint16x8_t vcaddq_rot90(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u32)))\n" "uint32x4_t vcaddq_rot90_u32(uint32x4_t, uint32x4_t);\n" 
"static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u32)))\n" "uint32x4_t vcaddq_rot90(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u8)))\n" "uint8x16_t vcaddq_rot90_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u8)))\n" "uint8x16_t vcaddq_rot90(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s16)))\n" "int16x8_t vcaddq_rot90_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s16)))\n" "int16x8_t vcaddq_rot90_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s32)))\n" "int32x4_t vcaddq_rot90_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s32)))\n" "int32x4_t vcaddq_rot90_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s8)))\n" "int8x16_t vcaddq_rot90_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s8)))\n" "int8x16_t vcaddq_rot90_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u16)))\n" "uint16x8_t vcaddq_rot90_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u16)))\n" "uint16x8_t vcaddq_rot90_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u32)))\n" "uint32x4_t vcaddq_rot90_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u32)))\n" "uint32x4_t vcaddq_rot90_x(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u8)))\n" "uint8x16_t vcaddq_rot90_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u8)))\n" "uint8x16_t vcaddq_rot90_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s16)))\n" "int16x8_t vclsq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s16)))\n" "int16x8_t vclsq_m(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s32)))\n" "int32x4_t vclsq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s32)))\n" "int32x4_t vclsq_m(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s8)))\n" "int8x16_t vclsq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s8)))\n" "int8x16_t vclsq_m(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s16)))\n" "int16x8_t vclsq_s16(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s16)))\n" "int16x8_t vclsq(int16x8_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s32)))\n" "int32x4_t vclsq_s32(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s32)))\n" "int32x4_t vclsq(int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s8)))\n" "int8x16_t vclsq_s8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s8)))\n" "int8x16_t vclsq(int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s16)))\n" "int16x8_t vclsq_x_s16(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s16)))\n" "int16x8_t vclsq_x(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s32)))\n" "int32x4_t vclsq_x_s32(int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s32)))\n" "int32x4_t vclsq_x(int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s8)))\n" "int8x16_t vclsq_x_s8(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s8)))\n" "int8x16_t vclsq_x(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s16)))\n" "int16x8_t vclzq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s16)))\n" "int16x8_t vclzq_m(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s32)))\n" "int32x4_t vclzq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s32)))\n" "int32x4_t vclzq_m(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s8)))\n" "int8x16_t vclzq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s8)))\n" "int8x16_t vclzq_m(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u16)))\n" "uint16x8_t vclzq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u16)))\n" "uint16x8_t vclzq_m(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u32)))\n" "uint32x4_t vclzq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u32)))\n" "uint32x4_t vclzq_m(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u8)))\n" "uint8x16_t vclzq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u8)))\n" "uint8x16_t vclzq_m(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s16)))\n" "int16x8_t vclzq_s16(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s16)))\n" "int16x8_t vclzq(int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s32)))\n" "int32x4_t vclzq_s32(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s32)))\n" "int32x4_t 
vclzq(int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s8)))\n" "int8x16_t vclzq_s8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s8)))\n" "int8x16_t vclzq(int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u16)))\n" "uint16x8_t vclzq_u16(uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u16)))\n" "uint16x8_t vclzq(uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u32)))\n" "uint32x4_t vclzq_u32(uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u32)))\n" "uint32x4_t vclzq(uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u8)))\n" "uint8x16_t vclzq_u8(uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u8)))\n" "uint8x16_t vclzq(uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s16)))\n" "int16x8_t vclzq_x_s16(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s16)))\n" "int16x8_t vclzq_x(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s32)))\n" "int32x4_t vclzq_x_s32(int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s32)))\n" "int32x4_t vclzq_x(int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s8)))\n" "int8x16_t vclzq_x_s8(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s8)))\n" "int8x16_t vclzq_x(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u16)))\n" "uint16x8_t vclzq_x_u16(uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u16)))\n" "uint16x8_t vclzq_x(uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u32)))\n" "uint32x4_t vclzq_x_u32(uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u32)))\n" "uint32x4_t vclzq_x(uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u8)))\n" "uint8x16_t vclzq_x_u8(uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u8)))\n" "uint8x16_t vclzq_x(uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u16)))\n" "mve_pred16_t vcmpcsq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u16)))\n" "mve_pred16_t vcmpcsq_m(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u32)))\n" "mve_pred16_t vcmpcsq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u32)))\n" "mve_pred16_t vcmpcsq_m(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u8)))\n" "mve_pred16_t vcmpcsq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u8)))\n" "mve_pred16_t vcmpcsq_m(uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u16)))\n" "mve_pred16_t vcmpcsq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u16)))\n" "mve_pred16_t vcmpcsq_m(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u32)))\n" "mve_pred16_t vcmpcsq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u32)))\n" "mve_pred16_t vcmpcsq_m(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u8)))\n" "mve_pred16_t vcmpcsq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u8)))\n" "mve_pred16_t vcmpcsq_m(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u16)))\n" "mve_pred16_t vcmpcsq_n_u16(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u16)))\n" "mve_pred16_t vcmpcsq(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u32)))\n" "mve_pred16_t vcmpcsq_n_u32(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u32)))\n" "mve_pred16_t vcmpcsq(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u8)))\n" "mve_pred16_t vcmpcsq_n_u8(uint8x16_t, uint8_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u8)))\n" "mve_pred16_t vcmpcsq(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u16)))\n" "mve_pred16_t vcmpcsq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u16)))\n" "mve_pred16_t vcmpcsq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u32)))\n" "mve_pred16_t vcmpcsq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u32)))\n" "mve_pred16_t vcmpcsq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u8)))\n" "mve_pred16_t vcmpcsq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u8)))\n" "mve_pred16_t vcmpcsq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s16)))\n" "mve_pred16_t vcmpeqq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s16)))\n" "mve_pred16_t vcmpeqq_m(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s32)))\n" "mve_pred16_t vcmpeqq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s32)))\n" "mve_pred16_t vcmpeqq_m(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s8)))\n" "mve_pred16_t vcmpeqq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s8)))\n" "mve_pred16_t vcmpeqq_m(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u16)))\n" "mve_pred16_t vcmpeqq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u16)))\n" "mve_pred16_t vcmpeqq_m(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u32)))\n" "mve_pred16_t vcmpeqq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u32)))\n" "mve_pred16_t vcmpeqq_m(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u8)))\n" "mve_pred16_t vcmpeqq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u8)))\n" "mve_pred16_t vcmpeqq_m(uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s16)))\n" "mve_pred16_t vcmpeqq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s16)))\n" "mve_pred16_t vcmpeqq_m(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s32)))\n" "mve_pred16_t vcmpeqq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s32)))\n" "mve_pred16_t vcmpeqq_m(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s8)))\n" "mve_pred16_t 
vcmpeqq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s8)))\n" "mve_pred16_t vcmpeqq_m(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u16)))\n" "mve_pred16_t vcmpeqq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u16)))\n" "mve_pred16_t vcmpeqq_m(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u32)))\n" "mve_pred16_t vcmpeqq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u32)))\n" "mve_pred16_t vcmpeqq_m(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u8)))\n" "mve_pred16_t vcmpeqq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u8)))\n" "mve_pred16_t vcmpeqq_m(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s16)))\n" "mve_pred16_t vcmpeqq_n_s16(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s16)))\n" "mve_pred16_t vcmpeqq(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s32)))\n" "mve_pred16_t vcmpeqq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s32)))\n" "mve_pred16_t vcmpeqq(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s8)))\n" 
"mve_pred16_t vcmpeqq_n_s8(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s8)))\n" "mve_pred16_t vcmpeqq(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u16)))\n" "mve_pred16_t vcmpeqq_n_u16(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u16)))\n" "mve_pred16_t vcmpeqq(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u32)))\n" "mve_pred16_t vcmpeqq_n_u32(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u32)))\n" "mve_pred16_t vcmpeqq(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u8)))\n" "mve_pred16_t vcmpeqq_n_u8(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u8)))\n" "mve_pred16_t vcmpeqq(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s16)))\n" "mve_pred16_t vcmpeqq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s16)))\n" "mve_pred16_t vcmpeqq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s32)))\n" "mve_pred16_t vcmpeqq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s32)))\n" "mve_pred16_t vcmpeqq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s8)))\n" "mve_pred16_t vcmpeqq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s8)))\n" "mve_pred16_t vcmpeqq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u16)))\n" "mve_pred16_t vcmpeqq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u16)))\n" "mve_pred16_t vcmpeqq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u32)))\n" "mve_pred16_t vcmpeqq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u32)))\n" "mve_pred16_t vcmpeqq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u8)))\n" "mve_pred16_t vcmpeqq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u8)))\n" "mve_pred16_t vcmpeqq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s16)))\n" "mve_pred16_t vcmpgeq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s16)))\n" "mve_pred16_t vcmpgeq_m(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s32)))\n" "mve_pred16_t vcmpgeq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s32)))\n" "mve_pred16_t vcmpgeq_m(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s8)))\n" "mve_pred16_t vcmpgeq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s8)))\n" "mve_pred16_t vcmpgeq_m(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s16)))\n" "mve_pred16_t vcmpgeq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s16)))\n" "mve_pred16_t vcmpgeq_m(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s32)))\n" "mve_pred16_t vcmpgeq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s32)))\n" "mve_pred16_t vcmpgeq_m(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s8)))\n" "mve_pred16_t vcmpgeq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s8)))\n" "mve_pred16_t vcmpgeq_m(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s16)))\n" "mve_pred16_t vcmpgeq_n_s16(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s16)))\n" "mve_pred16_t vcmpgeq(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s32)))\n" "mve_pred16_t vcmpgeq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s32)))\n" "mve_pred16_t vcmpgeq(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s8)))\n" "mve_pred16_t vcmpgeq_n_s8(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s8)))\n" "mve_pred16_t vcmpgeq(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s16)))\n" "mve_pred16_t vcmpgeq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s16)))\n" "mve_pred16_t vcmpgeq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s32)))\n" "mve_pred16_t vcmpgeq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s32)))\n" "mve_pred16_t vcmpgeq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s8)))\n" "mve_pred16_t vcmpgeq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s8)))\n" "mve_pred16_t vcmpgeq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s16)))\n" "mve_pred16_t vcmpgtq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s16)))\n" "mve_pred16_t vcmpgtq_m(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s32)))\n" "mve_pred16_t vcmpgtq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s32)))\n" "mve_pred16_t vcmpgtq_m(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s8)))\n" "mve_pred16_t vcmpgtq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s8)))\n" "mve_pred16_t vcmpgtq_m(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s16)))\n" "mve_pred16_t vcmpgtq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s16)))\n" "mve_pred16_t vcmpgtq_m(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s32)))\n" "mve_pred16_t vcmpgtq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s32)))\n" "mve_pred16_t vcmpgtq_m(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s8)))\n" "mve_pred16_t vcmpgtq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s8)))\n" "mve_pred16_t vcmpgtq_m(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s16)))\n" "mve_pred16_t vcmpgtq_n_s16(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s16)))\n" "mve_pred16_t vcmpgtq(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s32)))\n" "mve_pred16_t vcmpgtq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s32)))\n" "mve_pred16_t vcmpgtq(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s8)))\n" "mve_pred16_t vcmpgtq_n_s8(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s8)))\n" "mve_pred16_t vcmpgtq(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s16)))\n" "mve_pred16_t vcmpgtq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s16)))\n" "mve_pred16_t vcmpgtq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s32)))\n" "mve_pred16_t vcmpgtq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s32)))\n" "mve_pred16_t vcmpgtq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s8)))\n" "mve_pred16_t vcmpgtq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s8)))\n" "mve_pred16_t vcmpgtq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u16)))\n" "mve_pred16_t vcmphiq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u16)))\n" "mve_pred16_t vcmphiq_m(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u32)))\n" "mve_pred16_t vcmphiq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u32)))\n" "mve_pred16_t vcmphiq_m(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u8)))\n" "mve_pred16_t vcmphiq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u8)))\n" "mve_pred16_t vcmphiq_m(uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u16)))\n" "mve_pred16_t vcmphiq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u16)))\n" "mve_pred16_t vcmphiq_m(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u32)))\n" "mve_pred16_t vcmphiq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u32)))\n" "mve_pred16_t vcmphiq_m(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u8)))\n" "mve_pred16_t vcmphiq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u8)))\n" "mve_pred16_t vcmphiq_m(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u16)))\n" "mve_pred16_t vcmphiq_n_u16(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u16)))\n" "mve_pred16_t vcmphiq(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u32)))\n" "mve_pred16_t vcmphiq_n_u32(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u32)))\n" "mve_pred16_t vcmphiq(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u8)))\n" "mve_pred16_t vcmphiq_n_u8(uint8x16_t, uint8_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u8)))\n" "mve_pred16_t vcmphiq(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u16)))\n" "mve_pred16_t vcmphiq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u16)))\n" "mve_pred16_t vcmphiq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u32)))\n" "mve_pred16_t vcmphiq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u32)))\n" "mve_pred16_t vcmphiq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u8)))\n" "mve_pred16_t vcmphiq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u8)))\n" "mve_pred16_t vcmphiq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s16)))\n" "mve_pred16_t vcmpleq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s16)))\n" "mve_pred16_t vcmpleq_m(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s32)))\n" "mve_pred16_t vcmpleq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s32)))\n" "mve_pred16_t vcmpleq_m(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s8)))\n" "mve_pred16_t vcmpleq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s8)))\n" "mve_pred16_t vcmpleq_m(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s16)))\n" "mve_pred16_t vcmpleq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s16)))\n" "mve_pred16_t vcmpleq_m(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s32)))\n" "mve_pred16_t vcmpleq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s32)))\n" "mve_pred16_t vcmpleq_m(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s8)))\n" "mve_pred16_t vcmpleq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s8)))\n" "mve_pred16_t vcmpleq_m(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s16)))\n" "mve_pred16_t vcmpleq_n_s16(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s16)))\n" "mve_pred16_t vcmpleq(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s32)))\n" "mve_pred16_t vcmpleq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s32)))\n" "mve_pred16_t vcmpleq(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s8)))\n" "mve_pred16_t vcmpleq_n_s8(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s8)))\n" "mve_pred16_t vcmpleq(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s16)))\n" "mve_pred16_t vcmpleq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s16)))\n" "mve_pred16_t vcmpleq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s32)))\n" "mve_pred16_t vcmpleq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s32)))\n" "mve_pred16_t vcmpleq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s8)))\n" "mve_pred16_t vcmpleq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s8)))\n" "mve_pred16_t vcmpleq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s16)))\n" "mve_pred16_t vcmpltq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s16)))\n" "mve_pred16_t vcmpltq_m(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s32)))\n" "mve_pred16_t vcmpltq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s32)))\n" "mve_pred16_t vcmpltq_m(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s8)))\n" "mve_pred16_t vcmpltq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s8)))\n" "mve_pred16_t vcmpltq_m(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s16)))\n" "mve_pred16_t vcmpltq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s16)))\n" "mve_pred16_t vcmpltq_m(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s32)))\n" "mve_pred16_t vcmpltq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s32)))\n" "mve_pred16_t vcmpltq_m(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s8)))\n" "mve_pred16_t vcmpltq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s8)))\n" "mve_pred16_t vcmpltq_m(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s16)))\n" "mve_pred16_t vcmpltq_n_s16(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s16)))\n" "mve_pred16_t vcmpltq(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s32)))\n" "mve_pred16_t vcmpltq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s32)))\n" "mve_pred16_t vcmpltq(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s8)))\n" "mve_pred16_t vcmpltq_n_s8(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s8)))\n" "mve_pred16_t vcmpltq(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s16)))\n" "mve_pred16_t vcmpltq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s16)))\n" "mve_pred16_t vcmpltq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s32)))\n" "mve_pred16_t vcmpltq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s32)))\n" "mve_pred16_t vcmpltq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s8)))\n" "mve_pred16_t vcmpltq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s8)))\n" "mve_pred16_t vcmpltq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s16)))\n" "mve_pred16_t vcmpneq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s16)))\n" "mve_pred16_t vcmpneq_m(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s32)))\n" "mve_pred16_t vcmpneq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s32)))\n" "mve_pred16_t vcmpneq_m(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s8)))\n" "mve_pred16_t vcmpneq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s8)))\n" "mve_pred16_t vcmpneq_m(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u16)))\n" "mve_pred16_t vcmpneq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u16)))\n" "mve_pred16_t vcmpneq_m(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u32)))\n" "mve_pred16_t vcmpneq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u32)))\n" "mve_pred16_t vcmpneq_m(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u8)))\n" "mve_pred16_t vcmpneq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u8)))\n" "mve_pred16_t vcmpneq_m(uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s16)))\n" "mve_pred16_t vcmpneq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s16)))\n" "mve_pred16_t vcmpneq_m(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s32)))\n" "mve_pred16_t vcmpneq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s32)))\n" "mve_pred16_t vcmpneq_m(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s8)))\n" "mve_pred16_t 
vcmpneq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s8)))\n" "mve_pred16_t vcmpneq_m(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u16)))\n" "mve_pred16_t vcmpneq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u16)))\n" "mve_pred16_t vcmpneq_m(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u32)))\n" "mve_pred16_t vcmpneq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u32)))\n" "mve_pred16_t vcmpneq_m(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u8)))\n" "mve_pred16_t vcmpneq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u8)))\n" "mve_pred16_t vcmpneq_m(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s16)))\n" "mve_pred16_t vcmpneq_n_s16(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s16)))\n" "mve_pred16_t vcmpneq(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s32)))\n" "mve_pred16_t vcmpneq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s32)))\n" "mve_pred16_t vcmpneq(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s8)))\n" 
"mve_pred16_t vcmpneq_n_s8(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s8)))\n" "mve_pred16_t vcmpneq(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u16)))\n" "mve_pred16_t vcmpneq_n_u16(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u16)))\n" "mve_pred16_t vcmpneq(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u32)))\n" "mve_pred16_t vcmpneq_n_u32(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u32)))\n" "mve_pred16_t vcmpneq(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u8)))\n" "mve_pred16_t vcmpneq_n_u8(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u8)))\n" "mve_pred16_t vcmpneq(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s16)))\n" "mve_pred16_t vcmpneq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s16)))\n" "mve_pred16_t vcmpneq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s32)))\n" "mve_pred16_t vcmpneq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s32)))\n" "mve_pred16_t vcmpneq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s8)))\n" "mve_pred16_t vcmpneq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s8)))\n" "mve_pred16_t vcmpneq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u16)))\n" "mve_pred16_t vcmpneq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u16)))\n" "mve_pred16_t vcmpneq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u32)))\n" "mve_pred16_t vcmpneq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u32)))\n" "mve_pred16_t vcmpneq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u8)))\n" "mve_pred16_t vcmpneq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u8)))\n" "mve_pred16_t vcmpneq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_s16)))\n" "int16x8_t vcreateq_s16(uint64_t, uint64_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_s32)))\n" "int32x4_t vcreateq_s32(uint64_t, uint64_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_s64)))\n" "int64x2_t vcreateq_s64(uint64_t, uint64_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_s8)))\n" "int8x16_t vcreateq_s8(uint64_t, uint64_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_u16)))\n" "uint16x8_t vcreateq_u16(uint64_t, uint64_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_u32)))\n" "uint32x4_t vcreateq_u32(uint64_t, uint64_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_u64)))\n" "uint64x2_t vcreateq_u64(uint64_t, uint64_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_u8)))\n" "uint8x16_t vcreateq_u8(uint64_t, uint64_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp16q)))\n" "mve_pred16_t vctp16q(uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp16q_m)))\n" "mve_pred16_t vctp16q_m(uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp32q)))\n" "mve_pred16_t vctp32q(uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp32q_m)))\n" "mve_pred16_t vctp32q_m(uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp64q)))\n" "mve_pred16_t vctp64q(uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp64q_m)))\n" "mve_pred16_t vctp64q_m(uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp8q)))\n" "mve_pred16_t vctp8q(uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp8q_m)))\n" "mve_pred16_t vctp8q_m(uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u16)))\n" "uint16x8_t vddupq_m_n_u16(uint16x8_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u16)))\n" "uint16x8_t vddupq_m(uint16x8_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u32)))\n" "uint32x4_t vddupq_m_n_u32(uint32x4_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u32)))\n" "uint32x4_t vddupq_m(uint32x4_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u8)))\n" "uint8x16_t vddupq_m_n_u8(uint8x16_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u8)))\n" "uint8x16_t vddupq_m(uint8x16_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u16)))\n" "uint16x8_t vddupq_m_wb_u16(uint16x8_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u16)))\n" "uint16x8_t vddupq_m(uint16x8_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u32)))\n" "uint32x4_t vddupq_m_wb_u32(uint32x4_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u32)))\n" "uint32x4_t vddupq_m(uint32x4_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u8)))\n" "uint8x16_t vddupq_m_wb_u8(uint8x16_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u8)))\n" "uint8x16_t vddupq_m(uint8x16_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u16)))\n" "uint16x8_t vddupq_n_u16(uint32_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u16)))\n" "uint16x8_t vddupq_u16(uint32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u32)))\n" "uint32x4_t vddupq_n_u32(uint32_t, 
int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u32)))\n" "uint32x4_t vddupq_u32(uint32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u8)))\n" "uint8x16_t vddupq_n_u8(uint32_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u8)))\n" "uint8x16_t vddupq_u8(uint32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u16)))\n" "uint16x8_t vddupq_wb_u16(uint32_t *, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u16)))\n" "uint16x8_t vddupq_u16(uint32_t *, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u32)))\n" "uint32x4_t vddupq_wb_u32(uint32_t *, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u32)))\n" "uint32x4_t vddupq_u32(uint32_t *, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u8)))\n" "uint8x16_t vddupq_wb_u8(uint32_t *, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u8)))\n" "uint8x16_t vddupq_u8(uint32_t *, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u16)))\n" "uint16x8_t vddupq_x_n_u16(uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u16)))\n" "uint16x8_t vddupq_x_u16(uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u32)))\n" "uint32x4_t vddupq_x_n_u32(uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u32)))\n" "uint32x4_t 
vddupq_x_u32(uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u8)))\n" "uint8x16_t vddupq_x_n_u8(uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u8)))\n" "uint8x16_t vddupq_x_u8(uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u16)))\n" "uint16x8_t vddupq_x_wb_u16(uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u16)))\n" "uint16x8_t vddupq_x_u16(uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u32)))\n" "uint32x4_t vddupq_x_wb_u32(uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u32)))\n" "uint32x4_t vddupq_x_u32(uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u8)))\n" "uint8x16_t vddupq_x_wb_u8(uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u8)))\n" "uint8x16_t vddupq_x_u8(uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s16)))\n" "int16x8_t vdupq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s16)))\n" "int16x8_t vdupq_m(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s32)))\n" "int32x4_t vdupq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s32)))\n" "int32x4_t vdupq_m(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s8)))\n" "int8x16_t vdupq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s8)))\n" "int8x16_t vdupq_m(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u16)))\n" "uint16x8_t vdupq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u16)))\n" "uint16x8_t vdupq_m(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u32)))\n" "uint32x4_t vdupq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u32)))\n" "uint32x4_t vdupq_m(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u8)))\n" "uint8x16_t vdupq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u8)))\n" "uint8x16_t vdupq_m(uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_s16)))\n" "int16x8_t vdupq_n_s16(int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_s32)))\n" "int32x4_t vdupq_n_s32(int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_s8)))\n" "int8x16_t vdupq_n_s8(int8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_u16)))\n" "uint16x8_t vdupq_n_u16(uint16_t);\n" "static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_u32)))\n" "uint32x4_t vdupq_n_u32(uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_u8)))\n" "uint8x16_t vdupq_n_u8(uint8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_s16)))\n" "int16x8_t vdupq_x_n_s16(int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_s32)))\n" "int32x4_t vdupq_x_n_s32(int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_s8)))\n" "int8x16_t vdupq_x_n_s8(int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_u16)))\n" "uint16x8_t vdupq_x_n_u16(uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_u32)))\n" "uint32x4_t vdupq_x_n_u32(uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_u8)))\n" "uint8x16_t vdupq_x_n_u8(uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u16)))\n" "uint16x8_t vdwdupq_m_n_u16(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u16)))\n" "uint16x8_t vdwdupq_m(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u32)))\n" "uint32x4_t vdwdupq_m_n_u32(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u32)))\n" "uint32x4_t vdwdupq_m(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u8)))\n" "uint8x16_t vdwdupq_m_n_u8(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u8)))\n" "uint8x16_t vdwdupq_m(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u16)))\n" "uint16x8_t vdwdupq_m_wb_u16(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u16)))\n" "uint16x8_t vdwdupq_m(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u32)))\n" "uint32x4_t vdwdupq_m_wb_u32(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u32)))\n" "uint32x4_t vdwdupq_m(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u8)))\n" "uint8x16_t vdwdupq_m_wb_u8(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u8)))\n" "uint8x16_t vdwdupq_m(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u16)))\n" "uint16x8_t vdwdupq_n_u16(uint32_t, uint32_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u16)))\n" "uint16x8_t vdwdupq_u16(uint32_t, uint32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u32)))\n" "uint32x4_t vdwdupq_n_u32(uint32_t, uint32_t, int);\n" "static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u32)))\n" "uint32x4_t vdwdupq_u32(uint32_t, uint32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u8)))\n" "uint8x16_t vdwdupq_n_u8(uint32_t, uint32_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u8)))\n" "uint8x16_t vdwdupq_u8(uint32_t, uint32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u16)))\n" "uint16x8_t vdwdupq_wb_u16(uint32_t *, uint32_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u16)))\n" "uint16x8_t vdwdupq_u16(uint32_t *, uint32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u32)))\n" "uint32x4_t vdwdupq_wb_u32(uint32_t *, uint32_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u32)))\n" "uint32x4_t vdwdupq_u32(uint32_t *, uint32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u8)))\n" "uint8x16_t vdwdupq_wb_u8(uint32_t *, uint32_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u8)))\n" "uint8x16_t vdwdupq_u8(uint32_t *, uint32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u16)))\n" "uint16x8_t vdwdupq_x_n_u16(uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u16)))\n" "uint16x8_t vdwdupq_x_u16(uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u32)))\n" "uint32x4_t vdwdupq_x_n_u32(uint32_t, uint32_t, int, mve_pred16_t);\n" "static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u32)))\n" "uint32x4_t vdwdupq_x_u32(uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u8)))\n" "uint8x16_t vdwdupq_x_n_u8(uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u8)))\n" "uint8x16_t vdwdupq_x_u8(uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u16)))\n" "uint16x8_t vdwdupq_x_wb_u16(uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u16)))\n" "uint16x8_t vdwdupq_x_u16(uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u32)))\n" "uint32x4_t vdwdupq_x_wb_u32(uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u32)))\n" "uint32x4_t vdwdupq_x_u32(uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u8)))\n" "uint8x16_t vdwdupq_x_wb_u8(uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u8)))\n" "uint8x16_t vdwdupq_x_u8(uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s16)))\n" "int16x8_t veorq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s16)))\n" "int16x8_t veorq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s32)))\n" "int32x4_t veorq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s32)))\n" "int32x4_t veorq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s8)))\n" "int8x16_t veorq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s8)))\n" "int8x16_t veorq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u16)))\n" "uint16x8_t veorq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u16)))\n" "uint16x8_t veorq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u32)))\n" "uint32x4_t veorq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u32)))\n" "uint32x4_t veorq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u8)))\n" "uint8x16_t veorq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u8)))\n" "uint8x16_t veorq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_s16)))\n" "int16x8_t veorq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_veorq_s16)))\n" "int16x8_t veorq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_s32)))\n" "int32x4_t veorq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_s32)))\n" "int32x4_t veorq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_s8)))\n" "int8x16_t veorq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_s8)))\n" "int8x16_t veorq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_u16)))\n" "uint16x8_t veorq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_u16)))\n" "uint16x8_t veorq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_u32)))\n" "uint32x4_t veorq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_u32)))\n" "uint32x4_t veorq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_u8)))\n" "uint8x16_t veorq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_u8)))\n" "uint8x16_t veorq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s16)))\n" "int16x8_t veorq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s16)))\n" "int16x8_t veorq_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s32)))\n" "int32x4_t veorq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s32)))\n" "int32x4_t veorq_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s8)))\n" "int8x16_t veorq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s8)))\n" "int8x16_t veorq_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u16)))\n" "uint16x8_t veorq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u16)))\n" "uint16x8_t veorq_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u32)))\n" "uint32x4_t veorq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u32)))\n" "uint32x4_t veorq_x(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u8)))\n" "uint8x16_t veorq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u8)))\n" "uint8x16_t veorq_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s16)))\n" "int16_t vgetq_lane_s16(int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s16)))\n" "int16_t vgetq_lane(int16x8_t, int);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s32)))\n" "int32_t vgetq_lane_s32(int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s32)))\n" "int32_t vgetq_lane(int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s64)))\n" "int64_t vgetq_lane_s64(int64x2_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s64)))\n" "int64_t vgetq_lane(int64x2_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s8)))\n" "int8_t vgetq_lane_s8(int8x16_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s8)))\n" "int8_t vgetq_lane(int8x16_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u16)))\n" "uint16_t vgetq_lane_u16(uint16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u16)))\n" "uint16_t vgetq_lane(uint16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u32)))\n" "uint32_t vgetq_lane_u32(uint32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u32)))\n" "uint32_t vgetq_lane(uint32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u64)))\n" "uint64_t vgetq_lane_u64(uint64x2_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u64)))\n" "uint64_t vgetq_lane(uint64x2_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u8)))\n" "uint8_t vgetq_lane_u8(uint8x16_t, int);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u8)))\n" "uint8_t vgetq_lane(uint8x16_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s16)))\n" "int16x8_t vhaddq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s16)))\n" "int16x8_t vhaddq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s32)))\n" "int32x4_t vhaddq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s32)))\n" "int32x4_t vhaddq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s8)))\n" "int8x16_t vhaddq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s8)))\n" "int8x16_t vhaddq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u16)))\n" "uint16x8_t vhaddq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u16)))\n" "uint16x8_t vhaddq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u32)))\n" "uint32x4_t vhaddq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u32)))\n" "uint32x4_t vhaddq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u8)))\n" "uint8x16_t vhaddq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u8)))\n" "uint8x16_t vhaddq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s16)))\n" "int16x8_t vhaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s16)))\n" "int16x8_t vhaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s32)))\n" "int32x4_t vhaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s32)))\n" "int32x4_t vhaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s8)))\n" "int8x16_t vhaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s8)))\n" "int8x16_t vhaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u16)))\n" "uint16x8_t vhaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u16)))\n" "uint16x8_t vhaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u32)))\n" "uint32x4_t vhaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u32)))\n" "uint32x4_t vhaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u8)))\n" "uint8x16_t vhaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u8)))\n" "uint8x16_t vhaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s16)))\n" "int16x8_t vhaddq_n_s16(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s16)))\n" "int16x8_t vhaddq(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s32)))\n" "int32x4_t vhaddq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s32)))\n" "int32x4_t vhaddq(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s8)))\n" "int8x16_t vhaddq_n_s8(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s8)))\n" "int8x16_t vhaddq(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u16)))\n" "uint16x8_t vhaddq_n_u16(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u16)))\n" "uint16x8_t vhaddq(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u32)))\n" "uint32x4_t vhaddq_n_u32(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u32)))\n" "uint32x4_t vhaddq(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u8)))\n" "uint8x16_t vhaddq_n_u8(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u8)))\n" "uint8x16_t vhaddq(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s16)))\n" "int16x8_t vhaddq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s16)))\n" "int16x8_t vhaddq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s32)))\n" "int32x4_t vhaddq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s32)))\n" "int32x4_t vhaddq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s8)))\n" "int8x16_t vhaddq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s8)))\n" "int8x16_t vhaddq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u16)))\n" "uint16x8_t vhaddq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u16)))\n" "uint16x8_t vhaddq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u32)))\n" "uint32x4_t vhaddq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u32)))\n" "uint32x4_t vhaddq(uint32x4_t, uint32x4_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u8)))\n" "uint8x16_t vhaddq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u8)))\n" "uint8x16_t vhaddq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s16)))\n" "int16x8_t vhaddq_x_n_s16(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s16)))\n" "int16x8_t vhaddq_x(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s32)))\n" "int32x4_t vhaddq_x_n_s32(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s32)))\n" "int32x4_t vhaddq_x(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s8)))\n" "int8x16_t vhaddq_x_n_s8(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s8)))\n" "int8x16_t vhaddq_x(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u16)))\n" "uint16x8_t vhaddq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u16)))\n" "uint16x8_t vhaddq_x(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u32)))\n" "uint32x4_t vhaddq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u32)))\n" "uint32x4_t vhaddq_x(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u8)))\n" "uint8x16_t vhaddq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u8)))\n" "uint8x16_t vhaddq_x(uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s16)))\n" "int16x8_t vhaddq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s16)))\n" "int16x8_t vhaddq_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s32)))\n" "int32x4_t vhaddq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s32)))\n" "int32x4_t vhaddq_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s8)))\n" "int8x16_t vhaddq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s8)))\n" "int8x16_t vhaddq_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u16)))\n" "uint16x8_t vhaddq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u16)))\n" "uint16x8_t vhaddq_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u32)))\n" "uint32x4_t vhaddq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u32)))\n" "uint32x4_t vhaddq_x(uint32x4_t, uint32x4_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u8)))\n" "uint8x16_t vhaddq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u8)))\n" "uint8x16_t vhaddq_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s16)))\n" "int16x8_t vhcaddq_rot270_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s16)))\n" "int16x8_t vhcaddq_rot270_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s32)))\n" "int32x4_t vhcaddq_rot270_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s32)))\n" "int32x4_t vhcaddq_rot270_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s8)))\n" "int8x16_t vhcaddq_rot270_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s8)))\n" "int8x16_t vhcaddq_rot270_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s16)))\n" "int16x8_t vhcaddq_rot270_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s16)))\n" "int16x8_t vhcaddq_rot270(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s32)))\n" "int32x4_t vhcaddq_rot270_s32(int32x4_t, 
int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s32)))\n" "int32x4_t vhcaddq_rot270(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s8)))\n" "int8x16_t vhcaddq_rot270_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s8)))\n" "int8x16_t vhcaddq_rot270(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s16)))\n" "int16x8_t vhcaddq_rot270_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s16)))\n" "int16x8_t vhcaddq_rot270_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s32)))\n" "int32x4_t vhcaddq_rot270_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s32)))\n" "int32x4_t vhcaddq_rot270_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s8)))\n" "int8x16_t vhcaddq_rot270_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s8)))\n" "int8x16_t vhcaddq_rot270_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s16)))\n" "int16x8_t vhcaddq_rot90_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s16)))\n" "int16x8_t vhcaddq_rot90_m(int16x8_t, int16x8_t, int16x8_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s32)))\n" "int32x4_t vhcaddq_rot90_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s32)))\n" "int32x4_t vhcaddq_rot90_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s8)))\n" "int8x16_t vhcaddq_rot90_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s8)))\n" "int8x16_t vhcaddq_rot90_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s16)))\n" "int16x8_t vhcaddq_rot90_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s16)))\n" "int16x8_t vhcaddq_rot90(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s32)))\n" "int32x4_t vhcaddq_rot90_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s32)))\n" "int32x4_t vhcaddq_rot90(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s8)))\n" "int8x16_t vhcaddq_rot90_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s8)))\n" "int8x16_t vhcaddq_rot90(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s16)))\n" "int16x8_t vhcaddq_rot90_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s16)))\n" "int16x8_t vhcaddq_rot90_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s32)))\n" "int32x4_t vhcaddq_rot90_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s32)))\n" "int32x4_t vhcaddq_rot90_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s8)))\n" "int8x16_t vhcaddq_rot90_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s8)))\n" "int8x16_t vhcaddq_rot90_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s16)))\n" "int16x8_t vhsubq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s16)))\n" "int16x8_t vhsubq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s32)))\n" "int32x4_t vhsubq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s32)))\n" "int32x4_t vhsubq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s8)))\n" "int8x16_t vhsubq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s8)))\n" "int8x16_t vhsubq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u16)))\n" "uint16x8_t vhsubq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u16)))\n" "uint16x8_t vhsubq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u32)))\n" "uint32x4_t vhsubq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u32)))\n" "uint32x4_t vhsubq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u8)))\n" "uint8x16_t vhsubq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u8)))\n" "uint8x16_t vhsubq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s16)))\n" "int16x8_t vhsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s16)))\n" "int16x8_t vhsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s32)))\n" "int32x4_t vhsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s32)))\n" "int32x4_t vhsubq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s8)))\n" "int8x16_t vhsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s8)))\n" "int8x16_t vhsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u16)))\n" "uint16x8_t vhsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u16)))\n" "uint16x8_t vhsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u32)))\n" "uint32x4_t vhsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u32)))\n" "uint32x4_t vhsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u8)))\n" "uint8x16_t vhsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u8)))\n" "uint8x16_t vhsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s16)))\n" "int16x8_t vhsubq_n_s16(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s16)))\n" "int16x8_t vhsubq(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s32)))\n" "int32x4_t vhsubq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s32)))\n" "int32x4_t vhsubq(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s8)))\n" "int8x16_t vhsubq_n_s8(int8x16_t, 
int8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s8)))\n" "int8x16_t vhsubq(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u16)))\n" "uint16x8_t vhsubq_n_u16(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u16)))\n" "uint16x8_t vhsubq(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u32)))\n" "uint32x4_t vhsubq_n_u32(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u32)))\n" "uint32x4_t vhsubq(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u8)))\n" "uint8x16_t vhsubq_n_u8(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u8)))\n" "uint8x16_t vhsubq(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s16)))\n" "int16x8_t vhsubq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s16)))\n" "int16x8_t vhsubq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s32)))\n" "int32x4_t vhsubq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s32)))\n" "int32x4_t vhsubq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s8)))\n" "int8x16_t vhsubq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s8)))\n" "int8x16_t vhsubq(int8x16_t, int8x16_t);\n" "static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u16)))\n" "uint16x8_t vhsubq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u16)))\n" "uint16x8_t vhsubq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u32)))\n" "uint32x4_t vhsubq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u32)))\n" "uint32x4_t vhsubq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u8)))\n" "uint8x16_t vhsubq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u8)))\n" "uint8x16_t vhsubq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s16)))\n" "int16x8_t vhsubq_x_n_s16(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s16)))\n" "int16x8_t vhsubq_x(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s32)))\n" "int32x4_t vhsubq_x_n_s32(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s32)))\n" "int32x4_t vhsubq_x(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s8)))\n" "int8x16_t vhsubq_x_n_s8(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s8)))\n" "int8x16_t vhsubq_x(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u16)))\n" "uint16x8_t vhsubq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u16)))\n" "uint16x8_t vhsubq_x(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u32)))\n" "uint32x4_t vhsubq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u32)))\n" "uint32x4_t vhsubq_x(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u8)))\n" "uint8x16_t vhsubq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u8)))\n" "uint8x16_t vhsubq_x(uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s16)))\n" "int16x8_t vhsubq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s16)))\n" "int16x8_t vhsubq_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s32)))\n" "int32x4_t vhsubq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s32)))\n" "int32x4_t vhsubq_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s8)))\n" "int8x16_t vhsubq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s8)))\n" "int8x16_t vhsubq_x(int8x16_t, int8x16_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u16)))\n" "uint16x8_t vhsubq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u16)))\n" "uint16x8_t vhsubq_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u32)))\n" "uint32x4_t vhsubq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u32)))\n" "uint32x4_t vhsubq_x(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u8)))\n" "uint8x16_t vhsubq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u8)))\n" "uint8x16_t vhsubq_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u16)))\n" "uint16x8_t vidupq_m_n_u16(uint16x8_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u16)))\n" "uint16x8_t vidupq_m(uint16x8_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u32)))\n" "uint32x4_t vidupq_m_n_u32(uint32x4_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u32)))\n" "uint32x4_t vidupq_m(uint32x4_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u8)))\n" "uint8x16_t vidupq_m_n_u8(uint8x16_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u8)))\n" "uint8x16_t vidupq_m(uint8x16_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u16)))\n" "uint16x8_t vidupq_m_wb_u16(uint16x8_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u16)))\n" "uint16x8_t vidupq_m(uint16x8_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u32)))\n" "uint32x4_t vidupq_m_wb_u32(uint32x4_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u32)))\n" "uint32x4_t vidupq_m(uint32x4_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u8)))\n" "uint8x16_t vidupq_m_wb_u8(uint8x16_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u8)))\n" "uint8x16_t vidupq_m(uint8x16_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u16)))\n" "uint16x8_t vidupq_n_u16(uint32_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u16)))\n" "uint16x8_t vidupq_u16(uint32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u32)))\n" "uint32x4_t vidupq_n_u32(uint32_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u32)))\n" "uint32x4_t vidupq_u32(uint32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u8)))\n" "uint8x16_t vidupq_n_u8(uint32_t, int);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u8)))\n" "uint8x16_t vidupq_u8(uint32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u16)))\n" "uint16x8_t vidupq_wb_u16(uint32_t *, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u16)))\n" "uint16x8_t vidupq_u16(uint32_t *, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u32)))\n" "uint32x4_t vidupq_wb_u32(uint32_t *, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u32)))\n" "uint32x4_t vidupq_u32(uint32_t *, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u8)))\n" "uint8x16_t vidupq_wb_u8(uint32_t *, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u8)))\n" "uint8x16_t vidupq_u8(uint32_t *, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u16)))\n" "uint16x8_t vidupq_x_n_u16(uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u16)))\n" "uint16x8_t vidupq_x_u16(uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u32)))\n" "uint32x4_t vidupq_x_n_u32(uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u32)))\n" "uint32x4_t vidupq_x_u32(uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u8)))\n" "uint8x16_t vidupq_x_n_u8(uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u8)))\n" "uint8x16_t vidupq_x_u8(uint32_t, int, 
mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u16)))\n" "uint16x8_t vidupq_x_wb_u16(uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u16)))\n" "uint16x8_t vidupq_x_u16(uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u32)))\n" "uint32x4_t vidupq_x_wb_u32(uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u32)))\n" "uint32x4_t vidupq_x_u32(uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u8)))\n" "uint8x16_t vidupq_x_wb_u8(uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u8)))\n" "uint8x16_t vidupq_x_u8(uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u16)))\n" "uint16x8_t viwdupq_m_n_u16(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u16)))\n" "uint16x8_t viwdupq_m(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u32)))\n" "uint32x4_t viwdupq_m_n_u32(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u32)))\n" "uint32x4_t viwdupq_m(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u8)))\n" "uint8x16_t viwdupq_m_n_u8(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u8)))\n" "uint8x16_t viwdupq_m(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u16)))\n" "uint16x8_t viwdupq_m_wb_u16(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u16)))\n" "uint16x8_t viwdupq_m(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u32)))\n" "uint32x4_t viwdupq_m_wb_u32(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u32)))\n" "uint32x4_t viwdupq_m(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u8)))\n" "uint8x16_t viwdupq_m_wb_u8(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u8)))\n" "uint8x16_t viwdupq_m(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u16)))\n" "uint16x8_t viwdupq_n_u16(uint32_t, uint32_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u16)))\n" "uint16x8_t viwdupq_u16(uint32_t, uint32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u32)))\n" "uint32x4_t viwdupq_n_u32(uint32_t, uint32_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u32)))\n" "uint32x4_t viwdupq_u32(uint32_t, uint32_t, int);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u8)))\n" "uint8x16_t viwdupq_n_u8(uint32_t, uint32_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u8)))\n" "uint8x16_t viwdupq_u8(uint32_t, uint32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u16)))\n" "uint16x8_t viwdupq_wb_u16(uint32_t *, uint32_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u16)))\n" "uint16x8_t viwdupq_u16(uint32_t *, uint32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u32)))\n" "uint32x4_t viwdupq_wb_u32(uint32_t *, uint32_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u32)))\n" "uint32x4_t viwdupq_u32(uint32_t *, uint32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u8)))\n" "uint8x16_t viwdupq_wb_u8(uint32_t *, uint32_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u8)))\n" "uint8x16_t viwdupq_u8(uint32_t *, uint32_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u16)))\n" "uint16x8_t viwdupq_x_n_u16(uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u16)))\n" "uint16x8_t viwdupq_x_u16(uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u32)))\n" "uint32x4_t viwdupq_x_n_u32(uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u32)))\n" "uint32x4_t viwdupq_x_u32(uint32_t, uint32_t, int, mve_pred16_t);\n" 
"static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u8)))\n" "uint8x16_t viwdupq_x_n_u8(uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u8)))\n" "uint8x16_t viwdupq_x_u8(uint32_t, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u16)))\n" "uint16x8_t viwdupq_x_wb_u16(uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u16)))\n" "uint16x8_t viwdupq_x_u16(uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u32)))\n" "uint32x4_t viwdupq_x_wb_u32(uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u32)))\n" "uint32x4_t viwdupq_x_u32(uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u8)))\n" "uint8x16_t viwdupq_x_wb_u8(uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u8)))\n" "uint8x16_t viwdupq_x_u8(uint32_t *, uint32_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s16)))\n" "int16x8_t vld1q_s16(const int16_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s16)))\n" "int16x8_t vld1q(const int16_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s32)))\n" "int32x4_t vld1q_s32(const int32_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s32)))\n" "int32x4_t 
vld1q(const int32_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s8)))\n" "int8x16_t vld1q_s8(const int8_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s8)))\n" "int8x16_t vld1q(const int8_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u16)))\n" "uint16x8_t vld1q_u16(const uint16_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u16)))\n" "uint16x8_t vld1q(const uint16_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u32)))\n" "uint32x4_t vld1q_u32(const uint32_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u32)))\n" "uint32x4_t vld1q(const uint32_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u8)))\n" "uint8x16_t vld1q_u8(const uint8_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u8)))\n" "uint8x16_t vld1q(const uint8_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s16)))\n" "int16x8_t vld1q_z_s16(const int16_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s16)))\n" "int16x8_t vld1q_z(const int16_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s32)))\n" "int32x4_t vld1q_z_s32(const int32_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s32)))\n" "int32x4_t vld1q_z(const int32_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s8)))\n" "int8x16_t vld1q_z_s8(const int8_t *, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s8)))\n" "int8x16_t vld1q_z(const int8_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u16)))\n" "uint16x8_t vld1q_z_u16(const uint16_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u16)))\n" "uint16x8_t vld1q_z(const uint16_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u32)))\n" "uint32x4_t vld1q_z_u32(const uint32_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u32)))\n" "uint32x4_t vld1q_z(const uint32_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u8)))\n" "uint8x16_t vld1q_z_u8(const uint8_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u8)))\n" "uint8x16_t vld1q_z(const uint8_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s16)))\n" "int16x8x2_t vld2q_s16(const int16_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s16)))\n" "int16x8x2_t vld2q(const int16_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s32)))\n" "int32x4x2_t vld2q_s32(const int32_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s32)))\n" "int32x4x2_t vld2q(const int32_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s8)))\n" "int8x16x2_t vld2q_s8(const int8_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s8)))\n" "int8x16x2_t vld2q(const int8_t *);\n" "static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u16)))\n" "uint16x8x2_t vld2q_u16(const uint16_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u16)))\n" "uint16x8x2_t vld2q(const uint16_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u32)))\n" "uint32x4x2_t vld2q_u32(const uint32_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u32)))\n" "uint32x4x2_t vld2q(const uint32_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u8)))\n" "uint8x16x2_t vld2q_u8(const uint8_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u8)))\n" "uint8x16x2_t vld2q(const uint8_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s16)))\n" "int16x8x4_t vld4q_s16(const int16_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s16)))\n" "int16x8x4_t vld4q(const int16_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s32)))\n" "int32x4x4_t vld4q_s32(const int32_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s32)))\n" "int32x4x4_t vld4q(const int32_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s8)))\n" "int8x16x4_t vld4q_s8(const int8_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s8)))\n" "int8x16x4_t vld4q(const int8_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u16)))\n" "uint16x8x4_t vld4q_u16(const uint16_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u16)))\n" "uint16x8x4_t 
vld4q(const uint16_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u32)))\n" "uint32x4x4_t vld4q_u32(const uint32_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u32)))\n" "uint32x4x4_t vld4q(const uint32_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u8)))\n" "uint8x16x4_t vld4q_u8(const uint8_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u8)))\n" "uint8x16x4_t vld4q(const uint8_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s16)))\n" "int16x8_t vldrbq_gather_offset_s16(const int8_t *, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s16)))\n" "int16x8_t vldrbq_gather_offset(const int8_t *, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s32)))\n" "int32x4_t vldrbq_gather_offset_s32(const int8_t *, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s32)))\n" "int32x4_t vldrbq_gather_offset(const int8_t *, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s8)))\n" "int8x16_t vldrbq_gather_offset_s8(const int8_t *, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s8)))\n" "int8x16_t vldrbq_gather_offset(const int8_t *, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u16)))\n" "uint16x8_t vldrbq_gather_offset_u16(const uint8_t *, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u16)))\n" "uint16x8_t vldrbq_gather_offset(const uint8_t *, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u32)))\n" "uint32x4_t vldrbq_gather_offset_u32(const uint8_t *, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u32)))\n" "uint32x4_t vldrbq_gather_offset(const uint8_t *, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u8)))\n" "uint8x16_t vldrbq_gather_offset_u8(const uint8_t *, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u8)))\n" "uint8x16_t vldrbq_gather_offset(const uint8_t *, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s16)))\n" "int16x8_t vldrbq_gather_offset_z_s16(const int8_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s16)))\n" "int16x8_t vldrbq_gather_offset_z(const int8_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s32)))\n" "int32x4_t vldrbq_gather_offset_z_s32(const int8_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s32)))\n" "int32x4_t vldrbq_gather_offset_z(const int8_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s8)))\n" "int8x16_t vldrbq_gather_offset_z_s8(const int8_t *, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s8)))\n" "int8x16_t vldrbq_gather_offset_z(const int8_t *, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u16)))\n" "uint16x8_t vldrbq_gather_offset_z_u16(const uint8_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u16)))\n" "uint16x8_t vldrbq_gather_offset_z(const uint8_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u32)))\n" "uint32x4_t vldrbq_gather_offset_z_u32(const uint8_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u32)))\n" "uint32x4_t vldrbq_gather_offset_z(const uint8_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u8)))\n" "uint8x16_t vldrbq_gather_offset_z_u8(const uint8_t *, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u8)))\n" "uint8x16_t vldrbq_gather_offset_z(const uint8_t *, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_s16)))\n" "int16x8_t vldrbq_s16(const int8_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_s32)))\n" "int32x4_t vldrbq_s32(const int8_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_s8)))\n" "int8x16_t vldrbq_s8(const int8_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_u16)))\n" "uint16x8_t vldrbq_u16(const uint8_t *);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_u32)))\n" "uint32x4_t vldrbq_u32(const uint8_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_u8)))\n" "uint8x16_t vldrbq_u8(const uint8_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_s16)))\n" "int16x8_t vldrbq_z_s16(const int8_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_s32)))\n" "int32x4_t vldrbq_z_s32(const int8_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_s8)))\n" "int8x16_t vldrbq_z_s8(const int8_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_u16)))\n" "uint16x8_t vldrbq_z_u16(const uint8_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_u32)))\n" "uint32x4_t vldrbq_z_u32(const uint8_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_u8)))\n" "uint8x16_t vldrbq_z_u8(const uint8_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_s64)))\n" "int64x2_t vldrdq_gather_base_s64(uint64x2_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_u64)))\n" "uint64x2_t vldrdq_gather_base_u64(uint64x2_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_wb_s64)))\n" "int64x2_t vldrdq_gather_base_wb_s64(uint64x2_t *, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_wb_u64)))\n" "uint64x2_t vldrdq_gather_base_wb_u64(uint64x2_t *, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_wb_z_s64)))\n" "int64x2_t 
vldrdq_gather_base_wb_z_s64(uint64x2_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_wb_z_u64)))\n" "uint64x2_t vldrdq_gather_base_wb_z_u64(uint64x2_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_z_s64)))\n" "int64x2_t vldrdq_gather_base_z_s64(uint64x2_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_z_u64)))\n" "uint64x2_t vldrdq_gather_base_z_u64(uint64x2_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_s64)))\n" "int64x2_t vldrdq_gather_offset_s64(const int64_t *, uint64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_s64)))\n" "int64x2_t vldrdq_gather_offset(const int64_t *, uint64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_u64)))\n" "uint64x2_t vldrdq_gather_offset_u64(const uint64_t *, uint64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_u64)))\n" "uint64x2_t vldrdq_gather_offset(const uint64_t *, uint64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_z_s64)))\n" "int64x2_t vldrdq_gather_offset_z_s64(const int64_t *, uint64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_z_s64)))\n" "int64x2_t vldrdq_gather_offset_z(const int64_t *, uint64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_z_u64)))\n" "uint64x2_t vldrdq_gather_offset_z_u64(const uint64_t *, uint64x2_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_z_u64)))\n" "uint64x2_t vldrdq_gather_offset_z(const uint64_t *, uint64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_s64)))\n" "int64x2_t vldrdq_gather_shifted_offset_s64(const int64_t *, uint64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_s64)))\n" "int64x2_t vldrdq_gather_shifted_offset(const int64_t *, uint64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_u64)))\n" "uint64x2_t vldrdq_gather_shifted_offset_u64(const uint64_t *, uint64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_u64)))\n" "uint64x2_t vldrdq_gather_shifted_offset(const uint64_t *, uint64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_s64)))\n" "int64x2_t vldrdq_gather_shifted_offset_z_s64(const int64_t *, uint64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_s64)))\n" "int64x2_t vldrdq_gather_shifted_offset_z(const int64_t *, uint64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_u64)))\n" "uint64x2_t vldrdq_gather_shifted_offset_z_u64(const uint64_t *, uint64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_u64)))\n" "uint64x2_t vldrdq_gather_shifted_offset_z(const uint64_t *, uint64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_s16)))\n" 
"int16x8_t vldrhq_gather_offset_s16(const int16_t *, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_s16)))\n" "int16x8_t vldrhq_gather_offset(const int16_t *, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_s32)))\n" "int32x4_t vldrhq_gather_offset_s32(const int16_t *, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_s32)))\n" "int32x4_t vldrhq_gather_offset(const int16_t *, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_u16)))\n" "uint16x8_t vldrhq_gather_offset_u16(const uint16_t *, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_u16)))\n" "uint16x8_t vldrhq_gather_offset(const uint16_t *, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_u32)))\n" "uint32x4_t vldrhq_gather_offset_u32(const uint16_t *, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_u32)))\n" "uint32x4_t vldrhq_gather_offset(const uint16_t *, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s16)))\n" "int16x8_t vldrhq_gather_offset_z_s16(const int16_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s16)))\n" "int16x8_t vldrhq_gather_offset_z(const int16_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s32)))\n" "int32x4_t vldrhq_gather_offset_z_s32(const int16_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s32)))\n" "int32x4_t vldrhq_gather_offset_z(const int16_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u16)))\n" "uint16x8_t vldrhq_gather_offset_z_u16(const uint16_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u16)))\n" "uint16x8_t vldrhq_gather_offset_z(const uint16_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u32)))\n" "uint32x4_t vldrhq_gather_offset_z_u32(const uint16_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u32)))\n" "uint32x4_t vldrhq_gather_offset_z(const uint16_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s16)))\n" "int16x8_t vldrhq_gather_shifted_offset_s16(const int16_t *, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s16)))\n" "int16x8_t vldrhq_gather_shifted_offset(const int16_t *, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s32)))\n" "int32x4_t vldrhq_gather_shifted_offset_s32(const int16_t *, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s32)))\n" "int32x4_t vldrhq_gather_shifted_offset(const int16_t *, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u16)))\n" "uint16x8_t vldrhq_gather_shifted_offset_u16(const uint16_t *, 
uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u16)))\n" "uint16x8_t vldrhq_gather_shifted_offset(const uint16_t *, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u32)))\n" "uint32x4_t vldrhq_gather_shifted_offset_u32(const uint16_t *, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u32)))\n" "uint32x4_t vldrhq_gather_shifted_offset(const uint16_t *, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s16)))\n" "int16x8_t vldrhq_gather_shifted_offset_z_s16(const int16_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s16)))\n" "int16x8_t vldrhq_gather_shifted_offset_z(const int16_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s32)))\n" "int32x4_t vldrhq_gather_shifted_offset_z_s32(const int16_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s32)))\n" "int32x4_t vldrhq_gather_shifted_offset_z(const int16_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u16)))\n" "uint16x8_t vldrhq_gather_shifted_offset_z_u16(const uint16_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u16)))\n" "uint16x8_t vldrhq_gather_shifted_offset_z(const uint16_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u32)))\n" "uint32x4_t vldrhq_gather_shifted_offset_z_u32(const uint16_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u32)))\n" "uint32x4_t vldrhq_gather_shifted_offset_z(const uint16_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_s16)))\n" "int16x8_t vldrhq_s16(const int16_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_s32)))\n" "int32x4_t vldrhq_s32(const int16_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_u16)))\n" "uint16x8_t vldrhq_u16(const uint16_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_u32)))\n" "uint32x4_t vldrhq_u32(const uint16_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_s16)))\n" "int16x8_t vldrhq_z_s16(const int16_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_s32)))\n" "int32x4_t vldrhq_z_s32(const int16_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_u16)))\n" "uint16x8_t vldrhq_z_u16(const uint16_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_u32)))\n" "uint32x4_t vldrhq_z_u32(const uint16_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_s32)))\n" "int32x4_t vldrwq_gather_base_s32(uint32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_u32)))\n" "uint32x4_t vldrwq_gather_base_u32(uint32x4_t, int);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_s32)))\n" "int32x4_t vldrwq_gather_base_wb_s32(uint32x4_t *, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_u32)))\n" "uint32x4_t vldrwq_gather_base_wb_u32(uint32x4_t *, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_s32)))\n" "int32x4_t vldrwq_gather_base_wb_z_s32(uint32x4_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_u32)))\n" "uint32x4_t vldrwq_gather_base_wb_z_u32(uint32x4_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_z_s32)))\n" "int32x4_t vldrwq_gather_base_z_s32(uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_z_u32)))\n" "uint32x4_t vldrwq_gather_base_z_u32(uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_s32)))\n" "int32x4_t vldrwq_gather_offset_s32(const int32_t *, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_s32)))\n" "int32x4_t vldrwq_gather_offset(const int32_t *, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_u32)))\n" "uint32x4_t vldrwq_gather_offset_u32(const uint32_t *, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_u32)))\n" "uint32x4_t vldrwq_gather_offset(const uint32_t *, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_s32)))\n" "int32x4_t vldrwq_gather_offset_z_s32(const int32_t *, uint32x4_t, mve_pred16_t);\n" 
"static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_s32)))\n" "int32x4_t vldrwq_gather_offset_z(const int32_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_u32)))\n" "uint32x4_t vldrwq_gather_offset_z_u32(const uint32_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_u32)))\n" "uint32x4_t vldrwq_gather_offset_z(const uint32_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_s32)))\n" "int32x4_t vldrwq_gather_shifted_offset_s32(const int32_t *, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_s32)))\n" "int32x4_t vldrwq_gather_shifted_offset(const int32_t *, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_u32)))\n" "uint32x4_t vldrwq_gather_shifted_offset_u32(const uint32_t *, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_u32)))\n" "uint32x4_t vldrwq_gather_shifted_offset(const uint32_t *, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_s32)))\n" "int32x4_t vldrwq_gather_shifted_offset_z_s32(const int32_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_s32)))\n" "int32x4_t vldrwq_gather_shifted_offset_z(const int32_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_u32)))\n" "uint32x4_t 
vldrwq_gather_shifted_offset_z_u32(const uint32_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_u32)))\n" "uint32x4_t vldrwq_gather_shifted_offset_z(const uint32_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_s32)))\n" "int32x4_t vldrwq_s32(const int32_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_u32)))\n" "uint32x4_t vldrwq_u32(const uint32_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_z_s32)))\n" "int32x4_t vldrwq_z_s32(const int32_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_z_u32)))\n" "uint32x4_t vldrwq_z_u32(const uint32_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s16)))\n" "uint16x8_t vmaxaq_m_s16(uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s16)))\n" "uint16x8_t vmaxaq_m(uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s32)))\n" "uint32x4_t vmaxaq_m_s32(uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s32)))\n" "uint32x4_t vmaxaq_m(uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s8)))\n" "uint8x16_t vmaxaq_m_s8(uint8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s8)))\n" "uint8x16_t vmaxaq_m(uint8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s16)))\n" "uint16x8_t vmaxaq_s16(uint16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s16)))\n" "uint16x8_t vmaxaq(uint16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s32)))\n" "uint32x4_t vmaxaq_s32(uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s32)))\n" "uint32x4_t vmaxaq(uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s8)))\n" "uint8x16_t vmaxaq_s8(uint8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s8)))\n" "uint8x16_t vmaxaq(uint8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s16)))\n" "uint16_t vmaxavq_p_s16(uint16_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s16)))\n" "uint16_t vmaxavq_p(uint16_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s32)))\n" "uint32_t vmaxavq_p_s32(uint32_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s32)))\n" "uint32_t vmaxavq_p(uint32_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s8)))\n" "uint8_t vmaxavq_p_s8(uint8_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s8)))\n" "uint8_t vmaxavq_p(uint8_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s16)))\n" "uint16_t 
vmaxavq_s16(uint16_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s16)))\n" "uint16_t vmaxavq(uint16_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s32)))\n" "uint32_t vmaxavq_s32(uint32_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s32)))\n" "uint32_t vmaxavq(uint32_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s8)))\n" "uint8_t vmaxavq_s8(uint8_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s8)))\n" "uint8_t vmaxavq(uint8_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s16)))\n" "int16x8_t vmaxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s16)))\n" "int16x8_t vmaxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s32)))\n" "int32x4_t vmaxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s32)))\n" "int32x4_t vmaxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s8)))\n" "int8x16_t vmaxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s8)))\n" "int8x16_t vmaxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u16)))\n" "uint16x8_t vmaxq_m_u16(uint16x8_t, uint16x8_t, 
uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u16)))\n" "uint16x8_t vmaxq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u32)))\n" "uint32x4_t vmaxq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u32)))\n" "uint32x4_t vmaxq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u8)))\n" "uint8x16_t vmaxq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u8)))\n" "uint8x16_t vmaxq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s16)))\n" "int16x8_t vmaxq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s16)))\n" "int16x8_t vmaxq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s32)))\n" "int32x4_t vmaxq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s32)))\n" "int32x4_t vmaxq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s8)))\n" "int8x16_t vmaxq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s8)))\n" "int8x16_t vmaxq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u16)))\n" "uint16x8_t vmaxq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u16)))\n" "uint16x8_t vmaxq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u32)))\n" "uint32x4_t vmaxq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u32)))\n" "uint32x4_t vmaxq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u8)))\n" "uint8x16_t vmaxq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u8)))\n" "uint8x16_t vmaxq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s16)))\n" "int16x8_t vmaxq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s16)))\n" "int16x8_t vmaxq_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s32)))\n" "int32x4_t vmaxq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s32)))\n" "int32x4_t vmaxq_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s8)))\n" "int8x16_t vmaxq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s8)))\n" "int8x16_t vmaxq_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u16)))\n" "uint16x8_t vmaxq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u16)))\n" "uint16x8_t vmaxq_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u32)))\n" "uint32x4_t vmaxq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u32)))\n" "uint32x4_t vmaxq_x(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u8)))\n" "uint8x16_t vmaxq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u8)))\n" "uint8x16_t vmaxq_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s16)))\n" "int16_t vmaxvq_p_s16(int16_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s16)))\n" "int16_t vmaxvq_p(int16_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s32)))\n" "int32_t vmaxvq_p_s32(int32_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s32)))\n" "int32_t vmaxvq_p(int32_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s8)))\n" "int8_t vmaxvq_p_s8(int8_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s8)))\n" "int8_t vmaxvq_p(int8_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u16)))\n" "uint16_t vmaxvq_p_u16(uint16_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u16)))\n" "uint16_t vmaxvq_p(uint16_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u32)))\n" "uint32_t vmaxvq_p_u32(uint32_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u32)))\n" "uint32_t vmaxvq_p(uint32_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u8)))\n" "uint8_t vmaxvq_p_u8(uint8_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u8)))\n" "uint8_t vmaxvq_p(uint8_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s16)))\n" "int16_t vmaxvq_s16(int16_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s16)))\n" "int16_t vmaxvq(int16_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s32)))\n" "int32_t vmaxvq_s32(int32_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s32)))\n" "int32_t vmaxvq(int32_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s8)))\n" "int8_t vmaxvq_s8(int8_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s8)))\n" "int8_t vmaxvq(int8_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u16)))\n" "uint16_t vmaxvq_u16(uint16_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u16)))\n" "uint16_t vmaxvq(uint16_t, uint16x8_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u32)))\n" "uint32_t vmaxvq_u32(uint32_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u32)))\n" "uint32_t vmaxvq(uint32_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u8)))\n" "uint8_t vmaxvq_u8(uint8_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u8)))\n" "uint8_t vmaxvq(uint8_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s16)))\n" "uint16x8_t vminaq_m_s16(uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s16)))\n" "uint16x8_t vminaq_m(uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s32)))\n" "uint32x4_t vminaq_m_s32(uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s32)))\n" "uint32x4_t vminaq_m(uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s8)))\n" "uint8x16_t vminaq_m_s8(uint8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s8)))\n" "uint8x16_t vminaq_m(uint8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s16)))\n" "uint16x8_t vminaq_s16(uint16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s16)))\n" "uint16x8_t vminaq(uint16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s32)))\n" "uint32x4_t 
vminaq_s32(uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s32)))\n" "uint32x4_t vminaq(uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s8)))\n" "uint8x16_t vminaq_s8(uint8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s8)))\n" "uint8x16_t vminaq(uint8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s16)))\n" "uint16_t vminavq_p_s16(uint16_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s16)))\n" "uint16_t vminavq_p(uint16_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s32)))\n" "uint32_t vminavq_p_s32(uint32_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s32)))\n" "uint32_t vminavq_p(uint32_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s8)))\n" "uint8_t vminavq_p_s8(uint8_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s8)))\n" "uint8_t vminavq_p(uint8_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s16)))\n" "uint16_t vminavq_s16(uint16_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s16)))\n" "uint16_t vminavq(uint16_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s32)))\n" "uint32_t vminavq_s32(uint32_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s32)))\n" "uint32_t vminavq(uint32_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s8)))\n" "uint8_t vminavq_s8(uint8_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s8)))\n" "uint8_t vminavq(uint8_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s16)))\n" "int16x8_t vminq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s16)))\n" "int16x8_t vminq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s32)))\n" "int32x4_t vminq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s32)))\n" "int32x4_t vminq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s8)))\n" "int8x16_t vminq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s8)))\n" "int8x16_t vminq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u16)))\n" "uint16x8_t vminq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u16)))\n" "uint16x8_t vminq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u32)))\n" "uint32x4_t vminq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" 
"static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u32)))\n" "uint32x4_t vminq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u8)))\n" "uint8x16_t vminq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u8)))\n" "uint8x16_t vminq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_s16)))\n" "int16x8_t vminq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_s16)))\n" "int16x8_t vminq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_s32)))\n" "int32x4_t vminq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_s32)))\n" "int32x4_t vminq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_s8)))\n" "int8x16_t vminq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_s8)))\n" "int8x16_t vminq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_u16)))\n" "uint16x8_t vminq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_u16)))\n" "uint16x8_t vminq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_u32)))\n" "uint32x4_t vminq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_u32)))\n" 
"uint32x4_t vminq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_u8)))\n" "uint8x16_t vminq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_u8)))\n" "uint8x16_t vminq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s16)))\n" "int16x8_t vminq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s16)))\n" "int16x8_t vminq_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s32)))\n" "int32x4_t vminq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s32)))\n" "int32x4_t vminq_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s8)))\n" "int8x16_t vminq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s8)))\n" "int8x16_t vminq_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u16)))\n" "uint16x8_t vminq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u16)))\n" "uint16x8_t vminq_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u32)))\n" "uint32x4_t vminq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u32)))\n" "uint32x4_t vminq_x(uint32x4_t, uint32x4_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u8)))\n" "uint8x16_t vminq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u8)))\n" "uint8x16_t vminq_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s16)))\n" "int16_t vminvq_p_s16(int16_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s16)))\n" "int16_t vminvq_p(int16_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s32)))\n" "int32_t vminvq_p_s32(int32_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s32)))\n" "int32_t vminvq_p(int32_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s8)))\n" "int8_t vminvq_p_s8(int8_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s8)))\n" "int8_t vminvq_p(int8_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u16)))\n" "uint16_t vminvq_p_u16(uint16_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u16)))\n" "uint16_t vminvq_p(uint16_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u32)))\n" "uint32_t vminvq_p_u32(uint32_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u32)))\n" "uint32_t vminvq_p(uint32_t, uint32x4_t, mve_pred16_t);\n" 
"static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u8)))\n" "uint8_t vminvq_p_u8(uint8_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u8)))\n" "uint8_t vminvq_p(uint8_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s16)))\n" "int16_t vminvq_s16(int16_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s16)))\n" "int16_t vminvq(int16_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s32)))\n" "int32_t vminvq_s32(int32_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s32)))\n" "int32_t vminvq(int32_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s8)))\n" "int8_t vminvq_s8(int8_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s8)))\n" "int8_t vminvq(int8_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u16)))\n" "uint16_t vminvq_u16(uint16_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u16)))\n" "uint16_t vminvq(uint16_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u32)))\n" "uint32_t vminvq_u32(uint32_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u32)))\n" "uint32_t vminvq(uint32_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u8)))\n" "uint8_t vminvq_u8(uint8_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u8)))\n" "uint8_t vminvq(uint8_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s16)))\n" "int32_t vmladavaq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s16)))\n" "int32_t vmladavaq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s32)))\n" "int32_t vmladavaq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s32)))\n" "int32_t vmladavaq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s8)))\n" "int32_t vmladavaq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s8)))\n" "int32_t vmladavaq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u16)))\n" "uint32_t vmladavaq_p_u16(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u16)))\n" "uint32_t vmladavaq_p(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u32)))\n" "uint32_t vmladavaq_p_u32(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u32)))\n" "uint32_t vmladavaq_p(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u8)))\n" "uint32_t vmladavaq_p_u8(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u8)))\n" "uint32_t vmladavaq_p(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s16)))\n" "int32_t vmladavaq_s16(int32_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s16)))\n" "int32_t vmladavaq(int32_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s32)))\n" "int32_t vmladavaq_s32(int32_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s32)))\n" "int32_t vmladavaq(int32_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s8)))\n" "int32_t vmladavaq_s8(int32_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s8)))\n" "int32_t vmladavaq(int32_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u16)))\n" "uint32_t vmladavaq_u16(uint32_t, uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u16)))\n" "uint32_t vmladavaq(uint32_t, uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u32)))\n" "uint32_t vmladavaq_u32(uint32_t, uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u32)))\n" "uint32_t vmladavaq(uint32_t, uint32x4_t, uint32x4_t);\n" "static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u8)))\n" "uint32_t vmladavaq_u8(uint32_t, uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u8)))\n" "uint32_t vmladavaq(uint32_t, uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s16)))\n" "int32_t vmladavaxq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s16)))\n" "int32_t vmladavaxq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s32)))\n" "int32_t vmladavaxq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s32)))\n" "int32_t vmladavaxq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s8)))\n" "int32_t vmladavaxq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s8)))\n" "int32_t vmladavaxq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s16)))\n" "int32_t vmladavaxq_s16(int32_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s16)))\n" "int32_t vmladavaxq(int32_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s32)))\n" "int32_t vmladavaxq_s32(int32_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s32)))\n" "int32_t vmladavaxq(int32_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s8)))\n" "int32_t vmladavaxq_s8(int32_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s8)))\n" "int32_t vmladavaxq(int32_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s16)))\n" "int32_t vmladavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s16)))\n" "int32_t vmladavq_p(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s32)))\n" "int32_t vmladavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s32)))\n" "int32_t vmladavq_p(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s8)))\n" "int32_t vmladavq_p_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s8)))\n" "int32_t vmladavq_p(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u16)))\n" "uint32_t vmladavq_p_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u16)))\n" "uint32_t vmladavq_p(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u32)))\n" "uint32_t vmladavq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u32)))\n" "uint32_t vmladavq_p(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u8)))\n" "uint32_t vmladavq_p_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u8)))\n" "uint32_t vmladavq_p(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s16)))\n" "int32_t vmladavq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s16)))\n" "int32_t vmladavq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s32)))\n" "int32_t vmladavq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s32)))\n" "int32_t vmladavq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s8)))\n" "int32_t vmladavq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s8)))\n" "int32_t vmladavq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u16)))\n" "uint32_t vmladavq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u16)))\n" "uint32_t vmladavq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u32)))\n" "uint32_t vmladavq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u32)))\n" "uint32_t 
vmladavq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u8)))\n" "uint32_t vmladavq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u8)))\n" "uint32_t vmladavq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s16)))\n" "int32_t vmladavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s16)))\n" "int32_t vmladavxq_p(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s32)))\n" "int32_t vmladavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s32)))\n" "int32_t vmladavxq_p(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s8)))\n" "int32_t vmladavxq_p_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s8)))\n" "int32_t vmladavxq_p(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s16)))\n" "int32_t vmladavxq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s16)))\n" "int32_t vmladavxq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s32)))\n" "int32_t vmladavxq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s32)))\n" "int32_t vmladavxq(int32x4_t, int32x4_t);\n" "static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s8)))\n" "int32_t vmladavxq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s8)))\n" "int32_t vmladavxq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_s16)))\n" "int64_t vmlaldavaq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_s16)))\n" "int64_t vmlaldavaq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_s32)))\n" "int64_t vmlaldavaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_s32)))\n" "int64_t vmlaldavaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_u16)))\n" "uint64_t vmlaldavaq_p_u16(uint64_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_u16)))\n" "uint64_t vmlaldavaq_p(uint64_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_u32)))\n" "uint64_t vmlaldavaq_p_u32(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_u32)))\n" "uint64_t vmlaldavaq_p(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_s16)))\n" "int64_t vmlaldavaq_s16(int64_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_s16)))\n" "int64_t vmlaldavaq(int64_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_s32)))\n" "int64_t vmlaldavaq_s32(int64_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_s32)))\n" "int64_t vmlaldavaq(int64_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_u16)))\n" "uint64_t vmlaldavaq_u16(uint64_t, uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_u16)))\n" "uint64_t vmlaldavaq(uint64_t, uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_u32)))\n" "uint64_t vmlaldavaq_u32(uint64_t, uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_u32)))\n" "uint64_t vmlaldavaq(uint64_t, uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_p_s16)))\n" "int64_t vmlaldavaxq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_p_s16)))\n" "int64_t vmlaldavaxq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_p_s32)))\n" "int64_t vmlaldavaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_p_s32)))\n" "int64_t vmlaldavaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_s16)))\n" "int64_t vmlaldavaxq_s16(int64_t, 
int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_s16)))\n" "int64_t vmlaldavaxq(int64_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_s32)))\n" "int64_t vmlaldavaxq_s32(int64_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_s32)))\n" "int64_t vmlaldavaxq(int64_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_s16)))\n" "int64_t vmlaldavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_s16)))\n" "int64_t vmlaldavq_p(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_s32)))\n" "int64_t vmlaldavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_s32)))\n" "int64_t vmlaldavq_p(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_u16)))\n" "uint64_t vmlaldavq_p_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_u16)))\n" "uint64_t vmlaldavq_p(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_u32)))\n" "uint64_t vmlaldavq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_u32)))\n" "uint64_t vmlaldavq_p(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_s16)))\n" "int64_t vmlaldavq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_s16)))\n" "int64_t vmlaldavq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_s32)))\n" "int64_t vmlaldavq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_s32)))\n" "int64_t vmlaldavq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_u16)))\n" "uint64_t vmlaldavq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_u16)))\n" "uint64_t vmlaldavq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_u32)))\n" "uint64_t vmlaldavq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_u32)))\n" "uint64_t vmlaldavq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_p_s16)))\n" "int64_t vmlaldavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_p_s16)))\n" "int64_t vmlaldavxq_p(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_p_s32)))\n" "int64_t vmlaldavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_p_s32)))\n" "int64_t vmlaldavxq_p(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_s16)))\n" "int64_t vmlaldavxq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_s16)))\n" "int64_t vmlaldavxq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_s32)))\n" "int64_t vmlaldavxq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_s32)))\n" "int64_t vmlaldavxq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s16)))\n" "int16x8_t vmlaq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s16)))\n" "int16x8_t vmlaq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s32)))\n" "int32x4_t vmlaq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s32)))\n" "int32x4_t vmlaq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s8)))\n" "int8x16_t vmlaq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s8)))\n" "int8x16_t vmlaq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u16)))\n" "uint16x8_t vmlaq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u16)))\n" "uint16x8_t vmlaq_m(uint16x8_t, 
uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u32)))\n" "uint32x4_t vmlaq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u32)))\n" "uint32x4_t vmlaq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u8)))\n" "uint8x16_t vmlaq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u8)))\n" "uint8x16_t vmlaq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s16)))\n" "int16x8_t vmlaq_n_s16(int16x8_t, int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s16)))\n" "int16x8_t vmlaq(int16x8_t, int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s32)))\n" "int32x4_t vmlaq_n_s32(int32x4_t, int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s32)))\n" "int32x4_t vmlaq(int32x4_t, int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s8)))\n" "int8x16_t vmlaq_n_s8(int8x16_t, int8x16_t, int8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s8)))\n" "int8x16_t vmlaq(int8x16_t, int8x16_t, int8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u16)))\n" "uint16x8_t vmlaq_n_u16(uint16x8_t, uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u16)))\n" 
"uint16x8_t vmlaq(uint16x8_t, uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u32)))\n" "uint32x4_t vmlaq_n_u32(uint32x4_t, uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u32)))\n" "uint32x4_t vmlaq(uint32x4_t, uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u8)))\n" "uint8x16_t vmlaq_n_u8(uint8x16_t, uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u8)))\n" "uint8x16_t vmlaq(uint8x16_t, uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s16)))\n" "int16x8_t vmlasq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s16)))\n" "int16x8_t vmlasq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s32)))\n" "int32x4_t vmlasq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s32)))\n" "int32x4_t vmlasq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s8)))\n" "int8x16_t vmlasq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s8)))\n" "int8x16_t vmlasq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u16)))\n" "uint16x8_t vmlasq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u16)))\n" "uint16x8_t vmlasq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u32)))\n" "uint32x4_t vmlasq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u32)))\n" "uint32x4_t vmlasq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u8)))\n" "uint8x16_t vmlasq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u8)))\n" "uint8x16_t vmlasq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s16)))\n" "int16x8_t vmlasq_n_s16(int16x8_t, int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s16)))\n" "int16x8_t vmlasq(int16x8_t, int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s32)))\n" "int32x4_t vmlasq_n_s32(int32x4_t, int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s32)))\n" "int32x4_t vmlasq(int32x4_t, int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s8)))\n" "int8x16_t vmlasq_n_s8(int8x16_t, int8x16_t, int8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s8)))\n" "int8x16_t vmlasq(int8x16_t, int8x16_t, int8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u16)))\n" "uint16x8_t 
vmlasq_n_u16(uint16x8_t, uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u16)))\n" "uint16x8_t vmlasq(uint16x8_t, uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u32)))\n" "uint32x4_t vmlasq_n_u32(uint32x4_t, uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u32)))\n" "uint32x4_t vmlasq(uint32x4_t, uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u8)))\n" "uint8x16_t vmlasq_n_u8(uint8x16_t, uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u8)))\n" "uint8x16_t vmlasq(uint8x16_t, uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s16)))\n" "int32_t vmlsdavaq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s16)))\n" "int32_t vmlsdavaq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s32)))\n" "int32_t vmlsdavaq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s32)))\n" "int32_t vmlsdavaq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s8)))\n" "int32_t vmlsdavaq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s8)))\n" "int32_t vmlsdavaq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s16)))\n" "int32_t vmlsdavaq_s16(int32_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s16)))\n" "int32_t vmlsdavaq(int32_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s32)))\n" "int32_t vmlsdavaq_s32(int32_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s32)))\n" "int32_t vmlsdavaq(int32_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s8)))\n" "int32_t vmlsdavaq_s8(int32_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s8)))\n" "int32_t vmlsdavaq(int32_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s16)))\n" "int32_t vmlsdavaxq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s16)))\n" "int32_t vmlsdavaxq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s32)))\n" "int32_t vmlsdavaxq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s32)))\n" "int32_t vmlsdavaxq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s8)))\n" "int32_t vmlsdavaxq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s8)))\n" "int32_t 
vmlsdavaxq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s16)))\n" "int32_t vmlsdavaxq_s16(int32_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s16)))\n" "int32_t vmlsdavaxq(int32_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s32)))\n" "int32_t vmlsdavaxq_s32(int32_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s32)))\n" "int32_t vmlsdavaxq(int32_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s8)))\n" "int32_t vmlsdavaxq_s8(int32_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s8)))\n" "int32_t vmlsdavaxq(int32_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s16)))\n" "int32_t vmlsdavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s16)))\n" "int32_t vmlsdavq_p(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s32)))\n" "int32_t vmlsdavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s32)))\n" "int32_t vmlsdavq_p(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s8)))\n" "int32_t vmlsdavq_p_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s8)))\n" "int32_t vmlsdavq_p(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s16)))\n" "int32_t vmlsdavq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s16)))\n" "int32_t vmlsdavq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s32)))\n" "int32_t vmlsdavq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s32)))\n" "int32_t vmlsdavq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s8)))\n" "int32_t vmlsdavq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s8)))\n" "int32_t vmlsdavq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s16)))\n" "int32_t vmlsdavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s16)))\n" "int32_t vmlsdavxq_p(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s32)))\n" "int32_t vmlsdavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s32)))\n" "int32_t vmlsdavxq_p(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s8)))\n" "int32_t vmlsdavxq_p_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s8)))\n" "int32_t vmlsdavxq_p(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s16)))\n" "int32_t vmlsdavxq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s16)))\n" "int32_t vmlsdavxq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s32)))\n" "int32_t vmlsdavxq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s32)))\n" "int32_t vmlsdavxq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s8)))\n" "int32_t vmlsdavxq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s8)))\n" "int32_t vmlsdavxq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_p_s16)))\n" "int64_t vmlsldavaq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_p_s16)))\n" "int64_t vmlsldavaq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_p_s32)))\n" "int64_t vmlsldavaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_p_s32)))\n" "int64_t vmlsldavaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_s16)))\n" "int64_t vmlsldavaq_s16(int64_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_s16)))\n" "int64_t vmlsldavaq(int64_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_s32)))\n" "int64_t vmlsldavaq_s32(int64_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_s32)))\n" "int64_t vmlsldavaq(int64_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_p_s16)))\n" "int64_t vmlsldavaxq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_p_s16)))\n" "int64_t vmlsldavaxq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_p_s32)))\n" "int64_t vmlsldavaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_p_s32)))\n" "int64_t vmlsldavaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_s16)))\n" "int64_t vmlsldavaxq_s16(int64_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_s16)))\n" "int64_t vmlsldavaxq(int64_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_s32)))\n" "int64_t vmlsldavaxq_s32(int64_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_s32)))\n" "int64_t vmlsldavaxq(int64_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_p_s16)))\n" "int64_t vmlsldavq_p_s16(int16x8_t, 
int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_p_s16)))\n" "int64_t vmlsldavq_p(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_p_s32)))\n" "int64_t vmlsldavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_p_s32)))\n" "int64_t vmlsldavq_p(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_s16)))\n" "int64_t vmlsldavq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_s16)))\n" "int64_t vmlsldavq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_s32)))\n" "int64_t vmlsldavq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_s32)))\n" "int64_t vmlsldavq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_p_s16)))\n" "int64_t vmlsldavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_p_s16)))\n" "int64_t vmlsldavxq_p(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_p_s32)))\n" "int64_t vmlsldavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_p_s32)))\n" "int64_t vmlsldavxq_p(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_s16)))\n" "int64_t vmlsldavxq_s16(int16x8_t, 
int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_s16)))\n" "int64_t vmlsldavxq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_s32)))\n" "int64_t vmlsldavxq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_s32)))\n" "int64_t vmlsldavxq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_s16)))\n" "int32x4_t vmovlbq_m_s16(int32x4_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_s16)))\n" "int32x4_t vmovlbq_m(int32x4_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_s8)))\n" "int16x8_t vmovlbq_m_s8(int16x8_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_s8)))\n" "int16x8_t vmovlbq_m(int16x8_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_u16)))\n" "uint32x4_t vmovlbq_m_u16(uint32x4_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_u16)))\n" "uint32x4_t vmovlbq_m(uint32x4_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_u8)))\n" "uint16x8_t vmovlbq_m_u8(uint16x8_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_u8)))\n" "uint16x8_t vmovlbq_m(uint16x8_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_s16)))\n" "int32x4_t vmovlbq_s16(int16x8_t);\n" "static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_s16)))\n" "int32x4_t vmovlbq(int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_s8)))\n" "int16x8_t vmovlbq_s8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_s8)))\n" "int16x8_t vmovlbq(int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_u16)))\n" "uint32x4_t vmovlbq_u16(uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_u16)))\n" "uint32x4_t vmovlbq(uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_u8)))\n" "uint16x8_t vmovlbq_u8(uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_u8)))\n" "uint16x8_t vmovlbq(uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_s16)))\n" "int32x4_t vmovlbq_x_s16(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_s16)))\n" "int32x4_t vmovlbq_x(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_s8)))\n" "int16x8_t vmovlbq_x_s8(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_s8)))\n" "int16x8_t vmovlbq_x(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_u16)))\n" "uint32x4_t vmovlbq_x_u16(uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_u16)))\n" "uint32x4_t vmovlbq_x(uint16x8_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_u8)))\n" "uint16x8_t vmovlbq_x_u8(uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_u8)))\n" "uint16x8_t vmovlbq_x(uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_s16)))\n" "int32x4_t vmovltq_m_s16(int32x4_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_s16)))\n" "int32x4_t vmovltq_m(int32x4_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_s8)))\n" "int16x8_t vmovltq_m_s8(int16x8_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_s8)))\n" "int16x8_t vmovltq_m(int16x8_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_u16)))\n" "uint32x4_t vmovltq_m_u16(uint32x4_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_u16)))\n" "uint32x4_t vmovltq_m(uint32x4_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_u8)))\n" "uint16x8_t vmovltq_m_u8(uint16x8_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_u8)))\n" "uint16x8_t vmovltq_m(uint16x8_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_s16)))\n" "int32x4_t vmovltq_s16(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_s16)))\n" "int32x4_t vmovltq(int16x8_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_s8)))\n" "int16x8_t vmovltq_s8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_s8)))\n" "int16x8_t vmovltq(int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_u16)))\n" "uint32x4_t vmovltq_u16(uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_u16)))\n" "uint32x4_t vmovltq(uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_u8)))\n" "uint16x8_t vmovltq_u8(uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_u8)))\n" "uint16x8_t vmovltq(uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_s16)))\n" "int32x4_t vmovltq_x_s16(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_s16)))\n" "int32x4_t vmovltq_x(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_s8)))\n" "int16x8_t vmovltq_x_s8(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_s8)))\n" "int16x8_t vmovltq_x(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_u16)))\n" "uint32x4_t vmovltq_x_u16(uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_u16)))\n" "uint32x4_t vmovltq_x(uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_u8)))\n" "uint16x8_t vmovltq_x_u8(uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_u8)))\n" "uint16x8_t vmovltq_x(uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_s16)))\n" "int8x16_t vmovnbq_m_s16(int8x16_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_s16)))\n" "int8x16_t vmovnbq_m(int8x16_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_s32)))\n" "int16x8_t vmovnbq_m_s32(int16x8_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_s32)))\n" "int16x8_t vmovnbq_m(int16x8_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_u16)))\n" "uint8x16_t vmovnbq_m_u16(uint8x16_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_u16)))\n" "uint8x16_t vmovnbq_m(uint8x16_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_u32)))\n" "uint16x8_t vmovnbq_m_u32(uint16x8_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_u32)))\n" "uint16x8_t vmovnbq_m(uint16x8_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_s16)))\n" "int8x16_t vmovnbq_s16(int8x16_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_s16)))\n" "int8x16_t vmovnbq(int8x16_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_s32)))\n" "int16x8_t vmovnbq_s32(int16x8_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_s32)))\n" "int16x8_t vmovnbq(int16x8_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_u16)))\n" "uint8x16_t vmovnbq_u16(uint8x16_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_u16)))\n" "uint8x16_t vmovnbq(uint8x16_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_u32)))\n" "uint16x8_t vmovnbq_u32(uint16x8_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_u32)))\n" "uint16x8_t vmovnbq(uint16x8_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_s16)))\n" "int8x16_t vmovntq_m_s16(int8x16_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_s16)))\n" "int8x16_t vmovntq_m(int8x16_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_s32)))\n" "int16x8_t vmovntq_m_s32(int16x8_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_s32)))\n" "int16x8_t vmovntq_m(int16x8_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_u16)))\n" "uint8x16_t vmovntq_m_u16(uint8x16_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_u16)))\n" "uint8x16_t vmovntq_m(uint8x16_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_u32)))\n" "uint16x8_t vmovntq_m_u32(uint16x8_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_u32)))\n" "uint16x8_t vmovntq_m(uint16x8_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_s16)))\n" "int8x16_t vmovntq_s16(int8x16_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_s16)))\n" "int8x16_t vmovntq(int8x16_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_s32)))\n" "int16x8_t vmovntq_s32(int16x8_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_s32)))\n" "int16x8_t vmovntq(int16x8_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_u16)))\n" "uint8x16_t vmovntq_u16(uint8x16_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_u16)))\n" "uint8x16_t vmovntq(uint8x16_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_u32)))\n" "uint16x8_t vmovntq_u32(uint16x8_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_u32)))\n" "uint16x8_t vmovntq(uint16x8_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s16)))\n" "int16x8_t vmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s16)))\n" "int16x8_t vmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s32)))\n" "int32x4_t vmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s32)))\n" "int32x4_t vmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s8)))\n" "int8x16_t vmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s8)))\n" "int8x16_t vmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u16)))\n" "uint16x8_t vmulhq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u16)))\n" "uint16x8_t vmulhq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u32)))\n" "uint32x4_t vmulhq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u32)))\n" "uint32x4_t vmulhq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u8)))\n" "uint8x16_t vmulhq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u8)))\n" "uint8x16_t vmulhq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s16)))\n" "int16x8_t vmulhq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s16)))\n" "int16x8_t vmulhq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s32)))\n" "int32x4_t 
vmulhq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s32)))\n" "int32x4_t vmulhq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s8)))\n" "int8x16_t vmulhq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s8)))\n" "int8x16_t vmulhq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u16)))\n" "uint16x8_t vmulhq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u16)))\n" "uint16x8_t vmulhq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u32)))\n" "uint32x4_t vmulhq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u32)))\n" "uint32x4_t vmulhq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u8)))\n" "uint8x16_t vmulhq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u8)))\n" "uint8x16_t vmulhq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s16)))\n" "int16x8_t vmulhq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s16)))\n" "int16x8_t vmulhq_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s32)))\n" "int32x4_t vmulhq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s32)))\n" "int32x4_t vmulhq_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s8)))\n" "int8x16_t vmulhq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s8)))\n" "int8x16_t vmulhq_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u16)))\n" "uint16x8_t vmulhq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u16)))\n" "uint16x8_t vmulhq_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u32)))\n" "uint32x4_t vmulhq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u32)))\n" "uint32x4_t vmulhq_x(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u8)))\n" "uint8x16_t vmulhq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u8)))\n" "uint8x16_t vmulhq_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s16)))\n" "int32x4_t vmullbq_int_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s16)))\n" "int32x4_t vmullbq_int_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s32)))\n" "int64x2_t vmullbq_int_m_s32(int64x2_t, 
int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s32)))\n" "int64x2_t vmullbq_int_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s8)))\n" "int16x8_t vmullbq_int_m_s8(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s8)))\n" "int16x8_t vmullbq_int_m(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u16)))\n" "uint32x4_t vmullbq_int_m_u16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u16)))\n" "uint32x4_t vmullbq_int_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u32)))\n" "uint64x2_t vmullbq_int_m_u32(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u32)))\n" "uint64x2_t vmullbq_int_m(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u8)))\n" "uint16x8_t vmullbq_int_m_u8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u8)))\n" "uint16x8_t vmullbq_int_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s16)))\n" "int32x4_t vmullbq_int_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s16)))\n" "int32x4_t vmullbq_int(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s32)))\n" "int64x2_t vmullbq_int_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s32)))\n" "int64x2_t vmullbq_int(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s8)))\n" "int16x8_t vmullbq_int_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s8)))\n" "int16x8_t vmullbq_int(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u16)))\n" "uint32x4_t vmullbq_int_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u16)))\n" "uint32x4_t vmullbq_int(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u32)))\n" "uint64x2_t vmullbq_int_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u32)))\n" "uint64x2_t vmullbq_int(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u8)))\n" "uint16x8_t vmullbq_int_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u8)))\n" "uint16x8_t vmullbq_int(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s16)))\n" "int32x4_t vmullbq_int_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s16)))\n" "int32x4_t vmullbq_int_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s32)))\n" "int64x2_t vmullbq_int_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s32)))\n" "int64x2_t vmullbq_int_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s8)))\n" "int16x8_t vmullbq_int_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s8)))\n" "int16x8_t vmullbq_int_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u16)))\n" "uint32x4_t vmullbq_int_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u16)))\n" "uint32x4_t vmullbq_int_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u32)))\n" "uint64x2_t vmullbq_int_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u32)))\n" "uint64x2_t vmullbq_int_x(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u8)))\n" "uint16x8_t vmullbq_int_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u8)))\n" "uint16x8_t vmullbq_int_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_m_p16)))\n" "uint32x4_t vmullbq_poly_m_p16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_m_p16)))\n" "uint32x4_t vmullbq_poly_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_m_p8)))\n" "uint16x8_t vmullbq_poly_m_p8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_m_p8)))\n" "uint16x8_t vmullbq_poly_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_p16)))\n" "uint32x4_t vmullbq_poly_p16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_p16)))\n" "uint32x4_t vmullbq_poly(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_p8)))\n" "uint16x8_t vmullbq_poly_p8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_p8)))\n" "uint16x8_t vmullbq_poly(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_x_p16)))\n" "uint32x4_t vmullbq_poly_x_p16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_x_p16)))\n" "uint32x4_t vmullbq_poly_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_x_p8)))\n" "uint16x8_t vmullbq_poly_x_p8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_x_p8)))\n" "uint16x8_t vmullbq_poly_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s16)))\n" "int32x4_t vmulltq_int_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s16)))\n" "int32x4_t vmulltq_int_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s32)))\n" "int64x2_t vmulltq_int_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s32)))\n" "int64x2_t vmulltq_int_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s8)))\n" "int16x8_t vmulltq_int_m_s8(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s8)))\n" "int16x8_t vmulltq_int_m(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u16)))\n" "uint32x4_t vmulltq_int_m_u16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u16)))\n" "uint32x4_t vmulltq_int_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u32)))\n" "uint64x2_t vmulltq_int_m_u32(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u32)))\n" 
"uint64x2_t vmulltq_int_m(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u8)))\n" "uint16x8_t vmulltq_int_m_u8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u8)))\n" "uint16x8_t vmulltq_int_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s16)))\n" "int32x4_t vmulltq_int_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s16)))\n" "int32x4_t vmulltq_int(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s32)))\n" "int64x2_t vmulltq_int_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s32)))\n" "int64x2_t vmulltq_int(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s8)))\n" "int16x8_t vmulltq_int_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s8)))\n" "int16x8_t vmulltq_int(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u16)))\n" "uint32x4_t vmulltq_int_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u16)))\n" "uint32x4_t vmulltq_int(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u32)))\n" "uint64x2_t vmulltq_int_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u32)))\n" "uint64x2_t vmulltq_int(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u8)))\n" "uint16x8_t vmulltq_int_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u8)))\n" "uint16x8_t vmulltq_int(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s16)))\n" "int32x4_t vmulltq_int_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s16)))\n" "int32x4_t vmulltq_int_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s32)))\n" "int64x2_t vmulltq_int_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s32)))\n" "int64x2_t vmulltq_int_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s8)))\n" "int16x8_t vmulltq_int_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s8)))\n" "int16x8_t vmulltq_int_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u16)))\n" "uint32x4_t vmulltq_int_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u16)))\n" "uint32x4_t vmulltq_int_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u32)))\n" "uint64x2_t 
vmulltq_int_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u32)))\n" "uint64x2_t vmulltq_int_x(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u8)))\n" "uint16x8_t vmulltq_int_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u8)))\n" "uint16x8_t vmulltq_int_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_m_p16)))\n" "uint32x4_t vmulltq_poly_m_p16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_m_p16)))\n" "uint32x4_t vmulltq_poly_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_m_p8)))\n" "uint16x8_t vmulltq_poly_m_p8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_m_p8)))\n" "uint16x8_t vmulltq_poly_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_p16)))\n" "uint32x4_t vmulltq_poly_p16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_p16)))\n" "uint32x4_t vmulltq_poly(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_p8)))\n" "uint16x8_t vmulltq_poly_p8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_p8)))\n" "uint16x8_t 
vmulltq_poly(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_x_p16)))\n" "uint32x4_t vmulltq_poly_x_p16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_x_p16)))\n" "uint32x4_t vmulltq_poly_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_x_p8)))\n" "uint16x8_t vmulltq_poly_x_p8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_x_p8)))\n" "uint16x8_t vmulltq_poly_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s16)))\n" "int16x8_t vmulq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s16)))\n" "int16x8_t vmulq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s32)))\n" "int32x4_t vmulq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s32)))\n" "int32x4_t vmulq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s8)))\n" "int8x16_t vmulq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s8)))\n" "int8x16_t vmulq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u16)))\n" "uint16x8_t vmulq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u16)))\n" "uint16x8_t vmulq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u32)))\n" "uint32x4_t vmulq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u32)))\n" "uint32x4_t vmulq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u8)))\n" "uint8x16_t vmulq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u8)))\n" "uint8x16_t vmulq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s16)))\n" "int16x8_t vmulq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s16)))\n" "int16x8_t vmulq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s32)))\n" "int32x4_t vmulq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s32)))\n" "int32x4_t vmulq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s8)))\n" "int8x16_t vmulq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s8)))\n" "int8x16_t vmulq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u16)))\n" "uint16x8_t vmulq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u16)))\n" "uint16x8_t vmulq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u32)))\n" "uint32x4_t vmulq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u32)))\n" "uint32x4_t vmulq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u8)))\n" "uint8x16_t vmulq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u8)))\n" "uint8x16_t vmulq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s16)))\n" "int16x8_t vmulq_n_s16(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s16)))\n" "int16x8_t vmulq(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s32)))\n" "int32x4_t vmulq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s32)))\n" "int32x4_t vmulq(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s8)))\n" "int8x16_t vmulq_n_s8(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s8)))\n" "int8x16_t vmulq(int8x16_t, int8_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u16)))\n" "uint16x8_t vmulq_n_u16(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u16)))\n" "uint16x8_t vmulq(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u32)))\n" "uint32x4_t vmulq_n_u32(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u32)))\n" "uint32x4_t vmulq(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u8)))\n" "uint8x16_t vmulq_n_u8(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u8)))\n" "uint8x16_t vmulq(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s16)))\n" "int16x8_t vmulq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s16)))\n" "int16x8_t vmulq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s32)))\n" "int32x4_t vmulq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s32)))\n" "int32x4_t vmulq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s8)))\n" "int8x16_t vmulq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s8)))\n" "int8x16_t vmulq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u16)))\n" "uint16x8_t vmulq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u16)))\n" "uint16x8_t vmulq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u32)))\n" "uint32x4_t vmulq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u32)))\n" "uint32x4_t vmulq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u8)))\n" "uint8x16_t vmulq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u8)))\n" "uint8x16_t vmulq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s16)))\n" "int16x8_t vmulq_x_n_s16(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s16)))\n" "int16x8_t vmulq_x(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s32)))\n" "int32x4_t vmulq_x_n_s32(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s32)))\n" "int32x4_t vmulq_x(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s8)))\n" "int8x16_t vmulq_x_n_s8(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s8)))\n" "int8x16_t vmulq_x(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u16)))\n" "uint16x8_t vmulq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u16)))\n" "uint16x8_t 
vmulq_x(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u32)))\n" "uint32x4_t vmulq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u32)))\n" "uint32x4_t vmulq_x(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u8)))\n" "uint8x16_t vmulq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u8)))\n" "uint8x16_t vmulq_x(uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s16)))\n" "int16x8_t vmulq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s16)))\n" "int16x8_t vmulq_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s32)))\n" "int32x4_t vmulq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s32)))\n" "int32x4_t vmulq_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s8)))\n" "int8x16_t vmulq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s8)))\n" "int8x16_t vmulq_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u16)))\n" "uint16x8_t vmulq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u16)))\n" 
"uint16x8_t vmulq_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u32)))\n" "uint32x4_t vmulq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u32)))\n" "uint32x4_t vmulq_x(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u8)))\n" "uint8x16_t vmulq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u8)))\n" "uint8x16_t vmulq_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_s16)))\n" "int16x8_t vmvnq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_s16)))\n" "int16x8_t vmvnq_m(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_s32)))\n" "int32x4_t vmvnq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_s32)))\n" "int32x4_t vmvnq_m(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_u16)))\n" "uint16x8_t vmvnq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_u16)))\n" "uint16x8_t vmvnq_m(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_u32)))\n" "uint32x4_t vmvnq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_u32)))\n" "uint32x4_t vmvnq_m(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s16)))\n" "int16x8_t vmvnq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s16)))\n" "int16x8_t vmvnq_m(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s32)))\n" "int32x4_t vmvnq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s32)))\n" "int32x4_t vmvnq_m(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s8)))\n" "int8x16_t vmvnq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s8)))\n" "int8x16_t vmvnq_m(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u16)))\n" "uint16x8_t vmvnq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u16)))\n" "uint16x8_t vmvnq_m(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u32)))\n" "uint32x4_t vmvnq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u32)))\n" "uint32x4_t vmvnq_m(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u8)))\n" "uint8x16_t vmvnq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u8)))\n" "uint8x16_t vmvnq_m(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_n_s16)))\n" "int16x8_t vmvnq_n_s16(int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_n_s32)))\n" "int32x4_t vmvnq_n_s32(int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_n_u16)))\n" "uint16x8_t vmvnq_n_u16(uint16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_n_u32)))\n" "uint32x4_t vmvnq_n_u32(uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s16)))\n" "int16x8_t vmvnq_s16(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s16)))\n" "int16x8_t vmvnq(int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s32)))\n" "int32x4_t vmvnq_s32(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s32)))\n" "int32x4_t vmvnq(int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s8)))\n" "int8x16_t vmvnq_s8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s8)))\n" "int8x16_t vmvnq(int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u16)))\n" "uint16x8_t vmvnq_u16(uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u16)))\n" "uint16x8_t vmvnq(uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u32)))\n" "uint32x4_t vmvnq_u32(uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u32)))\n" "uint32x4_t vmvnq(uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u8)))\n" "uint8x16_t vmvnq_u8(uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u8)))\n" "uint8x16_t vmvnq(uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_n_s16)))\n" "int16x8_t vmvnq_x_n_s16(int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_n_s32)))\n" "int32x4_t vmvnq_x_n_s32(int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_n_u16)))\n" "uint16x8_t vmvnq_x_n_u16(uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_n_u32)))\n" "uint32x4_t vmvnq_x_n_u32(uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s16)))\n" "int16x8_t vmvnq_x_s16(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s16)))\n" "int16x8_t vmvnq_x(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s32)))\n" "int32x4_t vmvnq_x_s32(int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s32)))\n" "int32x4_t vmvnq_x(int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s8)))\n" "int8x16_t vmvnq_x_s8(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s8)))\n" "int8x16_t vmvnq_x(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u16)))\n" 
"uint16x8_t vmvnq_x_u16(uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u16)))\n" "uint16x8_t vmvnq_x(uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u32)))\n" "uint32x4_t vmvnq_x_u32(uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u32)))\n" "uint32x4_t vmvnq_x(uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u8)))\n" "uint8x16_t vmvnq_x_u8(uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u8)))\n" "uint8x16_t vmvnq_x(uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s16)))\n" "int16x8_t vnegq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s16)))\n" "int16x8_t vnegq_m(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s32)))\n" "int32x4_t vnegq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s32)))\n" "int32x4_t vnegq_m(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s8)))\n" "int8x16_t vnegq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s8)))\n" "int8x16_t vnegq_m(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s16)))\n" "int16x8_t vnegq_s16(int16x8_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s16)))\n" "int16x8_t vnegq(int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s32)))\n" "int32x4_t vnegq_s32(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s32)))\n" "int32x4_t vnegq(int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s8)))\n" "int8x16_t vnegq_s8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s8)))\n" "int8x16_t vnegq(int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s16)))\n" "int16x8_t vnegq_x_s16(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s16)))\n" "int16x8_t vnegq_x(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s32)))\n" "int32x4_t vnegq_x_s32(int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s32)))\n" "int32x4_t vnegq_x(int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s8)))\n" "int8x16_t vnegq_x_s8(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s8)))\n" "int8x16_t vnegq_x(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s16)))\n" "int16x8_t vornq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s16)))\n" "int16x8_t vornq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s32)))\n" "int32x4_t vornq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s32)))\n" "int32x4_t vornq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s8)))\n" "int8x16_t vornq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s8)))\n" "int8x16_t vornq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u16)))\n" "uint16x8_t vornq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u16)))\n" "uint16x8_t vornq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u32)))\n" "uint32x4_t vornq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u32)))\n" "uint32x4_t vornq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u8)))\n" "uint8x16_t vornq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u8)))\n" "uint8x16_t vornq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_s16)))\n" "int16x8_t vornq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vornq_s16)))\n" "int16x8_t vornq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_s32)))\n" "int32x4_t vornq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_s32)))\n" "int32x4_t vornq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_s8)))\n" "int8x16_t vornq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_s8)))\n" "int8x16_t vornq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_u16)))\n" "uint16x8_t vornq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_u16)))\n" "uint16x8_t vornq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_u32)))\n" "uint32x4_t vornq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_u32)))\n" "uint32x4_t vornq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_u8)))\n" "uint8x16_t vornq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_u8)))\n" "uint8x16_t vornq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s16)))\n" "int16x8_t vornq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s16)))\n" "int16x8_t vornq_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s32)))\n" "int32x4_t vornq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s32)))\n" "int32x4_t vornq_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s8)))\n" "int8x16_t vornq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s8)))\n" "int8x16_t vornq_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u16)))\n" "uint16x8_t vornq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u16)))\n" "uint16x8_t vornq_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u32)))\n" "uint32x4_t vornq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u32)))\n" "uint32x4_t vornq_x(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u8)))\n" "uint8x16_t vornq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u8)))\n" "uint8x16_t vornq_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_s16)))\n" "int16x8_t vorrq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_s16)))\n" "int16x8_t vorrq_m_n(int16x8_t, int16_t, mve_pred16_t);\n" "static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_s32)))\n" "int32x4_t vorrq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_s32)))\n" "int32x4_t vorrq_m_n(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_u16)))\n" "uint16x8_t vorrq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_u16)))\n" "uint16x8_t vorrq_m_n(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_u32)))\n" "uint32x4_t vorrq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_u32)))\n" "uint32x4_t vorrq_m_n(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s16)))\n" "int16x8_t vorrq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s16)))\n" "int16x8_t vorrq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s32)))\n" "int32x4_t vorrq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s32)))\n" "int32x4_t vorrq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s8)))\n" "int8x16_t vorrq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s8)))\n" "int8x16_t vorrq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u16)))\n" "uint16x8_t vorrq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u16)))\n" "uint16x8_t vorrq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u32)))\n" "uint32x4_t vorrq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u32)))\n" "uint32x4_t vorrq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u8)))\n" "uint8x16_t vorrq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u8)))\n" "uint8x16_t vorrq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_s16)))\n" "int16x8_t vorrq_n_s16(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_s16)))\n" "int16x8_t vorrq(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_s32)))\n" "int32x4_t vorrq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_s32)))\n" "int32x4_t vorrq(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_u16)))\n" "uint16x8_t vorrq_n_u16(uint16x8_t, uint16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_u16)))\n" "uint16x8_t vorrq(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_u32)))\n" "uint32x4_t vorrq_n_u32(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_u32)))\n" "uint32x4_t vorrq(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s16)))\n" "int16x8_t vorrq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s16)))\n" "int16x8_t vorrq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s32)))\n" "int32x4_t vorrq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s32)))\n" "int32x4_t vorrq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s8)))\n" "int8x16_t vorrq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s8)))\n" "int8x16_t vorrq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u16)))\n" "uint16x8_t vorrq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u16)))\n" "uint16x8_t vorrq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u32)))\n" "uint32x4_t vorrq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u32)))\n" "uint32x4_t vorrq(uint32x4_t, uint32x4_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u8)))\n" "uint8x16_t vorrq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u8)))\n" "uint8x16_t vorrq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s16)))\n" "int16x8_t vorrq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s16)))\n" "int16x8_t vorrq_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s32)))\n" "int32x4_t vorrq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s32)))\n" "int32x4_t vorrq_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s8)))\n" "int8x16_t vorrq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s8)))\n" "int8x16_t vorrq_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u16)))\n" "uint16x8_t vorrq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u16)))\n" "uint16x8_t vorrq_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u32)))\n" "uint32x4_t vorrq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u32)))\n" "uint32x4_t vorrq_x(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u8)))\n" "uint8x16_t vorrq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u8)))\n" "uint8x16_t vorrq_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpnot)))\n" "mve_pred16_t vpnot(mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s16)))\n" "int16x8_t vpselq_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s16)))\n" "int16x8_t vpselq(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s32)))\n" "int32x4_t vpselq_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s32)))\n" "int32x4_t vpselq(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s64)))\n" "int64x2_t vpselq_s64(int64x2_t, int64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s64)))\n" "int64x2_t vpselq(int64x2_t, int64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s8)))\n" "int8x16_t vpselq_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s8)))\n" "int8x16_t vpselq(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u16)))\n" "uint16x8_t vpselq_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u16)))\n" "uint16x8_t vpselq(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u32)))\n" "uint32x4_t vpselq_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u32)))\n" "uint32x4_t vpselq(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u64)))\n" "uint64x2_t vpselq_u64(uint64x2_t, uint64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u64)))\n" "uint64x2_t vpselq(uint64x2_t, uint64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u8)))\n" "uint8x16_t vpselq_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u8)))\n" "uint8x16_t vpselq(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s16)))\n" "int16x8_t vqabsq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s16)))\n" "int16x8_t vqabsq_m(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s32)))\n" "int32x4_t vqabsq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s32)))\n" "int32x4_t vqabsq_m(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s8)))\n" "int8x16_t vqabsq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s8)))\n" "int8x16_t vqabsq_m(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s16)))\n" "int16x8_t vqabsq_s16(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s16)))\n" "int16x8_t vqabsq(int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s32)))\n" "int32x4_t vqabsq_s32(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s32)))\n" "int32x4_t vqabsq(int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s8)))\n" "int8x16_t vqabsq_s8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s8)))\n" "int8x16_t vqabsq(int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s16)))\n" "int16x8_t vqaddq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s16)))\n" "int16x8_t vqaddq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s32)))\n" "int32x4_t vqaddq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s32)))\n" "int32x4_t vqaddq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s8)))\n" "int8x16_t vqaddq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s8)))\n" 
"int8x16_t vqaddq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u16)))\n" "uint16x8_t vqaddq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u16)))\n" "uint16x8_t vqaddq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u32)))\n" "uint32x4_t vqaddq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u32)))\n" "uint32x4_t vqaddq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u8)))\n" "uint8x16_t vqaddq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u8)))\n" "uint8x16_t vqaddq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s16)))\n" "int16x8_t vqaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s16)))\n" "int16x8_t vqaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s32)))\n" "int32x4_t vqaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s32)))\n" "int32x4_t vqaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s8)))\n" "int8x16_t 
vqaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s8)))\n" "int8x16_t vqaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u16)))\n" "uint16x8_t vqaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u16)))\n" "uint16x8_t vqaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u32)))\n" "uint32x4_t vqaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u32)))\n" "uint32x4_t vqaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u8)))\n" "uint8x16_t vqaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u8)))\n" "uint8x16_t vqaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s16)))\n" "int16x8_t vqaddq_n_s16(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s16)))\n" "int16x8_t vqaddq(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s32)))\n" "int32x4_t vqaddq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s32)))\n" "int32x4_t vqaddq(int32x4_t, int32_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s8)))\n" "int8x16_t vqaddq_n_s8(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s8)))\n" "int8x16_t vqaddq(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u16)))\n" "uint16x8_t vqaddq_n_u16(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u16)))\n" "uint16x8_t vqaddq(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u32)))\n" "uint32x4_t vqaddq_n_u32(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u32)))\n" "uint32x4_t vqaddq(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u8)))\n" "uint8x16_t vqaddq_n_u8(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u8)))\n" "uint8x16_t vqaddq(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s16)))\n" "int16x8_t vqaddq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s16)))\n" "int16x8_t vqaddq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s32)))\n" "int32x4_t vqaddq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s32)))\n" "int32x4_t vqaddq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s8)))\n" "int8x16_t vqaddq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s8)))\n" "int8x16_t vqaddq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u16)))\n" "uint16x8_t vqaddq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u16)))\n" "uint16x8_t vqaddq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u32)))\n" "uint32x4_t vqaddq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u32)))\n" "uint32x4_t vqaddq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u8)))\n" "uint8x16_t vqaddq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u8)))\n" "uint8x16_t vqaddq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s16)))\n" "int16x8_t vqdmladhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s16)))\n" "int16x8_t vqdmladhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s32)))\n" "int32x4_t vqdmladhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s32)))\n" "int32x4_t vqdmladhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s8)))\n" "int8x16_t vqdmladhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s8)))\n" "int8x16_t vqdmladhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s16)))\n" "int16x8_t vqdmladhq_s16(int16x8_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s16)))\n" "int16x8_t vqdmladhq(int16x8_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s32)))\n" "int32x4_t vqdmladhq_s32(int32x4_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s32)))\n" "int32x4_t vqdmladhq(int32x4_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s8)))\n" "int8x16_t vqdmladhq_s8(int8x16_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s8)))\n" "int8x16_t vqdmladhq(int8x16_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s16)))\n" "int16x8_t vqdmladhxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s16)))\n" "int16x8_t vqdmladhxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s32)))\n" "int32x4_t vqdmladhxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s32)))\n" "int32x4_t vqdmladhxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s8)))\n" "int8x16_t 
vqdmladhxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s8)))\n" "int8x16_t vqdmladhxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s16)))\n" "int16x8_t vqdmladhxq_s16(int16x8_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s16)))\n" "int16x8_t vqdmladhxq(int16x8_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s32)))\n" "int32x4_t vqdmladhxq_s32(int32x4_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s32)))\n" "int32x4_t vqdmladhxq(int32x4_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s8)))\n" "int8x16_t vqdmladhxq_s8(int8x16_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s8)))\n" "int8x16_t vqdmladhxq(int8x16_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s16)))\n" "int16x8_t vqdmlahq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s16)))\n" "int16x8_t vqdmlahq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s32)))\n" "int32x4_t vqdmlahq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s32)))\n" "int32x4_t vqdmlahq_m(int32x4_t, int32x4_t, int32_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s8)))\n" "int8x16_t vqdmlahq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s8)))\n" "int8x16_t vqdmlahq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s16)))\n" "int16x8_t vqdmlahq_n_s16(int16x8_t, int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s16)))\n" "int16x8_t vqdmlahq(int16x8_t, int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s32)))\n" "int32x4_t vqdmlahq_n_s32(int32x4_t, int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s32)))\n" "int32x4_t vqdmlahq(int32x4_t, int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s8)))\n" "int8x16_t vqdmlahq_n_s8(int8x16_t, int8x16_t, int8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s8)))\n" "int8x16_t vqdmlahq(int8x16_t, int8x16_t, int8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s16)))\n" "int16x8_t vqdmlashq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s16)))\n" "int16x8_t vqdmlashq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s32)))\n" "int32x4_t vqdmlashq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s32)))\n" "int32x4_t vqdmlashq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s8)))\n" "int8x16_t vqdmlashq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s8)))\n" "int8x16_t vqdmlashq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s16)))\n" "int16x8_t vqdmlashq_n_s16(int16x8_t, int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s16)))\n" "int16x8_t vqdmlashq(int16x8_t, int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s32)))\n" "int32x4_t vqdmlashq_n_s32(int32x4_t, int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s32)))\n" "int32x4_t vqdmlashq(int32x4_t, int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s8)))\n" "int8x16_t vqdmlashq_n_s8(int8x16_t, int8x16_t, int8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s8)))\n" "int8x16_t vqdmlashq(int8x16_t, int8x16_t, int8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s16)))\n" "int16x8_t vqdmlsdhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s16)))\n" "int16x8_t vqdmlsdhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s32)))\n" "int32x4_t 
vqdmlsdhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s32)))\n" "int32x4_t vqdmlsdhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s8)))\n" "int8x16_t vqdmlsdhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s8)))\n" "int8x16_t vqdmlsdhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s16)))\n" "int16x8_t vqdmlsdhq_s16(int16x8_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s16)))\n" "int16x8_t vqdmlsdhq(int16x8_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s32)))\n" "int32x4_t vqdmlsdhq_s32(int32x4_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s32)))\n" "int32x4_t vqdmlsdhq(int32x4_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s8)))\n" "int8x16_t vqdmlsdhq_s8(int8x16_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s8)))\n" "int8x16_t vqdmlsdhq(int8x16_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s16)))\n" "int16x8_t vqdmlsdhxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s16)))\n" "int16x8_t vqdmlsdhxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" 
"static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s32)))\n" "int32x4_t vqdmlsdhxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s32)))\n" "int32x4_t vqdmlsdhxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s8)))\n" "int8x16_t vqdmlsdhxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s8)))\n" "int8x16_t vqdmlsdhxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s16)))\n" "int16x8_t vqdmlsdhxq_s16(int16x8_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s16)))\n" "int16x8_t vqdmlsdhxq(int16x8_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s32)))\n" "int32x4_t vqdmlsdhxq_s32(int32x4_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s32)))\n" "int32x4_t vqdmlsdhxq(int32x4_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s8)))\n" "int8x16_t vqdmlsdhxq_s8(int8x16_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s8)))\n" "int8x16_t vqdmlsdhxq(int8x16_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s16)))\n" "int16x8_t vqdmulhq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s16)))\n" "int16x8_t vqdmulhq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s32)))\n" "int32x4_t vqdmulhq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s32)))\n" "int32x4_t vqdmulhq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s8)))\n" "int8x16_t vqdmulhq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s8)))\n" "int8x16_t vqdmulhq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s16)))\n" "int16x8_t vqdmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s16)))\n" "int16x8_t vqdmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s32)))\n" "int32x4_t vqdmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s32)))\n" "int32x4_t vqdmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s8)))\n" "int8x16_t vqdmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s8)))\n" "int8x16_t vqdmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s16)))\n" "int16x8_t vqdmulhq_n_s16(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s16)))\n" "int16x8_t vqdmulhq(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s32)))\n" "int32x4_t vqdmulhq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s32)))\n" "int32x4_t vqdmulhq(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s8)))\n" "int8x16_t vqdmulhq_n_s8(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s8)))\n" "int8x16_t vqdmulhq(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s16)))\n" "int16x8_t vqdmulhq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s16)))\n" "int16x8_t vqdmulhq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s32)))\n" "int32x4_t vqdmulhq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s32)))\n" "int32x4_t vqdmulhq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s8)))\n" "int8x16_t vqdmulhq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s8)))\n" "int8x16_t vqdmulhq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_n_s16)))\n" "int32x4_t vqdmullbq_m_n_s16(int32x4_t, int16x8_t, int16_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_n_s16)))\n" "int32x4_t vqdmullbq_m(int32x4_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_n_s32)))\n" "int64x2_t vqdmullbq_m_n_s32(int64x2_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_n_s32)))\n" "int64x2_t vqdmullbq_m(int64x2_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_s16)))\n" "int32x4_t vqdmullbq_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_s16)))\n" "int32x4_t vqdmullbq_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_s32)))\n" "int64x2_t vqdmullbq_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_s32)))\n" "int64x2_t vqdmullbq_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_n_s16)))\n" "int32x4_t vqdmullbq_n_s16(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_n_s16)))\n" "int32x4_t vqdmullbq(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_n_s32)))\n" "int64x2_t vqdmullbq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_n_s32)))\n" "int64x2_t vqdmullbq(int32x4_t, int32_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_s16)))\n" "int32x4_t vqdmullbq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_s16)))\n" "int32x4_t vqdmullbq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_s32)))\n" "int64x2_t vqdmullbq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_s32)))\n" "int64x2_t vqdmullbq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_n_s16)))\n" "int32x4_t vqdmulltq_m_n_s16(int32x4_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_n_s16)))\n" "int32x4_t vqdmulltq_m(int32x4_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_n_s32)))\n" "int64x2_t vqdmulltq_m_n_s32(int64x2_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_n_s32)))\n" "int64x2_t vqdmulltq_m(int64x2_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_s16)))\n" "int32x4_t vqdmulltq_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_s16)))\n" "int32x4_t vqdmulltq_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_s32)))\n" "int64x2_t vqdmulltq_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_s32)))\n" "int64x2_t vqdmulltq_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_n_s16)))\n" "int32x4_t vqdmulltq_n_s16(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_n_s16)))\n" "int32x4_t vqdmulltq(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_n_s32)))\n" "int64x2_t vqdmulltq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_n_s32)))\n" "int64x2_t vqdmulltq(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_s16)))\n" "int32x4_t vqdmulltq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_s16)))\n" "int32x4_t vqdmulltq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_s32)))\n" "int64x2_t vqdmulltq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_s32)))\n" "int64x2_t vqdmulltq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_s16)))\n" "int8x16_t vqmovnbq_m_s16(int8x16_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_s16)))\n" "int8x16_t vqmovnbq_m(int8x16_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_s32)))\n" "int16x8_t vqmovnbq_m_s32(int16x8_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_s32)))\n" "int16x8_t vqmovnbq_m(int16x8_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_u16)))\n" "uint8x16_t vqmovnbq_m_u16(uint8x16_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_u16)))\n" "uint8x16_t vqmovnbq_m(uint8x16_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_u32)))\n" "uint16x8_t vqmovnbq_m_u32(uint16x8_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_u32)))\n" "uint16x8_t vqmovnbq_m(uint16x8_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_s16)))\n" "int8x16_t vqmovnbq_s16(int8x16_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_s16)))\n" "int8x16_t vqmovnbq(int8x16_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_s32)))\n" "int16x8_t vqmovnbq_s32(int16x8_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_s32)))\n" "int16x8_t vqmovnbq(int16x8_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_u16)))\n" "uint8x16_t vqmovnbq_u16(uint8x16_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_u16)))\n" "uint8x16_t vqmovnbq(uint8x16_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_u32)))\n" "uint16x8_t vqmovnbq_u32(uint16x8_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_u32)))\n" "uint16x8_t vqmovnbq(uint16x8_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_s16)))\n" "int8x16_t vqmovntq_m_s16(int8x16_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_s16)))\n" "int8x16_t vqmovntq_m(int8x16_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_s32)))\n" "int16x8_t vqmovntq_m_s32(int16x8_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_s32)))\n" "int16x8_t vqmovntq_m(int16x8_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_u16)))\n" "uint8x16_t vqmovntq_m_u16(uint8x16_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_u16)))\n" "uint8x16_t vqmovntq_m(uint8x16_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_u32)))\n" "uint16x8_t vqmovntq_m_u32(uint16x8_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_u32)))\n" "uint16x8_t vqmovntq_m(uint16x8_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_s16)))\n" "int8x16_t vqmovntq_s16(int8x16_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_s16)))\n" "int8x16_t vqmovntq(int8x16_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_s32)))\n" "int16x8_t vqmovntq_s32(int16x8_t, int32x4_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_s32)))\n" "int16x8_t vqmovntq(int16x8_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_u16)))\n" "uint8x16_t vqmovntq_u16(uint8x16_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_u16)))\n" "uint8x16_t vqmovntq(uint8x16_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_u32)))\n" "uint16x8_t vqmovntq_u32(uint16x8_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_u32)))\n" "uint16x8_t vqmovntq(uint16x8_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_m_s16)))\n" "uint8x16_t vqmovunbq_m_s16(uint8x16_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_m_s16)))\n" "uint8x16_t vqmovunbq_m(uint8x16_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_m_s32)))\n" "uint16x8_t vqmovunbq_m_s32(uint16x8_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_m_s32)))\n" "uint16x8_t vqmovunbq_m(uint16x8_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_s16)))\n" "uint8x16_t vqmovunbq_s16(uint8x16_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_s16)))\n" "uint8x16_t vqmovunbq(uint8x16_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_s32)))\n" "uint16x8_t vqmovunbq_s32(uint16x8_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_s32)))\n" "uint16x8_t vqmovunbq(uint16x8_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_m_s16)))\n" "uint8x16_t vqmovuntq_m_s16(uint8x16_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_m_s16)))\n" "uint8x16_t vqmovuntq_m(uint8x16_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_m_s32)))\n" "uint16x8_t vqmovuntq_m_s32(uint16x8_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_m_s32)))\n" "uint16x8_t vqmovuntq_m(uint16x8_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_s16)))\n" "uint8x16_t vqmovuntq_s16(uint8x16_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_s16)))\n" "uint8x16_t vqmovuntq(uint8x16_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_s32)))\n" "uint16x8_t vqmovuntq_s32(uint16x8_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_s32)))\n" "uint16x8_t vqmovuntq(uint16x8_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s16)))\n" "int16x8_t vqnegq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s16)))\n" "int16x8_t vqnegq_m(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s32)))\n" "int32x4_t vqnegq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s32)))\n" "int32x4_t vqnegq_m(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s8)))\n" "int8x16_t vqnegq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s8)))\n" "int8x16_t vqnegq_m(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s16)))\n" "int16x8_t vqnegq_s16(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s16)))\n" "int16x8_t vqnegq(int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s32)))\n" "int32x4_t vqnegq_s32(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s32)))\n" "int32x4_t vqnegq(int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s8)))\n" "int8x16_t vqnegq_s8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s8)))\n" "int8x16_t vqnegq(int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s16)))\n" "int16x8_t vqrdmladhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s16)))\n" "int16x8_t vqrdmladhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s32)))\n" "int32x4_t vqrdmladhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s32)))\n" "int32x4_t vqrdmladhq_m(int32x4_t, 
int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s8)))\n" "int8x16_t vqrdmladhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s8)))\n" "int8x16_t vqrdmladhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s16)))\n" "int16x8_t vqrdmladhq_s16(int16x8_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s16)))\n" "int16x8_t vqrdmladhq(int16x8_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s32)))\n" "int32x4_t vqrdmladhq_s32(int32x4_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s32)))\n" "int32x4_t vqrdmladhq(int32x4_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s8)))\n" "int8x16_t vqrdmladhq_s8(int8x16_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s8)))\n" "int8x16_t vqrdmladhq(int8x16_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s16)))\n" "int16x8_t vqrdmladhxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s16)))\n" "int16x8_t vqrdmladhxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s32)))\n" "int32x4_t vqrdmladhxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s32)))\n" "int32x4_t vqrdmladhxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s8)))\n" "int8x16_t vqrdmladhxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s8)))\n" "int8x16_t vqrdmladhxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s16)))\n" "int16x8_t vqrdmladhxq_s16(int16x8_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s16)))\n" "int16x8_t vqrdmladhxq(int16x8_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s32)))\n" "int32x4_t vqrdmladhxq_s32(int32x4_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s32)))\n" "int32x4_t vqrdmladhxq(int32x4_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s8)))\n" "int8x16_t vqrdmladhxq_s8(int8x16_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s8)))\n" "int8x16_t vqrdmladhxq(int8x16_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s16)))\n" "int16x8_t vqrdmlahq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s16)))\n" "int16x8_t vqrdmlahq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s32)))\n" "int32x4_t vqrdmlahq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s32)))\n" "int32x4_t vqrdmlahq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s8)))\n" "int8x16_t vqrdmlahq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s8)))\n" "int8x16_t vqrdmlahq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s16)))\n" "int16x8_t vqrdmlahq_n_s16(int16x8_t, int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s16)))\n" "int16x8_t vqrdmlahq(int16x8_t, int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s32)))\n" "int32x4_t vqrdmlahq_n_s32(int32x4_t, int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s32)))\n" "int32x4_t vqrdmlahq(int32x4_t, int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s8)))\n" "int8x16_t vqrdmlahq_n_s8(int8x16_t, int8x16_t, int8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s8)))\n" "int8x16_t vqrdmlahq(int8x16_t, int8x16_t, int8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s16)))\n" "int16x8_t vqrdmlashq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s16)))\n" "int16x8_t vqrdmlashq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s32)))\n" "int32x4_t vqrdmlashq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s32)))\n" "int32x4_t vqrdmlashq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s8)))\n" "int8x16_t vqrdmlashq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s8)))\n" "int8x16_t vqrdmlashq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s16)))\n" "int16x8_t vqrdmlashq_n_s16(int16x8_t, int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s16)))\n" "int16x8_t vqrdmlashq(int16x8_t, int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s32)))\n" "int32x4_t vqrdmlashq_n_s32(int32x4_t, int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s32)))\n" "int32x4_t vqrdmlashq(int32x4_t, int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s8)))\n" "int8x16_t vqrdmlashq_n_s8(int8x16_t, int8x16_t, int8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s8)))\n" "int8x16_t vqrdmlashq(int8x16_t, int8x16_t, int8_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s16)))\n" "int16x8_t vqrdmlsdhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s16)))\n" "int16x8_t vqrdmlsdhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s32)))\n" "int32x4_t vqrdmlsdhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s32)))\n" "int32x4_t vqrdmlsdhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s8)))\n" "int8x16_t vqrdmlsdhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s8)))\n" "int8x16_t vqrdmlsdhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s16)))\n" "int16x8_t vqrdmlsdhq_s16(int16x8_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s16)))\n" "int16x8_t vqrdmlsdhq(int16x8_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s32)))\n" "int32x4_t vqrdmlsdhq_s32(int32x4_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s32)))\n" "int32x4_t vqrdmlsdhq(int32x4_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s8)))\n" "int8x16_t vqrdmlsdhq_s8(int8x16_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s8)))\n" "int8x16_t vqrdmlsdhq(int8x16_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s16)))\n" "int16x8_t vqrdmlsdhxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s16)))\n" "int16x8_t vqrdmlsdhxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s32)))\n" "int32x4_t vqrdmlsdhxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s32)))\n" "int32x4_t vqrdmlsdhxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s8)))\n" "int8x16_t vqrdmlsdhxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s8)))\n" "int8x16_t vqrdmlsdhxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s16)))\n" "int16x8_t vqrdmlsdhxq_s16(int16x8_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s16)))\n" "int16x8_t vqrdmlsdhxq(int16x8_t, int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s32)))\n" "int32x4_t vqrdmlsdhxq_s32(int32x4_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s32)))\n" "int32x4_t vqrdmlsdhxq(int32x4_t, int32x4_t, int32x4_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s8)))\n" "int8x16_t vqrdmlsdhxq_s8(int8x16_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s8)))\n" "int8x16_t vqrdmlsdhxq(int8x16_t, int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s16)))\n" "int16x8_t vqrdmulhq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s16)))\n" "int16x8_t vqrdmulhq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s32)))\n" "int32x4_t vqrdmulhq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s32)))\n" "int32x4_t vqrdmulhq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s8)))\n" "int8x16_t vqrdmulhq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s8)))\n" "int8x16_t vqrdmulhq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s16)))\n" "int16x8_t vqrdmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s16)))\n" "int16x8_t vqrdmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s32)))\n" "int32x4_t vqrdmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s32)))\n" "int32x4_t vqrdmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s8)))\n" "int8x16_t vqrdmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s8)))\n" "int8x16_t vqrdmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s16)))\n" "int16x8_t vqrdmulhq_n_s16(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s16)))\n" "int16x8_t vqrdmulhq(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s32)))\n" "int32x4_t vqrdmulhq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s32)))\n" "int32x4_t vqrdmulhq(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s8)))\n" "int8x16_t vqrdmulhq_n_s8(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s8)))\n" "int8x16_t vqrdmulhq(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s16)))\n" "int16x8_t vqrdmulhq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s16)))\n" "int16x8_t vqrdmulhq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s32)))\n" "int32x4_t vqrdmulhq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s32)))\n" "int32x4_t vqrdmulhq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s8)))\n" "int8x16_t vqrdmulhq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s8)))\n" "int8x16_t vqrdmulhq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s16)))\n" "int16x8_t vqrshlq_m_n_s16(int16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s16)))\n" "int16x8_t vqrshlq_m_n(int16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s32)))\n" "int32x4_t vqrshlq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s32)))\n" "int32x4_t vqrshlq_m_n(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s8)))\n" "int8x16_t vqrshlq_m_n_s8(int8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s8)))\n" "int8x16_t vqrshlq_m_n(int8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u16)))\n" "uint16x8_t vqrshlq_m_n_u16(uint16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u16)))\n" "uint16x8_t vqrshlq_m_n(uint16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u32)))\n" "uint32x4_t vqrshlq_m_n_u32(uint32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u32)))\n" "uint32x4_t vqrshlq_m_n(uint32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u8)))\n" "uint8x16_t vqrshlq_m_n_u8(uint8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u8)))\n" "uint8x16_t vqrshlq_m_n(uint8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s16)))\n" "int16x8_t vqrshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s16)))\n" "int16x8_t vqrshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s32)))\n" "int32x4_t vqrshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s32)))\n" "int32x4_t vqrshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s8)))\n" "int8x16_t vqrshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s8)))\n" "int8x16_t vqrshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u16)))\n" "uint16x8_t vqrshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u16)))\n" "uint16x8_t vqrshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u32)))\n" "uint32x4_t vqrshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u32)))\n" "uint32x4_t vqrshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u8)))\n" "uint8x16_t vqrshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u8)))\n" "uint8x16_t vqrshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s16)))\n" "int16x8_t vqrshlq_n_s16(int16x8_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s16)))\n" "int16x8_t vqrshlq(int16x8_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s32)))\n" "int32x4_t vqrshlq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s32)))\n" "int32x4_t vqrshlq(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s8)))\n" "int8x16_t vqrshlq_n_s8(int8x16_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s8)))\n" "int8x16_t vqrshlq(int8x16_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u16)))\n" "uint16x8_t vqrshlq_n_u16(uint16x8_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u16)))\n" "uint16x8_t vqrshlq(uint16x8_t, int32_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u32)))\n" "uint32x4_t vqrshlq_n_u32(uint32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u32)))\n" "uint32x4_t vqrshlq(uint32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u8)))\n" "uint8x16_t vqrshlq_n_u8(uint8x16_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u8)))\n" "uint8x16_t vqrshlq(uint8x16_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s16)))\n" "int16x8_t vqrshlq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s16)))\n" "int16x8_t vqrshlq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s32)))\n" "int32x4_t vqrshlq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s32)))\n" "int32x4_t vqrshlq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s8)))\n" "int8x16_t vqrshlq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s8)))\n" "int8x16_t vqrshlq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u16)))\n" "uint16x8_t vqrshlq_u16(uint16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u16)))\n" "uint16x8_t vqrshlq(uint16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u32)))\n" "uint32x4_t vqrshlq_u32(uint32x4_t, int32x4_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u32)))\n" "uint32x4_t vqrshlq(uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u8)))\n" "uint8x16_t vqrshlq_u8(uint8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u8)))\n" "uint8x16_t vqrshlq(uint8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_s16)))\n" "int8x16_t vqrshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_s16)))\n" "int8x16_t vqrshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_s32)))\n" "int16x8_t vqrshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_s32)))\n" "int16x8_t vqrshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_u16)))\n" "uint8x16_t vqrshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_u16)))\n" "uint8x16_t vqrshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_u32)))\n" "uint16x8_t vqrshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_u32)))\n" "uint16x8_t vqrshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_s16)))\n" "int8x16_t vqrshrnbq_n_s16(int8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_s16)))\n" "int8x16_t vqrshrnbq(int8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_s32)))\n" "int16x8_t vqrshrnbq_n_s32(int16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_s32)))\n" "int16x8_t vqrshrnbq(int16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_u16)))\n" "uint8x16_t vqrshrnbq_n_u16(uint8x16_t, uint16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_u16)))\n" "uint8x16_t vqrshrnbq(uint8x16_t, uint16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_u32)))\n" "uint16x8_t vqrshrnbq_n_u32(uint16x8_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_u32)))\n" "uint16x8_t vqrshrnbq(uint16x8_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_s16)))\n" "int8x16_t vqrshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_s16)))\n" "int8x16_t vqrshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_s32)))\n" "int16x8_t vqrshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_s32)))\n" "int16x8_t vqrshrntq_m(int16x8_t, 
int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_u16)))\n" "uint8x16_t vqrshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_u16)))\n" "uint8x16_t vqrshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_u32)))\n" "uint16x8_t vqrshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_u32)))\n" "uint16x8_t vqrshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_s16)))\n" "int8x16_t vqrshrntq_n_s16(int8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_s16)))\n" "int8x16_t vqrshrntq(int8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_s32)))\n" "int16x8_t vqrshrntq_n_s32(int16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_s32)))\n" "int16x8_t vqrshrntq(int16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_u16)))\n" "uint8x16_t vqrshrntq_n_u16(uint8x16_t, uint16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_u16)))\n" "uint8x16_t vqrshrntq(uint8x16_t, uint16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_u32)))\n" "uint16x8_t vqrshrntq_n_u32(uint16x8_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_u32)))\n" "uint16x8_t vqrshrntq(uint16x8_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_m_n_s16)))\n" "uint8x16_t vqrshrunbq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_m_n_s16)))\n" "uint8x16_t vqrshrunbq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_m_n_s32)))\n" "uint16x8_t vqrshrunbq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_m_n_s32)))\n" "uint16x8_t vqrshrunbq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_n_s16)))\n" "uint8x16_t vqrshrunbq_n_s16(uint8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_n_s16)))\n" "uint8x16_t vqrshrunbq(uint8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_n_s32)))\n" "uint16x8_t vqrshrunbq_n_s32(uint16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_n_s32)))\n" "uint16x8_t vqrshrunbq(uint16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_m_n_s16)))\n" "uint8x16_t vqrshruntq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_m_n_s16)))\n" "uint8x16_t vqrshruntq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_m_n_s32)))\n" "uint16x8_t vqrshruntq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_m_n_s32)))\n" "uint16x8_t vqrshruntq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_n_s16)))\n" "uint8x16_t vqrshruntq_n_s16(uint8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_n_s16)))\n" "uint8x16_t vqrshruntq(uint8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_n_s32)))\n" "uint16x8_t vqrshruntq_n_s32(uint16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_n_s32)))\n" "uint16x8_t vqrshruntq(uint16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s16)))\n" "int16x8_t vqshlq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s16)))\n" "int16x8_t vqshlq_m_n(int16x8_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s32)))\n" "int32x4_t vqshlq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s32)))\n" "int32x4_t vqshlq_m_n(int32x4_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s8)))\n" "int8x16_t vqshlq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s8)))\n" "int8x16_t vqshlq_m_n(int8x16_t, int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u16)))\n" "uint16x8_t vqshlq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u16)))\n" "uint16x8_t vqshlq_m_n(uint16x8_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u32)))\n" "uint32x4_t vqshlq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u32)))\n" "uint32x4_t vqshlq_m_n(uint32x4_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u8)))\n" "uint8x16_t vqshlq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u8)))\n" "uint8x16_t vqshlq_m_n(uint8x16_t, uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s16)))\n" "int16x8_t vqshlq_m_r_s16(int16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s16)))\n" "int16x8_t vqshlq_m_r(int16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s32)))\n" "int32x4_t vqshlq_m_r_s32(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s32)))\n" "int32x4_t vqshlq_m_r(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s8)))\n" "int8x16_t 
vqshlq_m_r_s8(int8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s8)))\n" "int8x16_t vqshlq_m_r(int8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u16)))\n" "uint16x8_t vqshlq_m_r_u16(uint16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u16)))\n" "uint16x8_t vqshlq_m_r(uint16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u32)))\n" "uint32x4_t vqshlq_m_r_u32(uint32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u32)))\n" "uint32x4_t vqshlq_m_r(uint32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u8)))\n" "uint8x16_t vqshlq_m_r_u8(uint8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u8)))\n" "uint8x16_t vqshlq_m_r(uint8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s16)))\n" "int16x8_t vqshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s16)))\n" "int16x8_t vqshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s32)))\n" "int32x4_t vqshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s32)))\n" "int32x4_t vqshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s8)))\n" "int8x16_t vqshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s8)))\n" "int8x16_t vqshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u16)))\n" "uint16x8_t vqshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u16)))\n" "uint16x8_t vqshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u32)))\n" "uint32x4_t vqshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u32)))\n" "uint32x4_t vqshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u8)))\n" "uint8x16_t vqshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u8)))\n" "uint8x16_t vqshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s16)))\n" "int16x8_t vqshlq_n_s16(int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s16)))\n" "int16x8_t vqshlq_n(int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s32)))\n" "int32x4_t vqshlq_n_s32(int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s32)))\n" "int32x4_t 
vqshlq_n(int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s8)))\n" "int8x16_t vqshlq_n_s8(int8x16_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s8)))\n" "int8x16_t vqshlq_n(int8x16_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u16)))\n" "uint16x8_t vqshlq_n_u16(uint16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u16)))\n" "uint16x8_t vqshlq_n(uint16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u32)))\n" "uint32x4_t vqshlq_n_u32(uint32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u32)))\n" "uint32x4_t vqshlq_n(uint32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u8)))\n" "uint8x16_t vqshlq_n_u8(uint8x16_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u8)))\n" "uint8x16_t vqshlq_n(uint8x16_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s16)))\n" "int16x8_t vqshlq_r_s16(int16x8_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s16)))\n" "int16x8_t vqshlq_r(int16x8_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s32)))\n" "int32x4_t vqshlq_r_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s32)))\n" "int32x4_t vqshlq_r(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s8)))\n" "int8x16_t vqshlq_r_s8(int8x16_t, int32_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s8)))\n" "int8x16_t vqshlq_r(int8x16_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u16)))\n" "uint16x8_t vqshlq_r_u16(uint16x8_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u16)))\n" "uint16x8_t vqshlq_r(uint16x8_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u32)))\n" "uint32x4_t vqshlq_r_u32(uint32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u32)))\n" "uint32x4_t vqshlq_r(uint32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u8)))\n" "uint8x16_t vqshlq_r_u8(uint8x16_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u8)))\n" "uint8x16_t vqshlq_r(uint8x16_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s16)))\n" "int16x8_t vqshlq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s16)))\n" "int16x8_t vqshlq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s32)))\n" "int32x4_t vqshlq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s32)))\n" "int32x4_t vqshlq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s8)))\n" "int8x16_t vqshlq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s8)))\n" "int8x16_t vqshlq(int8x16_t, int8x16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u16)))\n" "uint16x8_t vqshlq_u16(uint16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u16)))\n" "uint16x8_t vqshlq(uint16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u32)))\n" "uint32x4_t vqshlq_u32(uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u32)))\n" "uint32x4_t vqshlq(uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u8)))\n" "uint8x16_t vqshlq_u8(uint8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u8)))\n" "uint8x16_t vqshlq(uint8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s16)))\n" "uint16x8_t vqshluq_m_n_s16(uint16x8_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s16)))\n" "uint16x8_t vqshluq_m(uint16x8_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s32)))\n" "uint32x4_t vqshluq_m_n_s32(uint32x4_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s32)))\n" "uint32x4_t vqshluq_m(uint32x4_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s8)))\n" "uint8x16_t vqshluq_m_n_s8(uint8x16_t, int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s8)))\n" "uint8x16_t vqshluq_m(uint8x16_t, int8x16_t, int, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s16)))\n" "uint16x8_t vqshluq_n_s16(int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s16)))\n" "uint16x8_t vqshluq(int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s32)))\n" "uint32x4_t vqshluq_n_s32(int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s32)))\n" "uint32x4_t vqshluq(int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s8)))\n" "uint8x16_t vqshluq_n_s8(int8x16_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s8)))\n" "uint8x16_t vqshluq(int8x16_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_s16)))\n" "int8x16_t vqshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_s16)))\n" "int8x16_t vqshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_s32)))\n" "int16x8_t vqshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_s32)))\n" "int16x8_t vqshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_u16)))\n" "uint8x16_t vqshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_u16)))\n" "uint8x16_t vqshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_u32)))\n" "uint16x8_t vqshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_u32)))\n" "uint16x8_t vqshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_s16)))\n" "int8x16_t vqshrnbq_n_s16(int8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_s16)))\n" "int8x16_t vqshrnbq(int8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_s32)))\n" "int16x8_t vqshrnbq_n_s32(int16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_s32)))\n" "int16x8_t vqshrnbq(int16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_u16)))\n" "uint8x16_t vqshrnbq_n_u16(uint8x16_t, uint16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_u16)))\n" "uint8x16_t vqshrnbq(uint8x16_t, uint16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_u32)))\n" "uint16x8_t vqshrnbq_n_u32(uint16x8_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_u32)))\n" "uint16x8_t vqshrnbq(uint16x8_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_s16)))\n" "int8x16_t vqshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_s16)))\n" "int8x16_t vqshrntq_m(int8x16_t, int16x8_t, int, 
mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_s32)))\n" "int16x8_t vqshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_s32)))\n" "int16x8_t vqshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_u16)))\n" "uint8x16_t vqshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_u16)))\n" "uint8x16_t vqshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_u32)))\n" "uint16x8_t vqshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_u32)))\n" "uint16x8_t vqshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_s16)))\n" "int8x16_t vqshrntq_n_s16(int8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_s16)))\n" "int8x16_t vqshrntq(int8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_s32)))\n" "int16x8_t vqshrntq_n_s32(int16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_s32)))\n" "int16x8_t vqshrntq(int16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_u16)))\n" "uint8x16_t vqshrntq_n_u16(uint8x16_t, uint16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_u16)))\n" "uint8x16_t vqshrntq(uint8x16_t, uint16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_u32)))\n" "uint16x8_t vqshrntq_n_u32(uint16x8_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_u32)))\n" "uint16x8_t vqshrntq(uint16x8_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_m_n_s16)))\n" "uint8x16_t vqshrunbq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_m_n_s16)))\n" "uint8x16_t vqshrunbq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_m_n_s32)))\n" "uint16x8_t vqshrunbq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_m_n_s32)))\n" "uint16x8_t vqshrunbq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_n_s16)))\n" "uint8x16_t vqshrunbq_n_s16(uint8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_n_s16)))\n" "uint8x16_t vqshrunbq(uint8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_n_s32)))\n" "uint16x8_t vqshrunbq_n_s32(uint16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_n_s32)))\n" "uint16x8_t vqshrunbq(uint16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_m_n_s16)))\n" "uint8x16_t vqshruntq_m_n_s16(uint8x16_t, 
int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_m_n_s16)))\n" "uint8x16_t vqshruntq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_m_n_s32)))\n" "uint16x8_t vqshruntq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_m_n_s32)))\n" "uint16x8_t vqshruntq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_n_s16)))\n" "uint8x16_t vqshruntq_n_s16(uint8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_n_s16)))\n" "uint8x16_t vqshruntq(uint8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_n_s32)))\n" "uint16x8_t vqshruntq_n_s32(uint16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_n_s32)))\n" "uint16x8_t vqshruntq(uint16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s16)))\n" "int16x8_t vqsubq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s16)))\n" "int16x8_t vqsubq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s32)))\n" "int32x4_t vqsubq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s32)))\n" "int32x4_t vqsubq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s8)))\n" "int8x16_t vqsubq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s8)))\n" "int8x16_t vqsubq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u16)))\n" "uint16x8_t vqsubq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u16)))\n" "uint16x8_t vqsubq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u32)))\n" "uint32x4_t vqsubq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u32)))\n" "uint32x4_t vqsubq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u8)))\n" "uint8x16_t vqsubq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u8)))\n" "uint8x16_t vqsubq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s16)))\n" "int16x8_t vqsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s16)))\n" "int16x8_t vqsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s32)))\n" "int32x4_t vqsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s32)))\n" "int32x4_t vqsubq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s8)))\n" "int8x16_t vqsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s8)))\n" "int8x16_t vqsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u16)))\n" "uint16x8_t vqsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u16)))\n" "uint16x8_t vqsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u32)))\n" "uint32x4_t vqsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u32)))\n" "uint32x4_t vqsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u8)))\n" "uint8x16_t vqsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u8)))\n" "uint8x16_t vqsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s16)))\n" "int16x8_t vqsubq_n_s16(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s16)))\n" "int16x8_t vqsubq(int16x8_t, int16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s32)))\n" "int32x4_t vqsubq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s32)))\n" "int32x4_t vqsubq(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s8)))\n" "int8x16_t vqsubq_n_s8(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s8)))\n" "int8x16_t vqsubq(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u16)))\n" "uint16x8_t vqsubq_n_u16(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u16)))\n" "uint16x8_t vqsubq(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u32)))\n" "uint32x4_t vqsubq_n_u32(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u32)))\n" "uint32x4_t vqsubq(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u8)))\n" "uint8x16_t vqsubq_n_u8(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u8)))\n" "uint8x16_t vqsubq(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s16)))\n" "int16x8_t vqsubq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s16)))\n" "int16x8_t vqsubq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s32)))\n" "int32x4_t vqsubq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s32)))\n" "int32x4_t vqsubq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s8)))\n" "int8x16_t vqsubq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s8)))\n" "int8x16_t vqsubq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u16)))\n" "uint16x8_t vqsubq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u16)))\n" "uint16x8_t vqsubq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u32)))\n" "uint32x4_t vqsubq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u32)))\n" "uint32x4_t vqsubq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u8)))\n" "uint8x16_t vqsubq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u8)))\n" "uint8x16_t vqsubq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s32)))\n" "int16x8_t vreinterpretq_s16_s32(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s32)))\n" "int16x8_t vreinterpretq_s16(int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s64)))\n" "int16x8_t vreinterpretq_s16_s64(int64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s64)))\n" "int16x8_t vreinterpretq_s16(int64x2_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s8)))\n" "int16x8_t vreinterpretq_s16_s8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s8)))\n" "int16x8_t vreinterpretq_s16(int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u16)))\n" "int16x8_t vreinterpretq_s16_u16(uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u16)))\n" "int16x8_t vreinterpretq_s16(uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u32)))\n" "int16x8_t vreinterpretq_s16_u32(uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u32)))\n" "int16x8_t vreinterpretq_s16(uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u64)))\n" "int16x8_t vreinterpretq_s16_u64(uint64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u64)))\n" "int16x8_t vreinterpretq_s16(uint64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u8)))\n" "int16x8_t vreinterpretq_s16_u8(uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u8)))\n" "int16x8_t vreinterpretq_s16(uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s16)))\n" "int32x4_t vreinterpretq_s32_s16(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s16)))\n" "int32x4_t vreinterpretq_s32(int16x8_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s64)))\n" "int32x4_t vreinterpretq_s32_s64(int64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s64)))\n" "int32x4_t vreinterpretq_s32(int64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s8)))\n" "int32x4_t vreinterpretq_s32_s8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s8)))\n" "int32x4_t vreinterpretq_s32(int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u16)))\n" "int32x4_t vreinterpretq_s32_u16(uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u16)))\n" "int32x4_t vreinterpretq_s32(uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u32)))\n" "int32x4_t vreinterpretq_s32_u32(uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u32)))\n" "int32x4_t vreinterpretq_s32(uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u64)))\n" "int32x4_t vreinterpretq_s32_u64(uint64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u64)))\n" "int32x4_t vreinterpretq_s32(uint64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u8)))\n" "int32x4_t vreinterpretq_s32_u8(uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u8)))\n" "int32x4_t vreinterpretq_s32(uint8x16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s16)))\n" "int64x2_t vreinterpretq_s64_s16(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s16)))\n" "int64x2_t vreinterpretq_s64(int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s32)))\n" "int64x2_t vreinterpretq_s64_s32(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s32)))\n" "int64x2_t vreinterpretq_s64(int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s8)))\n" "int64x2_t vreinterpretq_s64_s8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s8)))\n" "int64x2_t vreinterpretq_s64(int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u16)))\n" "int64x2_t vreinterpretq_s64_u16(uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u16)))\n" "int64x2_t vreinterpretq_s64(uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u32)))\n" "int64x2_t vreinterpretq_s64_u32(uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u32)))\n" "int64x2_t vreinterpretq_s64(uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u64)))\n" "int64x2_t vreinterpretq_s64_u64(uint64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u64)))\n" "int64x2_t vreinterpretq_s64(uint64x2_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u8)))\n" "int64x2_t vreinterpretq_s64_u8(uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u8)))\n" "int64x2_t vreinterpretq_s64(uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s16)))\n" "int8x16_t vreinterpretq_s8_s16(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s16)))\n" "int8x16_t vreinterpretq_s8(int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s32)))\n" "int8x16_t vreinterpretq_s8_s32(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s32)))\n" "int8x16_t vreinterpretq_s8(int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s64)))\n" "int8x16_t vreinterpretq_s8_s64(int64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s64)))\n" "int8x16_t vreinterpretq_s8(int64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u16)))\n" "int8x16_t vreinterpretq_s8_u16(uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u16)))\n" "int8x16_t vreinterpretq_s8(uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u32)))\n" "int8x16_t vreinterpretq_s8_u32(uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u32)))\n" "int8x16_t vreinterpretq_s8(uint32x4_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u64)))\n" "int8x16_t vreinterpretq_s8_u64(uint64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u64)))\n" "int8x16_t vreinterpretq_s8(uint64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u8)))\n" "int8x16_t vreinterpretq_s8_u8(uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u8)))\n" "int8x16_t vreinterpretq_s8(uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s16)))\n" "uint16x8_t vreinterpretq_u16_s16(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s16)))\n" "uint16x8_t vreinterpretq_u16(int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s32)))\n" "uint16x8_t vreinterpretq_u16_s32(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s32)))\n" "uint16x8_t vreinterpretq_u16(int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s64)))\n" "uint16x8_t vreinterpretq_u16_s64(int64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s64)))\n" "uint16x8_t vreinterpretq_u16(int64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s8)))\n" "uint16x8_t vreinterpretq_u16_s8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s8)))\n" "uint16x8_t vreinterpretq_u16(int8x16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u32)))\n" "uint16x8_t vreinterpretq_u16_u32(uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u32)))\n" "uint16x8_t vreinterpretq_u16(uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u64)))\n" "uint16x8_t vreinterpretq_u16_u64(uint64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u64)))\n" "uint16x8_t vreinterpretq_u16(uint64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u8)))\n" "uint16x8_t vreinterpretq_u16_u8(uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u8)))\n" "uint16x8_t vreinterpretq_u16(uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s16)))\n" "uint32x4_t vreinterpretq_u32_s16(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s16)))\n" "uint32x4_t vreinterpretq_u32(int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s32)))\n" "uint32x4_t vreinterpretq_u32_s32(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s32)))\n" "uint32x4_t vreinterpretq_u32(int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s64)))\n" "uint32x4_t vreinterpretq_u32_s64(int64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s64)))\n" "uint32x4_t vreinterpretq_u32(int64x2_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s8)))\n" "uint32x4_t vreinterpretq_u32_s8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s8)))\n" "uint32x4_t vreinterpretq_u32(int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u16)))\n" "uint32x4_t vreinterpretq_u32_u16(uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u16)))\n" "uint32x4_t vreinterpretq_u32(uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u64)))\n" "uint32x4_t vreinterpretq_u32_u64(uint64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u64)))\n" "uint32x4_t vreinterpretq_u32(uint64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u8)))\n" "uint32x4_t vreinterpretq_u32_u8(uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u8)))\n" "uint32x4_t vreinterpretq_u32(uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s16)))\n" "uint64x2_t vreinterpretq_u64_s16(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s16)))\n" "uint64x2_t vreinterpretq_u64(int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s32)))\n" "uint64x2_t vreinterpretq_u64_s32(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s32)))\n" "uint64x2_t vreinterpretq_u64(int32x4_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s64)))\n" "uint64x2_t vreinterpretq_u64_s64(int64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s64)))\n" "uint64x2_t vreinterpretq_u64(int64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s8)))\n" "uint64x2_t vreinterpretq_u64_s8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s8)))\n" "uint64x2_t vreinterpretq_u64(int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u16)))\n" "uint64x2_t vreinterpretq_u64_u16(uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u16)))\n" "uint64x2_t vreinterpretq_u64(uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u32)))\n" "uint64x2_t vreinterpretq_u64_u32(uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u32)))\n" "uint64x2_t vreinterpretq_u64(uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u8)))\n" "uint64x2_t vreinterpretq_u64_u8(uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u8)))\n" "uint64x2_t vreinterpretq_u64(uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s16)))\n" "uint8x16_t vreinterpretq_u8_s16(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s16)))\n" "uint8x16_t vreinterpretq_u8(int16x8_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s32)))\n" "uint8x16_t vreinterpretq_u8_s32(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s32)))\n" "uint8x16_t vreinterpretq_u8(int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s64)))\n" "uint8x16_t vreinterpretq_u8_s64(int64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s64)))\n" "uint8x16_t vreinterpretq_u8(int64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s8)))\n" "uint8x16_t vreinterpretq_u8_s8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s8)))\n" "uint8x16_t vreinterpretq_u8(int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u16)))\n" "uint8x16_t vreinterpretq_u8_u16(uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u16)))\n" "uint8x16_t vreinterpretq_u8(uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u32)))\n" "uint8x16_t vreinterpretq_u8_u32(uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u32)))\n" "uint8x16_t vreinterpretq_u8(uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u64)))\n" "uint8x16_t vreinterpretq_u8_u64(uint64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u64)))\n" "uint8x16_t vreinterpretq_u8(uint64x2_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_m_s8)))\n" "int8x16_t vrev16q_m_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_m_s8)))\n" "int8x16_t vrev16q_m(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_m_u8)))\n" "uint8x16_t vrev16q_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_m_u8)))\n" "uint8x16_t vrev16q_m(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_s8)))\n" "int8x16_t vrev16q_s8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_s8)))\n" "int8x16_t vrev16q(int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_u8)))\n" "uint8x16_t vrev16q_u8(uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_u8)))\n" "uint8x16_t vrev16q(uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_x_s8)))\n" "int8x16_t vrev16q_x_s8(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_x_s8)))\n" "int8x16_t vrev16q_x(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_x_u8)))\n" "uint8x16_t vrev16q_x_u8(uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_x_u8)))\n" "uint8x16_t vrev16q_x(uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_s16)))\n" "int16x8_t vrev32q_m_s16(int16x8_t, 
int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_s16)))\n" "int16x8_t vrev32q_m(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_s8)))\n" "int8x16_t vrev32q_m_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_s8)))\n" "int8x16_t vrev32q_m(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_u16)))\n" "uint16x8_t vrev32q_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_u16)))\n" "uint16x8_t vrev32q_m(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_u8)))\n" "uint8x16_t vrev32q_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_u8)))\n" "uint8x16_t vrev32q_m(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_s16)))\n" "int16x8_t vrev32q_s16(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_s16)))\n" "int16x8_t vrev32q(int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_s8)))\n" "int8x16_t vrev32q_s8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_s8)))\n" "int8x16_t vrev32q(int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_u16)))\n" "uint16x8_t vrev32q_u16(uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_u16)))\n" "uint16x8_t vrev32q(uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_u8)))\n" "uint8x16_t vrev32q_u8(uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_u8)))\n" "uint8x16_t vrev32q(uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_s16)))\n" "int16x8_t vrev32q_x_s16(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_s16)))\n" "int16x8_t vrev32q_x(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_s8)))\n" "int8x16_t vrev32q_x_s8(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_s8)))\n" "int8x16_t vrev32q_x(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_u16)))\n" "uint16x8_t vrev32q_x_u16(uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_u16)))\n" "uint16x8_t vrev32q_x(uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_u8)))\n" "uint8x16_t vrev32q_x_u8(uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_u8)))\n" "uint8x16_t vrev32q_x(uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s16)))\n" "int16x8_t vrev64q_m_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s16)))\n" "int16x8_t vrev64q_m(int16x8_t, int16x8_t, mve_pred16_t);\n" 
"static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s32)))\n" "int32x4_t vrev64q_m_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s32)))\n" "int32x4_t vrev64q_m(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s8)))\n" "int8x16_t vrev64q_m_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s8)))\n" "int8x16_t vrev64q_m(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u16)))\n" "uint16x8_t vrev64q_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u16)))\n" "uint16x8_t vrev64q_m(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u32)))\n" "uint32x4_t vrev64q_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u32)))\n" "uint32x4_t vrev64q_m(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u8)))\n" "uint8x16_t vrev64q_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u8)))\n" "uint8x16_t vrev64q_m(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s16)))\n" "int16x8_t vrev64q_s16(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s16)))\n" "int16x8_t vrev64q(int16x8_t);\n" 
"static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s32)))\n" "int32x4_t vrev64q_s32(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s32)))\n" "int32x4_t vrev64q(int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s8)))\n" "int8x16_t vrev64q_s8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s8)))\n" "int8x16_t vrev64q(int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u16)))\n" "uint16x8_t vrev64q_u16(uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u16)))\n" "uint16x8_t vrev64q(uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u32)))\n" "uint32x4_t vrev64q_u32(uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u32)))\n" "uint32x4_t vrev64q(uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u8)))\n" "uint8x16_t vrev64q_u8(uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u8)))\n" "uint8x16_t vrev64q(uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s16)))\n" "int16x8_t vrev64q_x_s16(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s16)))\n" "int16x8_t vrev64q_x(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s32)))\n" "int32x4_t vrev64q_x_s32(int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s32)))\n" "int32x4_t vrev64q_x(int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s8)))\n" "int8x16_t vrev64q_x_s8(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s8)))\n" "int8x16_t vrev64q_x(int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u16)))\n" "uint16x8_t vrev64q_x_u16(uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u16)))\n" "uint16x8_t vrev64q_x(uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u32)))\n" "uint32x4_t vrev64q_x_u32(uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u32)))\n" "uint32x4_t vrev64q_x(uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u8)))\n" "uint8x16_t vrev64q_x_u8(uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u8)))\n" "uint8x16_t vrev64q_x(uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s16)))\n" "int16x8_t vrhaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s16)))\n" "int16x8_t vrhaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s32)))\n" "int32x4_t vrhaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s32)))\n" "int32x4_t vrhaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s8)))\n" "int8x16_t vrhaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s8)))\n" "int8x16_t vrhaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u16)))\n" "uint16x8_t vrhaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u16)))\n" "uint16x8_t vrhaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u32)))\n" "uint32x4_t vrhaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u32)))\n" "uint32x4_t vrhaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u8)))\n" "uint8x16_t vrhaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u8)))\n" "uint8x16_t vrhaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s16)))\n" "int16x8_t vrhaddq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s16)))\n" "int16x8_t vrhaddq(int16x8_t, int16x8_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s32)))\n" "int32x4_t vrhaddq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s32)))\n" "int32x4_t vrhaddq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s8)))\n" "int8x16_t vrhaddq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s8)))\n" "int8x16_t vrhaddq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u16)))\n" "uint16x8_t vrhaddq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u16)))\n" "uint16x8_t vrhaddq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u32)))\n" "uint32x4_t vrhaddq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u32)))\n" "uint32x4_t vrhaddq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u8)))\n" "uint8x16_t vrhaddq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u8)))\n" "uint8x16_t vrhaddq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s16)))\n" "int16x8_t vrhaddq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s16)))\n" "int16x8_t vrhaddq_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s32)))\n" "int32x4_t vrhaddq_x_s32(int32x4_t, int32x4_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s32)))\n" "int32x4_t vrhaddq_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s8)))\n" "int8x16_t vrhaddq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s8)))\n" "int8x16_t vrhaddq_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u16)))\n" "uint16x8_t vrhaddq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u16)))\n" "uint16x8_t vrhaddq_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u32)))\n" "uint32x4_t vrhaddq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u32)))\n" "uint32x4_t vrhaddq_x(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u8)))\n" "uint8x16_t vrhaddq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u8)))\n" "uint8x16_t vrhaddq_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_p_s32)))\n" "int64_t vrmlaldavhaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_p_s32)))\n" "int64_t vrmlaldavhaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_p_u32)))\n" "uint64_t vrmlaldavhaq_p_u32(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_p_u32)))\n" "uint64_t vrmlaldavhaq_p(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_s32)))\n" "int64_t vrmlaldavhaq_s32(int64_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_s32)))\n" "int64_t vrmlaldavhaq(int64_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_u32)))\n" "uint64_t vrmlaldavhaq_u32(uint64_t, uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_u32)))\n" "uint64_t vrmlaldavhaq(uint64_t, uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaxq_p_s32)))\n" "int64_t vrmlaldavhaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaxq_p_s32)))\n" "int64_t vrmlaldavhaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaxq_s32)))\n" "int64_t vrmlaldavhaxq_s32(int64_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaxq_s32)))\n" "int64_t vrmlaldavhaxq(int64_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_p_s32)))\n" "int64_t vrmlaldavhq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_p_s32)))\n" "int64_t vrmlaldavhq_p(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_p_u32)))\n" "uint64_t vrmlaldavhq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_p_u32)))\n" "uint64_t vrmlaldavhq_p(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_s32)))\n" "int64_t vrmlaldavhq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_s32)))\n" "int64_t vrmlaldavhq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_u32)))\n" "uint64_t vrmlaldavhq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_u32)))\n" "uint64_t vrmlaldavhq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhxq_p_s32)))\n" "int64_t vrmlaldavhxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhxq_p_s32)))\n" "int64_t vrmlaldavhxq_p(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhxq_s32)))\n" "int64_t vrmlaldavhxq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhxq_s32)))\n" "int64_t vrmlaldavhxq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaq_p_s32)))\n" "int64_t vrmlsldavhaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaq_p_s32)))\n" "int64_t vrmlsldavhaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaq_s32)))\n" "int64_t vrmlsldavhaq_s32(int64_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaq_s32)))\n" "int64_t vrmlsldavhaq(int64_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaxq_p_s32)))\n" "int64_t vrmlsldavhaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaxq_p_s32)))\n" "int64_t vrmlsldavhaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaxq_s32)))\n" "int64_t vrmlsldavhaxq_s32(int64_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaxq_s32)))\n" "int64_t vrmlsldavhaxq(int64_t, int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhq_p_s32)))\n" "int64_t vrmlsldavhq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhq_p_s32)))\n" "int64_t vrmlsldavhq_p(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhq_s32)))\n" "int64_t vrmlsldavhq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhq_s32)))\n" "int64_t vrmlsldavhq(int32x4_t, int32x4_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhxq_p_s32)))\n" "int64_t vrmlsldavhxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhxq_p_s32)))\n" "int64_t vrmlsldavhxq_p(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhxq_s32)))\n" "int64_t vrmlsldavhxq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhxq_s32)))\n" "int64_t vrmlsldavhxq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s16)))\n" "int16x8_t vrmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s16)))\n" "int16x8_t vrmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s32)))\n" "int32x4_t vrmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s32)))\n" "int32x4_t vrmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s8)))\n" "int8x16_t vrmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s8)))\n" "int8x16_t vrmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u16)))\n" "uint16x8_t vrmulhq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u16)))\n" "uint16x8_t vrmulhq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u32)))\n" "uint32x4_t vrmulhq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u32)))\n" "uint32x4_t vrmulhq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u8)))\n" "uint8x16_t vrmulhq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u8)))\n" "uint8x16_t vrmulhq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s16)))\n" "int16x8_t vrmulhq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s16)))\n" "int16x8_t vrmulhq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s32)))\n" "int32x4_t vrmulhq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s32)))\n" "int32x4_t vrmulhq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s8)))\n" "int8x16_t vrmulhq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s8)))\n" "int8x16_t vrmulhq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u16)))\n" "uint16x8_t vrmulhq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u16)))\n" "uint16x8_t vrmulhq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u32)))\n" "uint32x4_t vrmulhq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u32)))\n" "uint32x4_t vrmulhq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u8)))\n" "uint8x16_t vrmulhq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u8)))\n" "uint8x16_t vrmulhq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s16)))\n" "int16x8_t vrmulhq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s16)))\n" "int16x8_t vrmulhq_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s32)))\n" "int32x4_t vrmulhq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s32)))\n" "int32x4_t vrmulhq_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s8)))\n" "int8x16_t vrmulhq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s8)))\n" "int8x16_t vrmulhq_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u16)))\n" "uint16x8_t vrmulhq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u16)))\n" "uint16x8_t vrmulhq_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u32)))\n" "uint32x4_t vrmulhq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u32)))\n" "uint32x4_t vrmulhq_x(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u8)))\n" "uint8x16_t vrmulhq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u8)))\n" "uint8x16_t vrmulhq_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s16)))\n" "int16x8_t vrshlq_m_n_s16(int16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s16)))\n" "int16x8_t vrshlq_m_n(int16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s32)))\n" "int32x4_t vrshlq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s32)))\n" "int32x4_t vrshlq_m_n(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s8)))\n" "int8x16_t vrshlq_m_n_s8(int8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s8)))\n" "int8x16_t vrshlq_m_n(int8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u16)))\n" "uint16x8_t vrshlq_m_n_u16(uint16x8_t, int32_t, mve_pred16_t);\n" 
"static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u16)))\n" "uint16x8_t vrshlq_m_n(uint16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u32)))\n" "uint32x4_t vrshlq_m_n_u32(uint32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u32)))\n" "uint32x4_t vrshlq_m_n(uint32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u8)))\n" "uint8x16_t vrshlq_m_n_u8(uint8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u8)))\n" "uint8x16_t vrshlq_m_n(uint8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s16)))\n" "int16x8_t vrshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s16)))\n" "int16x8_t vrshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s32)))\n" "int32x4_t vrshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s32)))\n" "int32x4_t vrshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s8)))\n" "int8x16_t vrshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s8)))\n" "int8x16_t vrshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u16)))\n" "uint16x8_t vrshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u16)))\n" "uint16x8_t vrshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u32)))\n" "uint32x4_t vrshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u32)))\n" "uint32x4_t vrshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u8)))\n" "uint8x16_t vrshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u8)))\n" "uint8x16_t vrshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s16)))\n" "int16x8_t vrshlq_n_s16(int16x8_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s16)))\n" "int16x8_t vrshlq(int16x8_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s32)))\n" "int32x4_t vrshlq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s32)))\n" "int32x4_t vrshlq(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s8)))\n" "int8x16_t vrshlq_n_s8(int8x16_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s8)))\n" "int8x16_t vrshlq(int8x16_t, int32_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u16)))\n" "uint16x8_t vrshlq_n_u16(uint16x8_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u16)))\n" "uint16x8_t vrshlq(uint16x8_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u32)))\n" "uint32x4_t vrshlq_n_u32(uint32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u32)))\n" "uint32x4_t vrshlq(uint32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u8)))\n" "uint8x16_t vrshlq_n_u8(uint8x16_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u8)))\n" "uint8x16_t vrshlq(uint8x16_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s16)))\n" "int16x8_t vrshlq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s16)))\n" "int16x8_t vrshlq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s32)))\n" "int32x4_t vrshlq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s32)))\n" "int32x4_t vrshlq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s8)))\n" "int8x16_t vrshlq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s8)))\n" "int8x16_t vrshlq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u16)))\n" "uint16x8_t vrshlq_u16(uint16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u16)))\n" "uint16x8_t vrshlq(uint16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u32)))\n" "uint32x4_t vrshlq_u32(uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u32)))\n" "uint32x4_t vrshlq(uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u8)))\n" "uint8x16_t vrshlq_u8(uint8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u8)))\n" "uint8x16_t vrshlq(uint8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s16)))\n" "int16x8_t vrshlq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s16)))\n" "int16x8_t vrshlq_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s32)))\n" "int32x4_t vrshlq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s32)))\n" "int32x4_t vrshlq_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s8)))\n" "int8x16_t vrshlq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s8)))\n" "int8x16_t vrshlq_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u16)))\n" "uint16x8_t vrshlq_x_u16(uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u16)))\n" 
"uint16x8_t vrshlq_x(uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u32)))\n" "uint32x4_t vrshlq_x_u32(uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u32)))\n" "uint32x4_t vrshlq_x(uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u8)))\n" "uint8x16_t vrshlq_x_u8(uint8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u8)))\n" "uint8x16_t vrshlq_x(uint8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_s16)))\n" "int8x16_t vrshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_s16)))\n" "int8x16_t vrshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_s32)))\n" "int16x8_t vrshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_s32)))\n" "int16x8_t vrshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_u16)))\n" "uint8x16_t vrshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_u16)))\n" "uint8x16_t vrshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_u32)))\n" "uint16x8_t vrshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, 
mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_u32)))\n" "uint16x8_t vrshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_s16)))\n" "int8x16_t vrshrnbq_n_s16(int8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_s16)))\n" "int8x16_t vrshrnbq(int8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_s32)))\n" "int16x8_t vrshrnbq_n_s32(int16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_s32)))\n" "int16x8_t vrshrnbq(int16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_u16)))\n" "uint8x16_t vrshrnbq_n_u16(uint8x16_t, uint16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_u16)))\n" "uint8x16_t vrshrnbq(uint8x16_t, uint16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_u32)))\n" "uint16x8_t vrshrnbq_n_u32(uint16x8_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_u32)))\n" "uint16x8_t vrshrnbq(uint16x8_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_s16)))\n" "int8x16_t vrshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_s16)))\n" "int8x16_t vrshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_s32)))\n" "int16x8_t 
vrshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_s32)))\n" "int16x8_t vrshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_u16)))\n" "uint8x16_t vrshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_u16)))\n" "uint8x16_t vrshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_u32)))\n" "uint16x8_t vrshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_u32)))\n" "uint16x8_t vrshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_s16)))\n" "int8x16_t vrshrntq_n_s16(int8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_s16)))\n" "int8x16_t vrshrntq(int8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_s32)))\n" "int16x8_t vrshrntq_n_s32(int16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_s32)))\n" "int16x8_t vrshrntq(int16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_u16)))\n" "uint8x16_t vrshrntq_n_u16(uint8x16_t, uint16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_u16)))\n" "uint8x16_t vrshrntq(uint8x16_t, uint16x8_t, int);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_u32)))\n" "uint16x8_t vrshrntq_n_u32(uint16x8_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_u32)))\n" "uint16x8_t vrshrntq(uint16x8_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s16)))\n" "int16x8_t vrshrq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s16)))\n" "int16x8_t vrshrq_m(int16x8_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s32)))\n" "int32x4_t vrshrq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s32)))\n" "int32x4_t vrshrq_m(int32x4_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s8)))\n" "int8x16_t vrshrq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s8)))\n" "int8x16_t vrshrq_m(int8x16_t, int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u16)))\n" "uint16x8_t vrshrq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u16)))\n" "uint16x8_t vrshrq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u32)))\n" "uint32x4_t vrshrq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u32)))\n" "uint32x4_t vrshrq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u8)))\n" "uint8x16_t vrshrq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u8)))\n" "uint8x16_t vrshrq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s16)))\n" "int16x8_t vrshrq_n_s16(int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s16)))\n" "int16x8_t vrshrq(int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s32)))\n" "int32x4_t vrshrq_n_s32(int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s32)))\n" "int32x4_t vrshrq(int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s8)))\n" "int8x16_t vrshrq_n_s8(int8x16_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s8)))\n" "int8x16_t vrshrq(int8x16_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u16)))\n" "uint16x8_t vrshrq_n_u16(uint16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u16)))\n" "uint16x8_t vrshrq(uint16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u32)))\n" "uint32x4_t vrshrq_n_u32(uint32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u32)))\n" "uint32x4_t vrshrq(uint32x4_t, int);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u8)))\n" "uint8x16_t vrshrq_n_u8(uint8x16_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u8)))\n" "uint8x16_t vrshrq(uint8x16_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s16)))\n" "int16x8_t vrshrq_x_n_s16(int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s16)))\n" "int16x8_t vrshrq_x(int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s32)))\n" "int32x4_t vrshrq_x_n_s32(int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s32)))\n" "int32x4_t vrshrq_x(int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s8)))\n" "int8x16_t vrshrq_x_n_s8(int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s8)))\n" "int8x16_t vrshrq_x(int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u16)))\n" "uint16x8_t vrshrq_x_n_u16(uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u16)))\n" "uint16x8_t vrshrq_x(uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u32)))\n" "uint32x4_t vrshrq_x_n_u32(uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u32)))\n" "uint32x4_t vrshrq_x(uint32x4_t, int, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u8)))\n" "uint8x16_t vrshrq_x_n_u8(uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u8)))\n" "uint8x16_t vrshrq_x(uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_m_s32)))\n" "int32x4_t vsbciq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_m_s32)))\n" "int32x4_t vsbciq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_m_u32)))\n" "uint32x4_t vsbciq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_m_u32)))\n" "uint32x4_t vsbciq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_s32)))\n" "int32x4_t vsbciq_s32(int32x4_t, int32x4_t, unsigned *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_s32)))\n" "int32x4_t vsbciq(int32x4_t, int32x4_t, unsigned *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_u32)))\n" "uint32x4_t vsbciq_u32(uint32x4_t, uint32x4_t, unsigned *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_u32)))\n" "uint32x4_t vsbciq(uint32x4_t, uint32x4_t, unsigned *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_m_s32)))\n" "int32x4_t vsbcq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_m_s32)))\n" "int32x4_t vsbcq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_m_u32)))\n" "uint32x4_t vsbcq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_m_u32)))\n" "uint32x4_t vsbcq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_s32)))\n" "int32x4_t vsbcq_s32(int32x4_t, int32x4_t, unsigned *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_s32)))\n" "int32x4_t vsbcq(int32x4_t, int32x4_t, unsigned *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_u32)))\n" "uint32x4_t vsbcq_u32(uint32x4_t, uint32x4_t, unsigned *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_u32)))\n" "uint32x4_t vsbcq(uint32x4_t, uint32x4_t, unsigned *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s16)))\n" "int16x8_t vsetq_lane_s16(int16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s16)))\n" "int16x8_t vsetq_lane(int16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s32)))\n" "int32x4_t vsetq_lane_s32(int32_t, int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s32)))\n" "int32x4_t vsetq_lane(int32_t, int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s64)))\n" "int64x2_t vsetq_lane_s64(int64_t, int64x2_t, int);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s64)))\n" "int64x2_t vsetq_lane(int64_t, int64x2_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s8)))\n" "int8x16_t vsetq_lane_s8(int8_t, int8x16_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s8)))\n" "int8x16_t vsetq_lane(int8_t, int8x16_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u16)))\n" "uint16x8_t vsetq_lane_u16(uint16_t, uint16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u16)))\n" "uint16x8_t vsetq_lane(uint16_t, uint16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u32)))\n" "uint32x4_t vsetq_lane_u32(uint32_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u32)))\n" "uint32x4_t vsetq_lane(uint32_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u64)))\n" "uint64x2_t vsetq_lane_u64(uint64_t, uint64x2_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u64)))\n" "uint64x2_t vsetq_lane(uint64_t, uint64x2_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u8)))\n" "uint8x16_t vsetq_lane_u8(uint8_t, uint8x16_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u8)))\n" "uint8x16_t vsetq_lane(uint8_t, uint8x16_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s16)))\n" "int16x8_t vshlcq_m_s16(int16x8_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s16)))\n" "int16x8_t vshlcq_m(int16x8_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s32)))\n" "int32x4_t vshlcq_m_s32(int32x4_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s32)))\n" "int32x4_t vshlcq_m(int32x4_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s8)))\n" "int8x16_t vshlcq_m_s8(int8x16_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s8)))\n" "int8x16_t vshlcq_m(int8x16_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u16)))\n" "uint16x8_t vshlcq_m_u16(uint16x8_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u16)))\n" "uint16x8_t vshlcq_m(uint16x8_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u32)))\n" "uint32x4_t vshlcq_m_u32(uint32x4_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u32)))\n" "uint32x4_t vshlcq_m(uint32x4_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u8)))\n" "uint8x16_t vshlcq_m_u8(uint8x16_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u8)))\n" "uint8x16_t vshlcq_m(uint8x16_t, uint32_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s16)))\n" "int16x8_t 
vshlcq_s16(int16x8_t, uint32_t *, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s16)))\n" "int16x8_t vshlcq(int16x8_t, uint32_t *, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s32)))\n" "int32x4_t vshlcq_s32(int32x4_t, uint32_t *, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s32)))\n" "int32x4_t vshlcq(int32x4_t, uint32_t *, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s8)))\n" "int8x16_t vshlcq_s8(int8x16_t, uint32_t *, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s8)))\n" "int8x16_t vshlcq(int8x16_t, uint32_t *, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u16)))\n" "uint16x8_t vshlcq_u16(uint16x8_t, uint32_t *, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u16)))\n" "uint16x8_t vshlcq(uint16x8_t, uint32_t *, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u32)))\n" "uint32x4_t vshlcq_u32(uint32x4_t, uint32_t *, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u32)))\n" "uint32x4_t vshlcq(uint32x4_t, uint32_t *, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u8)))\n" "uint8x16_t vshlcq_u8(uint8x16_t, uint32_t *, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u8)))\n" "uint8x16_t vshlcq(uint8x16_t, uint32_t *, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_s16)))\n" "int32x4_t vshllbq_m_n_s16(int32x4_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_s16)))\n" "int32x4_t vshllbq_m(int32x4_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_s8)))\n" "int16x8_t vshllbq_m_n_s8(int16x8_t, int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_s8)))\n" "int16x8_t vshllbq_m(int16x8_t, int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_u16)))\n" "uint32x4_t vshllbq_m_n_u16(uint32x4_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_u16)))\n" "uint32x4_t vshllbq_m(uint32x4_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_u8)))\n" "uint16x8_t vshllbq_m_n_u8(uint16x8_t, uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_u8)))\n" "uint16x8_t vshllbq_m(uint16x8_t, uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_s16)))\n" "int32x4_t vshllbq_n_s16(int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_s16)))\n" "int32x4_t vshllbq(int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_s8)))\n" "int16x8_t vshllbq_n_s8(int8x16_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_s8)))\n" "int16x8_t vshllbq(int8x16_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_u16)))\n" "uint32x4_t vshllbq_n_u16(uint16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_u16)))\n" "uint32x4_t vshllbq(uint16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_u8)))\n" "uint16x8_t vshllbq_n_u8(uint8x16_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_u8)))\n" "uint16x8_t vshllbq(uint8x16_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_s16)))\n" "int32x4_t vshllbq_x_n_s16(int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_s16)))\n" "int32x4_t vshllbq_x(int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_s8)))\n" "int16x8_t vshllbq_x_n_s8(int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_s8)))\n" "int16x8_t vshllbq_x(int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_u16)))\n" "uint32x4_t vshllbq_x_n_u16(uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_u16)))\n" "uint32x4_t vshllbq_x(uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_u8)))\n" "uint16x8_t vshllbq_x_n_u8(uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_u8)))\n" "uint16x8_t vshllbq_x(uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_s16)))\n" "int32x4_t vshlltq_m_n_s16(int32x4_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_s16)))\n" "int32x4_t vshlltq_m(int32x4_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_s8)))\n" "int16x8_t vshlltq_m_n_s8(int16x8_t, int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_s8)))\n" "int16x8_t vshlltq_m(int16x8_t, int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_u16)))\n" "uint32x4_t vshlltq_m_n_u16(uint32x4_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_u16)))\n" "uint32x4_t vshlltq_m(uint32x4_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_u8)))\n" "uint16x8_t vshlltq_m_n_u8(uint16x8_t, uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_u8)))\n" "uint16x8_t vshlltq_m(uint16x8_t, uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_s16)))\n" "int32x4_t vshlltq_n_s16(int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_s16)))\n" "int32x4_t vshlltq(int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_s8)))\n" "int16x8_t vshlltq_n_s8(int8x16_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_s8)))\n" "int16x8_t vshlltq(int8x16_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_u16)))\n" "uint32x4_t vshlltq_n_u16(uint16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_u16)))\n" "uint32x4_t vshlltq(uint16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_u8)))\n" "uint16x8_t vshlltq_n_u8(uint8x16_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_u8)))\n" "uint16x8_t vshlltq(uint8x16_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_s16)))\n" "int32x4_t vshlltq_x_n_s16(int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_s16)))\n" "int32x4_t vshlltq_x(int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_s8)))\n" "int16x8_t vshlltq_x_n_s8(int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_s8)))\n" "int16x8_t vshlltq_x(int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_u16)))\n" "uint32x4_t vshlltq_x_n_u16(uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_u16)))\n" "uint32x4_t vshlltq_x(uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_u8)))\n" "uint16x8_t vshlltq_x_n_u8(uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_u8)))\n" "uint16x8_t vshlltq_x(uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s16)))\n" "int16x8_t vshlq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s16)))\n" "int16x8_t vshlq_m_n(int16x8_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s32)))\n" "int32x4_t vshlq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s32)))\n" "int32x4_t vshlq_m_n(int32x4_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s8)))\n" "int8x16_t vshlq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s8)))\n" "int8x16_t vshlq_m_n(int8x16_t, int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u16)))\n" "uint16x8_t vshlq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u16)))\n" "uint16x8_t vshlq_m_n(uint16x8_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u32)))\n" "uint32x4_t vshlq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u32)))\n" "uint32x4_t vshlq_m_n(uint32x4_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u8)))\n" "uint8x16_t vshlq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u8)))\n" "uint8x16_t vshlq_m_n(uint8x16_t, uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s16)))\n" 
"int16x8_t vshlq_m_r_s16(int16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s16)))\n" "int16x8_t vshlq_m_r(int16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s32)))\n" "int32x4_t vshlq_m_r_s32(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s32)))\n" "int32x4_t vshlq_m_r(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s8)))\n" "int8x16_t vshlq_m_r_s8(int8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s8)))\n" "int8x16_t vshlq_m_r(int8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u16)))\n" "uint16x8_t vshlq_m_r_u16(uint16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u16)))\n" "uint16x8_t vshlq_m_r(uint16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u32)))\n" "uint32x4_t vshlq_m_r_u32(uint32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u32)))\n" "uint32x4_t vshlq_m_r(uint32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u8)))\n" "uint8x16_t vshlq_m_r_u8(uint8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u8)))\n" "uint8x16_t vshlq_m_r(uint8x16_t, int32_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s16)))\n" "int16x8_t vshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s16)))\n" "int16x8_t vshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s32)))\n" "int32x4_t vshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s32)))\n" "int32x4_t vshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s8)))\n" "int8x16_t vshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s8)))\n" "int8x16_t vshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u16)))\n" "uint16x8_t vshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u16)))\n" "uint16x8_t vshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u32)))\n" "uint32x4_t vshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u32)))\n" "uint32x4_t vshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u8)))\n" "uint8x16_t vshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u8)))\n" "uint8x16_t vshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s16)))\n" "int16x8_t vshlq_n_s16(int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s16)))\n" "int16x8_t vshlq_n(int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s32)))\n" "int32x4_t vshlq_n_s32(int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s32)))\n" "int32x4_t vshlq_n(int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s8)))\n" "int8x16_t vshlq_n_s8(int8x16_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s8)))\n" "int8x16_t vshlq_n(int8x16_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u16)))\n" "uint16x8_t vshlq_n_u16(uint16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u16)))\n" "uint16x8_t vshlq_n(uint16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u32)))\n" "uint32x4_t vshlq_n_u32(uint32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u32)))\n" "uint32x4_t vshlq_n(uint32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u8)))\n" "uint8x16_t vshlq_n_u8(uint8x16_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u8)))\n" "uint8x16_t vshlq_n(uint8x16_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s16)))\n" "int16x8_t 
vshlq_r_s16(int16x8_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s16)))\n" "int16x8_t vshlq_r(int16x8_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s32)))\n" "int32x4_t vshlq_r_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s32)))\n" "int32x4_t vshlq_r(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s8)))\n" "int8x16_t vshlq_r_s8(int8x16_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s8)))\n" "int8x16_t vshlq_r(int8x16_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u16)))\n" "uint16x8_t vshlq_r_u16(uint16x8_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u16)))\n" "uint16x8_t vshlq_r(uint16x8_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u32)))\n" "uint32x4_t vshlq_r_u32(uint32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u32)))\n" "uint32x4_t vshlq_r(uint32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u8)))\n" "uint8x16_t vshlq_r_u8(uint8x16_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u8)))\n" "uint8x16_t vshlq_r(uint8x16_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s16)))\n" "int16x8_t vshlq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s16)))\n" "int16x8_t vshlq(int16x8_t, int16x8_t);\n" 
"static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s32)))\n" "int32x4_t vshlq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s32)))\n" "int32x4_t vshlq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s8)))\n" "int8x16_t vshlq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s8)))\n" "int8x16_t vshlq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u16)))\n" "uint16x8_t vshlq_u16(uint16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u16)))\n" "uint16x8_t vshlq(uint16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u32)))\n" "uint32x4_t vshlq_u32(uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u32)))\n" "uint32x4_t vshlq(uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u8)))\n" "uint8x16_t vshlq_u8(uint8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u8)))\n" "uint8x16_t vshlq(uint8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s16)))\n" "int16x8_t vshlq_x_n_s16(int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s16)))\n" "int16x8_t vshlq_x_n(int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s32)))\n" "int32x4_t vshlq_x_n_s32(int32x4_t, int, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s32)))\n" "int32x4_t vshlq_x_n(int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s8)))\n" "int8x16_t vshlq_x_n_s8(int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s8)))\n" "int8x16_t vshlq_x_n(int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u16)))\n" "uint16x8_t vshlq_x_n_u16(uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u16)))\n" "uint16x8_t vshlq_x_n(uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u32)))\n" "uint32x4_t vshlq_x_n_u32(uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u32)))\n" "uint32x4_t vshlq_x_n(uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u8)))\n" "uint8x16_t vshlq_x_n_u8(uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u8)))\n" "uint8x16_t vshlq_x_n(uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s16)))\n" "int16x8_t vshlq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s16)))\n" "int16x8_t vshlq_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s32)))\n" "int32x4_t vshlq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s32)))\n" "int32x4_t vshlq_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s8)))\n" "int8x16_t vshlq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s8)))\n" "int8x16_t vshlq_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u16)))\n" "uint16x8_t vshlq_x_u16(uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u16)))\n" "uint16x8_t vshlq_x(uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u32)))\n" "uint32x4_t vshlq_x_u32(uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u32)))\n" "uint32x4_t vshlq_x(uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u8)))\n" "uint8x16_t vshlq_x_u8(uint8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u8)))\n" "uint8x16_t vshlq_x(uint8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_s16)))\n" "int8x16_t vshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_s16)))\n" "int8x16_t vshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_s32)))\n" "int16x8_t vshrnbq_m_n_s32(int16x8_t, int32x4_t, int, 
mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_s32)))\n" "int16x8_t vshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_u16)))\n" "uint8x16_t vshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_u16)))\n" "uint8x16_t vshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_u32)))\n" "uint16x8_t vshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_u32)))\n" "uint16x8_t vshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_s16)))\n" "int8x16_t vshrnbq_n_s16(int8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_s16)))\n" "int8x16_t vshrnbq(int8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_s32)))\n" "int16x8_t vshrnbq_n_s32(int16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_s32)))\n" "int16x8_t vshrnbq(int16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_u16)))\n" "uint8x16_t vshrnbq_n_u16(uint8x16_t, uint16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_u16)))\n" "uint8x16_t vshrnbq(uint8x16_t, uint16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_u32)))\n" 
"uint16x8_t vshrnbq_n_u32(uint16x8_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_u32)))\n" "uint16x8_t vshrnbq(uint16x8_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_s16)))\n" "int8x16_t vshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_s16)))\n" "int8x16_t vshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_s32)))\n" "int16x8_t vshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_s32)))\n" "int16x8_t vshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_u16)))\n" "uint8x16_t vshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_u16)))\n" "uint8x16_t vshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_u32)))\n" "uint16x8_t vshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_u32)))\n" "uint16x8_t vshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_s16)))\n" "int8x16_t vshrntq_n_s16(int8x16_t, int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_s16)))\n" "int8x16_t vshrntq(int8x16_t, int16x8_t, int);\n" "static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_s32)))\n" "int16x8_t vshrntq_n_s32(int16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_s32)))\n" "int16x8_t vshrntq(int16x8_t, int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_u16)))\n" "uint8x16_t vshrntq_n_u16(uint8x16_t, uint16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_u16)))\n" "uint8x16_t vshrntq(uint8x16_t, uint16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_u32)))\n" "uint16x8_t vshrntq_n_u32(uint16x8_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_u32)))\n" "uint16x8_t vshrntq(uint16x8_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s16)))\n" "int16x8_t vshrq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s16)))\n" "int16x8_t vshrq_m(int16x8_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s32)))\n" "int32x4_t vshrq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s32)))\n" "int32x4_t vshrq_m(int32x4_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s8)))\n" "int8x16_t vshrq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s8)))\n" "int8x16_t vshrq_m(int8x16_t, int8x16_t, int, 
mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u16)))\n" "uint16x8_t vshrq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u16)))\n" "uint16x8_t vshrq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u32)))\n" "uint32x4_t vshrq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u32)))\n" "uint32x4_t vshrq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u8)))\n" "uint8x16_t vshrq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u8)))\n" "uint8x16_t vshrq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s16)))\n" "int16x8_t vshrq_n_s16(int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s16)))\n" "int16x8_t vshrq(int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s32)))\n" "int32x4_t vshrq_n_s32(int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s32)))\n" "int32x4_t vshrq(int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s8)))\n" "int8x16_t vshrq_n_s8(int8x16_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s8)))\n" "int8x16_t vshrq(int8x16_t, int);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u16)))\n" "uint16x8_t vshrq_n_u16(uint16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u16)))\n" "uint16x8_t vshrq(uint16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u32)))\n" "uint32x4_t vshrq_n_u32(uint32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u32)))\n" "uint32x4_t vshrq(uint32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u8)))\n" "uint8x16_t vshrq_n_u8(uint8x16_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u8)))\n" "uint8x16_t vshrq(uint8x16_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s16)))\n" "int16x8_t vshrq_x_n_s16(int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s16)))\n" "int16x8_t vshrq_x(int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s32)))\n" "int32x4_t vshrq_x_n_s32(int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s32)))\n" "int32x4_t vshrq_x(int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s8)))\n" "int8x16_t vshrq_x_n_s8(int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s8)))\n" "int8x16_t vshrq_x(int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u16)))\n" "uint16x8_t vshrq_x_n_u16(uint16x8_t, int, mve_pred16_t);\n" 
"static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u16)))\n" "uint16x8_t vshrq_x(uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u32)))\n" "uint32x4_t vshrq_x_n_u32(uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u32)))\n" "uint32x4_t vshrq_x(uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u8)))\n" "uint8x16_t vshrq_x_n_u8(uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u8)))\n" "uint8x16_t vshrq_x(uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s16)))\n" "int16x8_t vsliq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s16)))\n" "int16x8_t vsliq_m(int16x8_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s32)))\n" "int32x4_t vsliq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s32)))\n" "int32x4_t vsliq_m(int32x4_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s8)))\n" "int8x16_t vsliq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s8)))\n" "int8x16_t vsliq_m(int8x16_t, int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u16)))\n" "uint16x8_t 
vsliq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u16)))\n" "uint16x8_t vsliq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u32)))\n" "uint32x4_t vsliq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u32)))\n" "uint32x4_t vsliq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u8)))\n" "uint8x16_t vsliq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u8)))\n" "uint8x16_t vsliq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s16)))\n" "int16x8_t vsliq_n_s16(int16x8_t, int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s16)))\n" "int16x8_t vsliq(int16x8_t, int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s32)))\n" "int32x4_t vsliq_n_s32(int32x4_t, int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s32)))\n" "int32x4_t vsliq(int32x4_t, int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s8)))\n" "int8x16_t vsliq_n_s8(int8x16_t, int8x16_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s8)))\n" "int8x16_t vsliq(int8x16_t, int8x16_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u16)))\n" "uint16x8_t 
vsliq_n_u16(uint16x8_t, uint16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u16)))\n" "uint16x8_t vsliq(uint16x8_t, uint16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u32)))\n" "uint32x4_t vsliq_n_u32(uint32x4_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u32)))\n" "uint32x4_t vsliq(uint32x4_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u8)))\n" "uint8x16_t vsliq_n_u8(uint8x16_t, uint8x16_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u8)))\n" "uint8x16_t vsliq(uint8x16_t, uint8x16_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s16)))\n" "int16x8_t vsriq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s16)))\n" "int16x8_t vsriq_m(int16x8_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s32)))\n" "int32x4_t vsriq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s32)))\n" "int32x4_t vsriq_m(int32x4_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s8)))\n" "int8x16_t vsriq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s8)))\n" "int8x16_t vsriq_m(int8x16_t, int8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u16)))\n" "uint16x8_t 
vsriq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u16)))\n" "uint16x8_t vsriq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u32)))\n" "uint32x4_t vsriq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u32)))\n" "uint32x4_t vsriq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u8)))\n" "uint8x16_t vsriq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u8)))\n" "uint8x16_t vsriq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s16)))\n" "int16x8_t vsriq_n_s16(int16x8_t, int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s16)))\n" "int16x8_t vsriq(int16x8_t, int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s32)))\n" "int32x4_t vsriq_n_s32(int32x4_t, int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s32)))\n" "int32x4_t vsriq(int32x4_t, int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s8)))\n" "int8x16_t vsriq_n_s8(int8x16_t, int8x16_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s8)))\n" "int8x16_t vsriq(int8x16_t, int8x16_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u16)))\n" "uint16x8_t 
vsriq_n_u16(uint16x8_t, uint16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u16)))\n" "uint16x8_t vsriq(uint16x8_t, uint16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u32)))\n" "uint32x4_t vsriq_n_u32(uint32x4_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u32)))\n" "uint32x4_t vsriq(uint32x4_t, uint32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u8)))\n" "uint8x16_t vsriq_n_u8(uint8x16_t, uint8x16_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u8)))\n" "uint8x16_t vsriq(uint8x16_t, uint8x16_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s16)))\n" "void vst1q_p_s16(int16_t *, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s16)))\n" "void vst1q_p(int16_t *, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s32)))\n" "void vst1q_p_s32(int32_t *, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s32)))\n" "void vst1q_p(int32_t *, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s8)))\n" "void vst1q_p_s8(int8_t *, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s8)))\n" "void vst1q_p(int8_t *, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u16)))\n" "void vst1q_p_u16(uint16_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u16)))\n" "void vst1q_p(uint16_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u32)))\n" "void vst1q_p_u32(uint32_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u32)))\n" "void vst1q_p(uint32_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u8)))\n" "void vst1q_p_u8(uint8_t *, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u8)))\n" "void vst1q_p(uint8_t *, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s16)))\n" "void vst1q_s16(int16_t *, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s16)))\n" "void vst1q(int16_t *, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s32)))\n" "void vst1q_s32(int32_t *, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s32)))\n" "void vst1q(int32_t *, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s8)))\n" "void vst1q_s8(int8_t *, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s8)))\n" "void vst1q(int8_t *, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u16)))\n" "void vst1q_u16(uint16_t *, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u16)))\n" "void vst1q(uint16_t *, uint16x8_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u32)))\n" "void vst1q_u32(uint32_t *, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u32)))\n" "void vst1q(uint32_t *, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u8)))\n" "void vst1q_u8(uint8_t *, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u8)))\n" "void vst1q(uint8_t *, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s16)))\n" "void vst2q_s16(int16_t *, int16x8x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s16)))\n" "void vst2q(int16_t *, int16x8x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s32)))\n" "void vst2q_s32(int32_t *, int32x4x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s32)))\n" "void vst2q(int32_t *, int32x4x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s8)))\n" "void vst2q_s8(int8_t *, int8x16x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s8)))\n" "void vst2q(int8_t *, int8x16x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u16)))\n" "void vst2q_u16(uint16_t *, uint16x8x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u16)))\n" "void vst2q(uint16_t *, uint16x8x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u32)))\n" "void vst2q_u32(uint32_t *, uint32x4x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u32)))\n" "void vst2q(uint32_t *, 
uint32x4x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u8)))\n" "void vst2q_u8(uint8_t *, uint8x16x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u8)))\n" "void vst2q(uint8_t *, uint8x16x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s16)))\n" "void vst4q_s16(int16_t *, int16x8x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s16)))\n" "void vst4q(int16_t *, int16x8x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s32)))\n" "void vst4q_s32(int32_t *, int32x4x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s32)))\n" "void vst4q(int32_t *, int32x4x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s8)))\n" "void vst4q_s8(int8_t *, int8x16x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s8)))\n" "void vst4q(int8_t *, int8x16x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u16)))\n" "void vst4q_u16(uint16_t *, uint16x8x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u16)))\n" "void vst4q(uint16_t *, uint16x8x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u32)))\n" "void vst4q_u32(uint32_t *, uint32x4x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u32)))\n" "void vst4q(uint32_t *, uint32x4x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u8)))\n" "void vst4q_u8(uint8_t *, uint8x16x4_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u8)))\n" "void vst4q(uint8_t *, uint8x16x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s16)))\n" "void vstrbq_p_s16(int8_t *, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s16)))\n" "void vstrbq_p(int8_t *, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s32)))\n" "void vstrbq_p_s32(int8_t *, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s32)))\n" "void vstrbq_p(int8_t *, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s8)))\n" "void vstrbq_p_s8(int8_t *, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s8)))\n" "void vstrbq_p(int8_t *, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u16)))\n" "void vstrbq_p_u16(uint8_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u16)))\n" "void vstrbq_p(uint8_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u32)))\n" "void vstrbq_p_u32(uint8_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u32)))\n" "void vstrbq_p(uint8_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u8)))\n" "void vstrbq_p_u8(uint8_t *, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u8)))\n" "void 
vstrbq_p(uint8_t *, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s16)))\n" "void vstrbq_s16(int8_t *, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s16)))\n" "void vstrbq(int8_t *, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s32)))\n" "void vstrbq_s32(int8_t *, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s32)))\n" "void vstrbq(int8_t *, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s8)))\n" "void vstrbq_s8(int8_t *, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s8)))\n" "void vstrbq(int8_t *, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s16)))\n" "void vstrbq_scatter_offset_p_s16(int8_t *, uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s16)))\n" "void vstrbq_scatter_offset_p(int8_t *, uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s32)))\n" "void vstrbq_scatter_offset_p_s32(int8_t *, uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s32)))\n" "void vstrbq_scatter_offset_p(int8_t *, uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s8)))\n" "void vstrbq_scatter_offset_p_s8(int8_t *, uint8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s8)))\n" "void vstrbq_scatter_offset_p(int8_t *, uint8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u16)))\n" "void vstrbq_scatter_offset_p_u16(uint8_t *, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u16)))\n" "void vstrbq_scatter_offset_p(uint8_t *, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u32)))\n" "void vstrbq_scatter_offset_p_u32(uint8_t *, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u32)))\n" "void vstrbq_scatter_offset_p(uint8_t *, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u8)))\n" "void vstrbq_scatter_offset_p_u8(uint8_t *, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u8)))\n" "void vstrbq_scatter_offset_p(uint8_t *, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s16)))\n" "void vstrbq_scatter_offset_s16(int8_t *, uint16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s16)))\n" "void vstrbq_scatter_offset(int8_t *, uint16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s32)))\n" "void vstrbq_scatter_offset_s32(int8_t *, uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s32)))\n" "void vstrbq_scatter_offset(int8_t *, uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s8)))\n" "void vstrbq_scatter_offset_s8(int8_t *, uint8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s8)))\n" "void vstrbq_scatter_offset(int8_t *, uint8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u16)))\n" "void vstrbq_scatter_offset_u16(uint8_t *, uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u16)))\n" "void vstrbq_scatter_offset(uint8_t *, uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u32)))\n" "void vstrbq_scatter_offset_u32(uint8_t *, uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u32)))\n" "void vstrbq_scatter_offset(uint8_t *, uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u8)))\n" "void vstrbq_scatter_offset_u8(uint8_t *, uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u8)))\n" "void vstrbq_scatter_offset(uint8_t *, uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u16)))\n" "void vstrbq_u16(uint8_t *, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u16)))\n" "void vstrbq(uint8_t *, uint16x8_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u32)))\n" "void vstrbq_u32(uint8_t *, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u32)))\n" "void vstrbq(uint8_t *, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u8)))\n" "void vstrbq_u8(uint8_t *, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u8)))\n" "void vstrbq(uint8_t *, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_p_s64)))\n" "void vstrdq_scatter_base_p_s64(uint64x2_t, int, int64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_p_s64)))\n" "void vstrdq_scatter_base_p(uint64x2_t, int, int64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_p_u64)))\n" "void vstrdq_scatter_base_p_u64(uint64x2_t, int, uint64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_p_u64)))\n" "void vstrdq_scatter_base_p(uint64x2_t, int, uint64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_s64)))\n" "void vstrdq_scatter_base_s64(uint64x2_t, int, int64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_s64)))\n" "void vstrdq_scatter_base(uint64x2_t, int, int64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_u64)))\n" "void vstrdq_scatter_base_u64(uint64x2_t, int, uint64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_u64)))\n" 
"void vstrdq_scatter_base(uint64x2_t, int, uint64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_s64)))\n" "void vstrdq_scatter_base_wb_p_s64(uint64x2_t *, int, int64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_s64)))\n" "void vstrdq_scatter_base_wb_p(uint64x2_t *, int, int64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_u64)))\n" "void vstrdq_scatter_base_wb_p_u64(uint64x2_t *, int, uint64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_u64)))\n" "void vstrdq_scatter_base_wb_p(uint64x2_t *, int, uint64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_s64)))\n" "void vstrdq_scatter_base_wb_s64(uint64x2_t *, int, int64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_s64)))\n" "void vstrdq_scatter_base_wb(uint64x2_t *, int, int64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_u64)))\n" "void vstrdq_scatter_base_wb_u64(uint64x2_t *, int, uint64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_u64)))\n" "void vstrdq_scatter_base_wb(uint64x2_t *, int, uint64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_s64)))\n" "void vstrdq_scatter_offset_p_s64(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_s64)))\n" "void vstrdq_scatter_offset_p(int64_t *, uint64x2_t, 
int64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_u64)))\n" "void vstrdq_scatter_offset_p_u64(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_u64)))\n" "void vstrdq_scatter_offset_p(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_s64)))\n" "void vstrdq_scatter_offset_s64(int64_t *, uint64x2_t, int64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_s64)))\n" "void vstrdq_scatter_offset(int64_t *, uint64x2_t, int64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_u64)))\n" "void vstrdq_scatter_offset_u64(uint64_t *, uint64x2_t, uint64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_u64)))\n" "void vstrdq_scatter_offset(uint64_t *, uint64x2_t, uint64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_s64)))\n" "void vstrdq_scatter_shifted_offset_p_s64(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_s64)))\n" "void vstrdq_scatter_shifted_offset_p(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_u64)))\n" "void vstrdq_scatter_shifted_offset_p_u64(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_u64)))\n" "void 
vstrdq_scatter_shifted_offset_p(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_s64)))\n" "void vstrdq_scatter_shifted_offset_s64(int64_t *, uint64x2_t, int64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_s64)))\n" "void vstrdq_scatter_shifted_offset(int64_t *, uint64x2_t, int64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_u64)))\n" "void vstrdq_scatter_shifted_offset_u64(uint64_t *, uint64x2_t, uint64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_u64)))\n" "void vstrdq_scatter_shifted_offset(uint64_t *, uint64x2_t, uint64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_s16)))\n" "void vstrhq_p_s16(int16_t *, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_s16)))\n" "void vstrhq_p(int16_t *, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_s32)))\n" "void vstrhq_p_s32(int16_t *, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_s32)))\n" "void vstrhq_p(int16_t *, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_u16)))\n" "void vstrhq_p_u16(uint16_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_u16)))\n" "void vstrhq_p(uint16_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_u32)))\n" "void 
vstrhq_p_u32(uint16_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_u32)))\n" "void vstrhq_p(uint16_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_s16)))\n" "void vstrhq_s16(int16_t *, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_s16)))\n" "void vstrhq(int16_t *, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_s32)))\n" "void vstrhq_s32(int16_t *, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_s32)))\n" "void vstrhq(int16_t *, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s16)))\n" "void vstrhq_scatter_offset_p_s16(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s16)))\n" "void vstrhq_scatter_offset_p(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s32)))\n" "void vstrhq_scatter_offset_p_s32(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s32)))\n" "void vstrhq_scatter_offset_p(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u16)))\n" "void vstrhq_scatter_offset_p_u16(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u16)))\n" "void vstrhq_scatter_offset_p(uint16_t *, 
uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u32)))\n" "void vstrhq_scatter_offset_p_u32(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u32)))\n" "void vstrhq_scatter_offset_p(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_s16)))\n" "void vstrhq_scatter_offset_s16(int16_t *, uint16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_s16)))\n" "void vstrhq_scatter_offset(int16_t *, uint16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_s32)))\n" "void vstrhq_scatter_offset_s32(int16_t *, uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_s32)))\n" "void vstrhq_scatter_offset(int16_t *, uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_u16)))\n" "void vstrhq_scatter_offset_u16(uint16_t *, uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_u16)))\n" "void vstrhq_scatter_offset(uint16_t *, uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_u32)))\n" "void vstrhq_scatter_offset_u32(uint16_t *, uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_u32)))\n" "void vstrhq_scatter_offset(uint16_t *, uint32x4_t, uint32x4_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s16)))\n" "void vstrhq_scatter_shifted_offset_p_s16(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s16)))\n" "void vstrhq_scatter_shifted_offset_p(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s32)))\n" "void vstrhq_scatter_shifted_offset_p_s32(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s32)))\n" "void vstrhq_scatter_shifted_offset_p(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u16)))\n" "void vstrhq_scatter_shifted_offset_p_u16(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u16)))\n" "void vstrhq_scatter_shifted_offset_p(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u32)))\n" "void vstrhq_scatter_shifted_offset_p_u32(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u32)))\n" "void vstrhq_scatter_shifted_offset_p(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s16)))\n" "void vstrhq_scatter_shifted_offset_s16(int16_t *, uint16x8_t, int16x8_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s16)))\n" "void vstrhq_scatter_shifted_offset(int16_t *, uint16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s32)))\n" "void vstrhq_scatter_shifted_offset_s32(int16_t *, uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s32)))\n" "void vstrhq_scatter_shifted_offset(int16_t *, uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u16)))\n" "void vstrhq_scatter_shifted_offset_u16(uint16_t *, uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u16)))\n" "void vstrhq_scatter_shifted_offset(uint16_t *, uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u32)))\n" "void vstrhq_scatter_shifted_offset_u32(uint16_t *, uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u32)))\n" "void vstrhq_scatter_shifted_offset(uint16_t *, uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_u16)))\n" "void vstrhq_u16(uint16_t *, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_u16)))\n" "void vstrhq(uint16_t *, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_u32)))\n" "void vstrhq_u32(uint16_t *, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_u32)))\n" "void vstrhq(uint16_t *, 
uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_s32)))\n" "void vstrwq_p_s32(int32_t *, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_s32)))\n" "void vstrwq_p(int32_t *, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_u32)))\n" "void vstrwq_p_u32(uint32_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_u32)))\n" "void vstrwq_p(uint32_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_s32)))\n" "void vstrwq_s32(int32_t *, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_s32)))\n" "void vstrwq(int32_t *, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_s32)))\n" "void vstrwq_scatter_base_p_s32(uint32x4_t, int, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_s32)))\n" "void vstrwq_scatter_base_p(uint32x4_t, int, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_u32)))\n" "void vstrwq_scatter_base_p_u32(uint32x4_t, int, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_u32)))\n" "void vstrwq_scatter_base_p(uint32x4_t, int, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_s32)))\n" "void vstrwq_scatter_base_s32(uint32x4_t, int, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_s32)))\n" "void vstrwq_scatter_base(uint32x4_t, int, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_u32)))\n" "void vstrwq_scatter_base_u32(uint32x4_t, int, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_u32)))\n" "void vstrwq_scatter_base(uint32x4_t, int, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_s32)))\n" "void vstrwq_scatter_base_wb_p_s32(uint32x4_t *, int, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_s32)))\n" "void vstrwq_scatter_base_wb_p(uint32x4_t *, int, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_u32)))\n" "void vstrwq_scatter_base_wb_p_u32(uint32x4_t *, int, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_u32)))\n" "void vstrwq_scatter_base_wb_p(uint32x4_t *, int, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_s32)))\n" "void vstrwq_scatter_base_wb_s32(uint32x4_t *, int, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_s32)))\n" "void vstrwq_scatter_base_wb(uint32x4_t *, int, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_u32)))\n" "void vstrwq_scatter_base_wb_u32(uint32x4_t *, int, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_u32)))\n" "void 
vstrwq_scatter_base_wb(uint32x4_t *, int, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_s32)))\n" "void vstrwq_scatter_offset_p_s32(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_s32)))\n" "void vstrwq_scatter_offset_p(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_u32)))\n" "void vstrwq_scatter_offset_p_u32(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_u32)))\n" "void vstrwq_scatter_offset_p(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_s32)))\n" "void vstrwq_scatter_offset_s32(int32_t *, uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_s32)))\n" "void vstrwq_scatter_offset(int32_t *, uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_u32)))\n" "void vstrwq_scatter_offset_u32(uint32_t *, uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_u32)))\n" "void vstrwq_scatter_offset(uint32_t *, uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_s32)))\n" "void vstrwq_scatter_shifted_offset_p_s32(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_s32)))\n" "void 
vstrwq_scatter_shifted_offset_p(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_u32)))\n" "void vstrwq_scatter_shifted_offset_p_u32(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_u32)))\n" "void vstrwq_scatter_shifted_offset_p(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_s32)))\n" "void vstrwq_scatter_shifted_offset_s32(int32_t *, uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_s32)))\n" "void vstrwq_scatter_shifted_offset(int32_t *, uint32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_u32)))\n" "void vstrwq_scatter_shifted_offset_u32(uint32_t *, uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_u32)))\n" "void vstrwq_scatter_shifted_offset(uint32_t *, uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_u32)))\n" "void vstrwq_u32(uint32_t *, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_u32)))\n" "void vstrwq(uint32_t *, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s16)))\n" "int16x8_t vsubq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s16)))\n" "int16x8_t vsubq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);\n" 
"static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s32)))\n" "int32x4_t vsubq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s32)))\n" "int32x4_t vsubq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s8)))\n" "int8x16_t vsubq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s8)))\n" "int8x16_t vsubq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u16)))\n" "uint16x8_t vsubq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u16)))\n" "uint16x8_t vsubq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u32)))\n" "uint32x4_t vsubq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u32)))\n" "uint32x4_t vsubq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u8)))\n" "uint8x16_t vsubq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u8)))\n" "uint8x16_t vsubq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s16)))\n" "int16x8_t vsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s16)))\n" "int16x8_t vsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s32)))\n" "int32x4_t vsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s32)))\n" "int32x4_t vsubq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s8)))\n" "int8x16_t vsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s8)))\n" "int8x16_t vsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u16)))\n" "uint16x8_t vsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u16)))\n" "uint16x8_t vsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u32)))\n" "uint32x4_t vsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u32)))\n" "uint32x4_t vsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u8)))\n" "uint8x16_t vsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u8)))\n" "uint8x16_t vsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s16)))\n" "int16x8_t vsubq_n_s16(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s16)))\n" "int16x8_t vsubq(int16x8_t, int16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s32)))\n" "int32x4_t vsubq_n_s32(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s32)))\n" "int32x4_t vsubq(int32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s8)))\n" "int8x16_t vsubq_n_s8(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s8)))\n" "int8x16_t vsubq(int8x16_t, int8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u16)))\n" "uint16x8_t vsubq_n_u16(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u16)))\n" "uint16x8_t vsubq(uint16x8_t, uint16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u32)))\n" "uint32x4_t vsubq_n_u32(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u32)))\n" "uint32x4_t vsubq(uint32x4_t, uint32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u8)))\n" "uint8x16_t vsubq_n_u8(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u8)))\n" "uint8x16_t vsubq(uint8x16_t, uint8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s16)))\n" "int16x8_t vsubq_s16(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s16)))\n" "int16x8_t vsubq(int16x8_t, int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s32)))\n" "int32x4_t vsubq_s32(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s32)))\n" "int32x4_t vsubq(int32x4_t, int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s8)))\n" "int8x16_t vsubq_s8(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s8)))\n" "int8x16_t vsubq(int8x16_t, int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u16)))\n" "uint16x8_t vsubq_u16(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u16)))\n" "uint16x8_t vsubq(uint16x8_t, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u32)))\n" "uint32x4_t vsubq_u32(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u32)))\n" "uint32x4_t vsubq(uint32x4_t, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u8)))\n" "uint8x16_t vsubq_u8(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u8)))\n" "uint8x16_t vsubq(uint8x16_t, uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s16)))\n" "int16x8_t vsubq_x_n_s16(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s16)))\n" "int16x8_t vsubq_x(int16x8_t, int16_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s32)))\n" "int32x4_t vsubq_x_n_s32(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s32)))\n" "int32x4_t vsubq_x(int32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s8)))\n" "int8x16_t vsubq_x_n_s8(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s8)))\n" "int8x16_t vsubq_x(int8x16_t, int8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u16)))\n" "uint16x8_t vsubq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u16)))\n" "uint16x8_t vsubq_x(uint16x8_t, uint16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u32)))\n" "uint32x4_t vsubq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u32)))\n" "uint32x4_t vsubq_x(uint32x4_t, uint32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u8)))\n" "uint8x16_t vsubq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u8)))\n" "uint8x16_t vsubq_x(uint8x16_t, uint8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s16)))\n" "int16x8_t vsubq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s16)))\n" "int16x8_t vsubq_x(int16x8_t, int16x8_t, mve_pred16_t);\n" "static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s32)))\n" "int32x4_t vsubq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s32)))\n" "int32x4_t vsubq_x(int32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s8)))\n" "int8x16_t vsubq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s8)))\n" "int8x16_t vsubq_x(int8x16_t, int8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u16)))\n" "uint16x8_t vsubq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u16)))\n" "uint16x8_t vsubq_x(uint16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u32)))\n" "uint32x4_t vsubq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u32)))\n" "uint32x4_t vsubq_x(uint32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u8)))\n" "uint8x16_t vsubq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u8)))\n" "uint8x16_t vsubq_x(uint8x16_t, uint8x16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s16)))\n" "int16x8_t vuninitializedq(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s32)))\n" "int32x4_t 
vuninitializedq(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s64)))\n" "int64x2_t vuninitializedq(int64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s8)))\n" "int8x16_t vuninitializedq(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u16)))\n" "uint16x8_t vuninitializedq(uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u32)))\n" "uint32x4_t vuninitializedq(uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u64)))\n" "uint64x2_t vuninitializedq(uint64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u8)))\n" "uint8x16_t vuninitializedq(uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_s16)))\n" "int16x8_t vuninitializedq_s16();\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_s32)))\n" "int32x4_t vuninitializedq_s32();\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_s64)))\n" "int64x2_t vuninitializedq_s64();\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_s8)))\n" "int8x16_t vuninitializedq_s8();\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_u16)))\n" "uint16x8_t vuninitializedq_u16();\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_u32)))\n" "uint32x4_t vuninitializedq_u32();\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_u64)))\n" "uint64x2_t vuninitializedq_u64();\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_u8)))\n" "uint8x16_t vuninitializedq_u8();\n" "\n" "#endif /* (!defined __ARM_MVE_PRESERVE_USER_NAMESPACE) */\n" "\n" "#if (__ARM_FEATURE_MVE & 2) && (!defined __ARM_MVE_PRESERVE_USER_NAMESPACE)\n" "\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_f16)))\n" "float16x8_t vabdq_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_f16)))\n" "float16x8_t vabdq(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_f32)))\n" "float32x4_t vabdq_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_f32)))\n" "float32x4_t vabdq(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_f16)))\n" "float16x8_t vabdq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_f16)))\n" "float16x8_t vabdq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_f32)))\n" "float32x4_t vabdq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_f32)))\n" "float32x4_t vabdq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_f16)))\n" "float16x8_t vabdq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_f16)))\n" "float16x8_t vabdq_x(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_f32)))\n" "float32x4_t vabdq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_f32)))\n" "float32x4_t vabdq_x(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_f16)))\n" "float16x8_t vabsq_f16(float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_f16)))\n" "float16x8_t vabsq(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_f32)))\n" "float32x4_t vabsq_f32(float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_f32)))\n" "float32x4_t vabsq(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_f16)))\n" "float16x8_t vabsq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_f16)))\n" "float16x8_t vabsq_m(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_f32)))\n" "float32x4_t vabsq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_f32)))\n" "float32x4_t vabsq_m(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_f16)))\n" "float16x8_t vabsq_x_f16(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_f16)))\n" 
"float16x8_t vabsq_x(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_f32)))\n" "float32x4_t vabsq_x_f32(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_f32)))\n" "float32x4_t vabsq_x(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_f16)))\n" "float16x8_t vaddq_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_f16)))\n" "float16x8_t vaddq(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_f32)))\n" "float32x4_t vaddq_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_f32)))\n" "float32x4_t vaddq(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_f16)))\n" "float16x8_t vaddq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_f16)))\n" "float16x8_t vaddq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_f32)))\n" "float32x4_t vaddq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_f32)))\n" "float32x4_t vaddq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_f16)))\n" "float16x8_t vaddq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_f16)))\n" "float16x8_t vaddq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_f32)))\n" "float32x4_t vaddq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_f32)))\n" "float32x4_t vaddq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_f16)))\n" "float16x8_t vaddq_n_f16(float16x8_t, float16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_f16)))\n" "float16x8_t vaddq(float16x8_t, float16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_f32)))\n" "float32x4_t vaddq_n_f32(float32x4_t, float32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_f32)))\n" "float32x4_t vaddq(float32x4_t, float32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_f16)))\n" "float16x8_t vaddq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_f16)))\n" "float16x8_t vaddq_x(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_f32)))\n" "float32x4_t vaddq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_f32)))\n" "float32x4_t vaddq_x(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_f16)))\n" "float16x8_t vaddq_x_n_f16(float16x8_t, float16_t, mve_pred16_t);\n" "static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_f16)))\n" "float16x8_t vaddq_x(float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_f32)))\n" "float32x4_t vaddq_x_n_f32(float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_f32)))\n" "float32x4_t vaddq_x(float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_f16)))\n" "float16x8_t vandq_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_f16)))\n" "float16x8_t vandq(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_f32)))\n" "float32x4_t vandq_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_f32)))\n" "float32x4_t vandq(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_f16)))\n" "float16x8_t vandq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_f16)))\n" "float16x8_t vandq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_f32)))\n" "float32x4_t vandq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_f32)))\n" "float32x4_t vandq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_f16)))\n" "float16x8_t 
vandq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_f16)))\n" "float16x8_t vandq_x(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_f32)))\n" "float32x4_t vandq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_f32)))\n" "float32x4_t vandq_x(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_f16)))\n" "float16x8_t vbicq_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_f16)))\n" "float16x8_t vbicq(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_f32)))\n" "float32x4_t vbicq_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_f32)))\n" "float32x4_t vbicq(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_f16)))\n" "float16x8_t vbicq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_f16)))\n" "float16x8_t vbicq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_f32)))\n" "float32x4_t vbicq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_f32)))\n" "float32x4_t vbicq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_f16)))\n" "float16x8_t vbicq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_f16)))\n" "float16x8_t vbicq_x(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_f32)))\n" "float32x4_t vbicq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_f32)))\n" "float32x4_t vbicq_x(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_f16)))\n" "float16x8_t vbrsrq_m_n_f16(float16x8_t, float16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_f16)))\n" "float16x8_t vbrsrq_m(float16x8_t, float16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_f32)))\n" "float32x4_t vbrsrq_m_n_f32(float32x4_t, float32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_f32)))\n" "float32x4_t vbrsrq_m(float32x4_t, float32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_f16)))\n" "float16x8_t vbrsrq_n_f16(float16x8_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_f16)))\n" "float16x8_t vbrsrq(float16x8_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_f32)))\n" "float32x4_t vbrsrq_n_f32(float32x4_t, int32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_f32)))\n" "float32x4_t 
vbrsrq(float32x4_t, int32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_f16)))\n" "float16x8_t vbrsrq_x_n_f16(float16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_f16)))\n" "float16x8_t vbrsrq_x(float16x8_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_f32)))\n" "float32x4_t vbrsrq_x_n_f32(float32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_f32)))\n" "float32x4_t vbrsrq_x(float32x4_t, int32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_f16)))\n" "float16x8_t vcaddq_rot270_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_f16)))\n" "float16x8_t vcaddq_rot270(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_f32)))\n" "float32x4_t vcaddq_rot270_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_f32)))\n" "float32x4_t vcaddq_rot270(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_f16)))\n" "float16x8_t vcaddq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_f16)))\n" "float16x8_t vcaddq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_f32)))\n" "float32x4_t vcaddq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_f32)))\n" "float32x4_t vcaddq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_f16)))\n" "float16x8_t vcaddq_rot270_x_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_f16)))\n" "float16x8_t vcaddq_rot270_x(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_f32)))\n" "float32x4_t vcaddq_rot270_x_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_f32)))\n" "float32x4_t vcaddq_rot270_x(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_f16)))\n" "float16x8_t vcaddq_rot90_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_f16)))\n" "float16x8_t vcaddq_rot90(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_f32)))\n" "float32x4_t vcaddq_rot90_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_f32)))\n" "float32x4_t vcaddq_rot90(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_f16)))\n" "float16x8_t vcaddq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_f16)))\n" "float16x8_t 
vcaddq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_f32)))\n" "float32x4_t vcaddq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_f32)))\n" "float32x4_t vcaddq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_f16)))\n" "float16x8_t vcaddq_rot90_x_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_f16)))\n" "float16x8_t vcaddq_rot90_x(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_f32)))\n" "float32x4_t vcaddq_rot90_x_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_f32)))\n" "float32x4_t vcaddq_rot90_x(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_f16)))\n" "float16x8_t vcmlaq_f16(float16x8_t, float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_f16)))\n" "float16x8_t vcmlaq(float16x8_t, float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_f32)))\n" "float32x4_t vcmlaq_f32(float32x4_t, float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_f32)))\n" "float32x4_t vcmlaq(float32x4_t, float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_m_f16)))\n" "float16x8_t 
vcmlaq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_m_f16)))\n" "float16x8_t vcmlaq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_m_f32)))\n" "float32x4_t vcmlaq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_m_f32)))\n" "float32x4_t vcmlaq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_f16)))\n" "float16x8_t vcmlaq_rot180_f16(float16x8_t, float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_f16)))\n" "float16x8_t vcmlaq_rot180(float16x8_t, float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_f32)))\n" "float32x4_t vcmlaq_rot180_f32(float32x4_t, float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_f32)))\n" "float32x4_t vcmlaq_rot180(float32x4_t, float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_m_f16)))\n" "float16x8_t vcmlaq_rot180_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_m_f16)))\n" "float16x8_t vcmlaq_rot180_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_m_f32)))\n" "float32x4_t vcmlaq_rot180_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_m_f32)))\n" "float32x4_t vcmlaq_rot180_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_f16)))\n" "float16x8_t vcmlaq_rot270_f16(float16x8_t, float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_f16)))\n" "float16x8_t vcmlaq_rot270(float16x8_t, float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_f32)))\n" "float32x4_t vcmlaq_rot270_f32(float32x4_t, float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_f32)))\n" "float32x4_t vcmlaq_rot270(float32x4_t, float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_m_f16)))\n" "float16x8_t vcmlaq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_m_f16)))\n" "float16x8_t vcmlaq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_m_f32)))\n" "float32x4_t vcmlaq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_m_f32)))\n" "float32x4_t vcmlaq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_f16)))\n" "float16x8_t vcmlaq_rot90_f16(float16x8_t, float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_f16)))\n" "float16x8_t vcmlaq_rot90(float16x8_t, float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_f32)))\n" "float32x4_t vcmlaq_rot90_f32(float32x4_t, float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_f32)))\n" "float32x4_t vcmlaq_rot90(float32x4_t, float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_m_f16)))\n" "float16x8_t vcmlaq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_m_f16)))\n" "float16x8_t vcmlaq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_m_f32)))\n" "float32x4_t vcmlaq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_m_f32)))\n" "float32x4_t vcmlaq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_f16)))\n" "mve_pred16_t vcmpeqq_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_f16)))\n" "mve_pred16_t vcmpeqq(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_f32)))\n" "mve_pred16_t vcmpeqq_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_f32)))\n" "mve_pred16_t vcmpeqq(float32x4_t, float32x4_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_f16)))\n" "mve_pred16_t vcmpeqq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_f16)))\n" "mve_pred16_t vcmpeqq_m(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_f32)))\n" "mve_pred16_t vcmpeqq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_f32)))\n" "mve_pred16_t vcmpeqq_m(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_f16)))\n" "mve_pred16_t vcmpeqq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_f16)))\n" "mve_pred16_t vcmpeqq_m(float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_f32)))\n" "mve_pred16_t vcmpeqq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_f32)))\n" "mve_pred16_t vcmpeqq_m(float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_f16)))\n" "mve_pred16_t vcmpeqq_n_f16(float16x8_t, float16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_f16)))\n" "mve_pred16_t vcmpeqq(float16x8_t, float16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_f32)))\n" "mve_pred16_t vcmpeqq_n_f32(float32x4_t, float32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_f32)))\n" 
"mve_pred16_t vcmpeqq(float32x4_t, float32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_f16)))\n" "mve_pred16_t vcmpgeq_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_f16)))\n" "mve_pred16_t vcmpgeq(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_f32)))\n" "mve_pred16_t vcmpgeq_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_f32)))\n" "mve_pred16_t vcmpgeq(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_f16)))\n" "mve_pred16_t vcmpgeq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_f16)))\n" "mve_pred16_t vcmpgeq_m(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_f32)))\n" "mve_pred16_t vcmpgeq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_f32)))\n" "mve_pred16_t vcmpgeq_m(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_f16)))\n" "mve_pred16_t vcmpgeq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_f16)))\n" "mve_pred16_t vcmpgeq_m(float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_f32)))\n" "mve_pred16_t vcmpgeq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_f32)))\n" "mve_pred16_t vcmpgeq_m(float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_f16)))\n" "mve_pred16_t vcmpgeq_n_f16(float16x8_t, float16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_f16)))\n" "mve_pred16_t vcmpgeq(float16x8_t, float16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_f32)))\n" "mve_pred16_t vcmpgeq_n_f32(float32x4_t, float32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_f32)))\n" "mve_pred16_t vcmpgeq(float32x4_t, float32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_f16)))\n" "mve_pred16_t vcmpgtq_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_f16)))\n" "mve_pred16_t vcmpgtq(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_f32)))\n" "mve_pred16_t vcmpgtq_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_f32)))\n" "mve_pred16_t vcmpgtq(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_f16)))\n" "mve_pred16_t vcmpgtq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_f16)))\n" "mve_pred16_t vcmpgtq_m(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_f32)))\n" "mve_pred16_t vcmpgtq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_f32)))\n" "mve_pred16_t vcmpgtq_m(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_f16)))\n" "mve_pred16_t vcmpgtq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_f16)))\n" "mve_pred16_t vcmpgtq_m(float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_f32)))\n" "mve_pred16_t vcmpgtq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_f32)))\n" "mve_pred16_t vcmpgtq_m(float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_f16)))\n" "mve_pred16_t vcmpgtq_n_f16(float16x8_t, float16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_f16)))\n" "mve_pred16_t vcmpgtq(float16x8_t, float16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_f32)))\n" "mve_pred16_t vcmpgtq_n_f32(float32x4_t, float32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_f32)))\n" "mve_pred16_t vcmpgtq(float32x4_t, float32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_f16)))\n" "mve_pred16_t vcmpleq_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_f16)))\n" "mve_pred16_t vcmpleq(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_f32)))\n" "mve_pred16_t vcmpleq_f32(float32x4_t, float32x4_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_f32)))\n" "mve_pred16_t vcmpleq(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_f16)))\n" "mve_pred16_t vcmpleq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_f16)))\n" "mve_pred16_t vcmpleq_m(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_f32)))\n" "mve_pred16_t vcmpleq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_f32)))\n" "mve_pred16_t vcmpleq_m(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_f16)))\n" "mve_pred16_t vcmpleq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_f16)))\n" "mve_pred16_t vcmpleq_m(float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_f32)))\n" "mve_pred16_t vcmpleq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_f32)))\n" "mve_pred16_t vcmpleq_m(float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_f16)))\n" "mve_pred16_t vcmpleq_n_f16(float16x8_t, float16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_f16)))\n" "mve_pred16_t vcmpleq(float16x8_t, float16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_f32)))\n" "mve_pred16_t 
vcmpleq_n_f32(float32x4_t, float32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_f32)))\n" "mve_pred16_t vcmpleq(float32x4_t, float32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_f16)))\n" "mve_pred16_t vcmpltq_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_f16)))\n" "mve_pred16_t vcmpltq(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_f32)))\n" "mve_pred16_t vcmpltq_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_f32)))\n" "mve_pred16_t vcmpltq(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_f16)))\n" "mve_pred16_t vcmpltq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_f16)))\n" "mve_pred16_t vcmpltq_m(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_f32)))\n" "mve_pred16_t vcmpltq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_f32)))\n" "mve_pred16_t vcmpltq_m(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_f16)))\n" "mve_pred16_t vcmpltq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_f16)))\n" "mve_pred16_t vcmpltq_m(float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_f32)))\n" "mve_pred16_t vcmpltq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_f32)))\n" "mve_pred16_t vcmpltq_m(float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_f16)))\n" "mve_pred16_t vcmpltq_n_f16(float16x8_t, float16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_f16)))\n" "mve_pred16_t vcmpltq(float16x8_t, float16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_f32)))\n" "mve_pred16_t vcmpltq_n_f32(float32x4_t, float32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_f32)))\n" "mve_pred16_t vcmpltq(float32x4_t, float32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_f16)))\n" "mve_pred16_t vcmpneq_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_f16)))\n" "mve_pred16_t vcmpneq(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_f32)))\n" "mve_pred16_t vcmpneq_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_f32)))\n" "mve_pred16_t vcmpneq(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_f16)))\n" "mve_pred16_t vcmpneq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_f16)))\n" "mve_pred16_t vcmpneq_m(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_f32)))\n" "mve_pred16_t vcmpneq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_f32)))\n" "mve_pred16_t vcmpneq_m(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_f16)))\n" "mve_pred16_t vcmpneq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_f16)))\n" "mve_pred16_t vcmpneq_m(float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_f32)))\n" "mve_pred16_t vcmpneq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_f32)))\n" "mve_pred16_t vcmpneq_m(float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_f16)))\n" "mve_pred16_t vcmpneq_n_f16(float16x8_t, float16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_f16)))\n" "mve_pred16_t vcmpneq(float16x8_t, float16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_f32)))\n" "mve_pred16_t vcmpneq_n_f32(float32x4_t, float32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_f32)))\n" "mve_pred16_t vcmpneq(float32x4_t, float32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_f16)))\n" "float16x8_t vcmulq_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_f16)))\n" "float16x8_t vcmulq(float16x8_t, float16x8_t);\n" 
"static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_f32)))\n" "float32x4_t vcmulq_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_f32)))\n" "float32x4_t vcmulq(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_m_f16)))\n" "float16x8_t vcmulq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_m_f16)))\n" "float16x8_t vcmulq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_m_f32)))\n" "float32x4_t vcmulq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_m_f32)))\n" "float32x4_t vcmulq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_f16)))\n" "float16x8_t vcmulq_rot180_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_f16)))\n" "float16x8_t vcmulq_rot180(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_f32)))\n" "float32x4_t vcmulq_rot180_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_f32)))\n" "float32x4_t vcmulq_rot180(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_m_f16)))\n" "float16x8_t vcmulq_rot180_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_m_f16)))\n" "float16x8_t vcmulq_rot180_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_m_f32)))\n" "float32x4_t vcmulq_rot180_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_m_f32)))\n" "float32x4_t vcmulq_rot180_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_x_f16)))\n" "float16x8_t vcmulq_rot180_x_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_x_f16)))\n" "float16x8_t vcmulq_rot180_x(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_x_f32)))\n" "float32x4_t vcmulq_rot180_x_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_x_f32)))\n" "float32x4_t vcmulq_rot180_x(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_f16)))\n" "float16x8_t vcmulq_rot270_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_f16)))\n" "float16x8_t vcmulq_rot270(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_f32)))\n" "float32x4_t vcmulq_rot270_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_f32)))\n" "float32x4_t vcmulq_rot270(float32x4_t, float32x4_t);\n" "static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_m_f16)))\n" "float16x8_t vcmulq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_m_f16)))\n" "float16x8_t vcmulq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_m_f32)))\n" "float32x4_t vcmulq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_m_f32)))\n" "float32x4_t vcmulq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_x_f16)))\n" "float16x8_t vcmulq_rot270_x_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_x_f16)))\n" "float16x8_t vcmulq_rot270_x(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_x_f32)))\n" "float32x4_t vcmulq_rot270_x_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_x_f32)))\n" "float32x4_t vcmulq_rot270_x(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_f16)))\n" "float16x8_t vcmulq_rot90_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_f16)))\n" "float16x8_t vcmulq_rot90(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_f32)))\n" "float32x4_t 
vcmulq_rot90_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_f32)))\n" "float32x4_t vcmulq_rot90(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_m_f16)))\n" "float16x8_t vcmulq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_m_f16)))\n" "float16x8_t vcmulq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_m_f32)))\n" "float32x4_t vcmulq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_m_f32)))\n" "float32x4_t vcmulq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_x_f16)))\n" "float16x8_t vcmulq_rot90_x_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_x_f16)))\n" "float16x8_t vcmulq_rot90_x(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_x_f32)))\n" "float32x4_t vcmulq_rot90_x_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_x_f32)))\n" "float32x4_t vcmulq_rot90_x(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_x_f16)))\n" "float16x8_t vcmulq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_x_f16)))\n" "float16x8_t vcmulq_x(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_x_f32)))\n" "float32x4_t vcmulq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_x_f32)))\n" "float32x4_t vcmulq_x(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_f16)))\n" "float16x8_t vcreateq_f16(uint64_t, uint64_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_f32)))\n" "float32x4_t vcreateq_f32(uint64_t, uint64_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_s16_f16)))\n" "int16x8_t vcvtaq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_s16_f16)))\n" "int16x8_t vcvtaq_m(int16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_s32_f32)))\n" "int32x4_t vcvtaq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_s32_f32)))\n" "int32x4_t vcvtaq_m(int32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_u16_f16)))\n" "uint16x8_t vcvtaq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_u16_f16)))\n" "uint16x8_t vcvtaq_m(uint16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_u32_f32)))\n" "uint32x4_t vcvtaq_m_u32_f32(uint32x4_t, float32x4_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_u32_f32)))\n" "uint32x4_t vcvtaq_m(uint32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_s16_f16)))\n" "int16x8_t vcvtaq_s16_f16(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_s32_f32)))\n" "int32x4_t vcvtaq_s32_f32(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_u16_f16)))\n" "uint16x8_t vcvtaq_u16_f16(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_u32_f32)))\n" "uint32x4_t vcvtaq_u32_f32(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_x_s16_f16)))\n" "int16x8_t vcvtaq_x_s16_f16(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_x_s32_f32)))\n" "int32x4_t vcvtaq_x_s32_f32(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_x_u16_f16)))\n" "uint16x8_t vcvtaq_x_u16_f16(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_x_u32_f32)))\n" "uint32x4_t vcvtaq_x_u32_f32(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_f16_f32)))\n" "float16x8_t vcvtbq_f16_f32(float16x8_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_f32_f16)))\n" "float32x4_t vcvtbq_f32_f16(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_m_f16_f32)))\n" "float16x8_t vcvtbq_m_f16_f32(float16x8_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_m_f32_f16)))\n" 
"float32x4_t vcvtbq_m_f32_f16(float32x4_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_x_f32_f16)))\n" "float32x4_t vcvtbq_x_f32_f16(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_s16_f16)))\n" "int16x8_t vcvtmq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_s16_f16)))\n" "int16x8_t vcvtmq_m(int16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_s32_f32)))\n" "int32x4_t vcvtmq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_s32_f32)))\n" "int32x4_t vcvtmq_m(int32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_u16_f16)))\n" "uint16x8_t vcvtmq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_u16_f16)))\n" "uint16x8_t vcvtmq_m(uint16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_u32_f32)))\n" "uint32x4_t vcvtmq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_u32_f32)))\n" "uint32x4_t vcvtmq_m(uint32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_s16_f16)))\n" "int16x8_t vcvtmq_s16_f16(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_s32_f32)))\n" "int32x4_t vcvtmq_s32_f32(float32x4_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_u16_f16)))\n" "uint16x8_t vcvtmq_u16_f16(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_u32_f32)))\n" "uint32x4_t vcvtmq_u32_f32(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_x_s16_f16)))\n" "int16x8_t vcvtmq_x_s16_f16(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_x_s32_f32)))\n" "int32x4_t vcvtmq_x_s32_f32(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_x_u16_f16)))\n" "uint16x8_t vcvtmq_x_u16_f16(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_x_u32_f32)))\n" "uint32x4_t vcvtmq_x_u32_f32(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_s16_f16)))\n" "int16x8_t vcvtnq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_s16_f16)))\n" "int16x8_t vcvtnq_m(int16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_s32_f32)))\n" "int32x4_t vcvtnq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_s32_f32)))\n" "int32x4_t vcvtnq_m(int32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_u16_f16)))\n" "uint16x8_t vcvtnq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_u16_f16)))\n" "uint16x8_t vcvtnq_m(uint16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_u32_f32)))\n" "uint32x4_t vcvtnq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_u32_f32)))\n" "uint32x4_t vcvtnq_m(uint32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_s16_f16)))\n" "int16x8_t vcvtnq_s16_f16(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_s32_f32)))\n" "int32x4_t vcvtnq_s32_f32(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_u16_f16)))\n" "uint16x8_t vcvtnq_u16_f16(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_u32_f32)))\n" "uint32x4_t vcvtnq_u32_f32(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_x_s16_f16)))\n" "int16x8_t vcvtnq_x_s16_f16(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_x_s32_f32)))\n" "int32x4_t vcvtnq_x_s32_f32(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_x_u16_f16)))\n" "uint16x8_t vcvtnq_x_u16_f16(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_x_u32_f32)))\n" "uint32x4_t vcvtnq_x_u32_f32(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_s16_f16)))\n" "int16x8_t vcvtpq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_s16_f16)))\n" "int16x8_t vcvtpq_m(int16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_s32_f32)))\n" "int32x4_t vcvtpq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_s32_f32)))\n" "int32x4_t vcvtpq_m(int32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_u16_f16)))\n" "uint16x8_t vcvtpq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_u16_f16)))\n" "uint16x8_t vcvtpq_m(uint16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_u32_f32)))\n" "uint32x4_t vcvtpq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_u32_f32)))\n" "uint32x4_t vcvtpq_m(uint32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_s16_f16)))\n" "int16x8_t vcvtpq_s16_f16(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_s32_f32)))\n" "int32x4_t vcvtpq_s32_f32(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_u16_f16)))\n" "uint16x8_t vcvtpq_u16_f16(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_u32_f32)))\n" "uint32x4_t vcvtpq_u32_f32(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_x_s16_f16)))\n" "int16x8_t vcvtpq_x_s16_f16(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_x_s32_f32)))\n" "int32x4_t vcvtpq_x_s32_f32(float32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_x_u16_f16)))\n" "uint16x8_t vcvtpq_x_u16_f16(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_x_u32_f32)))\n" "uint32x4_t vcvtpq_x_u32_f32(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f16_s16)))\n" "float16x8_t vcvtq_f16_s16(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f16_s16)))\n" "float16x8_t vcvtq(int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f16_u16)))\n" "float16x8_t vcvtq_f16_u16(uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f16_u16)))\n" "float16x8_t vcvtq(uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f32_s32)))\n" "float32x4_t vcvtq_f32_s32(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f32_s32)))\n" "float32x4_t vcvtq(int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f32_u32)))\n" "float32x4_t vcvtq_f32_u32(uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f32_u32)))\n" "float32x4_t vcvtq(uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f16_s16)))\n" "float16x8_t vcvtq_m_f16_s16(float16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f16_s16)))\n" "float16x8_t vcvtq_m(float16x8_t, int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f16_u16)))\n" "float16x8_t vcvtq_m_f16_u16(float16x8_t, uint16x8_t, mve_pred16_t);\n" "static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f16_u16)))\n" "float16x8_t vcvtq_m(float16x8_t, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f32_s32)))\n" "float32x4_t vcvtq_m_f32_s32(float32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f32_s32)))\n" "float32x4_t vcvtq_m(float32x4_t, int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f32_u32)))\n" "float32x4_t vcvtq_m_f32_u32(float32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f32_u32)))\n" "float32x4_t vcvtq_m(float32x4_t, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f16_s16)))\n" "float16x8_t vcvtq_m_n_f16_s16(float16x8_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f16_s16)))\n" "float16x8_t vcvtq_m_n(float16x8_t, int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f16_u16)))\n" "float16x8_t vcvtq_m_n_f16_u16(float16x8_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f16_u16)))\n" "float16x8_t vcvtq_m_n(float16x8_t, uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f32_s32)))\n" "float32x4_t vcvtq_m_n_f32_s32(float32x4_t, int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f32_s32)))\n" "float32x4_t vcvtq_m_n(float32x4_t, int32x4_t, int, mve_pred16_t);\n" "static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f32_u32)))\n" "float32x4_t vcvtq_m_n_f32_u32(float32x4_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f32_u32)))\n" "float32x4_t vcvtq_m_n(float32x4_t, uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_s16_f16)))\n" "int16x8_t vcvtq_m_n_s16_f16(int16x8_t, float16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_s16_f16)))\n" "int16x8_t vcvtq_m_n(int16x8_t, float16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_s32_f32)))\n" "int32x4_t vcvtq_m_n_s32_f32(int32x4_t, float32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_s32_f32)))\n" "int32x4_t vcvtq_m_n(int32x4_t, float32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_u16_f16)))\n" "uint16x8_t vcvtq_m_n_u16_f16(uint16x8_t, float16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_u16_f16)))\n" "uint16x8_t vcvtq_m_n(uint16x8_t, float16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_u32_f32)))\n" "uint32x4_t vcvtq_m_n_u32_f32(uint32x4_t, float32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_u32_f32)))\n" "uint32x4_t vcvtq_m_n(uint32x4_t, float32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_s16_f16)))\n" "int16x8_t vcvtq_m_s16_f16(int16x8_t, float16x8_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_s16_f16)))\n" "int16x8_t vcvtq_m(int16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_s32_f32)))\n" "int32x4_t vcvtq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_s32_f32)))\n" "int32x4_t vcvtq_m(int32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_u16_f16)))\n" "uint16x8_t vcvtq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_u16_f16)))\n" "uint16x8_t vcvtq_m(uint16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_u32_f32)))\n" "uint32x4_t vcvtq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_u32_f32)))\n" "uint32x4_t vcvtq_m(uint32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f16_s16)))\n" "float16x8_t vcvtq_n_f16_s16(int16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f16_s16)))\n" "float16x8_t vcvtq_n(int16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f16_u16)))\n" "float16x8_t vcvtq_n_f16_u16(uint16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f16_u16)))\n" "float16x8_t vcvtq_n(uint16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f32_s32)))\n" "float32x4_t 
vcvtq_n_f32_s32(int32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f32_s32)))\n" "float32x4_t vcvtq_n(int32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f32_u32)))\n" "float32x4_t vcvtq_n_f32_u32(uint32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f32_u32)))\n" "float32x4_t vcvtq_n(uint32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_s16_f16)))\n" "int16x8_t vcvtq_n_s16_f16(float16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_s32_f32)))\n" "int32x4_t vcvtq_n_s32_f32(float32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_u16_f16)))\n" "uint16x8_t vcvtq_n_u16_f16(float16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_u32_f32)))\n" "uint32x4_t vcvtq_n_u32_f32(float32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_s16_f16)))\n" "int16x8_t vcvtq_s16_f16(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_s32_f32)))\n" "int32x4_t vcvtq_s32_f32(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_u16_f16)))\n" "uint16x8_t vcvtq_u16_f16(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_u32_f32)))\n" "uint32x4_t vcvtq_u32_f32(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f16_s16)))\n" "float16x8_t vcvtq_x_f16_s16(int16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f16_s16)))\n" "float16x8_t vcvtq_x(int16x8_t, mve_pred16_t);\n" "static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f16_u16)))\n" "float16x8_t vcvtq_x_f16_u16(uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f16_u16)))\n" "float16x8_t vcvtq_x(uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f32_s32)))\n" "float32x4_t vcvtq_x_f32_s32(int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f32_s32)))\n" "float32x4_t vcvtq_x(int32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f32_u32)))\n" "float32x4_t vcvtq_x_f32_u32(uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f32_u32)))\n" "float32x4_t vcvtq_x(uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f16_s16)))\n" "float16x8_t vcvtq_x_n_f16_s16(int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f16_s16)))\n" "float16x8_t vcvtq_x_n(int16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f16_u16)))\n" "float16x8_t vcvtq_x_n_f16_u16(uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f16_u16)))\n" "float16x8_t vcvtq_x_n(uint16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f32_s32)))\n" "float32x4_t vcvtq_x_n_f32_s32(int32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f32_s32)))\n" "float32x4_t vcvtq_x_n(int32x4_t, int, 
mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f32_u32)))\n" "float32x4_t vcvtq_x_n_f32_u32(uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f32_u32)))\n" "float32x4_t vcvtq_x_n(uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_s16_f16)))\n" "int16x8_t vcvtq_x_n_s16_f16(float16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_s32_f32)))\n" "int32x4_t vcvtq_x_n_s32_f32(float32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_u16_f16)))\n" "uint16x8_t vcvtq_x_n_u16_f16(float16x8_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_u32_f32)))\n" "uint32x4_t vcvtq_x_n_u32_f32(float32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_s16_f16)))\n" "int16x8_t vcvtq_x_s16_f16(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_s32_f32)))\n" "int32x4_t vcvtq_x_s32_f32(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_u16_f16)))\n" "uint16x8_t vcvtq_x_u16_f16(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_u32_f32)))\n" "uint32x4_t vcvtq_x_u32_f32(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_f16_f32)))\n" "float16x8_t vcvttq_f16_f32(float16x8_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_f32_f16)))\n" "float32x4_t vcvttq_f32_f16(float16x8_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_m_f16_f32)))\n" "float16x8_t vcvttq_m_f16_f32(float16x8_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_m_f32_f16)))\n" "float32x4_t vcvttq_m_f32_f16(float32x4_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_x_f32_f16)))\n" "float32x4_t vcvttq_x_f32_f16(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_f16)))\n" "float16x8_t vdupq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_f16)))\n" "float16x8_t vdupq_m(float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_f32)))\n" "float32x4_t vdupq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_f32)))\n" "float32x4_t vdupq_m(float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_f16)))\n" "float16x8_t vdupq_n_f16(float16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_f32)))\n" "float32x4_t vdupq_n_f32(float32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_f16)))\n" "float16x8_t vdupq_x_n_f16(float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_f32)))\n" "float32x4_t vdupq_x_n_f32(float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_f16)))\n" "float16x8_t veorq_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_veorq_f16)))\n" "float16x8_t veorq(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_f32)))\n" "float32x4_t veorq_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_f32)))\n" "float32x4_t veorq(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_f16)))\n" "float16x8_t veorq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_f16)))\n" "float16x8_t veorq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_f32)))\n" "float32x4_t veorq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_f32)))\n" "float32x4_t veorq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_f16)))\n" "float16x8_t veorq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_f16)))\n" "float16x8_t veorq_x(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_f32)))\n" "float32x4_t veorq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_f32)))\n" "float32x4_t veorq_x(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_f16)))\n" "float16x8_t vfmaq_f16(float16x8_t, float16x8_t, 
float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_f16)))\n" "float16x8_t vfmaq(float16x8_t, float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_f32)))\n" "float32x4_t vfmaq_f32(float32x4_t, float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_f32)))\n" "float32x4_t vfmaq(float32x4_t, float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_f16)))\n" "float16x8_t vfmaq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_f16)))\n" "float16x8_t vfmaq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_f32)))\n" "float32x4_t vfmaq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_f32)))\n" "float32x4_t vfmaq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_n_f16)))\n" "float16x8_t vfmaq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_n_f16)))\n" "float16x8_t vfmaq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_n_f32)))\n" "float32x4_t vfmaq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_n_f32)))\n" "float32x4_t vfmaq_m(float32x4_t, float32x4_t, float32_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_n_f16)))\n" "float16x8_t vfmaq_n_f16(float16x8_t, float16x8_t, float16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_n_f16)))\n" "float16x8_t vfmaq(float16x8_t, float16x8_t, float16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_n_f32)))\n" "float32x4_t vfmaq_n_f32(float32x4_t, float32x4_t, float32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_n_f32)))\n" "float32x4_t vfmaq(float32x4_t, float32x4_t, float32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_m_n_f16)))\n" "float16x8_t vfmasq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_m_n_f16)))\n" "float16x8_t vfmasq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_m_n_f32)))\n" "float32x4_t vfmasq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_m_n_f32)))\n" "float32x4_t vfmasq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_n_f16)))\n" "float16x8_t vfmasq_n_f16(float16x8_t, float16x8_t, float16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_n_f16)))\n" "float16x8_t vfmasq(float16x8_t, float16x8_t, float16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_n_f32)))\n" "float32x4_t vfmasq_n_f32(float32x4_t, float32x4_t, float32_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_n_f32)))\n" "float32x4_t vfmasq(float32x4_t, float32x4_t, float32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_f16)))\n" "float16x8_t vfmsq_f16(float16x8_t, float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_f16)))\n" "float16x8_t vfmsq(float16x8_t, float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_f32)))\n" "float32x4_t vfmsq_f32(float32x4_t, float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_f32)))\n" "float32x4_t vfmsq(float32x4_t, float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_m_f16)))\n" "float16x8_t vfmsq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_m_f16)))\n" "float16x8_t vfmsq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_m_f32)))\n" "float32x4_t vfmsq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_m_f32)))\n" "float32x4_t vfmsq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_f16)))\n" "float16_t vgetq_lane_f16(float16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_f16)))\n" "float16_t vgetq_lane(float16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_f32)))\n" "float32_t vgetq_lane_f32(float32x4_t, int);\n" "static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_f32)))\n" "float32_t vgetq_lane(float32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_f16)))\n" "float16x8_t vld1q_f16(const float16_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_f16)))\n" "float16x8_t vld1q(const float16_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_f32)))\n" "float32x4_t vld1q_f32(const float32_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_f32)))\n" "float32x4_t vld1q(const float32_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_f16)))\n" "float16x8_t vld1q_z_f16(const float16_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_f16)))\n" "float16x8_t vld1q_z(const float16_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_f32)))\n" "float32x4_t vld1q_z_f32(const float32_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_f32)))\n" "float32x4_t vld1q_z(const float32_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_f16)))\n" "float16x8x2_t vld2q_f16(const float16_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_f16)))\n" "float16x8x2_t vld2q(const float16_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_f32)))\n" "float32x4x2_t vld2q_f32(const float32_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_f32)))\n" "float32x4x2_t vld2q(const float32_t *);\n" 
"static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_f16)))\n" "float16x8x4_t vld4q_f16(const float16_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_f16)))\n" "float16x8x4_t vld4q(const float16_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_f32)))\n" "float32x4x4_t vld4q_f32(const float32_t *);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_f32)))\n" "float32x4x4_t vld4q(const float32_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_f16)))\n" "float16x8_t vldrhq_f16(const float16_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_f16)))\n" "float16x8_t vldrhq_gather_offset_f16(const float16_t *, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_f16)))\n" "float16x8_t vldrhq_gather_offset(const float16_t *, uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_f16)))\n" "float16x8_t vldrhq_gather_offset_z_f16(const float16_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_f16)))\n" "float16x8_t vldrhq_gather_offset_z(const float16_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_f16)))\n" "float16x8_t vldrhq_gather_shifted_offset_f16(const float16_t *, uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_f16)))\n" "float16x8_t vldrhq_gather_shifted_offset(const float16_t *, uint16x8_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_f16)))\n" "float16x8_t vldrhq_gather_shifted_offset_z_f16(const float16_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_f16)))\n" "float16x8_t vldrhq_gather_shifted_offset_z(const float16_t *, uint16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_f16)))\n" "float16x8_t vldrhq_z_f16(const float16_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_f32)))\n" "float32x4_t vldrwq_f32(const float32_t *);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_f32)))\n" "float32x4_t vldrwq_gather_base_f32(uint32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_f32)))\n" "float32x4_t vldrwq_gather_base_wb_f32(uint32x4_t *, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_f32)))\n" "float32x4_t vldrwq_gather_base_wb_z_f32(uint32x4_t *, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_z_f32)))\n" "float32x4_t vldrwq_gather_base_z_f32(uint32x4_t, int, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_f32)))\n" "float32x4_t vldrwq_gather_offset_f32(const float32_t *, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_f32)))\n" "float32x4_t vldrwq_gather_offset(const float32_t *, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_f32)))\n" "float32x4_t vldrwq_gather_offset_z_f32(const float32_t *, uint32x4_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_f32)))\n" "float32x4_t vldrwq_gather_offset_z(const float32_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_f32)))\n" "float32x4_t vldrwq_gather_shifted_offset_f32(const float32_t *, uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_f32)))\n" "float32x4_t vldrwq_gather_shifted_offset(const float32_t *, uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_f32)))\n" "float32x4_t vldrwq_gather_shifted_offset_z_f32(const float32_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_f32)))\n" "float32x4_t vldrwq_gather_shifted_offset_z(const float32_t *, uint32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_z_f32)))\n" "float32x4_t vldrwq_z_f32(const float32_t *, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_f16)))\n" "float16x8_t vmaxnmaq_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_f16)))\n" "float16x8_t vmaxnmaq(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_f32)))\n" "float32x4_t vmaxnmaq_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_f32)))\n" "float32x4_t vmaxnmaq(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_m_f16)))\n" "float16x8_t 
vmaxnmaq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_m_f16)))\n" "float16x8_t vmaxnmaq_m(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_m_f32)))\n" "float32x4_t vmaxnmaq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_m_f32)))\n" "float32x4_t vmaxnmaq_m(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_f16)))\n" "float16_t vmaxnmavq_f16(float16_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_f16)))\n" "float16_t vmaxnmavq(float16_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_f32)))\n" "float32_t vmaxnmavq_f32(float32_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_f32)))\n" "float32_t vmaxnmavq(float32_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_p_f16)))\n" "float16_t vmaxnmavq_p_f16(float16_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_p_f16)))\n" "float16_t vmaxnmavq_p(float16_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_p_f32)))\n" "float32_t vmaxnmavq_p_f32(float32_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_p_f32)))\n" "float32_t vmaxnmavq_p(float32_t, float32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_f16)))\n" "float16x8_t vmaxnmq_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_f16)))\n" "float16x8_t vmaxnmq(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_f32)))\n" "float32x4_t vmaxnmq_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_f32)))\n" "float32x4_t vmaxnmq(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_m_f16)))\n" "float16x8_t vmaxnmq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_m_f16)))\n" "float16x8_t vmaxnmq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_m_f32)))\n" "float32x4_t vmaxnmq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_m_f32)))\n" "float32x4_t vmaxnmq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_x_f16)))\n" "float16x8_t vmaxnmq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_x_f16)))\n" "float16x8_t vmaxnmq_x(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_x_f32)))\n" "float32x4_t vmaxnmq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_x_f32)))\n" "float32x4_t vmaxnmq_x(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_f16)))\n" "float16_t vmaxnmvq_f16(float16_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_f16)))\n" "float16_t vmaxnmvq(float16_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_f32)))\n" "float32_t vmaxnmvq_f32(float32_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_f32)))\n" "float32_t vmaxnmvq(float32_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_p_f16)))\n" "float16_t vmaxnmvq_p_f16(float16_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_p_f16)))\n" "float16_t vmaxnmvq_p(float16_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_p_f32)))\n" "float32_t vmaxnmvq_p_f32(float32_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_p_f32)))\n" "float32_t vmaxnmvq_p(float32_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_f16)))\n" "float16x8_t vminnmaq_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_f16)))\n" "float16x8_t vminnmaq(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_f32)))\n" "float32x4_t vminnmaq_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_f32)))\n" "float32x4_t vminnmaq(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_m_f16)))\n" "float16x8_t vminnmaq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_m_f16)))\n" "float16x8_t vminnmaq_m(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_m_f32)))\n" "float32x4_t vminnmaq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_m_f32)))\n" "float32x4_t vminnmaq_m(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_f16)))\n" "float16_t vminnmavq_f16(float16_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_f16)))\n" "float16_t vminnmavq(float16_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_f32)))\n" "float32_t vminnmavq_f32(float32_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_f32)))\n" "float32_t vminnmavq(float32_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_p_f16)))\n" "float16_t vminnmavq_p_f16(float16_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_p_f16)))\n" "float16_t vminnmavq_p(float16_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_p_f32)))\n" "float32_t vminnmavq_p_f32(float32_t, float32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_p_f32)))\n" "float32_t vminnmavq_p(float32_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_f16)))\n" "float16x8_t vminnmq_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_f16)))\n" "float16x8_t vminnmq(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_f32)))\n" "float32x4_t vminnmq_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_f32)))\n" "float32x4_t vminnmq(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_m_f16)))\n" "float16x8_t vminnmq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_m_f16)))\n" "float16x8_t vminnmq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_m_f32)))\n" "float32x4_t vminnmq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_m_f32)))\n" "float32x4_t vminnmq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_x_f16)))\n" "float16x8_t vminnmq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_x_f16)))\n" "float16x8_t vminnmq_x(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_x_f32)))\n" "float32x4_t vminnmq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_x_f32)))\n" "float32x4_t vminnmq_x(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_f16)))\n" "float16_t vminnmvq_f16(float16_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_f16)))\n" "float16_t vminnmvq(float16_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_f32)))\n" "float32_t vminnmvq_f32(float32_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_f32)))\n" "float32_t vminnmvq(float32_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_p_f16)))\n" "float16_t vminnmvq_p_f16(float16_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_p_f16)))\n" "float16_t vminnmvq_p(float16_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_p_f32)))\n" "float32_t vminnmvq_p_f32(float32_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_p_f32)))\n" "float32_t vminnmvq_p(float32_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_f16)))\n" "float16x8_t vmulq_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_f16)))\n" "float16x8_t vmulq(float16x8_t, float16x8_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_f32)))\n" "float32x4_t vmulq_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_f32)))\n" "float32x4_t vmulq(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_f16)))\n" "float16x8_t vmulq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_f16)))\n" "float16x8_t vmulq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_f32)))\n" "float32x4_t vmulq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_f32)))\n" "float32x4_t vmulq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_f16)))\n" "float16x8_t vmulq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_f16)))\n" "float16x8_t vmulq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_f32)))\n" "float32x4_t vmulq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_f32)))\n" "float32x4_t vmulq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_f16)))\n" "float16x8_t vmulq_n_f16(float16x8_t, float16_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_f16)))\n" "float16x8_t vmulq(float16x8_t, float16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_f32)))\n" "float32x4_t vmulq_n_f32(float32x4_t, float32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_f32)))\n" "float32x4_t vmulq(float32x4_t, float32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_f16)))\n" "float16x8_t vmulq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_f16)))\n" "float16x8_t vmulq_x(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_f32)))\n" "float32x4_t vmulq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_f32)))\n" "float32x4_t vmulq_x(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_f16)))\n" "float16x8_t vmulq_x_n_f16(float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_f16)))\n" "float16x8_t vmulq_x(float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_f32)))\n" "float32x4_t vmulq_x_n_f32(float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_f32)))\n" "float32x4_t vmulq_x(float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_f16)))\n" "float16x8_t vnegq_f16(float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_f16)))\n" "float16x8_t vnegq(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_f32)))\n" "float32x4_t vnegq_f32(float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_f32)))\n" "float32x4_t vnegq(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_f16)))\n" "float16x8_t vnegq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_f16)))\n" "float16x8_t vnegq_m(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_f32)))\n" "float32x4_t vnegq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_f32)))\n" "float32x4_t vnegq_m(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_f16)))\n" "float16x8_t vnegq_x_f16(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_f16)))\n" "float16x8_t vnegq_x(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_f32)))\n" "float32x4_t vnegq_x_f32(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_f32)))\n" "float32x4_t vnegq_x(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_f16)))\n" "float16x8_t vornq_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_f16)))\n" "float16x8_t vornq(float16x8_t, 
float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_f32)))\n" "float32x4_t vornq_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_f32)))\n" "float32x4_t vornq(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_f16)))\n" "float16x8_t vornq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_f16)))\n" "float16x8_t vornq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_f32)))\n" "float32x4_t vornq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_f32)))\n" "float32x4_t vornq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_f16)))\n" "float16x8_t vornq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_f16)))\n" "float16x8_t vornq_x(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_f32)))\n" "float32x4_t vornq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_f32)))\n" "float32x4_t vornq_x(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_f16)))\n" "float16x8_t vorrq_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_f16)))\n" "float16x8_t vorrq(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_f32)))\n" "float32x4_t vorrq_f32(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_f32)))\n" "float32x4_t vorrq(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_f16)))\n" "float16x8_t vorrq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_f16)))\n" "float16x8_t vorrq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_f32)))\n" "float32x4_t vorrq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_f32)))\n" "float32x4_t vorrq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_f16)))\n" "float16x8_t vorrq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_f16)))\n" "float16x8_t vorrq_x(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_f32)))\n" "float32x4_t vorrq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_f32)))\n" "float32x4_t vorrq_x(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_f16)))\n" "float16x8_t vpselq_f16(float16x8_t, float16x8_t, 
mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_f16)))\n" "float16x8_t vpselq(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_f32)))\n" "float32x4_t vpselq_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_f32)))\n" "float32x4_t vpselq(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_f32)))\n" "float16x8_t vreinterpretq_f16_f32(float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_f32)))\n" "float16x8_t vreinterpretq_f16(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s16)))\n" "float16x8_t vreinterpretq_f16_s16(int16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s16)))\n" "float16x8_t vreinterpretq_f16(int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s32)))\n" "float16x8_t vreinterpretq_f16_s32(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s32)))\n" "float16x8_t vreinterpretq_f16(int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s64)))\n" "float16x8_t vreinterpretq_f16_s64(int64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s64)))\n" "float16x8_t vreinterpretq_f16(int64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s8)))\n" "float16x8_t vreinterpretq_f16_s8(int8x16_t);\n" 
"static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s8)))\n" "float16x8_t vreinterpretq_f16(int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u16)))\n" "float16x8_t vreinterpretq_f16_u16(uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u16)))\n" "float16x8_t vreinterpretq_f16(uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u32)))\n" "float16x8_t vreinterpretq_f16_u32(uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u32)))\n" "float16x8_t vreinterpretq_f16(uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u64)))\n" "float16x8_t vreinterpretq_f16_u64(uint64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u64)))\n" "float16x8_t vreinterpretq_f16(uint64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u8)))\n" "float16x8_t vreinterpretq_f16_u8(uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u8)))\n" "float16x8_t vreinterpretq_f16(uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_f16)))\n" "float32x4_t vreinterpretq_f32_f16(float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_f16)))\n" "float32x4_t vreinterpretq_f32(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s16)))\n" "float32x4_t vreinterpretq_f32_s16(int16x8_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s16)))\n" "float32x4_t vreinterpretq_f32(int16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s32)))\n" "float32x4_t vreinterpretq_f32_s32(int32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s32)))\n" "float32x4_t vreinterpretq_f32(int32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s64)))\n" "float32x4_t vreinterpretq_f32_s64(int64x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s64)))\n" "float32x4_t vreinterpretq_f32(int64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s8)))\n" "float32x4_t vreinterpretq_f32_s8(int8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s8)))\n" "float32x4_t vreinterpretq_f32(int8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u16)))\n" "float32x4_t vreinterpretq_f32_u16(uint16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u16)))\n" "float32x4_t vreinterpretq_f32(uint16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u32)))\n" "float32x4_t vreinterpretq_f32_u32(uint32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u32)))\n" "float32x4_t vreinterpretq_f32(uint32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u64)))\n" "float32x4_t vreinterpretq_f32_u64(uint64x2_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u64)))\n" "float32x4_t vreinterpretq_f32(uint64x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u8)))\n" "float32x4_t vreinterpretq_f32_u8(uint8x16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u8)))\n" "float32x4_t vreinterpretq_f32(uint8x16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_f16)))\n" "int16x8_t vreinterpretq_s16_f16(float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_f16)))\n" "int16x8_t vreinterpretq_s16(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_f32)))\n" "int16x8_t vreinterpretq_s16_f32(float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_f32)))\n" "int16x8_t vreinterpretq_s16(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_f16)))\n" "int32x4_t vreinterpretq_s32_f16(float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_f16)))\n" "int32x4_t vreinterpretq_s32(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_f32)))\n" "int32x4_t vreinterpretq_s32_f32(float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_f32)))\n" "int32x4_t vreinterpretq_s32(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_f16)))\n" "int64x2_t vreinterpretq_s64_f16(float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_f16)))\n" "int64x2_t vreinterpretq_s64(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_f32)))\n" "int64x2_t vreinterpretq_s64_f32(float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_f32)))\n" "int64x2_t vreinterpretq_s64(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_f16)))\n" "int8x16_t vreinterpretq_s8_f16(float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_f16)))\n" "int8x16_t vreinterpretq_s8(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_f32)))\n" "int8x16_t vreinterpretq_s8_f32(float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_f32)))\n" "int8x16_t vreinterpretq_s8(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_f16)))\n" "uint16x8_t vreinterpretq_u16_f16(float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_f16)))\n" "uint16x8_t vreinterpretq_u16(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_f32)))\n" "uint16x8_t vreinterpretq_u16_f32(float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_f32)))\n" "uint16x8_t vreinterpretq_u16(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_f16)))\n" "uint32x4_t vreinterpretq_u32_f16(float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_f16)))\n" "uint32x4_t vreinterpretq_u32(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_f32)))\n" "uint32x4_t vreinterpretq_u32_f32(float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_f32)))\n" "uint32x4_t vreinterpretq_u32(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_f16)))\n" "uint64x2_t vreinterpretq_u64_f16(float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_f16)))\n" "uint64x2_t vreinterpretq_u64(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_f32)))\n" "uint64x2_t vreinterpretq_u64_f32(float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_f32)))\n" "uint64x2_t vreinterpretq_u64(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f16)))\n" "uint8x16_t vreinterpretq_u8_f16(float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f16)))\n" "uint8x16_t vreinterpretq_u8(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f32)))\n" "uint8x16_t vreinterpretq_u8_f32(float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f32)))\n" "uint8x16_t vreinterpretq_u8(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_f16)))\n" "float16x8_t vrev32q_f16(float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_f16)))\n" "float16x8_t vrev32q(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_f16)))\n" "float16x8_t vrev32q_m_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_f16)))\n" "float16x8_t vrev32q_m(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_f16)))\n" "float16x8_t vrev32q_x_f16(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_f16)))\n" "float16x8_t vrev32q_x(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_f16)))\n" "float16x8_t vrev64q_f16(float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_f16)))\n" "float16x8_t vrev64q(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_f32)))\n" "float32x4_t vrev64q_f32(float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_f32)))\n" "float32x4_t vrev64q(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_f16)))\n" "float16x8_t vrev64q_m_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_f16)))\n" "float16x8_t vrev64q_m(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_f32)))\n" "float32x4_t vrev64q_m_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_f32)))\n" 
"float32x4_t vrev64q_m(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_f16)))\n" "float16x8_t vrev64q_x_f16(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_f16)))\n" "float16x8_t vrev64q_x(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_f32)))\n" "float32x4_t vrev64q_x_f32(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_f32)))\n" "float32x4_t vrev64q_x(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_f16)))\n" "float16x8_t vrndaq_f16(float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_f16)))\n" "float16x8_t vrndaq(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_f32)))\n" "float32x4_t vrndaq_f32(float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_f32)))\n" "float32x4_t vrndaq(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_m_f16)))\n" "float16x8_t vrndaq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_m_f16)))\n" "float16x8_t vrndaq_m(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_m_f32)))\n" "float32x4_t vrndaq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_m_f32)))\n" "float32x4_t vrndaq_m(float32x4_t, float32x4_t, mve_pred16_t);\n" "static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_x_f16)))\n" "float16x8_t vrndaq_x_f16(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_x_f16)))\n" "float16x8_t vrndaq_x(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_x_f32)))\n" "float32x4_t vrndaq_x_f32(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_x_f32)))\n" "float32x4_t vrndaq_x(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_f16)))\n" "float16x8_t vrndmq_f16(float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_f16)))\n" "float16x8_t vrndmq(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_f32)))\n" "float32x4_t vrndmq_f32(float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_f32)))\n" "float32x4_t vrndmq(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_m_f16)))\n" "float16x8_t vrndmq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_m_f16)))\n" "float16x8_t vrndmq_m(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_m_f32)))\n" "float32x4_t vrndmq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_m_f32)))\n" "float32x4_t vrndmq_m(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_x_f16)))\n" "float16x8_t vrndmq_x_f16(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_x_f16)))\n" "float16x8_t vrndmq_x(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_x_f32)))\n" "float32x4_t vrndmq_x_f32(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_x_f32)))\n" "float32x4_t vrndmq_x(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_f16)))\n" "float16x8_t vrndnq_f16(float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_f16)))\n" "float16x8_t vrndnq(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_f32)))\n" "float32x4_t vrndnq_f32(float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_f32)))\n" "float32x4_t vrndnq(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_m_f16)))\n" "float16x8_t vrndnq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_m_f16)))\n" "float16x8_t vrndnq_m(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_m_f32)))\n" "float32x4_t vrndnq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_m_f32)))\n" "float32x4_t vrndnq_m(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_x_f16)))\n" 
"float16x8_t vrndnq_x_f16(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_x_f16)))\n" "float16x8_t vrndnq_x(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_x_f32)))\n" "float32x4_t vrndnq_x_f32(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_x_f32)))\n" "float32x4_t vrndnq_x(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_f16)))\n" "float16x8_t vrndpq_f16(float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_f16)))\n" "float16x8_t vrndpq(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_f32)))\n" "float32x4_t vrndpq_f32(float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_f32)))\n" "float32x4_t vrndpq(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_m_f16)))\n" "float16x8_t vrndpq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_m_f16)))\n" "float16x8_t vrndpq_m(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_m_f32)))\n" "float32x4_t vrndpq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_m_f32)))\n" "float32x4_t vrndpq_m(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_x_f16)))\n" "float16x8_t vrndpq_x_f16(float16x8_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_x_f16)))\n" "float16x8_t vrndpq_x(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_x_f32)))\n" "float32x4_t vrndpq_x_f32(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_x_f32)))\n" "float32x4_t vrndpq_x(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_f16)))\n" "float16x8_t vrndq_f16(float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_f16)))\n" "float16x8_t vrndq(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_f32)))\n" "float32x4_t vrndq_f32(float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_f32)))\n" "float32x4_t vrndq(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_m_f16)))\n" "float16x8_t vrndq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_m_f16)))\n" "float16x8_t vrndq_m(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_m_f32)))\n" "float32x4_t vrndq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_m_f32)))\n" "float32x4_t vrndq_m(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_x_f16)))\n" "float16x8_t vrndq_x_f16(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_x_f16)))\n" 
"float16x8_t vrndq_x(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_x_f32)))\n" "float32x4_t vrndq_x_f32(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_x_f32)))\n" "float32x4_t vrndq_x(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_f16)))\n" "float16x8_t vrndxq_f16(float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_f16)))\n" "float16x8_t vrndxq(float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_f32)))\n" "float32x4_t vrndxq_f32(float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_f32)))\n" "float32x4_t vrndxq(float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_m_f16)))\n" "float16x8_t vrndxq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_m_f16)))\n" "float16x8_t vrndxq_m(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_m_f32)))\n" "float32x4_t vrndxq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_m_f32)))\n" "float32x4_t vrndxq_m(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_x_f16)))\n" "float16x8_t vrndxq_x_f16(float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_x_f16)))\n" "float16x8_t vrndxq_x(float16x8_t, mve_pred16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_x_f32)))\n" "float32x4_t vrndxq_x_f32(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_x_f32)))\n" "float32x4_t vrndxq_x(float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_f16)))\n" "float16x8_t vsetq_lane_f16(float16_t, float16x8_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_f16)))\n" "float16x8_t vsetq_lane(float16_t, float16x8_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_f32)))\n" "float32x4_t vsetq_lane_f32(float32_t, float32x4_t, int);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_f32)))\n" "float32x4_t vsetq_lane(float32_t, float32x4_t, int);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_f16)))\n" "void vst1q_f16(float16_t *, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_f16)))\n" "void vst1q(float16_t *, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_f32)))\n" "void vst1q_f32(float32_t *, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_f32)))\n" "void vst1q(float32_t *, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_f16)))\n" "void vst1q_p_f16(float16_t *, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_f16)))\n" "void vst1q_p(float16_t *, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_f32)))\n" "void 
vst1q_p_f32(float32_t *, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_f32)))\n" "void vst1q_p(float32_t *, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_f16)))\n" "void vst2q_f16(float16_t *, float16x8x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_f16)))\n" "void vst2q(float16_t *, float16x8x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_f32)))\n" "void vst2q_f32(float32_t *, float32x4x2_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_f32)))\n" "void vst2q(float32_t *, float32x4x2_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_f16)))\n" "void vst4q_f16(float16_t *, float16x8x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_f16)))\n" "void vst4q(float16_t *, float16x8x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_f32)))\n" "void vst4q_f32(float32_t *, float32x4x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_f32)))\n" "void vst4q(float32_t *, float32x4x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_f16)))\n" "void vstrhq_f16(float16_t *, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_f16)))\n" "void vstrhq(float16_t *, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_f16)))\n" "void vstrhq_p_f16(float16_t *, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_f16)))\n" "void 
vstrhq_p(float16_t *, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_f16)))\n" "void vstrhq_scatter_offset_f16(float16_t *, uint16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_f16)))\n" "void vstrhq_scatter_offset(float16_t *, uint16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_f16)))\n" "void vstrhq_scatter_offset_p_f16(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_f16)))\n" "void vstrhq_scatter_offset_p(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_f16)))\n" "void vstrhq_scatter_shifted_offset_f16(float16_t *, uint16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_f16)))\n" "void vstrhq_scatter_shifted_offset(float16_t *, uint16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_f16)))\n" "void vstrhq_scatter_shifted_offset_p_f16(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_f16)))\n" "void vstrhq_scatter_shifted_offset_p(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_f32)))\n" "void vstrwq_f32(float32_t *, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_f32)))\n" "void vstrwq(float32_t *, 
float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_f32)))\n" "void vstrwq_p_f32(float32_t *, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_f32)))\n" "void vstrwq_p(float32_t *, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_f32)))\n" "void vstrwq_scatter_base_f32(uint32x4_t, int, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_f32)))\n" "void vstrwq_scatter_base(uint32x4_t, int, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_f32)))\n" "void vstrwq_scatter_base_p_f32(uint32x4_t, int, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_f32)))\n" "void vstrwq_scatter_base_p(uint32x4_t, int, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_f32)))\n" "void vstrwq_scatter_base_wb_f32(uint32x4_t *, int, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_f32)))\n" "void vstrwq_scatter_base_wb(uint32x4_t *, int, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_f32)))\n" "void vstrwq_scatter_base_wb_p_f32(uint32x4_t *, int, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_f32)))\n" "void vstrwq_scatter_base_wb_p(uint32x4_t *, int, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_f32)))\n" 
"void vstrwq_scatter_offset_f32(float32_t *, uint32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_f32)))\n" "void vstrwq_scatter_offset(float32_t *, uint32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_f32)))\n" "void vstrwq_scatter_offset_p_f32(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_f32)))\n" "void vstrwq_scatter_offset_p(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_f32)))\n" "void vstrwq_scatter_shifted_offset_f32(float32_t *, uint32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_f32)))\n" "void vstrwq_scatter_shifted_offset(float32_t *, uint32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_f32)))\n" "void vstrwq_scatter_shifted_offset_p_f32(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_f32)))\n" "void vstrwq_scatter_shifted_offset_p(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_f16)))\n" "float16x8_t vsubq_f16(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_f16)))\n" "float16x8_t vsubq(float16x8_t, float16x8_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_f32)))\n" "float32x4_t vsubq_f32(float32x4_t, 
float32x4_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_f32)))\n" "float32x4_t vsubq(float32x4_t, float32x4_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_f16)))\n" "float16x8_t vsubq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_f16)))\n" "float16x8_t vsubq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_f32)))\n" "float32x4_t vsubq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_f32)))\n" "float32x4_t vsubq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_f16)))\n" "float16x8_t vsubq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_f16)))\n" "float16x8_t vsubq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_f32)))\n" "float32x4_t vsubq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_f32)))\n" "float32x4_t vsubq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_f16)))\n" "float16x8_t vsubq_n_f16(float16x8_t, float16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_f16)))\n" "float16x8_t vsubq(float16x8_t, float16_t);\n" "static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_f32)))\n" "float32x4_t vsubq_n_f32(float32x4_t, float32_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_f32)))\n" "float32x4_t vsubq(float32x4_t, float32_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_f16)))\n" "float16x8_t vsubq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_f16)))\n" "float16x8_t vsubq_x(float16x8_t, float16x8_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_f32)))\n" "float32x4_t vsubq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_f32)))\n" "float32x4_t vsubq_x(float32x4_t, float32x4_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_f16)))\n" "float16x8_t vsubq_x_n_f16(float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_f16)))\n" "float16x8_t vsubq_x(float16x8_t, float16_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_f32)))\n" "float32x4_t vsubq_x_n_f32(float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_f32)))\n" "float32x4_t vsubq_x(float32x4_t, float32_t, mve_pred16_t);\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_f16)))\n" "float16x8_t vuninitializedq_f16();\n" "static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_f32)))\n" "float32x4_t vuninitializedq_f32();\n" "static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_f16)))\n" "float16x8_t vuninitializedq(float16x8_t);\n" "static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_f32)))\n" "float32x4_t vuninitializedq(float32x4_t);\n" "\n" "#endif /* (__ARM_FEATURE_MVE & 2) && (!defined __ARM_MVE_PRESERVE_USER_NAMESPACE) */\n" "\n" "#ifdef __cplusplus\n" "} /* extern \"C\" */\n" "#endif\n" "\n" "#endif /* __ARM_MVE_H */\n" "" } , { "/builtins/arm_neon_sve_bridge.h" , "/*===---- arm_neon_sve_bridge.h - ARM NEON SVE Bridge intrinsics -----------===\n" " *\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __ARM_NEON_SVE_BRIDGE_H\n" "#define __ARM_NEON_SVE_BRIDGE_H\n" "\n" "#include \n" "#include \n" "\n" "#ifdef __cplusplus\n" "extern \"C\" {\n" "#endif\n" "\n" "/* Function attributes */\n" "#define __ai static __inline__ __attribute__((__always_inline__, __nodebug__))\n" "#define __aio \\\n" " static __inline__ \\\n" " __attribute__((__always_inline__, __nodebug__, __overloadable__))\n" "\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_s8)))\n" "svint8_t svset_neonq(svint8_t, int8x16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_s16)))\n" "svint16_t svset_neonq(svint16_t, int16x8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_s32)))\n" "svint32_t svset_neonq(svint32_t, int32x4_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_s64)))\n" "svint64_t svset_neonq(svint64_t, int64x2_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_u8)))\n" "svuint8_t svset_neonq(svuint8_t, uint8x16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_u16)))\n" "svuint16_t svset_neonq(svuint16_t, uint16x8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_u32)))\n" "svuint32_t svset_neonq(svuint32_t, uint32x4_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_u64)))\n" "svuint64_t svset_neonq(svuint64_t, uint64x2_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_f16)))\n" "svfloat16_t svset_neonq(svfloat16_t, float16x8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_f32)))\n" "svfloat32_t svset_neonq(svfloat32_t, float32x4_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_f64)))\n" "svfloat64_t svset_neonq(svfloat64_t, float64x2_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_s8)))\n" "svint8_t svset_neonq_s8(svint8_t, int8x16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_s16)))\n" "svint16_t svset_neonq_s16(svint16_t, int16x8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_s32)))\n" "svint32_t svset_neonq_s32(svint32_t, int32x4_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_s64)))\n" "svint64_t svset_neonq_s64(svint64_t, int64x2_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_u8)))\n" "svuint8_t svset_neonq_u8(svuint8_t, uint8x16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_u16)))\n" "svuint16_t svset_neonq_u16(svuint16_t, uint16x8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_u32)))\n" "svuint32_t svset_neonq_u32(svuint32_t, uint32x4_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_u64)))\n" "svuint64_t 
svset_neonq_u64(svuint64_t, uint64x2_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_f16)))\n" "svfloat16_t svset_neonq_f16(svfloat16_t, float16x8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_f32)))\n" "svfloat32_t svset_neonq_f32(svfloat32_t, float32x4_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_f64)))\n" "svfloat64_t svset_neonq_f64(svfloat64_t, float64x2_t);\n" "\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_s8)))\n" "int8x16_t svget_neonq(svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_s16)))\n" "int16x8_t svget_neonq(svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_s32)))\n" "int32x4_t svget_neonq(svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_s64)))\n" "int64x2_t svget_neonq(svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_u8)))\n" "uint8x16_t svget_neonq(svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_u16)))\n" "uint16x8_t svget_neonq(svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_u32)))\n" "uint32x4_t svget_neonq(svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_u64)))\n" "uint64x2_t svget_neonq(svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_f16)))\n" "float16x8_t svget_neonq(svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_f32)))\n" "float32x4_t svget_neonq(svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_f64)))\n" "float64x2_t svget_neonq(svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_s8)))\n" "int8x16_t svget_neonq_s8(svint8_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_s16)))\n" "int16x8_t svget_neonq_s16(svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_s32)))\n" "int32x4_t svget_neonq_s32(svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_s64)))\n" "int64x2_t svget_neonq_s64(svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_u8)))\n" "uint8x16_t svget_neonq_u8(svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_u16)))\n" "uint16x8_t svget_neonq_u16(svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_u32)))\n" "uint32x4_t svget_neonq_u32(svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_u64)))\n" "uint64x2_t svget_neonq_u64(svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_f16)))\n" "float16x8_t svget_neonq_f16(svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_f32)))\n" "float32x4_t svget_neonq_f32(svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_f64)))\n" "float64x2_t svget_neonq_f64(svfloat64_t);\n" "\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_s8)))\n" "svint8_t svdup_neonq(int8x16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_s16)))\n" "svint16_t svdup_neonq(int16x8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_s32)))\n" "svint32_t svdup_neonq(int32x4_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_s64)))\n" "svint64_t svdup_neonq(int64x2_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_u8)))\n" "svuint8_t svdup_neonq(uint8x16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_u16)))\n" "svuint16_t svdup_neonq(uint16x8_t);\n" 
"__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_u32)))\n" "svuint32_t svdup_neonq(uint32x4_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_u64)))\n" "svuint64_t svdup_neonq(uint64x2_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_f16)))\n" "svfloat16_t svdup_neonq(float16x8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_f32)))\n" "svfloat32_t svdup_neonq(float32x4_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_f64)))\n" "svfloat64_t svdup_neonq(float64x2_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_s8)))\n" "svint8_t svdup_neonq_s8(int8x16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_s16)))\n" "svint16_t svdup_neonq_s16(int16x8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_s32)))\n" "svint32_t svdup_neonq_s32(int32x4_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_s64)))\n" "svint64_t svdup_neonq_s64(int64x2_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_u8)))\n" "svuint8_t svdup_neonq_u8(uint8x16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_u16)))\n" "svuint16_t svdup_neonq_u16(uint16x8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_u32)))\n" "svuint32_t svdup_neonq_u32(uint32x4_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_u64)))\n" "svuint64_t svdup_neonq_u64(uint64x2_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_f16)))\n" "svfloat16_t svdup_neonq_f16(float16x8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_f32)))\n" "svfloat32_t svdup_neonq_f32(float32x4_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_f64)))\n" "svfloat64_t 
svdup_neonq_f64(float64x2_t);\n" "\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_bf16)))\n" "svbfloat16_t svset_neonq(svbfloat16_t, bfloat16x8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_bf16)))\n" "svbfloat16_t svset_neonq_bf16(svbfloat16_t, bfloat16x8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_bf16)))\n" "bfloat16x8_t svget_neonq(svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_bf16)))\n" "bfloat16x8_t svget_neonq_bf16(svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_bf16)))\n" "svbfloat16_t svdup_neonq(bfloat16x8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_bf16)))\n" "svbfloat16_t svdup_neonq_bf16(bfloat16x8_t);\n" "\n" "#undef __ai\n" "#undef __aio\n" "\n" "#ifdef __cplusplus\n" "} // extern \"C\"\n" "#endif\n" "\n" "#endif //__ARM_NEON_SVE_BRIDGE_H\n" "" } , { "/builtins/arm_sme_draft_spec_subject_to_change.h" , "/*===---- arm_sme_draft_spec_subject_to_change.h - ARM SME intrinsics ------===\n" " *\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __ARM_SME_H\n" "#define __ARM_SME_H\n" "\n" "#if !defined(__LITTLE_ENDIAN__)\n" "#error \"Big endian is currently not supported for arm_sme_draft_spec_subject_to_change.h\"\n" "#endif\n" "#include \n" "\n" "/* Function attributes */\n" "#define __ai static __inline__ __attribute__((__always_inline__, __nodebug__))\n" "\n" "#define __aio static __inline__ __attribute__((__always_inline__, __nodebug__, __overloadable__))\n" "\n" "#ifdef __cplusplus\n" "extern \"C\" {\n" "#endif\n" "\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddha_za32_u32_m), arm_streaming, arm_shared_za))\n" "void svaddha_za32_u32_m(uint64_t, svbool_t, svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddha_za32_s32_m), arm_streaming, arm_shared_za))\n" "void svaddha_za32_s32_m(uint64_t, svbool_t, svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddva_za32_u32_m), arm_streaming, arm_shared_za))\n" "void svaddva_za32_u32_m(uint64_t, svbool_t, svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddva_za32_s32_m), arm_streaming, arm_shared_za))\n" "void svaddva_za32_s32_m(uint64_t, svbool_t, svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svcntsb), arm_streaming_compatible, arm_preserves_za))\n" "uint64_t svcntsb(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svcntsd), arm_streaming_compatible, arm_preserves_za))\n" "uint64_t svcntsd(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svcntsh), arm_streaming_compatible, arm_preserves_za))\n" "uint64_t svcntsh(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svcntsw), arm_streaming_compatible, arm_preserves_za))\n" "uint64_t svcntsw(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_hor_vnum_za128), arm_streaming, arm_shared_za))\n" "void svld1_hor_vnum_za128(uint64_t, uint32_t, uint64_t, svbool_t, void const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_hor_vnum_za16), arm_streaming, arm_shared_za))\n" "void svld1_hor_vnum_za16(uint64_t, uint32_t, uint64_t, svbool_t, void const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_hor_vnum_za32), arm_streaming, arm_shared_za))\n" "void svld1_hor_vnum_za32(uint64_t, uint32_t, uint64_t, svbool_t, void const *, int64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_hor_vnum_za64), arm_streaming, arm_shared_za))\n" "void svld1_hor_vnum_za64(uint64_t, uint32_t, uint64_t, svbool_t, void const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_hor_vnum_za8), arm_streaming, arm_shared_za))\n" "void svld1_hor_vnum_za8(uint64_t, uint32_t, uint64_t, svbool_t, void const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_hor_za128), arm_streaming, arm_shared_za))\n" "void svld1_hor_za128(uint64_t, uint32_t, uint64_t, svbool_t, void const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_hor_za16), arm_streaming, arm_shared_za))\n" "void svld1_hor_za16(uint64_t, uint32_t, uint64_t, svbool_t, void const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_hor_za32), arm_streaming, arm_shared_za))\n" "void svld1_hor_za32(uint64_t, uint32_t, uint64_t, svbool_t, void const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_hor_za64), arm_streaming, arm_shared_za))\n" "void svld1_hor_za64(uint64_t, uint32_t, uint64_t, svbool_t, void const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_hor_za8), arm_streaming, arm_shared_za))\n" "void svld1_hor_za8(uint64_t, uint32_t, uint64_t, svbool_t, void const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_ver_vnum_za128), arm_streaming, arm_shared_za))\n" "void svld1_ver_vnum_za128(uint64_t, uint32_t, uint64_t, svbool_t, void const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_ver_vnum_za16), arm_streaming, arm_shared_za))\n" "void svld1_ver_vnum_za16(uint64_t, uint32_t, uint64_t, svbool_t, void const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_ver_vnum_za32), arm_streaming, arm_shared_za))\n" "void svld1_ver_vnum_za32(uint64_t, uint32_t, uint64_t, svbool_t, void const *, int64_t);\n" 
"__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_ver_vnum_za64), arm_streaming, arm_shared_za))\n" "void svld1_ver_vnum_za64(uint64_t, uint32_t, uint64_t, svbool_t, void const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_ver_vnum_za8), arm_streaming, arm_shared_za))\n" "void svld1_ver_vnum_za8(uint64_t, uint32_t, uint64_t, svbool_t, void const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_ver_za128), arm_streaming, arm_shared_za))\n" "void svld1_ver_za128(uint64_t, uint32_t, uint64_t, svbool_t, void const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_ver_za16), arm_streaming, arm_shared_za))\n" "void svld1_ver_za16(uint64_t, uint32_t, uint64_t, svbool_t, void const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_ver_za32), arm_streaming, arm_shared_za))\n" "void svld1_ver_za32(uint64_t, uint32_t, uint64_t, svbool_t, void const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_ver_za64), arm_streaming, arm_shared_za))\n" "void svld1_ver_za64(uint64_t, uint32_t, uint64_t, svbool_t, void const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_ver_za8), arm_streaming, arm_shared_za))\n" "void svld1_ver_za8(uint64_t, uint32_t, uint64_t, svbool_t, void const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za32_f16_m), arm_streaming, arm_shared_za))\n" "void svmopa_za32_f16_m(uint64_t, svbool_t, svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za32_bf16_m), arm_streaming, arm_shared_za))\n" "void svmopa_za32_bf16_m(uint64_t, svbool_t, svbool_t, svbfloat16_t, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za32_f32_m), arm_streaming, arm_shared_za))\n" "void svmopa_za32_f32_m(uint64_t, svbool_t, svbool_t, svfloat32_t, svfloat32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za32_s8_m), arm_streaming, arm_shared_za))\n" "void svmopa_za32_s8_m(uint64_t, svbool_t, svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za32_u8_m), arm_streaming, arm_shared_za))\n" "void svmopa_za32_u8_m(uint64_t, svbool_t, svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za32_f16_m), arm_streaming, arm_shared_za))\n" "void svmops_za32_f16_m(uint64_t, svbool_t, svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za32_bf16_m), arm_streaming, arm_shared_za))\n" "void svmops_za32_bf16_m(uint64_t, svbool_t, svbool_t, svbfloat16_t, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za32_f32_m), arm_streaming, arm_shared_za))\n" "void svmops_za32_f32_m(uint64_t, svbool_t, svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za32_s8_m), arm_streaming, arm_shared_za))\n" "void svmops_za32_s8_m(uint64_t, svbool_t, svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za32_u8_m), arm_streaming, arm_shared_za))\n" "void svmops_za32_u8_m(uint64_t, svbool_t, svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_u8_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svuint8_t svread_hor_za128_u8_m(svuint8_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_u32_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svuint32_t svread_hor_za128_u32_m(svuint32_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_u64_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svuint64_t 
svread_hor_za128_u64_m(svuint64_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_u16_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svuint16_t svread_hor_za128_u16_m(svuint16_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_bf16_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svbfloat16_t svread_hor_za128_bf16_m(svbfloat16_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_s8_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svint8_t svread_hor_za128_s8_m(svint8_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_f64_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svfloat64_t svread_hor_za128_f64_m(svfloat64_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_f32_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svfloat32_t svread_hor_za128_f32_m(svfloat32_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_f16_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svfloat16_t svread_hor_za128_f16_m(svfloat16_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_s32_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svint32_t svread_hor_za128_s32_m(svint32_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_s64_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svint64_t svread_hor_za128_s64_m(svint64_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_s16_m), 
arm_streaming, arm_shared_za, arm_preserves_za))\n" "svint16_t svread_hor_za128_s16_m(svint16_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za16_u16_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svuint16_t svread_hor_za16_u16_m(svuint16_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za16_bf16_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svbfloat16_t svread_hor_za16_bf16_m(svbfloat16_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za16_f16_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svfloat16_t svread_hor_za16_f16_m(svfloat16_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za16_s16_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svint16_t svread_hor_za16_s16_m(svint16_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za32_u32_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svuint32_t svread_hor_za32_u32_m(svuint32_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za32_f32_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svfloat32_t svread_hor_za32_f32_m(svfloat32_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za32_s32_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svint32_t svread_hor_za32_s32_m(svint32_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za64_u64_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svuint64_t svread_hor_za64_u64_m(svuint64_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za64_f64_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svfloat64_t svread_hor_za64_f64_m(svfloat64_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za64_s64_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svint64_t svread_hor_za64_s64_m(svint64_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za8_u8_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svuint8_t svread_hor_za8_u8_m(svuint8_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za8_s8_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svint8_t svread_hor_za8_s8_m(svint8_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_u8_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svuint8_t svread_ver_za128_u8_m(svuint8_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_u32_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svuint32_t svread_ver_za128_u32_m(svuint32_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_u64_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svuint64_t svread_ver_za128_u64_m(svuint64_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_u16_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svuint16_t svread_ver_za128_u16_m(svuint16_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_bf16_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svbfloat16_t svread_ver_za128_bf16_m(svbfloat16_t, 
svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_s8_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svint8_t svread_ver_za128_s8_m(svint8_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_f64_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svfloat64_t svread_ver_za128_f64_m(svfloat64_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_f32_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svfloat32_t svread_ver_za128_f32_m(svfloat32_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_f16_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svfloat16_t svread_ver_za128_f16_m(svfloat16_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_s32_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svint32_t svread_ver_za128_s32_m(svint32_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_s64_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svint64_t svread_ver_za128_s64_m(svint64_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_s16_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svint16_t svread_ver_za128_s16_m(svint16_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za16_u16_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svuint16_t svread_ver_za16_u16_m(svuint16_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za16_bf16_m), arm_streaming, arm_shared_za, 
arm_preserves_za))\n" "svbfloat16_t svread_ver_za16_bf16_m(svbfloat16_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za16_f16_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svfloat16_t svread_ver_za16_f16_m(svfloat16_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za16_s16_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svint16_t svread_ver_za16_s16_m(svint16_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za32_u32_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svuint32_t svread_ver_za32_u32_m(svuint32_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za32_f32_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svfloat32_t svread_ver_za32_f32_m(svfloat32_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za32_s32_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svint32_t svread_ver_za32_s32_m(svint32_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za64_u64_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svuint64_t svread_ver_za64_u64_m(svuint64_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za64_f64_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svfloat64_t svread_ver_za64_f64_m(svfloat64_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za64_s64_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svint64_t svread_ver_za64_s64_m(svint64_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za8_u8_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svuint8_t svread_ver_za8_u8_m(svuint8_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za8_s8_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svint8_t svread_ver_za8_s8_m(svint8_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_hor_vnum_za128), arm_streaming, arm_shared_za, arm_preserves_za))\n" "void svst1_hor_vnum_za128(uint64_t, uint32_t, uint64_t, svbool_t, void *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_hor_vnum_za16), arm_streaming, arm_shared_za, arm_preserves_za))\n" "void svst1_hor_vnum_za16(uint64_t, uint32_t, uint64_t, svbool_t, void *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_hor_vnum_za32), arm_streaming, arm_shared_za, arm_preserves_za))\n" "void svst1_hor_vnum_za32(uint64_t, uint32_t, uint64_t, svbool_t, void *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_hor_vnum_za64), arm_streaming, arm_shared_za, arm_preserves_za))\n" "void svst1_hor_vnum_za64(uint64_t, uint32_t, uint64_t, svbool_t, void *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_hor_vnum_za8), arm_streaming, arm_shared_za, arm_preserves_za))\n" "void svst1_hor_vnum_za8(uint64_t, uint32_t, uint64_t, svbool_t, void *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_hor_za128), arm_streaming, arm_shared_za, arm_preserves_za))\n" "void svst1_hor_za128(uint64_t, uint32_t, uint64_t, svbool_t, void *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_hor_za16), arm_streaming, arm_shared_za, arm_preserves_za))\n" "void svst1_hor_za16(uint64_t, uint32_t, uint64_t, svbool_t, void *);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_hor_za32), arm_streaming, arm_shared_za, arm_preserves_za))\n" "void svst1_hor_za32(uint64_t, uint32_t, uint64_t, svbool_t, void *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_hor_za64), arm_streaming, arm_shared_za, arm_preserves_za))\n" "void svst1_hor_za64(uint64_t, uint32_t, uint64_t, svbool_t, void *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_hor_za8), arm_streaming, arm_shared_za, arm_preserves_za))\n" "void svst1_hor_za8(uint64_t, uint32_t, uint64_t, svbool_t, void *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_ver_vnum_za128), arm_streaming, arm_shared_za, arm_preserves_za))\n" "void svst1_ver_vnum_za128(uint64_t, uint32_t, uint64_t, svbool_t, void *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_ver_vnum_za16), arm_streaming, arm_shared_za, arm_preserves_za))\n" "void svst1_ver_vnum_za16(uint64_t, uint32_t, uint64_t, svbool_t, void *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_ver_vnum_za32), arm_streaming, arm_shared_za, arm_preserves_za))\n" "void svst1_ver_vnum_za32(uint64_t, uint32_t, uint64_t, svbool_t, void *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_ver_vnum_za64), arm_streaming, arm_shared_za, arm_preserves_za))\n" "void svst1_ver_vnum_za64(uint64_t, uint32_t, uint64_t, svbool_t, void *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_ver_vnum_za8), arm_streaming, arm_shared_za, arm_preserves_za))\n" "void svst1_ver_vnum_za8(uint64_t, uint32_t, uint64_t, svbool_t, void *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_ver_za128), arm_streaming, arm_shared_za, arm_preserves_za))\n" "void svst1_ver_za128(uint64_t, uint32_t, uint64_t, svbool_t, void *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_ver_za16), 
arm_streaming, arm_shared_za, arm_preserves_za))\n" "void svst1_ver_za16(uint64_t, uint32_t, uint64_t, svbool_t, void *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_ver_za32), arm_streaming, arm_shared_za, arm_preserves_za))\n" "void svst1_ver_za32(uint64_t, uint32_t, uint64_t, svbool_t, void *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_ver_za64), arm_streaming, arm_shared_za, arm_preserves_za))\n" "void svst1_ver_za64(uint64_t, uint32_t, uint64_t, svbool_t, void *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_ver_za8), arm_streaming, arm_shared_za, arm_preserves_za))\n" "void svst1_ver_za8(uint64_t, uint32_t, uint64_t, svbool_t, void *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumopa_za32_s8_m), arm_streaming, arm_shared_za))\n" "void svsumopa_za32_s8_m(uint64_t, svbool_t, svbool_t, svint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumops_za32_s8_m), arm_streaming, arm_shared_za))\n" "void svsumops_za32_s8_m(uint64_t, svbool_t, svbool_t, svint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusmopa_za32_u8_m), arm_streaming, arm_shared_za))\n" "void svusmopa_za32_u8_m(uint64_t, svbool_t, svbool_t, svuint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusmops_za32_u8_m), arm_streaming, arm_shared_za))\n" "void svusmops_za32_u8_m(uint64_t, svbool_t, svbool_t, svuint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_u8_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za128_u8_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_u32_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za128_u32_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_u64_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za128_u64_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_u16_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za128_u16_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_bf16_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za128_bf16_m(uint64_t, uint32_t, uint64_t, svbool_t, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_s8_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za128_s8_m(uint64_t, uint32_t, uint64_t, svbool_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_f64_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za128_f64_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_f32_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za128_f32_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_f16_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za128_f16_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_s32_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za128_s32_m(uint64_t, uint32_t, uint64_t, svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_s64_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za128_s64_m(uint64_t, uint32_t, uint64_t, svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_s16_m), arm_streaming, arm_shared_za))\n" "void 
svwrite_hor_za128_s16_m(uint64_t, uint32_t, uint64_t, svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_u16_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za16_u16_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_bf16_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za16_bf16_m(uint64_t, uint32_t, uint64_t, svbool_t, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_f16_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za16_f16_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_s16_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za16_s16_m(uint64_t, uint32_t, uint64_t, svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za32_u32_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za32_u32_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za32_f32_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za32_f32_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za32_s32_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za32_s32_m(uint64_t, uint32_t, uint64_t, svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za64_u64_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za64_u64_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za64_f64_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za64_f64_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za64_s64_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za64_s64_m(uint64_t, uint32_t, uint64_t, svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_u8_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za8_u8_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_s8_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za8_s8_m(uint64_t, uint32_t, uint64_t, svbool_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_u8_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za128_u8_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_u32_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za128_u32_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_u64_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za128_u64_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_u16_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za128_u16_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_bf16_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za128_bf16_m(uint64_t, uint32_t, uint64_t, svbool_t, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_s8_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za128_s8_m(uint64_t, uint32_t, uint64_t, svbool_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_f64_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za128_f64_m(uint64_t, 
uint32_t, uint64_t, svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_f32_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za128_f32_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_f16_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za128_f16_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_s32_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za128_s32_m(uint64_t, uint32_t, uint64_t, svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_s64_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za128_s64_m(uint64_t, uint32_t, uint64_t, svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_s16_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za128_s16_m(uint64_t, uint32_t, uint64_t, svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_u16_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za16_u16_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_bf16_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za16_bf16_m(uint64_t, uint32_t, uint64_t, svbool_t, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_f16_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za16_f16_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_s16_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za16_s16_m(uint64_t, uint32_t, uint64_t, svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za32_u32_m), 
arm_streaming, arm_shared_za))\n" "void svwrite_ver_za32_u32_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za32_f32_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za32_f32_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za32_s32_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za32_s32_m(uint64_t, uint32_t, uint64_t, svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za64_u64_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za64_u64_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za64_f64_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za64_f64_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za64_s64_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za64_s64_m(uint64_t, uint32_t, uint64_t, svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_u8_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za8_u8_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_s8_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za8_s8_m(uint64_t, uint32_t, uint64_t, svbool_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_mask_za), arm_streaming_compatible, arm_shared_za))\n" "void svzero_mask_za(uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_za), arm_streaming_compatible, arm_shared_za))\n" "void svzero_za();\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddha_za32_u32_m), arm_streaming, arm_shared_za))\n" "void svaddha_za32_m(uint64_t, svbool_t, 
svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddha_za32_s32_m), arm_streaming, arm_shared_za))\n" "void svaddha_za32_m(uint64_t, svbool_t, svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddva_za32_u32_m), arm_streaming, arm_shared_za))\n" "void svaddva_za32_m(uint64_t, svbool_t, svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddva_za32_s32_m), arm_streaming, arm_shared_za))\n" "void svaddva_za32_m(uint64_t, svbool_t, svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za32_f16_m), arm_streaming, arm_shared_za))\n" "void svmopa_za32_m(uint64_t, svbool_t, svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za32_bf16_m), arm_streaming, arm_shared_za))\n" "void svmopa_za32_m(uint64_t, svbool_t, svbool_t, svbfloat16_t, svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za32_f32_m), arm_streaming, arm_shared_za))\n" "void svmopa_za32_m(uint64_t, svbool_t, svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za32_s8_m), arm_streaming, arm_shared_za))\n" "void svmopa_za32_m(uint64_t, svbool_t, svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za32_u8_m), arm_streaming, arm_shared_za))\n" "void svmopa_za32_m(uint64_t, svbool_t, svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za32_f16_m), arm_streaming, arm_shared_za))\n" "void svmops_za32_m(uint64_t, svbool_t, svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za32_bf16_m), arm_streaming, arm_shared_za))\n" "void svmops_za32_m(uint64_t, svbool_t, svbool_t, svbfloat16_t, svbfloat16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za32_f32_m), arm_streaming, arm_shared_za))\n" "void svmops_za32_m(uint64_t, svbool_t, svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za32_s8_m), arm_streaming, arm_shared_za))\n" "void svmops_za32_m(uint64_t, svbool_t, svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za32_u8_m), arm_streaming, arm_shared_za))\n" "void svmops_za32_m(uint64_t, svbool_t, svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_u8_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svuint8_t svread_hor_za128_m(svuint8_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_u32_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svuint32_t svread_hor_za128_m(svuint32_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_u64_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svuint64_t svread_hor_za128_m(svuint64_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_u16_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svuint16_t svread_hor_za128_m(svuint16_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_bf16_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svbfloat16_t svread_hor_za128_m(svbfloat16_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_s8_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svint8_t svread_hor_za128_m(svint8_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_f64_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svfloat64_t svread_hor_za128_m(svfloat64_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_f32_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svfloat32_t svread_hor_za128_m(svfloat32_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_f16_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svfloat16_t svread_hor_za128_m(svfloat16_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_s32_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svint32_t svread_hor_za128_m(svint32_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_s64_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svint64_t svread_hor_za128_m(svint64_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_s16_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svint16_t svread_hor_za128_m(svint16_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za16_u16_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svuint16_t svread_hor_za16_m(svuint16_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za16_bf16_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svbfloat16_t svread_hor_za16_m(svbfloat16_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za16_f16_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svfloat16_t svread_hor_za16_m(svfloat16_t, svbool_t, 
uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za16_s16_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svint16_t svread_hor_za16_m(svint16_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za32_u32_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svuint32_t svread_hor_za32_m(svuint32_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za32_f32_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svfloat32_t svread_hor_za32_m(svfloat32_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za32_s32_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svint32_t svread_hor_za32_m(svint32_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za64_u64_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svuint64_t svread_hor_za64_m(svuint64_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za64_f64_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svfloat64_t svread_hor_za64_m(svfloat64_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za64_s64_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svint64_t svread_hor_za64_m(svint64_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za8_u8_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svuint8_t svread_hor_za8_m(svuint8_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za8_s8_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svint8_t svread_hor_za8_m(svint8_t, 
svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_u8_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svuint8_t svread_ver_za128_m(svuint8_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_u32_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svuint32_t svread_ver_za128_m(svuint32_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_u64_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svuint64_t svread_ver_za128_m(svuint64_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_u16_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svuint16_t svread_ver_za128_m(svuint16_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_bf16_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svbfloat16_t svread_ver_za128_m(svbfloat16_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_s8_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svint8_t svread_ver_za128_m(svint8_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_f64_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svfloat64_t svread_ver_za128_m(svfloat64_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_f32_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svfloat32_t svread_ver_za128_m(svfloat32_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_f16_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" 
"svfloat16_t svread_ver_za128_m(svfloat16_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_s32_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svint32_t svread_ver_za128_m(svint32_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_s64_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svint64_t svread_ver_za128_m(svint64_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_s16_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svint16_t svread_ver_za128_m(svint16_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za16_u16_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svuint16_t svread_ver_za16_m(svuint16_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za16_bf16_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svbfloat16_t svread_ver_za16_m(svbfloat16_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za16_f16_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svfloat16_t svread_ver_za16_m(svfloat16_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za16_s16_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svint16_t svread_ver_za16_m(svint16_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za32_u32_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svuint32_t svread_ver_za32_m(svuint32_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za32_f32_m), arm_streaming, 
arm_shared_za, arm_preserves_za))\n" "svfloat32_t svread_ver_za32_m(svfloat32_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za32_s32_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svint32_t svread_ver_za32_m(svint32_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za64_u64_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svuint64_t svread_ver_za64_m(svuint64_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za64_f64_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svfloat64_t svread_ver_za64_m(svfloat64_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za64_s64_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svint64_t svread_ver_za64_m(svint64_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za8_u8_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svuint8_t svread_ver_za8_m(svuint8_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za8_s8_m), arm_streaming, arm_shared_za, arm_preserves_za))\n" "svint8_t svread_ver_za8_m(svint8_t, svbool_t, uint64_t, uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumopa_za32_s8_m), arm_streaming, arm_shared_za))\n" "void svsumopa_za32_m(uint64_t, svbool_t, svbool_t, svint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumops_za32_s8_m), arm_streaming, arm_shared_za))\n" "void svsumops_za32_m(uint64_t, svbool_t, svbool_t, svint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusmopa_za32_u8_m), arm_streaming, arm_shared_za))\n" "void svusmopa_za32_m(uint64_t, 
svbool_t, svbool_t, svuint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusmops_za32_u8_m), arm_streaming, arm_shared_za))\n" "void svusmops_za32_m(uint64_t, svbool_t, svbool_t, svuint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_u8_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_u32_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_u64_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_u16_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_bf16_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_s8_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_f64_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_f32_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_f16_m), arm_streaming, arm_shared_za))\n" "void 
svwrite_hor_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_s32_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_s64_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_s16_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_u16_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za16_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_bf16_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za16_m(uint64_t, uint32_t, uint64_t, svbool_t, svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_f16_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za16_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_s16_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za16_m(uint64_t, uint32_t, uint64_t, svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za32_u32_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za32_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za32_f32_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za32_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za32_s32_m), 
arm_streaming, arm_shared_za))\n" "void svwrite_hor_za32_m(uint64_t, uint32_t, uint64_t, svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za64_u64_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za64_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za64_f64_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za64_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za64_s64_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za64_m(uint64_t, uint32_t, uint64_t, svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_u8_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za8_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_s8_m), arm_streaming, arm_shared_za))\n" "void svwrite_hor_za8_m(uint64_t, uint32_t, uint64_t, svbool_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_u8_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_u32_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_u64_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_u16_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_bf16_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_s8_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_f64_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_f32_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_f16_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_s32_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_s64_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_s16_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_u16_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za16_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_bf16_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za16_m(uint64_t, uint32_t, 
uint64_t, svbool_t, svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_f16_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za16_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_s16_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za16_m(uint64_t, uint32_t, uint64_t, svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za32_u32_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za32_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za32_f32_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za32_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za32_s32_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za32_m(uint64_t, uint32_t, uint64_t, svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za64_u64_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za64_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za64_f64_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za64_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za64_s64_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za64_m(uint64_t, uint32_t, uint64_t, svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_u8_m), arm_streaming, arm_shared_za))\n" "void svwrite_ver_za8_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_s8_m), arm_streaming, arm_shared_za))\n" "void 
svwrite_ver_za8_m(uint64_t, uint32_t, uint64_t, svbool_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za64_f64_m), arm_streaming, arm_shared_za))\n" "void svmopa_za64_f64_m(uint64_t, svbool_t, svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za64_f64_m), arm_streaming, arm_shared_za))\n" "void svmops_za64_f64_m(uint64_t, svbool_t, svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za64_f64_m), arm_streaming, arm_shared_za))\n" "void svmopa_za64_m(uint64_t, svbool_t, svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za64_f64_m), arm_streaming, arm_shared_za))\n" "void svmops_za64_m(uint64_t, svbool_t, svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddha_za64_u64_m), arm_streaming, arm_shared_za))\n" "void svaddha_za64_u64_m(uint64_t, svbool_t, svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddha_za64_s64_m), arm_streaming, arm_shared_za))\n" "void svaddha_za64_s64_m(uint64_t, svbool_t, svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddva_za64_u64_m), arm_streaming, arm_shared_za))\n" "void svaddva_za64_u64_m(uint64_t, svbool_t, svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddva_za64_s64_m), arm_streaming, arm_shared_za))\n" "void svaddva_za64_s64_m(uint64_t, svbool_t, svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za64_s16_m), arm_streaming, arm_shared_za))\n" "void svmopa_za64_s16_m(uint64_t, svbool_t, svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za64_u16_m), arm_streaming, arm_shared_za))\n" "void svmopa_za64_u16_m(uint64_t, svbool_t, svbool_t, svuint16_t, svuint16_t);\n" 
"__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za64_s16_m), arm_streaming, arm_shared_za))\n" "void svmops_za64_s16_m(uint64_t, svbool_t, svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za64_u16_m), arm_streaming, arm_shared_za))\n" "void svmops_za64_u16_m(uint64_t, svbool_t, svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumopa_za64_s16_m), arm_streaming, arm_shared_za))\n" "void svsumopa_za64_s16_m(uint64_t, svbool_t, svbool_t, svint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumops_za64_s16_m), arm_streaming, arm_shared_za))\n" "void svsumops_za64_s16_m(uint64_t, svbool_t, svbool_t, svint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusmopa_za64_u16_m), arm_streaming, arm_shared_za))\n" "void svusmopa_za64_u16_m(uint64_t, svbool_t, svbool_t, svuint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusmops_za64_u16_m), arm_streaming, arm_shared_za))\n" "void svusmops_za64_u16_m(uint64_t, svbool_t, svbool_t, svuint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddha_za64_u64_m), arm_streaming, arm_shared_za))\n" "void svaddha_za64_m(uint64_t, svbool_t, svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddha_za64_s64_m), arm_streaming, arm_shared_za))\n" "void svaddha_za64_m(uint64_t, svbool_t, svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddva_za64_u64_m), arm_streaming, arm_shared_za))\n" "void svaddva_za64_m(uint64_t, svbool_t, svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddva_za64_s64_m), arm_streaming, arm_shared_za))\n" "void svaddva_za64_m(uint64_t, svbool_t, svbool_t, svint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za64_s16_m), arm_streaming, arm_shared_za))\n" "void svmopa_za64_m(uint64_t, svbool_t, svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za64_u16_m), arm_streaming, arm_shared_za))\n" "void svmopa_za64_m(uint64_t, svbool_t, svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za64_s16_m), arm_streaming, arm_shared_za))\n" "void svmops_za64_m(uint64_t, svbool_t, svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za64_u16_m), arm_streaming, arm_shared_za))\n" "void svmops_za64_m(uint64_t, svbool_t, svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumopa_za64_s16_m), arm_streaming, arm_shared_za))\n" "void svsumopa_za64_m(uint64_t, svbool_t, svbool_t, svint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumops_za64_s16_m), arm_streaming, arm_shared_za))\n" "void svsumops_za64_m(uint64_t, svbool_t, svbool_t, svint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusmopa_za64_u16_m), arm_streaming, arm_shared_za))\n" "void svusmopa_za64_m(uint64_t, svbool_t, svbool_t, svuint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusmops_za64_u16_m), arm_streaming, arm_shared_za))\n" "void svusmops_za64_m(uint64_t, svbool_t, svbool_t, svuint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svldr_vnum_za), arm_streaming_compatible, arm_shared_za))\n" "void svldr_vnum_za(uint32_t, uint64_t, void const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svstr_vnum_za), arm_streaming_compatible, arm_shared_za, arm_preserves_za))\n" "void svstr_vnum_za(uint32_t, uint64_t, void *);\n" "#ifdef __cplusplus\n" "} // extern \"C\"\n" "#endif\n" "\n" "#undef 
__ai\n" "\n" "#endif /* __ARM_SME_H */\n" "" } , { "/builtins/arm_sve.h" , "/*===---- arm_sve.h - ARM SVE intrinsics -----------------------------------===\n" " *\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __ARM_SVE_H\n" "#define __ARM_SVE_H\n" "\n" "#if !defined(__LITTLE_ENDIAN__)\n" "#error \"Big endian is currently not supported for arm_sve.h\"\n" "#endif\n" "#include \n" "\n" "#ifdef __cplusplus\n" "extern \"C\" {\n" "#else\n" "#include \n" "#endif\n" "\n" "typedef __fp16 float16_t;\n" "typedef float float32_t;\n" "typedef double float64_t;\n" "typedef __SVInt8_t svint8_t;\n" "typedef __SVInt16_t svint16_t;\n" "typedef __SVInt32_t svint32_t;\n" "typedef __SVInt64_t svint64_t;\n" "typedef __SVUint8_t svuint8_t;\n" "typedef __SVUint16_t svuint16_t;\n" "typedef __SVUint32_t svuint32_t;\n" "typedef __SVUint64_t svuint64_t;\n" "typedef __SVFloat16_t svfloat16_t;\n" "\n" "typedef __SVBFloat16_t svbfloat16_t;\n" "#include \n" "typedef __SVFloat32_t svfloat32_t;\n" "typedef __SVFloat64_t svfloat64_t;\n" "typedef __clang_svint8x2_t svint8x2_t;\n" "typedef __clang_svint16x2_t svint16x2_t;\n" "typedef __clang_svint32x2_t svint32x2_t;\n" "typedef __clang_svint64x2_t svint64x2_t;\n" "typedef __clang_svuint8x2_t svuint8x2_t;\n" "typedef __clang_svuint16x2_t svuint16x2_t;\n" "typedef __clang_svuint32x2_t svuint32x2_t;\n" "typedef __clang_svuint64x2_t svuint64x2_t;\n" "typedef __clang_svfloat16x2_t svfloat16x2_t;\n" "typedef __clang_svfloat32x2_t svfloat32x2_t;\n" "typedef __clang_svfloat64x2_t svfloat64x2_t;\n" "typedef __clang_svint8x3_t svint8x3_t;\n" "typedef __clang_svint16x3_t svint16x3_t;\n" "typedef __clang_svint32x3_t svint32x3_t;\n" "typedef __clang_svint64x3_t svint64x3_t;\n" 
"typedef __clang_svuint8x3_t svuint8x3_t;\n" "typedef __clang_svuint16x3_t svuint16x3_t;\n" "typedef __clang_svuint32x3_t svuint32x3_t;\n" "typedef __clang_svuint64x3_t svuint64x3_t;\n" "typedef __clang_svfloat16x3_t svfloat16x3_t;\n" "typedef __clang_svfloat32x3_t svfloat32x3_t;\n" "typedef __clang_svfloat64x3_t svfloat64x3_t;\n" "typedef __clang_svint8x4_t svint8x4_t;\n" "typedef __clang_svint16x4_t svint16x4_t;\n" "typedef __clang_svint32x4_t svint32x4_t;\n" "typedef __clang_svint64x4_t svint64x4_t;\n" "typedef __clang_svuint8x4_t svuint8x4_t;\n" "typedef __clang_svuint16x4_t svuint16x4_t;\n" "typedef __clang_svuint32x4_t svuint32x4_t;\n" "typedef __clang_svuint64x4_t svuint64x4_t;\n" "typedef __clang_svfloat16x4_t svfloat16x4_t;\n" "typedef __clang_svfloat32x4_t svfloat32x4_t;\n" "typedef __clang_svfloat64x4_t svfloat64x4_t;\n" "typedef __SVBool_t svbool_t;\n" "typedef __clang_svboolx2_t svboolx2_t;\n" "typedef __clang_svboolx4_t svboolx4_t;\n" "\n" "typedef __clang_svbfloat16x2_t svbfloat16x2_t;\n" "typedef __clang_svbfloat16x3_t svbfloat16x3_t;\n" "typedef __clang_svbfloat16x4_t svbfloat16x4_t;\n" "typedef __SVCount_t svcount_t;\n" "\n" "enum svpattern\n" "{\n" " SV_POW2 = 0,\n" " SV_VL1 = 1,\n" " SV_VL2 = 2,\n" " SV_VL3 = 3,\n" " SV_VL4 = 4,\n" " SV_VL5 = 5,\n" " SV_VL6 = 6,\n" " SV_VL7 = 7,\n" " SV_VL8 = 8,\n" " SV_VL16 = 9,\n" " SV_VL32 = 10,\n" " SV_VL64 = 11,\n" " SV_VL128 = 12,\n" " SV_VL256 = 13,\n" " SV_MUL4 = 29,\n" " SV_MUL3 = 30,\n" " SV_ALL = 31\n" "};\n" "\n" "enum svprfop\n" "{\n" " SV_PLDL1KEEP = 0,\n" " SV_PLDL1STRM = 1,\n" " SV_PLDL2KEEP = 2,\n" " SV_PLDL2STRM = 3,\n" " SV_PLDL3KEEP = 4,\n" " SV_PLDL3STRM = 5,\n" " SV_PSTL1KEEP = 8,\n" " SV_PSTL1STRM = 9,\n" " SV_PSTL2KEEP = 10,\n" " SV_PSTL2STRM = 11,\n" " SV_PSTL3KEEP = 12,\n" " SV_PSTL3STRM = 13\n" "};\n" "\n" "/* Function attributes */\n" "#define __ai static __inline__ __attribute__((__always_inline__, __nodebug__))\n" "\n" "#define __aio static __inline__ 
__attribute__((__always_inline__, __nodebug__, __overloadable__))\n" "\n" "#define svreinterpret_s8_s8(...) __builtin_sve_reinterpret_s8_s8(__VA_ARGS__)\n" "#define svreinterpret_s8_s16(...) __builtin_sve_reinterpret_s8_s16(__VA_ARGS__)\n" "#define svreinterpret_s8_s32(...) __builtin_sve_reinterpret_s8_s32(__VA_ARGS__)\n" "#define svreinterpret_s8_s64(...) __builtin_sve_reinterpret_s8_s64(__VA_ARGS__)\n" "#define svreinterpret_s8_u8(...) __builtin_sve_reinterpret_s8_u8(__VA_ARGS__)\n" "#define svreinterpret_s8_u16(...) __builtin_sve_reinterpret_s8_u16(__VA_ARGS__)\n" "#define svreinterpret_s8_u32(...) __builtin_sve_reinterpret_s8_u32(__VA_ARGS__)\n" "#define svreinterpret_s8_u64(...) __builtin_sve_reinterpret_s8_u64(__VA_ARGS__)\n" "#define svreinterpret_s8_f16(...) __builtin_sve_reinterpret_s8_f16(__VA_ARGS__)\n" "#define svreinterpret_s8_bf16(...) __builtin_sve_reinterpret_s8_bf16(__VA_ARGS__)\n" "#define svreinterpret_s8_f32(...) __builtin_sve_reinterpret_s8_f32(__VA_ARGS__)\n" "#define svreinterpret_s8_f64(...) __builtin_sve_reinterpret_s8_f64(__VA_ARGS__)\n" "#define svreinterpret_s16_s8(...) __builtin_sve_reinterpret_s16_s8(__VA_ARGS__)\n" "#define svreinterpret_s16_s16(...) __builtin_sve_reinterpret_s16_s16(__VA_ARGS__)\n" "#define svreinterpret_s16_s32(...) __builtin_sve_reinterpret_s16_s32(__VA_ARGS__)\n" "#define svreinterpret_s16_s64(...) __builtin_sve_reinterpret_s16_s64(__VA_ARGS__)\n" "#define svreinterpret_s16_u8(...) __builtin_sve_reinterpret_s16_u8(__VA_ARGS__)\n" "#define svreinterpret_s16_u16(...) __builtin_sve_reinterpret_s16_u16(__VA_ARGS__)\n" "#define svreinterpret_s16_u32(...) __builtin_sve_reinterpret_s16_u32(__VA_ARGS__)\n" "#define svreinterpret_s16_u64(...) __builtin_sve_reinterpret_s16_u64(__VA_ARGS__)\n" "#define svreinterpret_s16_f16(...) __builtin_sve_reinterpret_s16_f16(__VA_ARGS__)\n" "#define svreinterpret_s16_bf16(...) __builtin_sve_reinterpret_s16_bf16(__VA_ARGS__)\n" "#define svreinterpret_s16_f32(...) 
__builtin_sve_reinterpret_s16_f32(__VA_ARGS__)\n" "#define svreinterpret_s16_f64(...) __builtin_sve_reinterpret_s16_f64(__VA_ARGS__)\n" "#define svreinterpret_s32_s8(...) __builtin_sve_reinterpret_s32_s8(__VA_ARGS__)\n" "#define svreinterpret_s32_s16(...) __builtin_sve_reinterpret_s32_s16(__VA_ARGS__)\n" "#define svreinterpret_s32_s32(...) __builtin_sve_reinterpret_s32_s32(__VA_ARGS__)\n" "#define svreinterpret_s32_s64(...) __builtin_sve_reinterpret_s32_s64(__VA_ARGS__)\n" "#define svreinterpret_s32_u8(...) __builtin_sve_reinterpret_s32_u8(__VA_ARGS__)\n" "#define svreinterpret_s32_u16(...) __builtin_sve_reinterpret_s32_u16(__VA_ARGS__)\n" "#define svreinterpret_s32_u32(...) __builtin_sve_reinterpret_s32_u32(__VA_ARGS__)\n" "#define svreinterpret_s32_u64(...) __builtin_sve_reinterpret_s32_u64(__VA_ARGS__)\n" "#define svreinterpret_s32_f16(...) __builtin_sve_reinterpret_s32_f16(__VA_ARGS__)\n" "#define svreinterpret_s32_bf16(...) __builtin_sve_reinterpret_s32_bf16(__VA_ARGS__)\n" "#define svreinterpret_s32_f32(...) __builtin_sve_reinterpret_s32_f32(__VA_ARGS__)\n" "#define svreinterpret_s32_f64(...) __builtin_sve_reinterpret_s32_f64(__VA_ARGS__)\n" "#define svreinterpret_s64_s8(...) __builtin_sve_reinterpret_s64_s8(__VA_ARGS__)\n" "#define svreinterpret_s64_s16(...) __builtin_sve_reinterpret_s64_s16(__VA_ARGS__)\n" "#define svreinterpret_s64_s32(...) __builtin_sve_reinterpret_s64_s32(__VA_ARGS__)\n" "#define svreinterpret_s64_s64(...) __builtin_sve_reinterpret_s64_s64(__VA_ARGS__)\n" "#define svreinterpret_s64_u8(...) __builtin_sve_reinterpret_s64_u8(__VA_ARGS__)\n" "#define svreinterpret_s64_u16(...) __builtin_sve_reinterpret_s64_u16(__VA_ARGS__)\n" "#define svreinterpret_s64_u32(...) __builtin_sve_reinterpret_s64_u32(__VA_ARGS__)\n" "#define svreinterpret_s64_u64(...) __builtin_sve_reinterpret_s64_u64(__VA_ARGS__)\n" "#define svreinterpret_s64_f16(...) __builtin_sve_reinterpret_s64_f16(__VA_ARGS__)\n" "#define svreinterpret_s64_bf16(...) 
__builtin_sve_reinterpret_s64_bf16(__VA_ARGS__)\n" "#define svreinterpret_s64_f32(...) __builtin_sve_reinterpret_s64_f32(__VA_ARGS__)\n" "#define svreinterpret_s64_f64(...) __builtin_sve_reinterpret_s64_f64(__VA_ARGS__)\n" "#define svreinterpret_u8_s8(...) __builtin_sve_reinterpret_u8_s8(__VA_ARGS__)\n" "#define svreinterpret_u8_s16(...) __builtin_sve_reinterpret_u8_s16(__VA_ARGS__)\n" "#define svreinterpret_u8_s32(...) __builtin_sve_reinterpret_u8_s32(__VA_ARGS__)\n" "#define svreinterpret_u8_s64(...) __builtin_sve_reinterpret_u8_s64(__VA_ARGS__)\n" "#define svreinterpret_u8_u8(...) __builtin_sve_reinterpret_u8_u8(__VA_ARGS__)\n" "#define svreinterpret_u8_u16(...) __builtin_sve_reinterpret_u8_u16(__VA_ARGS__)\n" "#define svreinterpret_u8_u32(...) __builtin_sve_reinterpret_u8_u32(__VA_ARGS__)\n" "#define svreinterpret_u8_u64(...) __builtin_sve_reinterpret_u8_u64(__VA_ARGS__)\n" "#define svreinterpret_u8_f16(...) __builtin_sve_reinterpret_u8_f16(__VA_ARGS__)\n" "#define svreinterpret_u8_bf16(...) __builtin_sve_reinterpret_u8_bf16(__VA_ARGS__)\n" "#define svreinterpret_u8_f32(...) __builtin_sve_reinterpret_u8_f32(__VA_ARGS__)\n" "#define svreinterpret_u8_f64(...) __builtin_sve_reinterpret_u8_f64(__VA_ARGS__)\n" "#define svreinterpret_u16_s8(...) __builtin_sve_reinterpret_u16_s8(__VA_ARGS__)\n" "#define svreinterpret_u16_s16(...) __builtin_sve_reinterpret_u16_s16(__VA_ARGS__)\n" "#define svreinterpret_u16_s32(...) __builtin_sve_reinterpret_u16_s32(__VA_ARGS__)\n" "#define svreinterpret_u16_s64(...) __builtin_sve_reinterpret_u16_s64(__VA_ARGS__)\n" "#define svreinterpret_u16_u8(...) __builtin_sve_reinterpret_u16_u8(__VA_ARGS__)\n" "#define svreinterpret_u16_u16(...) __builtin_sve_reinterpret_u16_u16(__VA_ARGS__)\n" "#define svreinterpret_u16_u32(...) __builtin_sve_reinterpret_u16_u32(__VA_ARGS__)\n" "#define svreinterpret_u16_u64(...) __builtin_sve_reinterpret_u16_u64(__VA_ARGS__)\n" "#define svreinterpret_u16_f16(...) 
__builtin_sve_reinterpret_u16_f16(__VA_ARGS__)\n" "#define svreinterpret_u16_bf16(...) __builtin_sve_reinterpret_u16_bf16(__VA_ARGS__)\n" "#define svreinterpret_u16_f32(...) __builtin_sve_reinterpret_u16_f32(__VA_ARGS__)\n" "#define svreinterpret_u16_f64(...) __builtin_sve_reinterpret_u16_f64(__VA_ARGS__)\n" "#define svreinterpret_u32_s8(...) __builtin_sve_reinterpret_u32_s8(__VA_ARGS__)\n" "#define svreinterpret_u32_s16(...) __builtin_sve_reinterpret_u32_s16(__VA_ARGS__)\n" "#define svreinterpret_u32_s32(...) __builtin_sve_reinterpret_u32_s32(__VA_ARGS__)\n" "#define svreinterpret_u32_s64(...) __builtin_sve_reinterpret_u32_s64(__VA_ARGS__)\n" "#define svreinterpret_u32_u8(...) __builtin_sve_reinterpret_u32_u8(__VA_ARGS__)\n" "#define svreinterpret_u32_u16(...) __builtin_sve_reinterpret_u32_u16(__VA_ARGS__)\n" "#define svreinterpret_u32_u32(...) __builtin_sve_reinterpret_u32_u32(__VA_ARGS__)\n" "#define svreinterpret_u32_u64(...) __builtin_sve_reinterpret_u32_u64(__VA_ARGS__)\n" "#define svreinterpret_u32_f16(...) __builtin_sve_reinterpret_u32_f16(__VA_ARGS__)\n" "#define svreinterpret_u32_bf16(...) __builtin_sve_reinterpret_u32_bf16(__VA_ARGS__)\n" "#define svreinterpret_u32_f32(...) __builtin_sve_reinterpret_u32_f32(__VA_ARGS__)\n" "#define svreinterpret_u32_f64(...) __builtin_sve_reinterpret_u32_f64(__VA_ARGS__)\n" "#define svreinterpret_u64_s8(...) __builtin_sve_reinterpret_u64_s8(__VA_ARGS__)\n" "#define svreinterpret_u64_s16(...) __builtin_sve_reinterpret_u64_s16(__VA_ARGS__)\n" "#define svreinterpret_u64_s32(...) __builtin_sve_reinterpret_u64_s32(__VA_ARGS__)\n" "#define svreinterpret_u64_s64(...) __builtin_sve_reinterpret_u64_s64(__VA_ARGS__)\n" "#define svreinterpret_u64_u8(...) __builtin_sve_reinterpret_u64_u8(__VA_ARGS__)\n" "#define svreinterpret_u64_u16(...) __builtin_sve_reinterpret_u64_u16(__VA_ARGS__)\n" "#define svreinterpret_u64_u32(...) __builtin_sve_reinterpret_u64_u32(__VA_ARGS__)\n" "#define svreinterpret_u64_u64(...) 
__builtin_sve_reinterpret_u64_u64(__VA_ARGS__)\n" "#define svreinterpret_u64_f16(...) __builtin_sve_reinterpret_u64_f16(__VA_ARGS__)\n" "#define svreinterpret_u64_bf16(...) __builtin_sve_reinterpret_u64_bf16(__VA_ARGS__)\n" "#define svreinterpret_u64_f32(...) __builtin_sve_reinterpret_u64_f32(__VA_ARGS__)\n" "#define svreinterpret_u64_f64(...) __builtin_sve_reinterpret_u64_f64(__VA_ARGS__)\n" "#define svreinterpret_f16_s8(...) __builtin_sve_reinterpret_f16_s8(__VA_ARGS__)\n" "#define svreinterpret_f16_s16(...) __builtin_sve_reinterpret_f16_s16(__VA_ARGS__)\n" "#define svreinterpret_f16_s32(...) __builtin_sve_reinterpret_f16_s32(__VA_ARGS__)\n" "#define svreinterpret_f16_s64(...) __builtin_sve_reinterpret_f16_s64(__VA_ARGS__)\n" "#define svreinterpret_f16_u8(...) __builtin_sve_reinterpret_f16_u8(__VA_ARGS__)\n" "#define svreinterpret_f16_u16(...) __builtin_sve_reinterpret_f16_u16(__VA_ARGS__)\n" "#define svreinterpret_f16_u32(...) __builtin_sve_reinterpret_f16_u32(__VA_ARGS__)\n" "#define svreinterpret_f16_u64(...) __builtin_sve_reinterpret_f16_u64(__VA_ARGS__)\n" "#define svreinterpret_f16_f16(...) __builtin_sve_reinterpret_f16_f16(__VA_ARGS__)\n" "#define svreinterpret_f16_bf16(...) __builtin_sve_reinterpret_f16_bf16(__VA_ARGS__)\n" "#define svreinterpret_f16_f32(...) __builtin_sve_reinterpret_f16_f32(__VA_ARGS__)\n" "#define svreinterpret_f16_f64(...) __builtin_sve_reinterpret_f16_f64(__VA_ARGS__)\n" "#define svreinterpret_bf16_s8(...) __builtin_sve_reinterpret_bf16_s8(__VA_ARGS__)\n" "#define svreinterpret_bf16_s16(...) __builtin_sve_reinterpret_bf16_s16(__VA_ARGS__)\n" "#define svreinterpret_bf16_s32(...) __builtin_sve_reinterpret_bf16_s32(__VA_ARGS__)\n" "#define svreinterpret_bf16_s64(...) __builtin_sve_reinterpret_bf16_s64(__VA_ARGS__)\n" "#define svreinterpret_bf16_u8(...) __builtin_sve_reinterpret_bf16_u8(__VA_ARGS__)\n" "#define svreinterpret_bf16_u16(...) __builtin_sve_reinterpret_bf16_u16(__VA_ARGS__)\n" "#define svreinterpret_bf16_u32(...) 
__builtin_sve_reinterpret_bf16_u32(__VA_ARGS__)\n" "#define svreinterpret_bf16_u64(...) __builtin_sve_reinterpret_bf16_u64(__VA_ARGS__)\n" "#define svreinterpret_bf16_f16(...) __builtin_sve_reinterpret_bf16_f16(__VA_ARGS__)\n" "#define svreinterpret_bf16_bf16(...) __builtin_sve_reinterpret_bf16_bf16(__VA_ARGS__)\n" "#define svreinterpret_bf16_f32(...) __builtin_sve_reinterpret_bf16_f32(__VA_ARGS__)\n" "#define svreinterpret_bf16_f64(...) __builtin_sve_reinterpret_bf16_f64(__VA_ARGS__)\n" "#define svreinterpret_f32_s8(...) __builtin_sve_reinterpret_f32_s8(__VA_ARGS__)\n" "#define svreinterpret_f32_s16(...) __builtin_sve_reinterpret_f32_s16(__VA_ARGS__)\n" "#define svreinterpret_f32_s32(...) __builtin_sve_reinterpret_f32_s32(__VA_ARGS__)\n" "#define svreinterpret_f32_s64(...) __builtin_sve_reinterpret_f32_s64(__VA_ARGS__)\n" "#define svreinterpret_f32_u8(...) __builtin_sve_reinterpret_f32_u8(__VA_ARGS__)\n" "#define svreinterpret_f32_u16(...) __builtin_sve_reinterpret_f32_u16(__VA_ARGS__)\n" "#define svreinterpret_f32_u32(...) __builtin_sve_reinterpret_f32_u32(__VA_ARGS__)\n" "#define svreinterpret_f32_u64(...) __builtin_sve_reinterpret_f32_u64(__VA_ARGS__)\n" "#define svreinterpret_f32_f16(...) __builtin_sve_reinterpret_f32_f16(__VA_ARGS__)\n" "#define svreinterpret_f32_bf16(...) __builtin_sve_reinterpret_f32_bf16(__VA_ARGS__)\n" "#define svreinterpret_f32_f32(...) __builtin_sve_reinterpret_f32_f32(__VA_ARGS__)\n" "#define svreinterpret_f32_f64(...) __builtin_sve_reinterpret_f32_f64(__VA_ARGS__)\n" "#define svreinterpret_f64_s8(...) __builtin_sve_reinterpret_f64_s8(__VA_ARGS__)\n" "#define svreinterpret_f64_s16(...) __builtin_sve_reinterpret_f64_s16(__VA_ARGS__)\n" "#define svreinterpret_f64_s32(...) __builtin_sve_reinterpret_f64_s32(__VA_ARGS__)\n" "#define svreinterpret_f64_s64(...) __builtin_sve_reinterpret_f64_s64(__VA_ARGS__)\n" "#define svreinterpret_f64_u8(...) __builtin_sve_reinterpret_f64_u8(__VA_ARGS__)\n" "#define svreinterpret_f64_u16(...) 
__builtin_sve_reinterpret_f64_u16(__VA_ARGS__)\n" "#define svreinterpret_f64_u32(...) __builtin_sve_reinterpret_f64_u32(__VA_ARGS__)\n" "#define svreinterpret_f64_u64(...) __builtin_sve_reinterpret_f64_u64(__VA_ARGS__)\n" "#define svreinterpret_f64_f16(...) __builtin_sve_reinterpret_f64_f16(__VA_ARGS__)\n" "#define svreinterpret_f64_bf16(...) __builtin_sve_reinterpret_f64_bf16(__VA_ARGS__)\n" "#define svreinterpret_f64_f32(...) __builtin_sve_reinterpret_f64_f32(__VA_ARGS__)\n" "#define svreinterpret_f64_f64(...) __builtin_sve_reinterpret_f64_f64(__VA_ARGS__)\n" "__aio __attribute__((target(\"sve\"))) svint8_t svreinterpret_s8(svint8_t op) {\n" " return __builtin_sve_reinterpret_s8_s8(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint8_t svreinterpret_s8(svint16_t op) {\n" " return __builtin_sve_reinterpret_s8_s16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint8_t svreinterpret_s8(svint32_t op) {\n" " return __builtin_sve_reinterpret_s8_s32(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint8_t svreinterpret_s8(svint64_t op) {\n" " return __builtin_sve_reinterpret_s8_s64(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint8_t svreinterpret_s8(svuint8_t op) {\n" " return __builtin_sve_reinterpret_s8_u8(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint8_t svreinterpret_s8(svuint16_t op) {\n" " return __builtin_sve_reinterpret_s8_u16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint8_t svreinterpret_s8(svuint32_t op) {\n" " return __builtin_sve_reinterpret_s8_u32(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint8_t svreinterpret_s8(svuint64_t op) {\n" " return __builtin_sve_reinterpret_s8_u64(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint8_t svreinterpret_s8(svfloat16_t op) {\n" " return __builtin_sve_reinterpret_s8_f16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint8_t svreinterpret_s8(svbfloat16_t op) {\n" " return 
__builtin_sve_reinterpret_s8_bf16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint8_t svreinterpret_s8(svfloat32_t op) {\n" " return __builtin_sve_reinterpret_s8_f32(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint8_t svreinterpret_s8(svfloat64_t op) {\n" " return __builtin_sve_reinterpret_s8_f64(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint16_t svreinterpret_s16(svint8_t op) {\n" " return __builtin_sve_reinterpret_s16_s8(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint16_t svreinterpret_s16(svint16_t op) {\n" " return __builtin_sve_reinterpret_s16_s16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint16_t svreinterpret_s16(svint32_t op) {\n" " return __builtin_sve_reinterpret_s16_s32(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint16_t svreinterpret_s16(svint64_t op) {\n" " return __builtin_sve_reinterpret_s16_s64(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint16_t svreinterpret_s16(svuint8_t op) {\n" " return __builtin_sve_reinterpret_s16_u8(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint16_t svreinterpret_s16(svuint16_t op) {\n" " return __builtin_sve_reinterpret_s16_u16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint16_t svreinterpret_s16(svuint32_t op) {\n" " return __builtin_sve_reinterpret_s16_u32(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint16_t svreinterpret_s16(svuint64_t op) {\n" " return __builtin_sve_reinterpret_s16_u64(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint16_t svreinterpret_s16(svfloat16_t op) {\n" " return __builtin_sve_reinterpret_s16_f16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint16_t svreinterpret_s16(svbfloat16_t op) {\n" " return __builtin_sve_reinterpret_s16_bf16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint16_t svreinterpret_s16(svfloat32_t op) {\n" " return __builtin_sve_reinterpret_s16_f32(op);\n" "}\n" "\n" 
"__aio __attribute__((target(\"sve\"))) svint16_t svreinterpret_s16(svfloat64_t op) {\n" " return __builtin_sve_reinterpret_s16_f64(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint32_t svreinterpret_s32(svint8_t op) {\n" " return __builtin_sve_reinterpret_s32_s8(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint32_t svreinterpret_s32(svint16_t op) {\n" " return __builtin_sve_reinterpret_s32_s16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint32_t svreinterpret_s32(svint32_t op) {\n" " return __builtin_sve_reinterpret_s32_s32(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint32_t svreinterpret_s32(svint64_t op) {\n" " return __builtin_sve_reinterpret_s32_s64(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint32_t svreinterpret_s32(svuint8_t op) {\n" " return __builtin_sve_reinterpret_s32_u8(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint32_t svreinterpret_s32(svuint16_t op) {\n" " return __builtin_sve_reinterpret_s32_u16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint32_t svreinterpret_s32(svuint32_t op) {\n" " return __builtin_sve_reinterpret_s32_u32(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint32_t svreinterpret_s32(svuint64_t op) {\n" " return __builtin_sve_reinterpret_s32_u64(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint32_t svreinterpret_s32(svfloat16_t op) {\n" " return __builtin_sve_reinterpret_s32_f16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint32_t svreinterpret_s32(svbfloat16_t op) {\n" " return __builtin_sve_reinterpret_s32_bf16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint32_t svreinterpret_s32(svfloat32_t op) {\n" " return __builtin_sve_reinterpret_s32_f32(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint32_t svreinterpret_s32(svfloat64_t op) {\n" " return __builtin_sve_reinterpret_s32_f64(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint64_t 
svreinterpret_s64(svint8_t op) {\n" " return __builtin_sve_reinterpret_s64_s8(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint64_t svreinterpret_s64(svint16_t op) {\n" " return __builtin_sve_reinterpret_s64_s16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint64_t svreinterpret_s64(svint32_t op) {\n" " return __builtin_sve_reinterpret_s64_s32(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint64_t svreinterpret_s64(svint64_t op) {\n" " return __builtin_sve_reinterpret_s64_s64(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint64_t svreinterpret_s64(svuint8_t op) {\n" " return __builtin_sve_reinterpret_s64_u8(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint64_t svreinterpret_s64(svuint16_t op) {\n" " return __builtin_sve_reinterpret_s64_u16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint64_t svreinterpret_s64(svuint32_t op) {\n" " return __builtin_sve_reinterpret_s64_u32(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint64_t svreinterpret_s64(svuint64_t op) {\n" " return __builtin_sve_reinterpret_s64_u64(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint64_t svreinterpret_s64(svfloat16_t op) {\n" " return __builtin_sve_reinterpret_s64_f16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint64_t svreinterpret_s64(svbfloat16_t op) {\n" " return __builtin_sve_reinterpret_s64_bf16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint64_t svreinterpret_s64(svfloat32_t op) {\n" " return __builtin_sve_reinterpret_s64_f32(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svint64_t svreinterpret_s64(svfloat64_t op) {\n" " return __builtin_sve_reinterpret_s64_f64(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint8_t svreinterpret_u8(svint8_t op) {\n" " return __builtin_sve_reinterpret_u8_s8(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint8_t svreinterpret_u8(svint16_t op) {\n" " return 
__builtin_sve_reinterpret_u8_s16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint8_t svreinterpret_u8(svint32_t op) {\n" " return __builtin_sve_reinterpret_u8_s32(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint8_t svreinterpret_u8(svint64_t op) {\n" " return __builtin_sve_reinterpret_u8_s64(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint8_t svreinterpret_u8(svuint8_t op) {\n" " return __builtin_sve_reinterpret_u8_u8(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint8_t svreinterpret_u8(svuint16_t op) {\n" " return __builtin_sve_reinterpret_u8_u16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint8_t svreinterpret_u8(svuint32_t op) {\n" " return __builtin_sve_reinterpret_u8_u32(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint8_t svreinterpret_u8(svuint64_t op) {\n" " return __builtin_sve_reinterpret_u8_u64(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint8_t svreinterpret_u8(svfloat16_t op) {\n" " return __builtin_sve_reinterpret_u8_f16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint8_t svreinterpret_u8(svbfloat16_t op) {\n" " return __builtin_sve_reinterpret_u8_bf16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint8_t svreinterpret_u8(svfloat32_t op) {\n" " return __builtin_sve_reinterpret_u8_f32(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint8_t svreinterpret_u8(svfloat64_t op) {\n" " return __builtin_sve_reinterpret_u8_f64(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint16_t svreinterpret_u16(svint8_t op) {\n" " return __builtin_sve_reinterpret_u16_s8(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint16_t svreinterpret_u16(svint16_t op) {\n" " return __builtin_sve_reinterpret_u16_s16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint16_t svreinterpret_u16(svint32_t op) {\n" " return __builtin_sve_reinterpret_u16_s32(op);\n" "}\n" "\n" "__aio 
__attribute__((target(\"sve\"))) svuint16_t svreinterpret_u16(svint64_t op) {\n" " return __builtin_sve_reinterpret_u16_s64(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint16_t svreinterpret_u16(svuint8_t op) {\n" " return __builtin_sve_reinterpret_u16_u8(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint16_t svreinterpret_u16(svuint16_t op) {\n" " return __builtin_sve_reinterpret_u16_u16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint16_t svreinterpret_u16(svuint32_t op) {\n" " return __builtin_sve_reinterpret_u16_u32(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint16_t svreinterpret_u16(svuint64_t op) {\n" " return __builtin_sve_reinterpret_u16_u64(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint16_t svreinterpret_u16(svfloat16_t op) {\n" " return __builtin_sve_reinterpret_u16_f16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint16_t svreinterpret_u16(svbfloat16_t op) {\n" " return __builtin_sve_reinterpret_u16_bf16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint16_t svreinterpret_u16(svfloat32_t op) {\n" " return __builtin_sve_reinterpret_u16_f32(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint16_t svreinterpret_u16(svfloat64_t op) {\n" " return __builtin_sve_reinterpret_u16_f64(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint32_t svreinterpret_u32(svint8_t op) {\n" " return __builtin_sve_reinterpret_u32_s8(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint32_t svreinterpret_u32(svint16_t op) {\n" " return __builtin_sve_reinterpret_u32_s16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint32_t svreinterpret_u32(svint32_t op) {\n" " return __builtin_sve_reinterpret_u32_s32(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint32_t svreinterpret_u32(svint64_t op) {\n" " return __builtin_sve_reinterpret_u32_s64(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) 
svuint32_t svreinterpret_u32(svuint8_t op) {\n" " return __builtin_sve_reinterpret_u32_u8(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint32_t svreinterpret_u32(svuint16_t op) {\n" " return __builtin_sve_reinterpret_u32_u16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint32_t svreinterpret_u32(svuint32_t op) {\n" " return __builtin_sve_reinterpret_u32_u32(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint32_t svreinterpret_u32(svuint64_t op) {\n" " return __builtin_sve_reinterpret_u32_u64(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint32_t svreinterpret_u32(svfloat16_t op) {\n" " return __builtin_sve_reinterpret_u32_f16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint32_t svreinterpret_u32(svbfloat16_t op) {\n" " return __builtin_sve_reinterpret_u32_bf16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint32_t svreinterpret_u32(svfloat32_t op) {\n" " return __builtin_sve_reinterpret_u32_f32(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint32_t svreinterpret_u32(svfloat64_t op) {\n" " return __builtin_sve_reinterpret_u32_f64(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint64_t svreinterpret_u64(svint8_t op) {\n" " return __builtin_sve_reinterpret_u64_s8(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint64_t svreinterpret_u64(svint16_t op) {\n" " return __builtin_sve_reinterpret_u64_s16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint64_t svreinterpret_u64(svint32_t op) {\n" " return __builtin_sve_reinterpret_u64_s32(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint64_t svreinterpret_u64(svint64_t op) {\n" " return __builtin_sve_reinterpret_u64_s64(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint64_t svreinterpret_u64(svuint8_t op) {\n" " return __builtin_sve_reinterpret_u64_u8(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint64_t svreinterpret_u64(svuint16_t 
op) {\n" " return __builtin_sve_reinterpret_u64_u16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint64_t svreinterpret_u64(svuint32_t op) {\n" " return __builtin_sve_reinterpret_u64_u32(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint64_t svreinterpret_u64(svuint64_t op) {\n" " return __builtin_sve_reinterpret_u64_u64(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint64_t svreinterpret_u64(svfloat16_t op) {\n" " return __builtin_sve_reinterpret_u64_f16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint64_t svreinterpret_u64(svbfloat16_t op) {\n" " return __builtin_sve_reinterpret_u64_bf16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint64_t svreinterpret_u64(svfloat32_t op) {\n" " return __builtin_sve_reinterpret_u64_f32(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svuint64_t svreinterpret_u64(svfloat64_t op) {\n" " return __builtin_sve_reinterpret_u64_f64(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svfloat16_t svreinterpret_f16(svint8_t op) {\n" " return __builtin_sve_reinterpret_f16_s8(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svfloat16_t svreinterpret_f16(svint16_t op) {\n" " return __builtin_sve_reinterpret_f16_s16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svfloat16_t svreinterpret_f16(svint32_t op) {\n" " return __builtin_sve_reinterpret_f16_s32(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svfloat16_t svreinterpret_f16(svint64_t op) {\n" " return __builtin_sve_reinterpret_f16_s64(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svfloat16_t svreinterpret_f16(svuint8_t op) {\n" " return __builtin_sve_reinterpret_f16_u8(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svfloat16_t svreinterpret_f16(svuint16_t op) {\n" " return __builtin_sve_reinterpret_f16_u16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svfloat16_t svreinterpret_f16(svuint32_t op) {\n" " return 
__builtin_sve_reinterpret_f16_u32(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svfloat16_t svreinterpret_f16(svuint64_t op) {\n" " return __builtin_sve_reinterpret_f16_u64(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svfloat16_t svreinterpret_f16(svfloat16_t op) {\n" " return __builtin_sve_reinterpret_f16_f16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svfloat16_t svreinterpret_f16(svbfloat16_t op) {\n" " return __builtin_sve_reinterpret_f16_bf16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svfloat16_t svreinterpret_f16(svfloat32_t op) {\n" " return __builtin_sve_reinterpret_f16_f32(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svfloat16_t svreinterpret_f16(svfloat64_t op) {\n" " return __builtin_sve_reinterpret_f16_f64(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svbfloat16_t svreinterpret_bf16(svint8_t op) {\n" " return __builtin_sve_reinterpret_bf16_s8(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svbfloat16_t svreinterpret_bf16(svint16_t op) {\n" " return __builtin_sve_reinterpret_bf16_s16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svbfloat16_t svreinterpret_bf16(svint32_t op) {\n" " return __builtin_sve_reinterpret_bf16_s32(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svbfloat16_t svreinterpret_bf16(svint64_t op) {\n" " return __builtin_sve_reinterpret_bf16_s64(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svbfloat16_t svreinterpret_bf16(svuint8_t op) {\n" " return __builtin_sve_reinterpret_bf16_u8(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svbfloat16_t svreinterpret_bf16(svuint16_t op) {\n" " return __builtin_sve_reinterpret_bf16_u16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svbfloat16_t svreinterpret_bf16(svuint32_t op) {\n" " return __builtin_sve_reinterpret_bf16_u32(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svbfloat16_t svreinterpret_bf16(svuint64_t op) {\n" " return 
__builtin_sve_reinterpret_bf16_u64(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svbfloat16_t svreinterpret_bf16(svfloat16_t op) {\n" " return __builtin_sve_reinterpret_bf16_f16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svbfloat16_t svreinterpret_bf16(svbfloat16_t op) {\n" " return __builtin_sve_reinterpret_bf16_bf16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svbfloat16_t svreinterpret_bf16(svfloat32_t op) {\n" " return __builtin_sve_reinterpret_bf16_f32(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svbfloat16_t svreinterpret_bf16(svfloat64_t op) {\n" " return __builtin_sve_reinterpret_bf16_f64(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svfloat32_t svreinterpret_f32(svint8_t op) {\n" " return __builtin_sve_reinterpret_f32_s8(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svfloat32_t svreinterpret_f32(svint16_t op) {\n" " return __builtin_sve_reinterpret_f32_s16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svfloat32_t svreinterpret_f32(svint32_t op) {\n" " return __builtin_sve_reinterpret_f32_s32(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svfloat32_t svreinterpret_f32(svint64_t op) {\n" " return __builtin_sve_reinterpret_f32_s64(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svfloat32_t svreinterpret_f32(svuint8_t op) {\n" " return __builtin_sve_reinterpret_f32_u8(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svfloat32_t svreinterpret_f32(svuint16_t op) {\n" " return __builtin_sve_reinterpret_f32_u16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svfloat32_t svreinterpret_f32(svuint32_t op) {\n" " return __builtin_sve_reinterpret_f32_u32(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svfloat32_t svreinterpret_f32(svuint64_t op) {\n" " return __builtin_sve_reinterpret_f32_u64(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svfloat32_t svreinterpret_f32(svfloat16_t op) {\n" " return 
__builtin_sve_reinterpret_f32_f16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svfloat32_t svreinterpret_f32(svbfloat16_t op) {\n" " return __builtin_sve_reinterpret_f32_bf16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svfloat32_t svreinterpret_f32(svfloat32_t op) {\n" " return __builtin_sve_reinterpret_f32_f32(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svfloat32_t svreinterpret_f32(svfloat64_t op) {\n" " return __builtin_sve_reinterpret_f32_f64(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svfloat64_t svreinterpret_f64(svint8_t op) {\n" " return __builtin_sve_reinterpret_f64_s8(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svfloat64_t svreinterpret_f64(svint16_t op) {\n" " return __builtin_sve_reinterpret_f64_s16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svfloat64_t svreinterpret_f64(svint32_t op) {\n" " return __builtin_sve_reinterpret_f64_s32(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svfloat64_t svreinterpret_f64(svint64_t op) {\n" " return __builtin_sve_reinterpret_f64_s64(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svfloat64_t svreinterpret_f64(svuint8_t op) {\n" " return __builtin_sve_reinterpret_f64_u8(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svfloat64_t svreinterpret_f64(svuint16_t op) {\n" " return __builtin_sve_reinterpret_f64_u16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svfloat64_t svreinterpret_f64(svuint32_t op) {\n" " return __builtin_sve_reinterpret_f64_u32(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svfloat64_t svreinterpret_f64(svuint64_t op) {\n" " return __builtin_sve_reinterpret_f64_u64(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svfloat64_t svreinterpret_f64(svfloat16_t op) {\n" " return __builtin_sve_reinterpret_f64_f16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svfloat64_t svreinterpret_f64(svbfloat16_t op) {\n" " return 
__builtin_sve_reinterpret_f64_bf16(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svfloat64_t svreinterpret_f64(svfloat32_t op) {\n" " return __builtin_sve_reinterpret_f64_f32(op);\n" "}\n" "\n" "__aio __attribute__((target(\"sve\"))) svfloat64_t svreinterpret_f64(svfloat64_t op) {\n" " return __builtin_sve_reinterpret_f64_f64(op);\n" "}\n" "\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_m)))\n" "svfloat64_t svabd_n_f64_m(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_m)))\n" "svfloat32_t svabd_n_f32_m(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_m)))\n" "svfloat16_t svabd_n_f16_m(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_x)))\n" "svfloat64_t svabd_n_f64_x(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_x)))\n" "svfloat32_t svabd_n_f32_x(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_x)))\n" "svfloat16_t svabd_n_f16_x(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_z)))\n" "svfloat64_t svabd_n_f64_z(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_z)))\n" "svfloat32_t svabd_n_f32_z(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_z)))\n" "svfloat16_t svabd_n_f16_z(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_m)))\n" "svint8_t svabd_n_s8_m(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_m)))\n" "svint32_t svabd_n_s32_m(svbool_t, svint32_t, int32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_m)))\n" "svint64_t svabd_n_s64_m(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_m)))\n" "svint16_t svabd_n_s16_m(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_x)))\n" "svint8_t svabd_n_s8_x(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_x)))\n" "svint32_t svabd_n_s32_x(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_x)))\n" "svint64_t svabd_n_s64_x(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_x)))\n" "svint16_t svabd_n_s16_x(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_z)))\n" "svint8_t svabd_n_s8_z(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_z)))\n" "svint32_t svabd_n_s32_z(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_z)))\n" "svint64_t svabd_n_s64_z(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_z)))\n" "svint16_t svabd_n_s16_z(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_m)))\n" "svuint8_t svabd_n_u8_m(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_m)))\n" "svuint32_t svabd_n_u32_m(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_m)))\n" "svuint64_t svabd_n_u64_m(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_m)))\n" "svuint16_t svabd_n_u16_m(svbool_t, svuint16_t, uint16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_x)))\n" "svuint8_t svabd_n_u8_x(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_x)))\n" "svuint32_t svabd_n_u32_x(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_x)))\n" "svuint64_t svabd_n_u64_x(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_x)))\n" "svuint16_t svabd_n_u16_x(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_z)))\n" "svuint8_t svabd_n_u8_z(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_z)))\n" "svuint32_t svabd_n_u32_z(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_z)))\n" "svuint64_t svabd_n_u64_z(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_z)))\n" "svuint16_t svabd_n_u16_z(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_m)))\n" "svfloat64_t svabd_f64_m(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_m)))\n" "svfloat32_t svabd_f32_m(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_m)))\n" "svfloat16_t svabd_f16_m(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_x)))\n" "svfloat64_t svabd_f64_x(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_x)))\n" "svfloat32_t svabd_f32_x(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_x)))\n" "svfloat16_t svabd_f16_x(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_z)))\n" "svfloat64_t svabd_f64_z(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_z)))\n" "svfloat32_t svabd_f32_z(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_z)))\n" "svfloat16_t svabd_f16_z(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_m)))\n" "svint8_t svabd_s8_m(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_m)))\n" "svint32_t svabd_s32_m(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_m)))\n" "svint64_t svabd_s64_m(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_m)))\n" "svint16_t svabd_s16_m(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_x)))\n" "svint8_t svabd_s8_x(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_x)))\n" "svint32_t svabd_s32_x(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_x)))\n" "svint64_t svabd_s64_x(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_x)))\n" "svint16_t svabd_s16_x(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_z)))\n" "svint8_t svabd_s8_z(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_z)))\n" "svint32_t svabd_s32_z(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_z)))\n" "svint64_t svabd_s64_z(svbool_t, svint64_t, svint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_z)))\n" "svint16_t svabd_s16_z(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_m)))\n" "svuint8_t svabd_u8_m(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_m)))\n" "svuint32_t svabd_u32_m(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_m)))\n" "svuint64_t svabd_u64_m(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_m)))\n" "svuint16_t svabd_u16_m(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_x)))\n" "svuint8_t svabd_u8_x(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_x)))\n" "svuint32_t svabd_u32_x(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_x)))\n" "svuint64_t svabd_u64_x(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_x)))\n" "svuint16_t svabd_u16_x(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_z)))\n" "svuint8_t svabd_u8_z(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_z)))\n" "svuint32_t svabd_u32_z(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_z)))\n" "svuint64_t svabd_u64_z(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_z)))\n" "svuint16_t svabd_u16_z(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_m)))\n" "svfloat64_t svabs_f64_m(svfloat64_t, svbool_t, svfloat64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_m)))\n" "svfloat32_t svabs_f32_m(svfloat32_t, svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_m)))\n" "svfloat16_t svabs_f16_m(svfloat16_t, svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_x)))\n" "svfloat64_t svabs_f64_x(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_x)))\n" "svfloat32_t svabs_f32_x(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_x)))\n" "svfloat16_t svabs_f16_x(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_z)))\n" "svfloat64_t svabs_f64_z(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_z)))\n" "svfloat32_t svabs_f32_z(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_z)))\n" "svfloat16_t svabs_f16_z(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_m)))\n" "svint8_t svabs_s8_m(svint8_t, svbool_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_m)))\n" "svint32_t svabs_s32_m(svint32_t, svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_m)))\n" "svint64_t svabs_s64_m(svint64_t, svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_m)))\n" "svint16_t svabs_s16_m(svint16_t, svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_x)))\n" "svint8_t svabs_s8_x(svbool_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_x)))\n" "svint32_t svabs_s32_x(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_x)))\n" "svint64_t svabs_s64_x(svbool_t, svint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_x)))\n" "svint16_t svabs_s16_x(svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_z)))\n" "svint8_t svabs_s8_z(svbool_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_z)))\n" "svint32_t svabs_s32_z(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_z)))\n" "svint64_t svabs_s64_z(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_z)))\n" "svint16_t svabs_s16_z(svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f64)))\n" "svbool_t svacge_n_f64(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f32)))\n" "svbool_t svacge_n_f32(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f16)))\n" "svbool_t svacge_n_f16(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f64)))\n" "svbool_t svacge_f64(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f32)))\n" "svbool_t svacge_f32(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f16)))\n" "svbool_t svacge_f16(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f64)))\n" "svbool_t svacgt_n_f64(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f32)))\n" "svbool_t svacgt_n_f32(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f16)))\n" "svbool_t svacgt_n_f16(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f64)))\n" "svbool_t svacgt_f64(svbool_t, 
svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f32)))\n" "svbool_t svacgt_f32(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f16)))\n" "svbool_t svacgt_f16(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f64)))\n" "svbool_t svacle_n_f64(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f32)))\n" "svbool_t svacle_n_f32(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f16)))\n" "svbool_t svacle_n_f16(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f64)))\n" "svbool_t svacle_f64(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f32)))\n" "svbool_t svacle_f32(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f16)))\n" "svbool_t svacle_f16(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f64)))\n" "svbool_t svaclt_n_f64(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f32)))\n" "svbool_t svaclt_n_f32(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f16)))\n" "svbool_t svaclt_n_f16(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f64)))\n" "svbool_t svaclt_f64(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f32)))\n" "svbool_t svaclt_f32(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f16)))\n" "svbool_t svaclt_f16(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_m)))\n" "svfloat64_t svadd_n_f64_m(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_m)))\n" "svfloat32_t svadd_n_f32_m(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_m)))\n" "svfloat16_t svadd_n_f16_m(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_x)))\n" "svfloat64_t svadd_n_f64_x(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_x)))\n" "svfloat32_t svadd_n_f32_x(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_x)))\n" "svfloat16_t svadd_n_f16_x(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_z)))\n" "svfloat64_t svadd_n_f64_z(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_z)))\n" "svfloat32_t svadd_n_f32_z(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_z)))\n" "svfloat16_t svadd_n_f16_z(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_m)))\n" "svuint8_t svadd_n_u8_m(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_m)))\n" "svuint32_t svadd_n_u32_m(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_m)))\n" "svuint64_t svadd_n_u64_m(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_m)))\n" "svuint16_t svadd_n_u16_m(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_m)))\n" "svint8_t svadd_n_s8_m(svbool_t, svint8_t, 
int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_m)))\n" "svint32_t svadd_n_s32_m(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_m)))\n" "svint64_t svadd_n_s64_m(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_m)))\n" "svint16_t svadd_n_s16_m(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_x)))\n" "svuint8_t svadd_n_u8_x(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_x)))\n" "svuint32_t svadd_n_u32_x(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_x)))\n" "svuint64_t svadd_n_u64_x(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_x)))\n" "svuint16_t svadd_n_u16_x(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_x)))\n" "svint8_t svadd_n_s8_x(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_x)))\n" "svint32_t svadd_n_s32_x(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_x)))\n" "svint64_t svadd_n_s64_x(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_x)))\n" "svint16_t svadd_n_s16_x(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_z)))\n" "svuint8_t svadd_n_u8_z(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_z)))\n" "svuint32_t svadd_n_u32_z(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_z)))\n" "svuint64_t svadd_n_u64_z(svbool_t, svuint64_t, uint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_z)))\n" "svuint16_t svadd_n_u16_z(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_z)))\n" "svint8_t svadd_n_s8_z(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_z)))\n" "svint32_t svadd_n_s32_z(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_z)))\n" "svint64_t svadd_n_s64_z(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_z)))\n" "svint16_t svadd_n_s16_z(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_m)))\n" "svfloat64_t svadd_f64_m(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_m)))\n" "svfloat32_t svadd_f32_m(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_m)))\n" "svfloat16_t svadd_f16_m(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_x)))\n" "svfloat64_t svadd_f64_x(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_x)))\n" "svfloat32_t svadd_f32_x(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_x)))\n" "svfloat16_t svadd_f16_x(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_z)))\n" "svfloat64_t svadd_f64_z(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_z)))\n" "svfloat32_t svadd_f32_z(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_z)))\n" "svfloat16_t svadd_f16_z(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_m)))\n" "svuint8_t svadd_u8_m(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_m)))\n" "svuint32_t svadd_u32_m(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_m)))\n" "svuint64_t svadd_u64_m(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_m)))\n" "svuint16_t svadd_u16_m(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_m)))\n" "svint8_t svadd_s8_m(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_m)))\n" "svint32_t svadd_s32_m(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_m)))\n" "svint64_t svadd_s64_m(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_m)))\n" "svint16_t svadd_s16_m(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_x)))\n" "svuint8_t svadd_u8_x(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_x)))\n" "svuint32_t svadd_u32_x(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_x)))\n" "svuint64_t svadd_u64_x(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_x)))\n" "svuint16_t svadd_u16_x(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_x)))\n" "svint8_t svadd_s8_x(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_x)))\n" "svint32_t svadd_s32_x(svbool_t, svint32_t, svint32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_x)))\n" "svint64_t svadd_s64_x(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_x)))\n" "svint16_t svadd_s16_x(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_z)))\n" "svuint8_t svadd_u8_z(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_z)))\n" "svuint32_t svadd_u32_z(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_z)))\n" "svuint64_t svadd_u64_z(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_z)))\n" "svuint16_t svadd_u16_z(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_z)))\n" "svint8_t svadd_s8_z(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_z)))\n" "svint32_t svadd_s32_z(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_z)))\n" "svint64_t svadd_s64_z(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_z)))\n" "svint16_t svadd_s16_z(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f64)))\n" "float64_t svadda_f64(svbool_t, float64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f32)))\n" "float32_t svadda_f32(svbool_t, float32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f16)))\n" "float16_t svadda_f16(svbool_t, float16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s8)))\n" "int64_t svaddv_s8(svbool_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s32)))\n" "int64_t 
svaddv_s32(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s64)))\n" "int64_t svaddv_s64(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s16)))\n" "int64_t svaddv_s16(svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u8)))\n" "uint64_t svaddv_u8(svbool_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u32)))\n" "uint64_t svaddv_u32(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u64)))\n" "uint64_t svaddv_u64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u16)))\n" "uint64_t svaddv_u16(svbool_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f64)))\n" "float64_t svaddv_f64(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f32)))\n" "float32_t svaddv_f32(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f16)))\n" "float16_t svaddv_f16(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u32base_u32offset)))\n" "svuint32_t svadrb_u32base_u32offset(svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u64base_u64offset)))\n" "svuint64_t svadrb_u64base_u64offset(svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u32base_s32offset)))\n" "svuint32_t svadrb_u32base_s32offset(svuint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u64base_s64offset)))\n" "svuint64_t svadrb_u64base_s64offset(svuint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u32base_u32index)))\n" "svuint32_t svadrd_u32base_u32index(svuint32_t, svuint32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u64base_u64index)))\n" "svuint64_t svadrd_u64base_u64index(svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u32base_s32index)))\n" "svuint32_t svadrd_u32base_s32index(svuint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u64base_s64index)))\n" "svuint64_t svadrd_u64base_s64index(svuint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u32base_u32index)))\n" "svuint32_t svadrh_u32base_u32index(svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u64base_u64index)))\n" "svuint64_t svadrh_u64base_u64index(svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u32base_s32index)))\n" "svuint32_t svadrh_u32base_s32index(svuint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u64base_s64index)))\n" "svuint64_t svadrh_u64base_s64index(svuint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u32base_u32index)))\n" "svuint32_t svadrw_u32base_u32index(svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u64base_u64index)))\n" "svuint64_t svadrw_u64base_u64index(svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u32base_s32index)))\n" "svuint32_t svadrw_u32base_s32index(svuint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u64base_s64index)))\n" "svuint64_t svadrw_u64base_s64index(svuint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_b_z)))\n" "svbool_t svand_b_z(svbool_t, svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_m)))\n" "svuint8_t svand_n_u8_m(svbool_t, svuint8_t, uint8_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_m)))\n" "svuint32_t svand_n_u32_m(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_m)))\n" "svuint64_t svand_n_u64_m(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_m)))\n" "svuint16_t svand_n_u16_m(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_m)))\n" "svint8_t svand_n_s8_m(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_m)))\n" "svint32_t svand_n_s32_m(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_m)))\n" "svint64_t svand_n_s64_m(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_m)))\n" "svint16_t svand_n_s16_m(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_x)))\n" "svuint8_t svand_n_u8_x(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_x)))\n" "svuint32_t svand_n_u32_x(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_x)))\n" "svuint64_t svand_n_u64_x(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_x)))\n" "svuint16_t svand_n_u16_x(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_x)))\n" "svint8_t svand_n_s8_x(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_x)))\n" "svint32_t svand_n_s32_x(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_x)))\n" "svint64_t svand_n_s64_x(svbool_t, svint64_t, int64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_x)))\n" "svint16_t svand_n_s16_x(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_z)))\n" "svuint8_t svand_n_u8_z(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_z)))\n" "svuint32_t svand_n_u32_z(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_z)))\n" "svuint64_t svand_n_u64_z(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_z)))\n" "svuint16_t svand_n_u16_z(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_z)))\n" "svint8_t svand_n_s8_z(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_z)))\n" "svint32_t svand_n_s32_z(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_z)))\n" "svint64_t svand_n_s64_z(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_z)))\n" "svint16_t svand_n_s16_z(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_m)))\n" "svuint8_t svand_u8_m(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_m)))\n" "svuint32_t svand_u32_m(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_m)))\n" "svuint64_t svand_u64_m(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_m)))\n" "svuint16_t svand_u16_m(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_m)))\n" "svint8_t svand_s8_m(svbool_t, svint8_t, svint8_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_m)))\n" "svint32_t svand_s32_m(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_m)))\n" "svint64_t svand_s64_m(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_m)))\n" "svint16_t svand_s16_m(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_x)))\n" "svuint8_t svand_u8_x(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_x)))\n" "svuint32_t svand_u32_x(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_x)))\n" "svuint64_t svand_u64_x(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_x)))\n" "svuint16_t svand_u16_x(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_x)))\n" "svint8_t svand_s8_x(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_x)))\n" "svint32_t svand_s32_x(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_x)))\n" "svint64_t svand_s64_x(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_x)))\n" "svint16_t svand_s16_x(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_z)))\n" "svuint8_t svand_u8_z(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_z)))\n" "svuint32_t svand_u32_z(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_z)))\n" "svuint64_t svand_u64_z(svbool_t, svuint64_t, svuint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_z)))\n" "svuint16_t svand_u16_z(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_z)))\n" "svint8_t svand_s8_z(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_z)))\n" "svint32_t svand_s32_z(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_z)))\n" "svint64_t svand_s64_z(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_z)))\n" "svint16_t svand_s16_z(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u8)))\n" "uint8_t svandv_u8(svbool_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u32)))\n" "uint32_t svandv_u32(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u64)))\n" "uint64_t svandv_u64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u16)))\n" "uint16_t svandv_u16(svbool_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s8)))\n" "int8_t svandv_s8(svbool_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s32)))\n" "int32_t svandv_s32(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s64)))\n" "int64_t svandv_s64(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s16)))\n" "int16_t svandv_s16(svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_m)))\n" "svint8_t svasr_n_s8_m(svbool_t, svint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_m)))\n" "svint32_t svasr_n_s32_m(svbool_t, svint32_t, uint32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_m)))\n" "svint64_t svasr_n_s64_m(svbool_t, svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_m)))\n" "svint16_t svasr_n_s16_m(svbool_t, svint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_x)))\n" "svint8_t svasr_n_s8_x(svbool_t, svint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_x)))\n" "svint32_t svasr_n_s32_x(svbool_t, svint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_x)))\n" "svint64_t svasr_n_s64_x(svbool_t, svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_x)))\n" "svint16_t svasr_n_s16_x(svbool_t, svint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_z)))\n" "svint8_t svasr_n_s8_z(svbool_t, svint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_z)))\n" "svint32_t svasr_n_s32_z(svbool_t, svint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_z)))\n" "svint64_t svasr_n_s64_z(svbool_t, svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_z)))\n" "svint16_t svasr_n_s16_z(svbool_t, svint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_m)))\n" "svint8_t svasr_s8_m(svbool_t, svint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_m)))\n" "svint32_t svasr_s32_m(svbool_t, svint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_m)))\n" "svint64_t svasr_s64_m(svbool_t, svint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_m)))\n" "svint16_t svasr_s16_m(svbool_t, svint16_t, svuint16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_x)))\n" "svint8_t svasr_s8_x(svbool_t, svint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_x)))\n" "svint32_t svasr_s32_x(svbool_t, svint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_x)))\n" "svint64_t svasr_s64_x(svbool_t, svint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_x)))\n" "svint16_t svasr_s16_x(svbool_t, svint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_z)))\n" "svint8_t svasr_s8_z(svbool_t, svint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_z)))\n" "svint32_t svasr_s32_z(svbool_t, svint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_z)))\n" "svint64_t svasr_s64_z(svbool_t, svint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_z)))\n" "svint16_t svasr_s16_z(svbool_t, svint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_m)))\n" "svint8_t svasr_wide_n_s8_m(svbool_t, svint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_m)))\n" "svint32_t svasr_wide_n_s32_m(svbool_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_m)))\n" "svint16_t svasr_wide_n_s16_m(svbool_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_x)))\n" "svint8_t svasr_wide_n_s8_x(svbool_t, svint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_x)))\n" "svint32_t svasr_wide_n_s32_x(svbool_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_x)))\n" "svint16_t svasr_wide_n_s16_x(svbool_t, svint16_t, uint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_z)))\n" "svint8_t svasr_wide_n_s8_z(svbool_t, svint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_z)))\n" "svint32_t svasr_wide_n_s32_z(svbool_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_z)))\n" "svint16_t svasr_wide_n_s16_z(svbool_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_m)))\n" "svint8_t svasr_wide_s8_m(svbool_t, svint8_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_m)))\n" "svint32_t svasr_wide_s32_m(svbool_t, svint32_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_m)))\n" "svint16_t svasr_wide_s16_m(svbool_t, svint16_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_x)))\n" "svint8_t svasr_wide_s8_x(svbool_t, svint8_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_x)))\n" "svint32_t svasr_wide_s32_x(svbool_t, svint32_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_x)))\n" "svint16_t svasr_wide_s16_x(svbool_t, svint16_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_z)))\n" "svint8_t svasr_wide_s8_z(svbool_t, svint8_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_z)))\n" "svint32_t svasr_wide_s32_z(svbool_t, svint32_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_z)))\n" "svint16_t svasr_wide_s16_z(svbool_t, svint16_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_m)))\n" "svint8_t svasrd_n_s8_m(svbool_t, svint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_m)))\n" 
"svint32_t svasrd_n_s32_m(svbool_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_m)))\n" "svint64_t svasrd_n_s64_m(svbool_t, svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_m)))\n" "svint16_t svasrd_n_s16_m(svbool_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_x)))\n" "svint8_t svasrd_n_s8_x(svbool_t, svint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_x)))\n" "svint32_t svasrd_n_s32_x(svbool_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_x)))\n" "svint64_t svasrd_n_s64_x(svbool_t, svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_x)))\n" "svint16_t svasrd_n_s16_x(svbool_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_z)))\n" "svint8_t svasrd_n_s8_z(svbool_t, svint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_z)))\n" "svint32_t svasrd_n_s32_z(svbool_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_z)))\n" "svint64_t svasrd_n_s64_z(svbool_t, svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_z)))\n" "svint16_t svasrd_n_s16_z(svbool_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_b_z)))\n" "svbool_t svbic_b_z(svbool_t, svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_m)))\n" "svuint8_t svbic_n_u8_m(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_m)))\n" "svuint32_t svbic_n_u32_m(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_m)))\n" "svuint64_t 
svbic_n_u64_m(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_m)))\n" "svuint16_t svbic_n_u16_m(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_m)))\n" "svint8_t svbic_n_s8_m(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_m)))\n" "svint32_t svbic_n_s32_m(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_m)))\n" "svint64_t svbic_n_s64_m(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_m)))\n" "svint16_t svbic_n_s16_m(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_x)))\n" "svuint8_t svbic_n_u8_x(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_x)))\n" "svuint32_t svbic_n_u32_x(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_x)))\n" "svuint64_t svbic_n_u64_x(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_x)))\n" "svuint16_t svbic_n_u16_x(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_x)))\n" "svint8_t svbic_n_s8_x(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_x)))\n" "svint32_t svbic_n_s32_x(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_x)))\n" "svint64_t svbic_n_s64_x(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_x)))\n" "svint16_t svbic_n_s16_x(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_z)))\n" "svuint8_t svbic_n_u8_z(svbool_t, svuint8_t, 
uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_z)))\n" "svuint32_t svbic_n_u32_z(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_z)))\n" "svuint64_t svbic_n_u64_z(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_z)))\n" "svuint16_t svbic_n_u16_z(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_z)))\n" "svint8_t svbic_n_s8_z(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_z)))\n" "svint32_t svbic_n_s32_z(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_z)))\n" "svint64_t svbic_n_s64_z(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_z)))\n" "svint16_t svbic_n_s16_z(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_m)))\n" "svuint8_t svbic_u8_m(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_m)))\n" "svuint32_t svbic_u32_m(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_m)))\n" "svuint64_t svbic_u64_m(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_m)))\n" "svuint16_t svbic_u16_m(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_m)))\n" "svint8_t svbic_s8_m(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_m)))\n" "svint32_t svbic_s32_m(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_m)))\n" "svint64_t svbic_s64_m(svbool_t, svint64_t, svint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_m)))\n" "svint16_t svbic_s16_m(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_x)))\n" "svuint8_t svbic_u8_x(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_x)))\n" "svuint32_t svbic_u32_x(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_x)))\n" "svuint64_t svbic_u64_x(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_x)))\n" "svuint16_t svbic_u16_x(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_x)))\n" "svint8_t svbic_s8_x(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_x)))\n" "svint32_t svbic_s32_x(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_x)))\n" "svint64_t svbic_s64_x(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_x)))\n" "svint16_t svbic_s16_x(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_z)))\n" "svuint8_t svbic_u8_z(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_z)))\n" "svuint32_t svbic_u32_z(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_z)))\n" "svuint64_t svbic_u64_z(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_z)))\n" "svuint16_t svbic_u16_z(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_z)))\n" "svint8_t svbic_s8_z(svbool_t, svint8_t, svint8_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_z)))\n" "svint32_t svbic_s32_z(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_z)))\n" "svint64_t svbic_s64_z(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_z)))\n" "svint16_t svbic_s16_z(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrka_b_m)))\n" "svbool_t svbrka_b_m(svbool_t, svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrka_b_z)))\n" "svbool_t svbrka_b_z(svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkb_b_m)))\n" "svbool_t svbrkb_b_m(svbool_t, svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkb_b_z)))\n" "svbool_t svbrkb_b_z(svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkn_b_z)))\n" "svbool_t svbrkn_b_z(svbool_t, svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkpa_b_z)))\n" "svbool_t svbrkpa_b_z(svbool_t, svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkpb_b_z)))\n" "svbool_t svbrkpb_b_z(svbool_t, svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_m)))\n" "svfloat64_t svcadd_f64_m(svbool_t, svfloat64_t, svfloat64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_m)))\n" "svfloat32_t svcadd_f32_m(svbool_t, svfloat32_t, svfloat32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_m)))\n" "svfloat16_t svcadd_f16_m(svbool_t, svfloat16_t, svfloat16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_x)))\n" "svfloat64_t svcadd_f64_x(svbool_t, svfloat64_t, svfloat64_t, uint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_x)))\n" "svfloat32_t svcadd_f32_x(svbool_t, svfloat32_t, svfloat32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_x)))\n" "svfloat16_t svcadd_f16_x(svbool_t, svfloat16_t, svfloat16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_z)))\n" "svfloat64_t svcadd_f64_z(svbool_t, svfloat64_t, svfloat64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_z)))\n" "svfloat32_t svcadd_f32_z(svbool_t, svfloat32_t, svfloat32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_z)))\n" "svfloat16_t svcadd_f16_z(svbool_t, svfloat16_t, svfloat16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u8)))\n" "uint8_t svclasta_n_u8(svbool_t, uint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u32)))\n" "uint32_t svclasta_n_u32(svbool_t, uint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u64)))\n" "uint64_t svclasta_n_u64(svbool_t, uint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u16)))\n" "uint16_t svclasta_n_u16(svbool_t, uint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s8)))\n" "int8_t svclasta_n_s8(svbool_t, int8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f64)))\n" "float64_t svclasta_n_f64(svbool_t, float64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f32)))\n" "float32_t svclasta_n_f32(svbool_t, float32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f16)))\n" "float16_t svclasta_n_f16(svbool_t, float16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s32)))\n" 
"int32_t svclasta_n_s32(svbool_t, int32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s64)))\n" "int64_t svclasta_n_s64(svbool_t, int64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s16)))\n" "int16_t svclasta_n_s16(svbool_t, int16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u8)))\n" "svuint8_t svclasta_u8(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u32)))\n" "svuint32_t svclasta_u32(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u64)))\n" "svuint64_t svclasta_u64(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u16)))\n" "svuint16_t svclasta_u16(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s8)))\n" "svint8_t svclasta_s8(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f64)))\n" "svfloat64_t svclasta_f64(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f32)))\n" "svfloat32_t svclasta_f32(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f16)))\n" "svfloat16_t svclasta_f16(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s32)))\n" "svint32_t svclasta_s32(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s64)))\n" "svint64_t svclasta_s64(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s16)))\n" "svint16_t svclasta_s16(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u8)))\n" "uint8_t 
svclastb_n_u8(svbool_t, uint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u32)))\n" "uint32_t svclastb_n_u32(svbool_t, uint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u64)))\n" "uint64_t svclastb_n_u64(svbool_t, uint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u16)))\n" "uint16_t svclastb_n_u16(svbool_t, uint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s8)))\n" "int8_t svclastb_n_s8(svbool_t, int8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f64)))\n" "float64_t svclastb_n_f64(svbool_t, float64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f32)))\n" "float32_t svclastb_n_f32(svbool_t, float32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f16)))\n" "float16_t svclastb_n_f16(svbool_t, float16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s32)))\n" "int32_t svclastb_n_s32(svbool_t, int32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s64)))\n" "int64_t svclastb_n_s64(svbool_t, int64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s16)))\n" "int16_t svclastb_n_s16(svbool_t, int16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u8)))\n" "svuint8_t svclastb_u8(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u32)))\n" "svuint32_t svclastb_u32(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u64)))\n" "svuint64_t svclastb_u64(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u16)))\n" "svuint16_t 
svclastb_u16(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s8)))\n" "svint8_t svclastb_s8(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f64)))\n" "svfloat64_t svclastb_f64(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f32)))\n" "svfloat32_t svclastb_f32(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f16)))\n" "svfloat16_t svclastb_f16(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s32)))\n" "svint32_t svclastb_s32(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s64)))\n" "svint64_t svclastb_s64(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s16)))\n" "svint16_t svclastb_s16(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_m)))\n" "svuint8_t svcls_s8_m(svuint8_t, svbool_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_m)))\n" "svuint32_t svcls_s32_m(svuint32_t, svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_m)))\n" "svuint64_t svcls_s64_m(svuint64_t, svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_m)))\n" "svuint16_t svcls_s16_m(svuint16_t, svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_x)))\n" "svuint8_t svcls_s8_x(svbool_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_x)))\n" "svuint32_t svcls_s32_x(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_x)))\n" "svuint64_t svcls_s64_x(svbool_t, svint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_x)))\n" "svuint16_t svcls_s16_x(svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_z)))\n" "svuint8_t svcls_s8_z(svbool_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_z)))\n" "svuint32_t svcls_s32_z(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_z)))\n" "svuint64_t svcls_s64_z(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_z)))\n" "svuint16_t svcls_s16_z(svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_m)))\n" "svuint8_t svclz_u8_m(svuint8_t, svbool_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_m)))\n" "svuint32_t svclz_u32_m(svuint32_t, svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_m)))\n" "svuint64_t svclz_u64_m(svuint64_t, svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_m)))\n" "svuint16_t svclz_u16_m(svuint16_t, svbool_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_m)))\n" "svuint8_t svclz_s8_m(svuint8_t, svbool_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_m)))\n" "svuint32_t svclz_s32_m(svuint32_t, svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_m)))\n" "svuint64_t svclz_s64_m(svuint64_t, svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_m)))\n" "svuint16_t svclz_s16_m(svuint16_t, svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_x)))\n" "svuint8_t svclz_u8_x(svbool_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_x)))\n" "svuint32_t svclz_u32_x(svbool_t, svuint32_t);\n" 
"__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_x)))\n" "svuint64_t svclz_u64_x(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_x)))\n" "svuint16_t svclz_u16_x(svbool_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_x)))\n" "svuint8_t svclz_s8_x(svbool_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_x)))\n" "svuint32_t svclz_s32_x(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_x)))\n" "svuint64_t svclz_s64_x(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_x)))\n" "svuint16_t svclz_s16_x(svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_z)))\n" "svuint8_t svclz_u8_z(svbool_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_z)))\n" "svuint32_t svclz_u32_z(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_z)))\n" "svuint64_t svclz_u64_z(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_z)))\n" "svuint16_t svclz_u16_z(svbool_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_z)))\n" "svuint8_t svclz_s8_z(svbool_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_z)))\n" "svuint32_t svclz_s32_z(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_z)))\n" "svuint64_t svclz_s64_z(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_z)))\n" "svuint16_t svclz_s16_z(svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_m)))\n" "svfloat64_t svcmla_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t, uint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_m)))\n" "svfloat32_t svcmla_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_m)))\n" "svfloat16_t svcmla_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_x)))\n" "svfloat64_t svcmla_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_x)))\n" "svfloat32_t svcmla_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_x)))\n" "svfloat16_t svcmla_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_z)))\n" "svfloat64_t svcmla_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_z)))\n" "svfloat32_t svcmla_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_z)))\n" "svfloat16_t svcmla_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_f32)))\n" "svfloat32_t svcmla_lane_f32(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_f16)))\n" "svfloat16_t svcmla_lane_f16(svfloat16_t, svfloat16_t, svfloat16_t, uint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f64)))\n" "svbool_t svcmpeq_n_f64(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f32)))\n" "svbool_t svcmpeq_n_f32(svbool_t, svfloat32_t, float32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f16)))\n" "svbool_t svcmpeq_n_f16(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u8)))\n" "svbool_t svcmpeq_n_u8(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u32)))\n" "svbool_t svcmpeq_n_u32(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u64)))\n" "svbool_t svcmpeq_n_u64(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u16)))\n" "svbool_t svcmpeq_n_u16(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s8)))\n" "svbool_t svcmpeq_n_s8(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s32)))\n" "svbool_t svcmpeq_n_s32(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s64)))\n" "svbool_t svcmpeq_n_s64(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s16)))\n" "svbool_t svcmpeq_n_s16(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u8)))\n" "svbool_t svcmpeq_u8(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u32)))\n" "svbool_t svcmpeq_u32(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u64)))\n" "svbool_t svcmpeq_u64(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u16)))\n" "svbool_t svcmpeq_u16(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s8)))\n" "svbool_t svcmpeq_s8(svbool_t, svint8_t, svint8_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s32)))\n" "svbool_t svcmpeq_s32(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s64)))\n" "svbool_t svcmpeq_s64(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s16)))\n" "svbool_t svcmpeq_s16(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f64)))\n" "svbool_t svcmpeq_f64(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f32)))\n" "svbool_t svcmpeq_f32(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f16)))\n" "svbool_t svcmpeq_f16(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s8)))\n" "svbool_t svcmpeq_wide_n_s8(svbool_t, svint8_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s32)))\n" "svbool_t svcmpeq_wide_n_s32(svbool_t, svint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s16)))\n" "svbool_t svcmpeq_wide_n_s16(svbool_t, svint16_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s8)))\n" "svbool_t svcmpeq_wide_s8(svbool_t, svint8_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s32)))\n" "svbool_t svcmpeq_wide_s32(svbool_t, svint32_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s16)))\n" "svbool_t svcmpeq_wide_s16(svbool_t, svint16_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f64)))\n" "svbool_t svcmpge_n_f64(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f32)))\n" "svbool_t svcmpge_n_f32(svbool_t, svfloat32_t, float32_t);\n" 
"__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f16)))\n" "svbool_t svcmpge_n_f16(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s8)))\n" "svbool_t svcmpge_n_s8(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s32)))\n" "svbool_t svcmpge_n_s32(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s64)))\n" "svbool_t svcmpge_n_s64(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s16)))\n" "svbool_t svcmpge_n_s16(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u8)))\n" "svbool_t svcmpge_n_u8(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u32)))\n" "svbool_t svcmpge_n_u32(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u64)))\n" "svbool_t svcmpge_n_u64(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u16)))\n" "svbool_t svcmpge_n_u16(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s8)))\n" "svbool_t svcmpge_s8(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s32)))\n" "svbool_t svcmpge_s32(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s64)))\n" "svbool_t svcmpge_s64(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s16)))\n" "svbool_t svcmpge_s16(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f64)))\n" "svbool_t svcmpge_f64(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f32)))\n" "svbool_t svcmpge_f32(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f16)))\n" "svbool_t svcmpge_f16(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u8)))\n" "svbool_t svcmpge_u8(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u32)))\n" "svbool_t svcmpge_u32(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u64)))\n" "svbool_t svcmpge_u64(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u16)))\n" "svbool_t svcmpge_u16(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s8)))\n" "svbool_t svcmpge_wide_n_s8(svbool_t, svint8_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s32)))\n" "svbool_t svcmpge_wide_n_s32(svbool_t, svint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s16)))\n" "svbool_t svcmpge_wide_n_s16(svbool_t, svint16_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u8)))\n" "svbool_t svcmpge_wide_n_u8(svbool_t, svuint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u32)))\n" "svbool_t svcmpge_wide_n_u32(svbool_t, svuint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u16)))\n" "svbool_t svcmpge_wide_n_u16(svbool_t, svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s8)))\n" "svbool_t svcmpge_wide_s8(svbool_t, svint8_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s32)))\n" "svbool_t svcmpge_wide_s32(svbool_t, svint32_t, 
svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s16)))\n" "svbool_t svcmpge_wide_s16(svbool_t, svint16_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u8)))\n" "svbool_t svcmpge_wide_u8(svbool_t, svuint8_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u32)))\n" "svbool_t svcmpge_wide_u32(svbool_t, svuint32_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u16)))\n" "svbool_t svcmpge_wide_u16(svbool_t, svuint16_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f64)))\n" "svbool_t svcmpgt_n_f64(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f32)))\n" "svbool_t svcmpgt_n_f32(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f16)))\n" "svbool_t svcmpgt_n_f16(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s8)))\n" "svbool_t svcmpgt_n_s8(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s32)))\n" "svbool_t svcmpgt_n_s32(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s64)))\n" "svbool_t svcmpgt_n_s64(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s16)))\n" "svbool_t svcmpgt_n_s16(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u8)))\n" "svbool_t svcmpgt_n_u8(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u32)))\n" "svbool_t svcmpgt_n_u32(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u64)))\n" "svbool_t svcmpgt_n_u64(svbool_t, svuint64_t, 
uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u16)))\n" "svbool_t svcmpgt_n_u16(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s8)))\n" "svbool_t svcmpgt_s8(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s32)))\n" "svbool_t svcmpgt_s32(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s64)))\n" "svbool_t svcmpgt_s64(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s16)))\n" "svbool_t svcmpgt_s16(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f64)))\n" "svbool_t svcmpgt_f64(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f32)))\n" "svbool_t svcmpgt_f32(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f16)))\n" "svbool_t svcmpgt_f16(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u8)))\n" "svbool_t svcmpgt_u8(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u32)))\n" "svbool_t svcmpgt_u32(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u64)))\n" "svbool_t svcmpgt_u64(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u16)))\n" "svbool_t svcmpgt_u16(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s8)))\n" "svbool_t svcmpgt_wide_n_s8(svbool_t, svint8_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s32)))\n" "svbool_t svcmpgt_wide_n_s32(svbool_t, svint32_t, int64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s16)))\n" "svbool_t svcmpgt_wide_n_s16(svbool_t, svint16_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u8)))\n" "svbool_t svcmpgt_wide_n_u8(svbool_t, svuint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u32)))\n" "svbool_t svcmpgt_wide_n_u32(svbool_t, svuint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u16)))\n" "svbool_t svcmpgt_wide_n_u16(svbool_t, svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s8)))\n" "svbool_t svcmpgt_wide_s8(svbool_t, svint8_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s32)))\n" "svbool_t svcmpgt_wide_s32(svbool_t, svint32_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s16)))\n" "svbool_t svcmpgt_wide_s16(svbool_t, svint16_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u8)))\n" "svbool_t svcmpgt_wide_u8(svbool_t, svuint8_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u32)))\n" "svbool_t svcmpgt_wide_u32(svbool_t, svuint32_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u16)))\n" "svbool_t svcmpgt_wide_u16(svbool_t, svuint16_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f64)))\n" "svbool_t svcmple_n_f64(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f32)))\n" "svbool_t svcmple_n_f32(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f16)))\n" "svbool_t svcmple_n_f16(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s8)))\n" "svbool_t 
svcmple_n_s8(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s32)))\n" "svbool_t svcmple_n_s32(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s64)))\n" "svbool_t svcmple_n_s64(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s16)))\n" "svbool_t svcmple_n_s16(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u8)))\n" "svbool_t svcmple_n_u8(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u32)))\n" "svbool_t svcmple_n_u32(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u64)))\n" "svbool_t svcmple_n_u64(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u16)))\n" "svbool_t svcmple_n_u16(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s8)))\n" "svbool_t svcmple_s8(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s32)))\n" "svbool_t svcmple_s32(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s64)))\n" "svbool_t svcmple_s64(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s16)))\n" "svbool_t svcmple_s16(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f64)))\n" "svbool_t svcmple_f64(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f32)))\n" "svbool_t svcmple_f32(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f16)))\n" "svbool_t svcmple_f16(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u8)))\n" "svbool_t svcmple_u8(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u32)))\n" "svbool_t svcmple_u32(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u64)))\n" "svbool_t svcmple_u64(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u16)))\n" "svbool_t svcmple_u16(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s8)))\n" "svbool_t svcmple_wide_n_s8(svbool_t, svint8_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s32)))\n" "svbool_t svcmple_wide_n_s32(svbool_t, svint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s16)))\n" "svbool_t svcmple_wide_n_s16(svbool_t, svint16_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u8)))\n" "svbool_t svcmple_wide_n_u8(svbool_t, svuint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u32)))\n" "svbool_t svcmple_wide_n_u32(svbool_t, svuint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u16)))\n" "svbool_t svcmple_wide_n_u16(svbool_t, svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s8)))\n" "svbool_t svcmple_wide_s8(svbool_t, svint8_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s32)))\n" "svbool_t svcmple_wide_s32(svbool_t, svint32_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s16)))\n" "svbool_t svcmple_wide_s16(svbool_t, svint16_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u8)))\n" "svbool_t svcmple_wide_u8(svbool_t, 
svuint8_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u32)))\n" "svbool_t svcmple_wide_u32(svbool_t, svuint32_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u16)))\n" "svbool_t svcmple_wide_u16(svbool_t, svuint16_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u8)))\n" "svbool_t svcmplt_n_u8(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u32)))\n" "svbool_t svcmplt_n_u32(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u64)))\n" "svbool_t svcmplt_n_u64(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u16)))\n" "svbool_t svcmplt_n_u16(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f64)))\n" "svbool_t svcmplt_n_f64(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f32)))\n" "svbool_t svcmplt_n_f32(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f16)))\n" "svbool_t svcmplt_n_f16(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s8)))\n" "svbool_t svcmplt_n_s8(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s32)))\n" "svbool_t svcmplt_n_s32(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s64)))\n" "svbool_t svcmplt_n_s64(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s16)))\n" "svbool_t svcmplt_n_s16(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u8)))\n" "svbool_t svcmplt_u8(svbool_t, svuint8_t, 
svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u32)))\n" "svbool_t svcmplt_u32(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u64)))\n" "svbool_t svcmplt_u64(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u16)))\n" "svbool_t svcmplt_u16(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s8)))\n" "svbool_t svcmplt_s8(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s32)))\n" "svbool_t svcmplt_s32(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s64)))\n" "svbool_t svcmplt_s64(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s16)))\n" "svbool_t svcmplt_s16(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f64)))\n" "svbool_t svcmplt_f64(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f32)))\n" "svbool_t svcmplt_f32(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f16)))\n" "svbool_t svcmplt_f16(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u8)))\n" "svbool_t svcmplt_wide_n_u8(svbool_t, svuint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u32)))\n" "svbool_t svcmplt_wide_n_u32(svbool_t, svuint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u16)))\n" "svbool_t svcmplt_wide_n_u16(svbool_t, svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s8)))\n" "svbool_t svcmplt_wide_n_s8(svbool_t, svint8_t, int64_t);\n" 
"__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s32)))\n" "svbool_t svcmplt_wide_n_s32(svbool_t, svint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s16)))\n" "svbool_t svcmplt_wide_n_s16(svbool_t, svint16_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u8)))\n" "svbool_t svcmplt_wide_u8(svbool_t, svuint8_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u32)))\n" "svbool_t svcmplt_wide_u32(svbool_t, svuint32_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u16)))\n" "svbool_t svcmplt_wide_u16(svbool_t, svuint16_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s8)))\n" "svbool_t svcmplt_wide_s8(svbool_t, svint8_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s32)))\n" "svbool_t svcmplt_wide_s32(svbool_t, svint32_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s16)))\n" "svbool_t svcmplt_wide_s16(svbool_t, svint16_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f64)))\n" "svbool_t svcmpne_n_f64(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f32)))\n" "svbool_t svcmpne_n_f32(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f16)))\n" "svbool_t svcmpne_n_f16(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u8)))\n" "svbool_t svcmpne_n_u8(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u32)))\n" "svbool_t svcmpne_n_u32(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u64)))\n" "svbool_t 
svcmpne_n_u64(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u16)))\n" "svbool_t svcmpne_n_u16(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s8)))\n" "svbool_t svcmpne_n_s8(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s32)))\n" "svbool_t svcmpne_n_s32(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s64)))\n" "svbool_t svcmpne_n_s64(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s16)))\n" "svbool_t svcmpne_n_s16(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u8)))\n" "svbool_t svcmpne_u8(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u32)))\n" "svbool_t svcmpne_u32(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u64)))\n" "svbool_t svcmpne_u64(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u16)))\n" "svbool_t svcmpne_u16(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s8)))\n" "svbool_t svcmpne_s8(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s32)))\n" "svbool_t svcmpne_s32(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s64)))\n" "svbool_t svcmpne_s64(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s16)))\n" "svbool_t svcmpne_s16(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f64)))\n" "svbool_t svcmpne_f64(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f32)))\n" "svbool_t svcmpne_f32(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f16)))\n" "svbool_t svcmpne_f16(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s8)))\n" "svbool_t svcmpne_wide_n_s8(svbool_t, svint8_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s32)))\n" "svbool_t svcmpne_wide_n_s32(svbool_t, svint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s16)))\n" "svbool_t svcmpne_wide_n_s16(svbool_t, svint16_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s8)))\n" "svbool_t svcmpne_wide_s8(svbool_t, svint8_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s32)))\n" "svbool_t svcmpne_wide_s32(svbool_t, svint32_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s16)))\n" "svbool_t svcmpne_wide_s16(svbool_t, svint16_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f64)))\n" "svbool_t svcmpuo_n_f64(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f32)))\n" "svbool_t svcmpuo_n_f32(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f16)))\n" "svbool_t svcmpuo_n_f16(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f64)))\n" "svbool_t svcmpuo_f64(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f32)))\n" "svbool_t svcmpuo_f32(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f16)))\n" "svbool_t svcmpuo_f16(svbool_t, svfloat16_t, 
svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_m)))\n" "svuint8_t svcnot_u8_m(svuint8_t, svbool_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_m)))\n" "svuint32_t svcnot_u32_m(svuint32_t, svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_m)))\n" "svuint64_t svcnot_u64_m(svuint64_t, svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_m)))\n" "svuint16_t svcnot_u16_m(svuint16_t, svbool_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_m)))\n" "svint8_t svcnot_s8_m(svint8_t, svbool_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_m)))\n" "svint32_t svcnot_s32_m(svint32_t, svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_m)))\n" "svint64_t svcnot_s64_m(svint64_t, svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_m)))\n" "svint16_t svcnot_s16_m(svint16_t, svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_x)))\n" "svuint8_t svcnot_u8_x(svbool_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_x)))\n" "svuint32_t svcnot_u32_x(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_x)))\n" "svuint64_t svcnot_u64_x(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_x)))\n" "svuint16_t svcnot_u16_x(svbool_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_x)))\n" "svint8_t svcnot_s8_x(svbool_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_x)))\n" "svint32_t svcnot_s32_x(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_x)))\n" 
"svint64_t svcnot_s64_x(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_x)))\n" "svint16_t svcnot_s16_x(svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_z)))\n" "svuint8_t svcnot_u8_z(svbool_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_z)))\n" "svuint32_t svcnot_u32_z(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_z)))\n" "svuint64_t svcnot_u64_z(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_z)))\n" "svuint16_t svcnot_u16_z(svbool_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_z)))\n" "svint8_t svcnot_s8_z(svbool_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_z)))\n" "svint32_t svcnot_s32_z(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_z)))\n" "svint64_t svcnot_s64_z(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_z)))\n" "svint16_t svcnot_s16_z(svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_m)))\n" "svuint8_t svcnt_u8_m(svuint8_t, svbool_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_m)))\n" "svuint32_t svcnt_u32_m(svuint32_t, svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_m)))\n" "svuint64_t svcnt_u64_m(svuint64_t, svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_m)))\n" "svuint16_t svcnt_u16_m(svuint16_t, svbool_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_m)))\n" "svuint8_t svcnt_s8_m(svuint8_t, svbool_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_m)))\n" "svuint64_t 
svcnt_f64_m(svuint64_t, svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_m)))\n" "svuint32_t svcnt_f32_m(svuint32_t, svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_m)))\n" "svuint16_t svcnt_f16_m(svuint16_t, svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_m)))\n" "svuint32_t svcnt_s32_m(svuint32_t, svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_m)))\n" "svuint64_t svcnt_s64_m(svuint64_t, svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_m)))\n" "svuint16_t svcnt_s16_m(svuint16_t, svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_x)))\n" "svuint8_t svcnt_u8_x(svbool_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_x)))\n" "svuint32_t svcnt_u32_x(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_x)))\n" "svuint64_t svcnt_u64_x(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_x)))\n" "svuint16_t svcnt_u16_x(svbool_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_x)))\n" "svuint8_t svcnt_s8_x(svbool_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_x)))\n" "svuint64_t svcnt_f64_x(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_x)))\n" "svuint32_t svcnt_f32_x(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_x)))\n" "svuint16_t svcnt_f16_x(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_x)))\n" "svuint32_t svcnt_s32_x(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_x)))\n" "svuint64_t 
svcnt_s64_x(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_x)))\n" "svuint16_t svcnt_s16_x(svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_z)))\n" "svuint8_t svcnt_u8_z(svbool_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_z)))\n" "svuint32_t svcnt_u32_z(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_z)))\n" "svuint64_t svcnt_u64_z(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_z)))\n" "svuint16_t svcnt_u16_z(svbool_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_z)))\n" "svuint8_t svcnt_s8_z(svbool_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_z)))\n" "svuint64_t svcnt_f64_z(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_z)))\n" "svuint32_t svcnt_f32_z(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_z)))\n" "svuint16_t svcnt_f16_z(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_z)))\n" "svuint32_t svcnt_s32_z(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_z)))\n" "svuint64_t svcnt_s64_z(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_z)))\n" "svuint16_t svcnt_s16_z(svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntb)))\n" "uint64_t svcntb(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntb_pat)))\n" "uint64_t svcntb_pat(enum svpattern);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntd)))\n" "uint64_t svcntd(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntd_pat)))\n" "uint64_t svcntd_pat(enum 
svpattern);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnth)))\n" "uint64_t svcnth(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnth_pat)))\n" "uint64_t svcnth_pat(enum svpattern);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_b8)))\n" "uint64_t svcntp_b8(svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_b32)))\n" "uint64_t svcntp_b32(svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_b64)))\n" "uint64_t svcntp_b64(svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_b16)))\n" "uint64_t svcntp_b16(svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntw)))\n" "uint64_t svcntw(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntw_pat)))\n" "uint64_t svcntw_pat(enum svpattern);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_u32)))\n" "svuint32_t svcompact_u32(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_u64)))\n" "svuint64_t svcompact_u64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_f64)))\n" "svfloat64_t svcompact_f64(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_f32)))\n" "svfloat32_t svcompact_f32(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_s32)))\n" "svint32_t svcompact_s32(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_s64)))\n" "svint64_t svcompact_s64(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u8)))\n" "svuint8x2_t svcreate2_u8(svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u32)))\n" "svuint32x2_t svcreate2_u32(svuint32_t, 
svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u64)))\n" "svuint64x2_t svcreate2_u64(svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u16)))\n" "svuint16x2_t svcreate2_u16(svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s8)))\n" "svint8x2_t svcreate2_s8(svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f64)))\n" "svfloat64x2_t svcreate2_f64(svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f32)))\n" "svfloat32x2_t svcreate2_f32(svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f16)))\n" "svfloat16x2_t svcreate2_f16(svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s32)))\n" "svint32x2_t svcreate2_s32(svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s64)))\n" "svint64x2_t svcreate2_s64(svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s16)))\n" "svint16x2_t svcreate2_s16(svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u8)))\n" "svuint8x3_t svcreate3_u8(svuint8_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u32)))\n" "svuint32x3_t svcreate3_u32(svuint32_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u64)))\n" "svuint64x3_t svcreate3_u64(svuint64_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u16)))\n" "svuint16x3_t svcreate3_u16(svuint16_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s8)))\n" "svint8x3_t svcreate3_s8(svint8_t, svint8_t, svint8_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f64)))\n" "svfloat64x3_t svcreate3_f64(svfloat64_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f32)))\n" "svfloat32x3_t svcreate3_f32(svfloat32_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f16)))\n" "svfloat16x3_t svcreate3_f16(svfloat16_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s32)))\n" "svint32x3_t svcreate3_s32(svint32_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s64)))\n" "svint64x3_t svcreate3_s64(svint64_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s16)))\n" "svint16x3_t svcreate3_s16(svint16_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u8)))\n" "svuint8x4_t svcreate4_u8(svuint8_t, svuint8_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u32)))\n" "svuint32x4_t svcreate4_u32(svuint32_t, svuint32_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u64)))\n" "svuint64x4_t svcreate4_u64(svuint64_t, svuint64_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u16)))\n" "svuint16x4_t svcreate4_u16(svuint16_t, svuint16_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s8)))\n" "svint8x4_t svcreate4_s8(svint8_t, svint8_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f64)))\n" "svfloat64x4_t svcreate4_f64(svfloat64_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f32)))\n" "svfloat32x4_t svcreate4_f32(svfloat32_t, svfloat32_t, svfloat32_t, 
svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f16)))\n" "svfloat16x4_t svcreate4_f16(svfloat16_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s32)))\n" "svint32x4_t svcreate4_s32(svint32_t, svint32_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s64)))\n" "svint64x4_t svcreate4_s64(svint64_t, svint64_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s16)))\n" "svint16x4_t svcreate4_s16(svint16_t, svint16_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_m)))\n" "svfloat16_t svcvt_f16_f32_m(svfloat16_t, svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_x)))\n" "svfloat16_t svcvt_f16_f32_x(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_z)))\n" "svfloat16_t svcvt_f16_f32_z(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_m)))\n" "svfloat16_t svcvt_f16_f64_m(svfloat16_t, svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_x)))\n" "svfloat16_t svcvt_f16_f64_x(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_z)))\n" "svfloat16_t svcvt_f16_f64_z(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_m)))\n" "svfloat16_t svcvt_f16_s16_m(svfloat16_t, svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_x)))\n" "svfloat16_t svcvt_f16_s16_x(svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_z)))\n" "svfloat16_t svcvt_f16_s16_z(svbool_t, svint16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_m)))\n" "svfloat16_t svcvt_f16_s32_m(svfloat16_t, svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_x)))\n" "svfloat16_t svcvt_f16_s32_x(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_z)))\n" "svfloat16_t svcvt_f16_s32_z(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_m)))\n" "svfloat16_t svcvt_f16_s64_m(svfloat16_t, svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_x)))\n" "svfloat16_t svcvt_f16_s64_x(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_z)))\n" "svfloat16_t svcvt_f16_s64_z(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_m)))\n" "svfloat16_t svcvt_f16_u16_m(svfloat16_t, svbool_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_x)))\n" "svfloat16_t svcvt_f16_u16_x(svbool_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_z)))\n" "svfloat16_t svcvt_f16_u16_z(svbool_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_m)))\n" "svfloat16_t svcvt_f16_u32_m(svfloat16_t, svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_x)))\n" "svfloat16_t svcvt_f16_u32_x(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_z)))\n" "svfloat16_t svcvt_f16_u32_z(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_m)))\n" "svfloat16_t svcvt_f16_u64_m(svfloat16_t, svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_x)))\n" "svfloat16_t svcvt_f16_u64_x(svbool_t, svuint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_z)))\n" "svfloat16_t svcvt_f16_u64_z(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_m)))\n" "svfloat32_t svcvt_f32_f16_m(svfloat32_t, svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_x)))\n" "svfloat32_t svcvt_f32_f16_x(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_z)))\n" "svfloat32_t svcvt_f32_f16_z(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_m)))\n" "svfloat32_t svcvt_f32_f64_m(svfloat32_t, svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_x)))\n" "svfloat32_t svcvt_f32_f64_x(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_z)))\n" "svfloat32_t svcvt_f32_f64_z(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_m)))\n" "svfloat32_t svcvt_f32_s32_m(svfloat32_t, svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_x)))\n" "svfloat32_t svcvt_f32_s32_x(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_z)))\n" "svfloat32_t svcvt_f32_s32_z(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_m)))\n" "svfloat32_t svcvt_f32_s64_m(svfloat32_t, svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_x)))\n" "svfloat32_t svcvt_f32_s64_x(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_z)))\n" "svfloat32_t svcvt_f32_s64_z(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_m)))\n" "svfloat32_t svcvt_f32_u32_m(svfloat32_t, svbool_t, svuint32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_x)))\n" "svfloat32_t svcvt_f32_u32_x(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_z)))\n" "svfloat32_t svcvt_f32_u32_z(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_m)))\n" "svfloat32_t svcvt_f32_u64_m(svfloat32_t, svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_x)))\n" "svfloat32_t svcvt_f32_u64_x(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_z)))\n" "svfloat32_t svcvt_f32_u64_z(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_m)))\n" "svfloat64_t svcvt_f64_f16_m(svfloat64_t, svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_x)))\n" "svfloat64_t svcvt_f64_f16_x(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_z)))\n" "svfloat64_t svcvt_f64_f16_z(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_m)))\n" "svfloat64_t svcvt_f64_f32_m(svfloat64_t, svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_x)))\n" "svfloat64_t svcvt_f64_f32_x(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_z)))\n" "svfloat64_t svcvt_f64_f32_z(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_m)))\n" "svfloat64_t svcvt_f64_s32_m(svfloat64_t, svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_x)))\n" "svfloat64_t svcvt_f64_s32_x(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_z)))\n" "svfloat64_t svcvt_f64_s32_z(svbool_t, svint32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_m)))\n" "svfloat64_t svcvt_f64_s64_m(svfloat64_t, svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_x)))\n" "svfloat64_t svcvt_f64_s64_x(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_z)))\n" "svfloat64_t svcvt_f64_s64_z(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_m)))\n" "svfloat64_t svcvt_f64_u32_m(svfloat64_t, svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_x)))\n" "svfloat64_t svcvt_f64_u32_x(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_z)))\n" "svfloat64_t svcvt_f64_u32_z(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_m)))\n" "svfloat64_t svcvt_f64_u64_m(svfloat64_t, svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_x)))\n" "svfloat64_t svcvt_f64_u64_x(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_z)))\n" "svfloat64_t svcvt_f64_u64_z(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_m)))\n" "svint16_t svcvt_s16_f16_m(svint16_t, svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_x)))\n" "svint16_t svcvt_s16_f16_x(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_z)))\n" "svint16_t svcvt_s16_f16_z(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_m)))\n" "svint32_t svcvt_s32_f16_m(svint32_t, svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_x)))\n" "svint32_t svcvt_s32_f16_x(svbool_t, svfloat16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_z)))\n" "svint32_t svcvt_s32_f16_z(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_m)))\n" "svint32_t svcvt_s32_f32_m(svint32_t, svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_x)))\n" "svint32_t svcvt_s32_f32_x(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_z)))\n" "svint32_t svcvt_s32_f32_z(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_m)))\n" "svint32_t svcvt_s32_f64_m(svint32_t, svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_x)))\n" "svint32_t svcvt_s32_f64_x(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_z)))\n" "svint32_t svcvt_s32_f64_z(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_m)))\n" "svint64_t svcvt_s64_f16_m(svint64_t, svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_x)))\n" "svint64_t svcvt_s64_f16_x(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_z)))\n" "svint64_t svcvt_s64_f16_z(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_m)))\n" "svint64_t svcvt_s64_f32_m(svint64_t, svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_x)))\n" "svint64_t svcvt_s64_f32_x(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_z)))\n" "svint64_t svcvt_s64_f32_z(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_m)))\n" "svint64_t svcvt_s64_f64_m(svint64_t, svbool_t, svfloat64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_x)))\n" "svint64_t svcvt_s64_f64_x(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_z)))\n" "svint64_t svcvt_s64_f64_z(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_m)))\n" "svuint16_t svcvt_u16_f16_m(svuint16_t, svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_x)))\n" "svuint16_t svcvt_u16_f16_x(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_z)))\n" "svuint16_t svcvt_u16_f16_z(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_m)))\n" "svuint32_t svcvt_u32_f16_m(svuint32_t, svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_x)))\n" "svuint32_t svcvt_u32_f16_x(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_z)))\n" "svuint32_t svcvt_u32_f16_z(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_m)))\n" "svuint32_t svcvt_u32_f32_m(svuint32_t, svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_x)))\n" "svuint32_t svcvt_u32_f32_x(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_z)))\n" "svuint32_t svcvt_u32_f32_z(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_m)))\n" "svuint32_t svcvt_u32_f64_m(svuint32_t, svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_x)))\n" "svuint32_t svcvt_u32_f64_x(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_z)))\n" "svuint32_t svcvt_u32_f64_z(svbool_t, svfloat64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_m)))\n" "svuint64_t svcvt_u64_f16_m(svuint64_t, svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_x)))\n" "svuint64_t svcvt_u64_f16_x(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_z)))\n" "svuint64_t svcvt_u64_f16_z(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_m)))\n" "svuint64_t svcvt_u64_f32_m(svuint64_t, svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_x)))\n" "svuint64_t svcvt_u64_f32_x(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_z)))\n" "svuint64_t svcvt_u64_f32_z(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_m)))\n" "svuint64_t svcvt_u64_f64_m(svuint64_t, svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_x)))\n" "svuint64_t svcvt_u64_f64_x(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_z)))\n" "svuint64_t svcvt_u64_f64_z(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_m)))\n" "svfloat64_t svdiv_n_f64_m(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_m)))\n" "svfloat32_t svdiv_n_f32_m(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_m)))\n" "svfloat16_t svdiv_n_f16_m(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_x)))\n" "svfloat64_t svdiv_n_f64_x(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_x)))\n" "svfloat32_t svdiv_n_f32_x(svbool_t, svfloat32_t, 
float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_x)))\n" "svfloat16_t svdiv_n_f16_x(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_z)))\n" "svfloat64_t svdiv_n_f64_z(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_z)))\n" "svfloat32_t svdiv_n_f32_z(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_z)))\n" "svfloat16_t svdiv_n_f16_z(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_m)))\n" "svint32_t svdiv_n_s32_m(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_m)))\n" "svint64_t svdiv_n_s64_m(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_x)))\n" "svint32_t svdiv_n_s32_x(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_x)))\n" "svint64_t svdiv_n_s64_x(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_z)))\n" "svint32_t svdiv_n_s32_z(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_z)))\n" "svint64_t svdiv_n_s64_z(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_m)))\n" "svuint32_t svdiv_n_u32_m(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_m)))\n" "svuint64_t svdiv_n_u64_m(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_x)))\n" "svuint32_t svdiv_n_u32_x(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_x)))\n" "svuint64_t svdiv_n_u64_x(svbool_t, svuint64_t, 
uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_z)))\n" "svuint32_t svdiv_n_u32_z(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_z)))\n" "svuint64_t svdiv_n_u64_z(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_m)))\n" "svfloat64_t svdiv_f64_m(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_m)))\n" "svfloat32_t svdiv_f32_m(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_m)))\n" "svfloat16_t svdiv_f16_m(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_x)))\n" "svfloat64_t svdiv_f64_x(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_x)))\n" "svfloat32_t svdiv_f32_x(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_x)))\n" "svfloat16_t svdiv_f16_x(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_z)))\n" "svfloat64_t svdiv_f64_z(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_z)))\n" "svfloat32_t svdiv_f32_z(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_z)))\n" "svfloat16_t svdiv_f16_z(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_m)))\n" "svint32_t svdiv_s32_m(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_m)))\n" "svint64_t svdiv_s64_m(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_x)))\n" "svint32_t svdiv_s32_x(svbool_t, svint32_t, 
svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_x)))\n" "svint64_t svdiv_s64_x(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_z)))\n" "svint32_t svdiv_s32_z(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_z)))\n" "svint64_t svdiv_s64_z(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_m)))\n" "svuint32_t svdiv_u32_m(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_m)))\n" "svuint64_t svdiv_u64_m(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_x)))\n" "svuint32_t svdiv_u32_x(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_x)))\n" "svuint64_t svdiv_u64_x(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_z)))\n" "svuint32_t svdiv_u32_z(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_z)))\n" "svuint64_t svdiv_u64_z(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_m)))\n" "svfloat64_t svdivr_n_f64_m(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_m)))\n" "svfloat32_t svdivr_n_f32_m(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_m)))\n" "svfloat16_t svdivr_n_f16_m(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_x)))\n" "svfloat64_t svdivr_n_f64_x(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_x)))\n" "svfloat32_t svdivr_n_f32_x(svbool_t, svfloat32_t, 
float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_x)))\n" "svfloat16_t svdivr_n_f16_x(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_z)))\n" "svfloat64_t svdivr_n_f64_z(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_z)))\n" "svfloat32_t svdivr_n_f32_z(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_z)))\n" "svfloat16_t svdivr_n_f16_z(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_m)))\n" "svint32_t svdivr_n_s32_m(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_m)))\n" "svint64_t svdivr_n_s64_m(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_x)))\n" "svint32_t svdivr_n_s32_x(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_x)))\n" "svint64_t svdivr_n_s64_x(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_z)))\n" "svint32_t svdivr_n_s32_z(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_z)))\n" "svint64_t svdivr_n_s64_z(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_m)))\n" "svuint32_t svdivr_n_u32_m(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_m)))\n" "svuint64_t svdivr_n_u64_m(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_x)))\n" "svuint32_t svdivr_n_u32_x(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_x)))\n" "svuint64_t 
svdivr_n_u64_x(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_z)))\n" "svuint32_t svdivr_n_u32_z(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_z)))\n" "svuint64_t svdivr_n_u64_z(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_m)))\n" "svfloat64_t svdivr_f64_m(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_m)))\n" "svfloat32_t svdivr_f32_m(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_m)))\n" "svfloat16_t svdivr_f16_m(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_x)))\n" "svfloat64_t svdivr_f64_x(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_x)))\n" "svfloat32_t svdivr_f32_x(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_x)))\n" "svfloat16_t svdivr_f16_x(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_z)))\n" "svfloat64_t svdivr_f64_z(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_z)))\n" "svfloat32_t svdivr_f32_z(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_z)))\n" "svfloat16_t svdivr_f16_z(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_m)))\n" "svint32_t svdivr_s32_m(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_m)))\n" "svint64_t svdivr_s64_m(svbool_t, svint64_t, svint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_x)))\n" "svint32_t svdivr_s32_x(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_x)))\n" "svint64_t svdivr_s64_x(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_z)))\n" "svint32_t svdivr_s32_z(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_z)))\n" "svint64_t svdivr_s64_z(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_m)))\n" "svuint32_t svdivr_u32_m(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_m)))\n" "svuint64_t svdivr_u64_m(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_x)))\n" "svuint32_t svdivr_u32_x(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_x)))\n" "svuint64_t svdivr_u64_x(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_z)))\n" "svuint32_t svdivr_u32_z(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_z)))\n" "svuint64_t svdivr_u64_z(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_s32)))\n" "svint32_t svdot_n_s32(svint32_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_s64)))\n" "svint64_t svdot_n_s64(svint64_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_u32)))\n" "svuint32_t svdot_n_u32(svuint32_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_u64)))\n" "svuint64_t svdot_n_u64(svuint64_t, svuint16_t, uint16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_s32)))\n" "svint32_t svdot_s32(svint32_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_s64)))\n" "svint64_t svdot_s64(svint64_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_u32)))\n" "svuint32_t svdot_u32(svuint32_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_u64)))\n" "svuint64_t svdot_u64(svuint64_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_s32)))\n" "svint32_t svdot_lane_s32(svint32_t, svint8_t, svint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_s64)))\n" "svint64_t svdot_lane_s64(svint64_t, svint16_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_u32)))\n" "svuint32_t svdot_lane_u32(svuint32_t, svuint8_t, svuint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_u64)))\n" "svuint64_t svdot_lane_u64(svuint64_t, svuint16_t, svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8)))\n" "svuint8_t svdup_n_u8(uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32)))\n" "svuint32_t svdup_n_u32(uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64)))\n" "svuint64_t svdup_n_u64(uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16)))\n" "svuint16_t svdup_n_u16(uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8)))\n" "svint8_t svdup_n_s8(int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64)))\n" "svfloat64_t svdup_n_f64(float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32)))\n" "svfloat32_t svdup_n_f32(float32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16)))\n" "svfloat16_t svdup_n_f16(float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32)))\n" "svint32_t svdup_n_s32(int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64)))\n" "svint64_t svdup_n_s64(int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16)))\n" "svint16_t svdup_n_s16(int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_m)))\n" "svuint8_t svdup_n_u8_m(svuint8_t, svbool_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_m)))\n" "svuint32_t svdup_n_u32_m(svuint32_t, svbool_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_m)))\n" "svuint64_t svdup_n_u64_m(svuint64_t, svbool_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_m)))\n" "svuint16_t svdup_n_u16_m(svuint16_t, svbool_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_m)))\n" "svint8_t svdup_n_s8_m(svint8_t, svbool_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_m)))\n" "svfloat64_t svdup_n_f64_m(svfloat64_t, svbool_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_m)))\n" "svfloat32_t svdup_n_f32_m(svfloat32_t, svbool_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_m)))\n" "svfloat16_t svdup_n_f16_m(svfloat16_t, svbool_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_m)))\n" "svint32_t svdup_n_s32_m(svint32_t, svbool_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_m)))\n" "svint64_t svdup_n_s64_m(svint64_t, svbool_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_m)))\n" "svint16_t svdup_n_s16_m(svint16_t, 
svbool_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b8)))\n" "svbool_t svdup_n_b8(bool);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b32)))\n" "svbool_t svdup_n_b32(bool);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b64)))\n" "svbool_t svdup_n_b64(bool);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b16)))\n" "svbool_t svdup_n_b16(bool);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_x)))\n" "svuint8_t svdup_n_u8_x(svbool_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_x)))\n" "svuint32_t svdup_n_u32_x(svbool_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_x)))\n" "svuint64_t svdup_n_u64_x(svbool_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_x)))\n" "svuint16_t svdup_n_u16_x(svbool_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_x)))\n" "svint8_t svdup_n_s8_x(svbool_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_x)))\n" "svfloat64_t svdup_n_f64_x(svbool_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_x)))\n" "svfloat32_t svdup_n_f32_x(svbool_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_x)))\n" "svfloat16_t svdup_n_f16_x(svbool_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_x)))\n" "svint32_t svdup_n_s32_x(svbool_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_x)))\n" "svint64_t svdup_n_s64_x(svbool_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_x)))\n" "svint16_t svdup_n_s16_x(svbool_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_z)))\n" "svuint8_t 
svdup_n_u8_z(svbool_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_z)))\n" "svuint32_t svdup_n_u32_z(svbool_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_z)))\n" "svuint64_t svdup_n_u64_z(svbool_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_z)))\n" "svuint16_t svdup_n_u16_z(svbool_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_z)))\n" "svint8_t svdup_n_s8_z(svbool_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_z)))\n" "svfloat64_t svdup_n_f64_z(svbool_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_z)))\n" "svfloat32_t svdup_n_f32_z(svbool_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_z)))\n" "svfloat16_t svdup_n_f16_z(svbool_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_z)))\n" "svint32_t svdup_n_s32_z(svbool_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_z)))\n" "svint64_t svdup_n_s64_z(svbool_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_z)))\n" "svint16_t svdup_n_s16_z(svbool_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u8)))\n" "svuint8_t svdup_lane_u8(svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u32)))\n" "svuint32_t svdup_lane_u32(svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u64)))\n" "svuint64_t svdup_lane_u64(svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u16)))\n" "svuint16_t svdup_lane_u16(svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s8)))\n" "svint8_t svdup_lane_s8(svint8_t, 
uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f64)))\n" "svfloat64_t svdup_lane_f64(svfloat64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f32)))\n" "svfloat32_t svdup_lane_f32(svfloat32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f16)))\n" "svfloat16_t svdup_lane_f16(svfloat16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s32)))\n" "svint32_t svdup_lane_s32(svint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s64)))\n" "svint64_t svdup_lane_s64(svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s16)))\n" "svint16_t svdup_lane_s16(svint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u8)))\n" "svuint8_t svdupq_n_u8(uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s8)))\n" "svint8_t svdupq_n_s8(int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u16)))\n" "svuint16_t svdupq_n_u16(uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f16)))\n" "svfloat16_t svdupq_n_f16(float16_t, float16_t, float16_t, float16_t, float16_t, float16_t, float16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s16)))\n" "svint16_t svdupq_n_s16(int16_t, int16_t, int16_t, int16_t, int16_t, int16_t, int16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u32)))\n" "svuint32_t svdupq_n_u32(uint32_t, uint32_t, 
uint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f32)))\n" "svfloat32_t svdupq_n_f32(float32_t, float32_t, float32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s32)))\n" "svint32_t svdupq_n_s32(int32_t, int32_t, int32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u64)))\n" "svuint64_t svdupq_n_u64(uint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f64)))\n" "svfloat64_t svdupq_n_f64(float64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s64)))\n" "svint64_t svdupq_n_s64(int64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b8)))\n" "svbool_t svdupq_n_b8(bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b16)))\n" "svbool_t svdupq_n_b16(bool, bool, bool, bool, bool, bool, bool, bool);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b32)))\n" "svbool_t svdupq_n_b32(bool, bool, bool, bool);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b64)))\n" "svbool_t svdupq_n_b64(bool, bool);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u8)))\n" "svuint8_t svdupq_lane_u8(svuint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u32)))\n" "svuint32_t svdupq_lane_u32(svuint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u64)))\n" "svuint64_t svdupq_lane_u64(svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u16)))\n" "svuint16_t svdupq_lane_u16(svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s8)))\n" "svint8_t svdupq_lane_s8(svint8_t, uint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f64)))\n" "svfloat64_t svdupq_lane_f64(svfloat64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f32)))\n" "svfloat32_t svdupq_lane_f32(svfloat32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f16)))\n" "svfloat16_t svdupq_lane_f16(svfloat16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s32)))\n" "svint32_t svdupq_lane_s32(svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s64)))\n" "svint64_t svdupq_lane_s64(svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s16)))\n" "svint16_t svdupq_lane_s16(svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_b_z)))\n" "svbool_t sveor_b_z(svbool_t, svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_m)))\n" "svuint8_t sveor_n_u8_m(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_m)))\n" "svuint32_t sveor_n_u32_m(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_m)))\n" "svuint64_t sveor_n_u64_m(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_m)))\n" "svuint16_t sveor_n_u16_m(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_m)))\n" "svint8_t sveor_n_s8_m(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_m)))\n" "svint32_t sveor_n_s32_m(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_m)))\n" "svint64_t sveor_n_s64_m(svbool_t, svint64_t, int64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_m)))\n" "svint16_t sveor_n_s16_m(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_x)))\n" "svuint8_t sveor_n_u8_x(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_x)))\n" "svuint32_t sveor_n_u32_x(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_x)))\n" "svuint64_t sveor_n_u64_x(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_x)))\n" "svuint16_t sveor_n_u16_x(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_x)))\n" "svint8_t sveor_n_s8_x(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_x)))\n" "svint32_t sveor_n_s32_x(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_x)))\n" "svint64_t sveor_n_s64_x(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_x)))\n" "svint16_t sveor_n_s16_x(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_z)))\n" "svuint8_t sveor_n_u8_z(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_z)))\n" "svuint32_t sveor_n_u32_z(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_z)))\n" "svuint64_t sveor_n_u64_z(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_z)))\n" "svuint16_t sveor_n_u16_z(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_z)))\n" "svint8_t sveor_n_s8_z(svbool_t, svint8_t, int8_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_z)))\n" "svint32_t sveor_n_s32_z(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_z)))\n" "svint64_t sveor_n_s64_z(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_z)))\n" "svint16_t sveor_n_s16_z(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_m)))\n" "svuint8_t sveor_u8_m(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_m)))\n" "svuint32_t sveor_u32_m(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_m)))\n" "svuint64_t sveor_u64_m(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_m)))\n" "svuint16_t sveor_u16_m(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_m)))\n" "svint8_t sveor_s8_m(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_m)))\n" "svint32_t sveor_s32_m(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_m)))\n" "svint64_t sveor_s64_m(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_m)))\n" "svint16_t sveor_s16_m(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_x)))\n" "svuint8_t sveor_u8_x(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_x)))\n" "svuint32_t sveor_u32_x(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_x)))\n" "svuint64_t sveor_u64_x(svbool_t, svuint64_t, svuint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_x)))\n" "svuint16_t sveor_u16_x(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_x)))\n" "svint8_t sveor_s8_x(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_x)))\n" "svint32_t sveor_s32_x(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_x)))\n" "svint64_t sveor_s64_x(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_x)))\n" "svint16_t sveor_s16_x(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_z)))\n" "svuint8_t sveor_u8_z(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_z)))\n" "svuint32_t sveor_u32_z(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_z)))\n" "svuint64_t sveor_u64_z(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_z)))\n" "svuint16_t sveor_u16_z(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_z)))\n" "svint8_t sveor_s8_z(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_z)))\n" "svint32_t sveor_s32_z(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_z)))\n" "svint64_t sveor_s64_z(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_z)))\n" "svint16_t sveor_s16_z(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u8)))\n" "uint8_t sveorv_u8(svbool_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u32)))\n" "uint32_t 
sveorv_u32(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u64)))\n" "uint64_t sveorv_u64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u16)))\n" "uint16_t sveorv_u16(svbool_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s8)))\n" "int8_t sveorv_s8(svbool_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s32)))\n" "int32_t sveorv_s32(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s64)))\n" "int64_t sveorv_s64(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s16)))\n" "int16_t sveorv_s16(svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f64)))\n" "svfloat64_t svexpa_f64(svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f32)))\n" "svfloat32_t svexpa_f32(svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f16)))\n" "svfloat16_t svexpa_f16(svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u8)))\n" "svuint8_t svext_u8(svuint8_t, svuint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u32)))\n" "svuint32_t svext_u32(svuint32_t, svuint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u64)))\n" "svuint64_t svext_u64(svuint64_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u16)))\n" "svuint16_t svext_u16(svuint16_t, svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s8)))\n" "svint8_t svext_s8(svint8_t, svint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f64)))\n" "svfloat64_t svext_f64(svfloat64_t, svfloat64_t, uint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f32)))\n" "svfloat32_t svext_f32(svfloat32_t, svfloat32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f16)))\n" "svfloat16_t svext_f16(svfloat16_t, svfloat16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s32)))\n" "svint32_t svext_s32(svint32_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s64)))\n" "svint64_t svext_s64(svint64_t, svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s16)))\n" "svint16_t svext_s16(svint16_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_m)))\n" "svint32_t svextb_s32_m(svint32_t, svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_m)))\n" "svint64_t svextb_s64_m(svint64_t, svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_m)))\n" "svint16_t svextb_s16_m(svint16_t, svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_x)))\n" "svint32_t svextb_s32_x(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_x)))\n" "svint64_t svextb_s64_x(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_x)))\n" "svint16_t svextb_s16_x(svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_z)))\n" "svint32_t svextb_s32_z(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_z)))\n" "svint64_t svextb_s64_z(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_z)))\n" "svint16_t svextb_s16_z(svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_m)))\n" "svuint32_t svextb_u32_m(svuint32_t, svbool_t, 
svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_m)))\n" "svuint64_t svextb_u64_m(svuint64_t, svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_m)))\n" "svuint16_t svextb_u16_m(svuint16_t, svbool_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_x)))\n" "svuint32_t svextb_u32_x(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_x)))\n" "svuint64_t svextb_u64_x(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_x)))\n" "svuint16_t svextb_u16_x(svbool_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_z)))\n" "svuint32_t svextb_u32_z(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_z)))\n" "svuint64_t svextb_u64_z(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_z)))\n" "svuint16_t svextb_u16_z(svbool_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_m)))\n" "svint32_t svexth_s32_m(svint32_t, svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_m)))\n" "svint64_t svexth_s64_m(svint64_t, svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_x)))\n" "svint32_t svexth_s32_x(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_x)))\n" "svint64_t svexth_s64_x(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_z)))\n" "svint32_t svexth_s32_z(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_z)))\n" "svint64_t svexth_s64_z(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_m)))\n" "svuint32_t 
svexth_u32_m(svuint32_t, svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_m)))\n" "svuint64_t svexth_u64_m(svuint64_t, svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_x)))\n" "svuint32_t svexth_u32_x(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_x)))\n" "svuint64_t svexth_u64_x(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_z)))\n" "svuint32_t svexth_u32_z(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_z)))\n" "svuint64_t svexth_u64_z(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_m)))\n" "svint64_t svextw_s64_m(svint64_t, svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_x)))\n" "svint64_t svextw_s64_x(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_z)))\n" "svint64_t svextw_s64_z(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_m)))\n" "svuint64_t svextw_u64_m(svuint64_t, svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_x)))\n" "svuint64_t svextw_u64_x(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_z)))\n" "svuint64_t svextw_u64_z(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u8)))\n" "svuint8_t svget2_u8(svuint8x2_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u32)))\n" "svuint32_t svget2_u32(svuint32x2_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u64)))\n" "svuint64_t svget2_u64(svuint64x2_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u16)))\n" "svuint16_t 
svget2_u16(svuint16x2_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s8)))\n" "svint8_t svget2_s8(svint8x2_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f64)))\n" "svfloat64_t svget2_f64(svfloat64x2_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f32)))\n" "svfloat32_t svget2_f32(svfloat32x2_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f16)))\n" "svfloat16_t svget2_f16(svfloat16x2_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s32)))\n" "svint32_t svget2_s32(svint32x2_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s64)))\n" "svint64_t svget2_s64(svint64x2_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s16)))\n" "svint16_t svget2_s16(svint16x2_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u8)))\n" "svuint8_t svget3_u8(svuint8x3_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u32)))\n" "svuint32_t svget3_u32(svuint32x3_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u64)))\n" "svuint64_t svget3_u64(svuint64x3_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u16)))\n" "svuint16_t svget3_u16(svuint16x3_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s8)))\n" "svint8_t svget3_s8(svint8x3_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f64)))\n" "svfloat64_t svget3_f64(svfloat64x3_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f32)))\n" "svfloat32_t svget3_f32(svfloat32x3_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f16)))\n" "svfloat16_t svget3_f16(svfloat16x3_t, uint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s32)))\n" "svint32_t svget3_s32(svint32x3_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s64)))\n" "svint64_t svget3_s64(svint64x3_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s16)))\n" "svint16_t svget3_s16(svint16x3_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u8)))\n" "svuint8_t svget4_u8(svuint8x4_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u32)))\n" "svuint32_t svget4_u32(svuint32x4_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u64)))\n" "svuint64_t svget4_u64(svuint64x4_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u16)))\n" "svuint16_t svget4_u16(svuint16x4_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s8)))\n" "svint8_t svget4_s8(svint8x4_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f64)))\n" "svfloat64_t svget4_f64(svfloat64x4_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f32)))\n" "svfloat32_t svget4_f32(svfloat32x4_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f16)))\n" "svfloat16_t svget4_f16(svfloat16x4_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s32)))\n" "svint32_t svget4_s32(svint32x4_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s64)))\n" "svint64_t svget4_s64(svint64x4_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s16)))\n" "svint16_t svget4_s16(svint16x4_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_u8)))\n" "svuint8_t svindex_u8(uint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_u32)))\n" "svuint32_t 
svindex_u32(uint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_u64)))\n" "svuint64_t svindex_u64(uint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_u16)))\n" "svuint16_t svindex_u16(uint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_s8)))\n" "svint8_t svindex_s8(int8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_s32)))\n" "svint32_t svindex_s32(int32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_s64)))\n" "svint64_t svindex_s64(int64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_s16)))\n" "svint16_t svindex_s16(int16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u8)))\n" "svuint8_t svinsr_n_u8(svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u32)))\n" "svuint32_t svinsr_n_u32(svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u64)))\n" "svuint64_t svinsr_n_u64(svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u16)))\n" "svuint16_t svinsr_n_u16(svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s8)))\n" "svint8_t svinsr_n_s8(svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f64)))\n" "svfloat64_t svinsr_n_f64(svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f32)))\n" "svfloat32_t svinsr_n_f32(svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f16)))\n" "svfloat16_t svinsr_n_f16(svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s32)))\n" "svint32_t svinsr_n_s32(svint32_t, int32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s64)))\n" "svint64_t svinsr_n_s64(svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s16)))\n" "svint16_t svinsr_n_s16(svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u8)))\n" "uint8_t svlasta_u8(svbool_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u32)))\n" "uint32_t svlasta_u32(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u64)))\n" "uint64_t svlasta_u64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u16)))\n" "uint16_t svlasta_u16(svbool_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s8)))\n" "int8_t svlasta_s8(svbool_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f64)))\n" "float64_t svlasta_f64(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f32)))\n" "float32_t svlasta_f32(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f16)))\n" "float16_t svlasta_f16(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s32)))\n" "int32_t svlasta_s32(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s64)))\n" "int64_t svlasta_s64(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s16)))\n" "int16_t svlasta_s16(svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u8)))\n" "uint8_t svlastb_u8(svbool_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u32)))\n" "uint32_t svlastb_u32(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u64)))\n" "uint64_t svlastb_u64(svbool_t, 
svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u16)))\n" "uint16_t svlastb_u16(svbool_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s8)))\n" "int8_t svlastb_s8(svbool_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f64)))\n" "float64_t svlastb_f64(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f32)))\n" "float32_t svlastb_f32(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f16)))\n" "float16_t svlastb_f16(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s32)))\n" "int32_t svlastb_s32(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s64)))\n" "int64_t svlastb_s64(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s16)))\n" "int16_t svlastb_s16(svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u8)))\n" "svuint8_t svld1_u8(svbool_t, uint8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u32)))\n" "svuint32_t svld1_u32(svbool_t, uint32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u64)))\n" "svuint64_t svld1_u64(svbool_t, uint64_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u16)))\n" "svuint16_t svld1_u16(svbool_t, uint16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s8)))\n" "svint8_t svld1_s8(svbool_t, int8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f64)))\n" "svfloat64_t svld1_f64(svbool_t, float64_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f32)))\n" "svfloat32_t svld1_f32(svbool_t, float32_t const *);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f16)))\n" "svfloat16_t svld1_f16(svbool_t, float16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s32)))\n" "svint32_t svld1_s32(svbool_t, int32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s64)))\n" "svint64_t svld1_s64(svbool_t, int64_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s16)))\n" "svint16_t svld1_s16(svbool_t, int16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_u32)))\n" "svuint32_t svld1_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_u64)))\n" "svuint64_t svld1_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_f64)))\n" "svfloat64_t svld1_gather_u64base_index_f64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_f32)))\n" "svfloat32_t svld1_gather_u32base_index_f32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_s32)))\n" "svint32_t svld1_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_s64)))\n" "svint64_t svld1_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_u32)))\n" "svuint32_t svld1_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_u64)))\n" "svuint64_t svld1_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_f64)))\n" "svfloat64_t svld1_gather_u64base_offset_f64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_f32)))\n" "svfloat32_t svld1_gather_u32base_offset_f32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_s32)))\n" "svint32_t svld1_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_s64)))\n" "svint64_t svld1_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_u32)))\n" "svuint32_t svld1_gather_u32base_u32(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_u64)))\n" "svuint64_t svld1_gather_u64base_u64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_f64)))\n" "svfloat64_t svld1_gather_u64base_f64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_f32)))\n" "svfloat32_t svld1_gather_u32base_f32(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_s32)))\n" "svint32_t svld1_gather_u32base_s32(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_s64)))\n" "svint64_t svld1_gather_u64base_s64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_u32)))\n" "svuint32_t svld1_gather_s32index_u32(svbool_t, uint32_t const *, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_f32)))\n" "svfloat32_t svld1_gather_s32index_f32(svbool_t, float32_t const *, svint32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_s32)))\n" "svint32_t svld1_gather_s32index_s32(svbool_t, int32_t const *, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_u32)))\n" "svuint32_t svld1_gather_u32index_u32(svbool_t, uint32_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_f32)))\n" "svfloat32_t svld1_gather_u32index_f32(svbool_t, float32_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_s32)))\n" "svint32_t svld1_gather_u32index_s32(svbool_t, int32_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_u64)))\n" "svuint64_t svld1_gather_s64index_u64(svbool_t, uint64_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_f64)))\n" "svfloat64_t svld1_gather_s64index_f64(svbool_t, float64_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_s64)))\n" "svint64_t svld1_gather_s64index_s64(svbool_t, int64_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_u64)))\n" "svuint64_t svld1_gather_u64index_u64(svbool_t, uint64_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_f64)))\n" "svfloat64_t svld1_gather_u64index_f64(svbool_t, float64_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_s64)))\n" "svint64_t svld1_gather_u64index_s64(svbool_t, int64_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_u32)))\n" "svuint32_t svld1_gather_s32offset_u32(svbool_t, uint32_t const *, svint32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_f32)))\n" "svfloat32_t svld1_gather_s32offset_f32(svbool_t, float32_t const *, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_s32)))\n" "svint32_t svld1_gather_s32offset_s32(svbool_t, int32_t const *, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_u32)))\n" "svuint32_t svld1_gather_u32offset_u32(svbool_t, uint32_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_f32)))\n" "svfloat32_t svld1_gather_u32offset_f32(svbool_t, float32_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_s32)))\n" "svint32_t svld1_gather_u32offset_s32(svbool_t, int32_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_u64)))\n" "svuint64_t svld1_gather_s64offset_u64(svbool_t, uint64_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_f64)))\n" "svfloat64_t svld1_gather_s64offset_f64(svbool_t, float64_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_s64)))\n" "svint64_t svld1_gather_s64offset_s64(svbool_t, int64_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_u64)))\n" "svuint64_t svld1_gather_u64offset_u64(svbool_t, uint64_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_f64)))\n" "svfloat64_t svld1_gather_u64offset_f64(svbool_t, float64_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_s64)))\n" "svint64_t svld1_gather_u64offset_s64(svbool_t, int64_t const *, svuint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u8)))\n" "svuint8_t svld1_vnum_u8(svbool_t, uint8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u32)))\n" "svuint32_t svld1_vnum_u32(svbool_t, uint32_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u64)))\n" "svuint64_t svld1_vnum_u64(svbool_t, uint64_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u16)))\n" "svuint16_t svld1_vnum_u16(svbool_t, uint16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s8)))\n" "svint8_t svld1_vnum_s8(svbool_t, int8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f64)))\n" "svfloat64_t svld1_vnum_f64(svbool_t, float64_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f32)))\n" "svfloat32_t svld1_vnum_f32(svbool_t, float32_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f16)))\n" "svfloat16_t svld1_vnum_f16(svbool_t, float16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s32)))\n" "svint32_t svld1_vnum_s32(svbool_t, int32_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s64)))\n" "svint64_t svld1_vnum_s64(svbool_t, int64_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s16)))\n" "svint16_t svld1_vnum_s16(svbool_t, int16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u8)))\n" "svuint8_t svld1rq_u8(svbool_t, uint8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u32)))\n" "svuint32_t svld1rq_u32(svbool_t, uint32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u64)))\n" "svuint64_t 
svld1rq_u64(svbool_t, uint64_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u16)))\n" "svuint16_t svld1rq_u16(svbool_t, uint16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s8)))\n" "svint8_t svld1rq_s8(svbool_t, int8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f64)))\n" "svfloat64_t svld1rq_f64(svbool_t, float64_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f32)))\n" "svfloat32_t svld1rq_f32(svbool_t, float32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f16)))\n" "svfloat16_t svld1rq_f16(svbool_t, float16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s32)))\n" "svint32_t svld1rq_s32(svbool_t, int32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s64)))\n" "svint64_t svld1rq_s64(svbool_t, int64_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s16)))\n" "svint16_t svld1rq_s16(svbool_t, int16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_offset_u32)))\n" "svuint32_t svld1sb_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_offset_u64)))\n" "svuint64_t svld1sb_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_offset_s32)))\n" "svint32_t svld1sb_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_offset_s64)))\n" "svint64_t svld1sb_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_u32)))\n" "svuint32_t svld1sb_gather_u32base_u32(svbool_t, 
svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_u64)))\n" "svuint64_t svld1sb_gather_u64base_u64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_s32)))\n" "svint32_t svld1sb_gather_u32base_s32(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_s64)))\n" "svint64_t svld1sb_gather_u64base_s64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s32offset_u32)))\n" "svuint32_t svld1sb_gather_s32offset_u32(svbool_t, int8_t const *, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s32offset_s32)))\n" "svint32_t svld1sb_gather_s32offset_s32(svbool_t, int8_t const *, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32offset_u32)))\n" "svuint32_t svld1sb_gather_u32offset_u32(svbool_t, int8_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32offset_s32)))\n" "svint32_t svld1sb_gather_u32offset_s32(svbool_t, int8_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s64offset_u64)))\n" "svuint64_t svld1sb_gather_s64offset_u64(svbool_t, int8_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s64offset_s64)))\n" "svint64_t svld1sb_gather_s64offset_s64(svbool_t, int8_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64offset_u64)))\n" "svuint64_t svld1sb_gather_u64offset_u64(svbool_t, int8_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64offset_s64)))\n" "svint64_t svld1sb_gather_u64offset_s64(svbool_t, int8_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_vnum_u32)))\n" 
"svuint32_t svld1sb_vnum_u32(svbool_t, int8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_vnum_u64)))\n" "svuint64_t svld1sb_vnum_u64(svbool_t, int8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_vnum_u16)))\n" "svuint16_t svld1sb_vnum_u16(svbool_t, int8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_vnum_s32)))\n" "svint32_t svld1sb_vnum_s32(svbool_t, int8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_vnum_s64)))\n" "svint64_t svld1sb_vnum_s64(svbool_t, int8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_vnum_s16)))\n" "svint16_t svld1sb_vnum_s16(svbool_t, int8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_u32)))\n" "svuint32_t svld1sb_u32(svbool_t, int8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_u64)))\n" "svuint64_t svld1sb_u64(svbool_t, int8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_u16)))\n" "svuint16_t svld1sb_u16(svbool_t, int8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_s32)))\n" "svint32_t svld1sb_s32(svbool_t, int8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_s64)))\n" "svint64_t svld1sb_s64(svbool_t, int8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_s16)))\n" "svint16_t svld1sb_s16(svbool_t, int8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_index_u32)))\n" "svuint32_t svld1sh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_index_u64)))\n" "svuint64_t svld1sh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_index_s32)))\n" "svint32_t svld1sh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_index_s64)))\n" "svint64_t svld1sh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_offset_u32)))\n" "svuint32_t svld1sh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_offset_u64)))\n" "svuint64_t svld1sh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_offset_s32)))\n" "svint32_t svld1sh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_offset_s64)))\n" "svint64_t svld1sh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_u32)))\n" "svuint32_t svld1sh_gather_u32base_u32(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_u64)))\n" "svuint64_t svld1sh_gather_u64base_u64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_s32)))\n" "svint32_t svld1sh_gather_u32base_s32(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_s64)))\n" "svint64_t svld1sh_gather_u64base_s64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32index_u32)))\n" "svuint32_t svld1sh_gather_s32index_u32(svbool_t, int16_t const *, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32index_s32)))\n" "svint32_t 
svld1sh_gather_s32index_s32(svbool_t, int16_t const *, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32index_u32)))\n" "svuint32_t svld1sh_gather_u32index_u32(svbool_t, int16_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32index_s32)))\n" "svint32_t svld1sh_gather_u32index_s32(svbool_t, int16_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64index_u64)))\n" "svuint64_t svld1sh_gather_s64index_u64(svbool_t, int16_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64index_s64)))\n" "svint64_t svld1sh_gather_s64index_s64(svbool_t, int16_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64index_u64)))\n" "svuint64_t svld1sh_gather_u64index_u64(svbool_t, int16_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64index_s64)))\n" "svint64_t svld1sh_gather_u64index_s64(svbool_t, int16_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32offset_u32)))\n" "svuint32_t svld1sh_gather_s32offset_u32(svbool_t, int16_t const *, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32offset_s32)))\n" "svint32_t svld1sh_gather_s32offset_s32(svbool_t, int16_t const *, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32offset_u32)))\n" "svuint32_t svld1sh_gather_u32offset_u32(svbool_t, int16_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32offset_s32)))\n" "svint32_t svld1sh_gather_u32offset_s32(svbool_t, int16_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64offset_u64)))\n" "svuint64_t svld1sh_gather_s64offset_u64(svbool_t, 
int16_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64offset_s64)))\n" "svint64_t svld1sh_gather_s64offset_s64(svbool_t, int16_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64offset_u64)))\n" "svuint64_t svld1sh_gather_u64offset_u64(svbool_t, int16_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64offset_s64)))\n" "svint64_t svld1sh_gather_u64offset_s64(svbool_t, int16_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_vnum_u32)))\n" "svuint32_t svld1sh_vnum_u32(svbool_t, int16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_vnum_u64)))\n" "svuint64_t svld1sh_vnum_u64(svbool_t, int16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_vnum_s32)))\n" "svint32_t svld1sh_vnum_s32(svbool_t, int16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_vnum_s64)))\n" "svint64_t svld1sh_vnum_s64(svbool_t, int16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_u32)))\n" "svuint32_t svld1sh_u32(svbool_t, int16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_u64)))\n" "svuint64_t svld1sh_u64(svbool_t, int16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_s32)))\n" "svint32_t svld1sh_s32(svbool_t, int16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_s64)))\n" "svint64_t svld1sh_s64(svbool_t, int16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_index_u64)))\n" "svuint64_t svld1sw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_index_s64)))\n" 
"svint64_t svld1sw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_offset_u64)))\n" "svuint64_t svld1sw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_offset_s64)))\n" "svint64_t svld1sw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_u64)))\n" "svuint64_t svld1sw_gather_u64base_u64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_s64)))\n" "svint64_t svld1sw_gather_u64base_s64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64index_u64)))\n" "svuint64_t svld1sw_gather_s64index_u64(svbool_t, int32_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64index_s64)))\n" "svint64_t svld1sw_gather_s64index_s64(svbool_t, int32_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64index_u64)))\n" "svuint64_t svld1sw_gather_u64index_u64(svbool_t, int32_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64index_s64)))\n" "svint64_t svld1sw_gather_u64index_s64(svbool_t, int32_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64offset_u64)))\n" "svuint64_t svld1sw_gather_s64offset_u64(svbool_t, int32_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64offset_s64)))\n" "svint64_t svld1sw_gather_s64offset_s64(svbool_t, int32_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64offset_u64)))\n" "svuint64_t svld1sw_gather_u64offset_u64(svbool_t, int32_t const *, 
svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64offset_s64)))\n" "svint64_t svld1sw_gather_u64offset_s64(svbool_t, int32_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_vnum_u64)))\n" "svuint64_t svld1sw_vnum_u64(svbool_t, int32_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_vnum_s64)))\n" "svint64_t svld1sw_vnum_s64(svbool_t, int32_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_u64)))\n" "svuint64_t svld1sw_u64(svbool_t, int32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_s64)))\n" "svint64_t svld1sw_s64(svbool_t, int32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_offset_u32)))\n" "svuint32_t svld1ub_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_offset_u64)))\n" "svuint64_t svld1ub_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_offset_s32)))\n" "svint32_t svld1ub_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_offset_s64)))\n" "svint64_t svld1ub_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_u32)))\n" "svuint32_t svld1ub_gather_u32base_u32(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_u64)))\n" "svuint64_t svld1ub_gather_u64base_u64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_s32)))\n" "svint32_t svld1ub_gather_u32base_s32(svbool_t, svuint32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_s64)))\n" "svint64_t svld1ub_gather_u64base_s64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s32offset_u32)))\n" "svuint32_t svld1ub_gather_s32offset_u32(svbool_t, uint8_t const *, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s32offset_s32)))\n" "svint32_t svld1ub_gather_s32offset_s32(svbool_t, uint8_t const *, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32offset_u32)))\n" "svuint32_t svld1ub_gather_u32offset_u32(svbool_t, uint8_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32offset_s32)))\n" "svint32_t svld1ub_gather_u32offset_s32(svbool_t, uint8_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s64offset_u64)))\n" "svuint64_t svld1ub_gather_s64offset_u64(svbool_t, uint8_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s64offset_s64)))\n" "svint64_t svld1ub_gather_s64offset_s64(svbool_t, uint8_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64offset_u64)))\n" "svuint64_t svld1ub_gather_u64offset_u64(svbool_t, uint8_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64offset_s64)))\n" "svint64_t svld1ub_gather_u64offset_s64(svbool_t, uint8_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_vnum_u32)))\n" "svuint32_t svld1ub_vnum_u32(svbool_t, uint8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_vnum_u64)))\n" "svuint64_t svld1ub_vnum_u64(svbool_t, uint8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_vnum_u16)))\n" "svuint16_t 
svld1ub_vnum_u16(svbool_t, uint8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_vnum_s32)))\n" "svint32_t svld1ub_vnum_s32(svbool_t, uint8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_vnum_s64)))\n" "svint64_t svld1ub_vnum_s64(svbool_t, uint8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_vnum_s16)))\n" "svint16_t svld1ub_vnum_s16(svbool_t, uint8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_u32)))\n" "svuint32_t svld1ub_u32(svbool_t, uint8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_u64)))\n" "svuint64_t svld1ub_u64(svbool_t, uint8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_u16)))\n" "svuint16_t svld1ub_u16(svbool_t, uint8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_s32)))\n" "svint32_t svld1ub_s32(svbool_t, uint8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_s64)))\n" "svint64_t svld1ub_s64(svbool_t, uint8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_s16)))\n" "svint16_t svld1ub_s16(svbool_t, uint8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_index_u32)))\n" "svuint32_t svld1uh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_index_u64)))\n" "svuint64_t svld1uh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_index_s32)))\n" "svint32_t svld1uh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_index_s64)))\n" "svint64_t 
svld1uh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_offset_u32)))\n" "svuint32_t svld1uh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_offset_u64)))\n" "svuint64_t svld1uh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_offset_s32)))\n" "svint32_t svld1uh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_offset_s64)))\n" "svint64_t svld1uh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_u32)))\n" "svuint32_t svld1uh_gather_u32base_u32(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_u64)))\n" "svuint64_t svld1uh_gather_u64base_u64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_s32)))\n" "svint32_t svld1uh_gather_u32base_s32(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_s64)))\n" "svint64_t svld1uh_gather_u64base_s64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32index_u32)))\n" "svuint32_t svld1uh_gather_s32index_u32(svbool_t, uint16_t const *, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32index_s32)))\n" "svint32_t svld1uh_gather_s32index_s32(svbool_t, uint16_t const *, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32index_u32)))\n" "svuint32_t svld1uh_gather_u32index_u32(svbool_t, uint16_t const *, svuint32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32index_s32)))\n" "svint32_t svld1uh_gather_u32index_s32(svbool_t, uint16_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64index_u64)))\n" "svuint64_t svld1uh_gather_s64index_u64(svbool_t, uint16_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64index_s64)))\n" "svint64_t svld1uh_gather_s64index_s64(svbool_t, uint16_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64index_u64)))\n" "svuint64_t svld1uh_gather_u64index_u64(svbool_t, uint16_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64index_s64)))\n" "svint64_t svld1uh_gather_u64index_s64(svbool_t, uint16_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32offset_u32)))\n" "svuint32_t svld1uh_gather_s32offset_u32(svbool_t, uint16_t const *, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32offset_s32)))\n" "svint32_t svld1uh_gather_s32offset_s32(svbool_t, uint16_t const *, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32offset_u32)))\n" "svuint32_t svld1uh_gather_u32offset_u32(svbool_t, uint16_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32offset_s32)))\n" "svint32_t svld1uh_gather_u32offset_s32(svbool_t, uint16_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64offset_u64)))\n" "svuint64_t svld1uh_gather_s64offset_u64(svbool_t, uint16_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64offset_s64)))\n" "svint64_t svld1uh_gather_s64offset_s64(svbool_t, uint16_t const *, svint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64offset_u64)))\n" "svuint64_t svld1uh_gather_u64offset_u64(svbool_t, uint16_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64offset_s64)))\n" "svint64_t svld1uh_gather_u64offset_s64(svbool_t, uint16_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_vnum_u32)))\n" "svuint32_t svld1uh_vnum_u32(svbool_t, uint16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_vnum_u64)))\n" "svuint64_t svld1uh_vnum_u64(svbool_t, uint16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_vnum_s32)))\n" "svint32_t svld1uh_vnum_s32(svbool_t, uint16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_vnum_s64)))\n" "svint64_t svld1uh_vnum_s64(svbool_t, uint16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_u32)))\n" "svuint32_t svld1uh_u32(svbool_t, uint16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_u64)))\n" "svuint64_t svld1uh_u64(svbool_t, uint16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_s32)))\n" "svint32_t svld1uh_s32(svbool_t, uint16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_s64)))\n" "svint64_t svld1uh_s64(svbool_t, uint16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_index_u64)))\n" "svuint64_t svld1uw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_index_s64)))\n" "svint64_t svld1uw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_offset_u64)))\n" "svuint64_t 
svld1uw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_offset_s64)))\n" "svint64_t svld1uw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_u64)))\n" "svuint64_t svld1uw_gather_u64base_u64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_s64)))\n" "svint64_t svld1uw_gather_u64base_s64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64index_u64)))\n" "svuint64_t svld1uw_gather_s64index_u64(svbool_t, uint32_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64index_s64)))\n" "svint64_t svld1uw_gather_s64index_s64(svbool_t, uint32_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64index_u64)))\n" "svuint64_t svld1uw_gather_u64index_u64(svbool_t, uint32_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64index_s64)))\n" "svint64_t svld1uw_gather_u64index_s64(svbool_t, uint32_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64offset_u64)))\n" "svuint64_t svld1uw_gather_s64offset_u64(svbool_t, uint32_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64offset_s64)))\n" "svint64_t svld1uw_gather_s64offset_s64(svbool_t, uint32_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64offset_u64)))\n" "svuint64_t svld1uw_gather_u64offset_u64(svbool_t, uint32_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64offset_s64)))\n" "svint64_t svld1uw_gather_u64offset_s64(svbool_t, uint32_t const *, svuint64_t);\n" 
"__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_vnum_u64)))\n" "svuint64_t svld1uw_vnum_u64(svbool_t, uint32_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_vnum_s64)))\n" "svint64_t svld1uw_vnum_s64(svbool_t, uint32_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_u64)))\n" "svuint64_t svld1uw_u64(svbool_t, uint32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_s64)))\n" "svint64_t svld1uw_s64(svbool_t, uint32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u8)))\n" "svuint8x2_t svld2_u8(svbool_t, uint8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u32)))\n" "svuint32x2_t svld2_u32(svbool_t, uint32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u64)))\n" "svuint64x2_t svld2_u64(svbool_t, uint64_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u16)))\n" "svuint16x2_t svld2_u16(svbool_t, uint16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s8)))\n" "svint8x2_t svld2_s8(svbool_t, int8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f64)))\n" "svfloat64x2_t svld2_f64(svbool_t, float64_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f32)))\n" "svfloat32x2_t svld2_f32(svbool_t, float32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f16)))\n" "svfloat16x2_t svld2_f16(svbool_t, float16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s32)))\n" "svint32x2_t svld2_s32(svbool_t, int32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s64)))\n" "svint64x2_t svld2_s64(svbool_t, int64_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s16)))\n" "svint16x2_t 
svld2_s16(svbool_t, int16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u8)))\n" "svuint8x2_t svld2_vnum_u8(svbool_t, uint8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u32)))\n" "svuint32x2_t svld2_vnum_u32(svbool_t, uint32_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u64)))\n" "svuint64x2_t svld2_vnum_u64(svbool_t, uint64_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u16)))\n" "svuint16x2_t svld2_vnum_u16(svbool_t, uint16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s8)))\n" "svint8x2_t svld2_vnum_s8(svbool_t, int8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f64)))\n" "svfloat64x2_t svld2_vnum_f64(svbool_t, float64_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f32)))\n" "svfloat32x2_t svld2_vnum_f32(svbool_t, float32_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f16)))\n" "svfloat16x2_t svld2_vnum_f16(svbool_t, float16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s32)))\n" "svint32x2_t svld2_vnum_s32(svbool_t, int32_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s64)))\n" "svint64x2_t svld2_vnum_s64(svbool_t, int64_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s16)))\n" "svint16x2_t svld2_vnum_s16(svbool_t, int16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u8)))\n" "svuint8x3_t svld3_u8(svbool_t, uint8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u32)))\n" "svuint32x3_t svld3_u32(svbool_t, uint32_t const *);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u64)))\n" "svuint64x3_t svld3_u64(svbool_t, uint64_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u16)))\n" "svuint16x3_t svld3_u16(svbool_t, uint16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s8)))\n" "svint8x3_t svld3_s8(svbool_t, int8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f64)))\n" "svfloat64x3_t svld3_f64(svbool_t, float64_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f32)))\n" "svfloat32x3_t svld3_f32(svbool_t, float32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f16)))\n" "svfloat16x3_t svld3_f16(svbool_t, float16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s32)))\n" "svint32x3_t svld3_s32(svbool_t, int32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s64)))\n" "svint64x3_t svld3_s64(svbool_t, int64_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s16)))\n" "svint16x3_t svld3_s16(svbool_t, int16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u8)))\n" "svuint8x3_t svld3_vnum_u8(svbool_t, uint8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u32)))\n" "svuint32x3_t svld3_vnum_u32(svbool_t, uint32_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u64)))\n" "svuint64x3_t svld3_vnum_u64(svbool_t, uint64_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u16)))\n" "svuint16x3_t svld3_vnum_u16(svbool_t, uint16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s8)))\n" "svint8x3_t svld3_vnum_s8(svbool_t, int8_t const *, int64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f64)))\n" "svfloat64x3_t svld3_vnum_f64(svbool_t, float64_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f32)))\n" "svfloat32x3_t svld3_vnum_f32(svbool_t, float32_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f16)))\n" "svfloat16x3_t svld3_vnum_f16(svbool_t, float16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s32)))\n" "svint32x3_t svld3_vnum_s32(svbool_t, int32_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s64)))\n" "svint64x3_t svld3_vnum_s64(svbool_t, int64_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s16)))\n" "svint16x3_t svld3_vnum_s16(svbool_t, int16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u8)))\n" "svuint8x4_t svld4_u8(svbool_t, uint8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u32)))\n" "svuint32x4_t svld4_u32(svbool_t, uint32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u64)))\n" "svuint64x4_t svld4_u64(svbool_t, uint64_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u16)))\n" "svuint16x4_t svld4_u16(svbool_t, uint16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s8)))\n" "svint8x4_t svld4_s8(svbool_t, int8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f64)))\n" "svfloat64x4_t svld4_f64(svbool_t, float64_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f32)))\n" "svfloat32x4_t svld4_f32(svbool_t, float32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f16)))\n" "svfloat16x4_t svld4_f16(svbool_t, float16_t const *);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s32)))\n" "svint32x4_t svld4_s32(svbool_t, int32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s64)))\n" "svint64x4_t svld4_s64(svbool_t, int64_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s16)))\n" "svint16x4_t svld4_s16(svbool_t, int16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u8)))\n" "svuint8x4_t svld4_vnum_u8(svbool_t, uint8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u32)))\n" "svuint32x4_t svld4_vnum_u32(svbool_t, uint32_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u64)))\n" "svuint64x4_t svld4_vnum_u64(svbool_t, uint64_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u16)))\n" "svuint16x4_t svld4_vnum_u16(svbool_t, uint16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s8)))\n" "svint8x4_t svld4_vnum_s8(svbool_t, int8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f64)))\n" "svfloat64x4_t svld4_vnum_f64(svbool_t, float64_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f32)))\n" "svfloat32x4_t svld4_vnum_f32(svbool_t, float32_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f16)))\n" "svfloat16x4_t svld4_vnum_f16(svbool_t, float16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s32)))\n" "svint32x4_t svld4_vnum_s32(svbool_t, int32_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s64)))\n" "svint64x4_t svld4_vnum_s64(svbool_t, int64_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s16)))\n" "svint16x4_t 
svld4_vnum_s16(svbool_t, int16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u8)))\n" "svuint8_t svldff1_u8(svbool_t, uint8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u32)))\n" "svuint32_t svldff1_u32(svbool_t, uint32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u64)))\n" "svuint64_t svldff1_u64(svbool_t, uint64_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u16)))\n" "svuint16_t svldff1_u16(svbool_t, uint16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s8)))\n" "svint8_t svldff1_s8(svbool_t, int8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f64)))\n" "svfloat64_t svldff1_f64(svbool_t, float64_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f32)))\n" "svfloat32_t svldff1_f32(svbool_t, float32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f16)))\n" "svfloat16_t svldff1_f16(svbool_t, float16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s32)))\n" "svint32_t svldff1_s32(svbool_t, int32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s64)))\n" "svint64_t svldff1_s64(svbool_t, int64_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s16)))\n" "svint16_t svldff1_s16(svbool_t, int16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_u32)))\n" "svuint32_t svldff1_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_u64)))\n" "svuint64_t svldff1_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_f64)))\n" 
"svfloat64_t svldff1_gather_u64base_index_f64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_f32)))\n" "svfloat32_t svldff1_gather_u32base_index_f32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_s32)))\n" "svint32_t svldff1_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_s64)))\n" "svint64_t svldff1_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_u32)))\n" "svuint32_t svldff1_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_u64)))\n" "svuint64_t svldff1_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_f64)))\n" "svfloat64_t svldff1_gather_u64base_offset_f64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_f32)))\n" "svfloat32_t svldff1_gather_u32base_offset_f32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_s32)))\n" "svint32_t svldff1_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_s64)))\n" "svint64_t svldff1_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_u32)))\n" "svuint32_t svldff1_gather_u32base_u32(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_u64)))\n" "svuint64_t 
svldff1_gather_u64base_u64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_f64)))\n" "svfloat64_t svldff1_gather_u64base_f64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_f32)))\n" "svfloat32_t svldff1_gather_u32base_f32(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_s32)))\n" "svint32_t svldff1_gather_u32base_s32(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_s64)))\n" "svint64_t svldff1_gather_u64base_s64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_u32)))\n" "svuint32_t svldff1_gather_s32index_u32(svbool_t, uint32_t const *, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_f32)))\n" "svfloat32_t svldff1_gather_s32index_f32(svbool_t, float32_t const *, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_s32)))\n" "svint32_t svldff1_gather_s32index_s32(svbool_t, int32_t const *, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_u32)))\n" "svuint32_t svldff1_gather_u32index_u32(svbool_t, uint32_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_f32)))\n" "svfloat32_t svldff1_gather_u32index_f32(svbool_t, float32_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_s32)))\n" "svint32_t svldff1_gather_u32index_s32(svbool_t, int32_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_u64)))\n" "svuint64_t svldff1_gather_s64index_u64(svbool_t, uint64_t const *, svint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_f64)))\n" "svfloat64_t svldff1_gather_s64index_f64(svbool_t, float64_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_s64)))\n" "svint64_t svldff1_gather_s64index_s64(svbool_t, int64_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_u64)))\n" "svuint64_t svldff1_gather_u64index_u64(svbool_t, uint64_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_f64)))\n" "svfloat64_t svldff1_gather_u64index_f64(svbool_t, float64_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_s64)))\n" "svint64_t svldff1_gather_u64index_s64(svbool_t, int64_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_u32)))\n" "svuint32_t svldff1_gather_s32offset_u32(svbool_t, uint32_t const *, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_f32)))\n" "svfloat32_t svldff1_gather_s32offset_f32(svbool_t, float32_t const *, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_s32)))\n" "svint32_t svldff1_gather_s32offset_s32(svbool_t, int32_t const *, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_u32)))\n" "svuint32_t svldff1_gather_u32offset_u32(svbool_t, uint32_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_f32)))\n" "svfloat32_t svldff1_gather_u32offset_f32(svbool_t, float32_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_s32)))\n" "svint32_t svldff1_gather_u32offset_s32(svbool_t, int32_t const *, svuint32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_u64)))\n" "svuint64_t svldff1_gather_s64offset_u64(svbool_t, uint64_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_f64)))\n" "svfloat64_t svldff1_gather_s64offset_f64(svbool_t, float64_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_s64)))\n" "svint64_t svldff1_gather_s64offset_s64(svbool_t, int64_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_u64)))\n" "svuint64_t svldff1_gather_u64offset_u64(svbool_t, uint64_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_f64)))\n" "svfloat64_t svldff1_gather_u64offset_f64(svbool_t, float64_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_s64)))\n" "svint64_t svldff1_gather_u64offset_s64(svbool_t, int64_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u8)))\n" "svuint8_t svldff1_vnum_u8(svbool_t, uint8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u32)))\n" "svuint32_t svldff1_vnum_u32(svbool_t, uint32_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u64)))\n" "svuint64_t svldff1_vnum_u64(svbool_t, uint64_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u16)))\n" "svuint16_t svldff1_vnum_u16(svbool_t, uint16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s8)))\n" "svint8_t svldff1_vnum_s8(svbool_t, int8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f64)))\n" "svfloat64_t svldff1_vnum_f64(svbool_t, float64_t const *, int64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f32)))\n" "svfloat32_t svldff1_vnum_f32(svbool_t, float32_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f16)))\n" "svfloat16_t svldff1_vnum_f16(svbool_t, float16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s32)))\n" "svint32_t svldff1_vnum_s32(svbool_t, int32_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s64)))\n" "svint64_t svldff1_vnum_s64(svbool_t, int64_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s16)))\n" "svint16_t svldff1_vnum_s16(svbool_t, int16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_offset_u32)))\n" "svuint32_t svldff1sb_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_offset_u64)))\n" "svuint64_t svldff1sb_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_offset_s32)))\n" "svint32_t svldff1sb_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_offset_s64)))\n" "svint64_t svldff1sb_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_u32)))\n" "svuint32_t svldff1sb_gather_u32base_u32(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_u64)))\n" "svuint64_t svldff1sb_gather_u64base_u64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_s32)))\n" "svint32_t svldff1sb_gather_u32base_s32(svbool_t, svuint32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_s64)))\n" "svint64_t svldff1sb_gather_u64base_s64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s32offset_u32)))\n" "svuint32_t svldff1sb_gather_s32offset_u32(svbool_t, int8_t const *, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s32offset_s32)))\n" "svint32_t svldff1sb_gather_s32offset_s32(svbool_t, int8_t const *, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32offset_u32)))\n" "svuint32_t svldff1sb_gather_u32offset_u32(svbool_t, int8_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32offset_s32)))\n" "svint32_t svldff1sb_gather_u32offset_s32(svbool_t, int8_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s64offset_u64)))\n" "svuint64_t svldff1sb_gather_s64offset_u64(svbool_t, int8_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s64offset_s64)))\n" "svint64_t svldff1sb_gather_s64offset_s64(svbool_t, int8_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64offset_u64)))\n" "svuint64_t svldff1sb_gather_u64offset_u64(svbool_t, int8_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64offset_s64)))\n" "svint64_t svldff1sb_gather_u64offset_s64(svbool_t, int8_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_u32)))\n" "svuint32_t svldff1sb_vnum_u32(svbool_t, int8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_u64)))\n" "svuint64_t svldff1sb_vnum_u64(svbool_t, int8_t const *, int64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_u16)))\n" "svuint16_t svldff1sb_vnum_u16(svbool_t, int8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_s32)))\n" "svint32_t svldff1sb_vnum_s32(svbool_t, int8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_s64)))\n" "svint64_t svldff1sb_vnum_s64(svbool_t, int8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_s16)))\n" "svint16_t svldff1sb_vnum_s16(svbool_t, int8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_u32)))\n" "svuint32_t svldff1sb_u32(svbool_t, int8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_u64)))\n" "svuint64_t svldff1sb_u64(svbool_t, int8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_u16)))\n" "svuint16_t svldff1sb_u16(svbool_t, int8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_s32)))\n" "svint32_t svldff1sb_s32(svbool_t, int8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_s64)))\n" "svint64_t svldff1sb_s64(svbool_t, int8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_s16)))\n" "svint16_t svldff1sb_s16(svbool_t, int8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_index_u32)))\n" "svuint32_t svldff1sh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_index_u64)))\n" "svuint64_t svldff1sh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_index_s32)))\n" "svint32_t svldff1sh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_index_s64)))\n" "svint64_t svldff1sh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_offset_u32)))\n" "svuint32_t svldff1sh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_offset_u64)))\n" "svuint64_t svldff1sh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_offset_s32)))\n" "svint32_t svldff1sh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_offset_s64)))\n" "svint64_t svldff1sh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_u32)))\n" "svuint32_t svldff1sh_gather_u32base_u32(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_u64)))\n" "svuint64_t svldff1sh_gather_u64base_u64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_s32)))\n" "svint32_t svldff1sh_gather_u32base_s32(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_s64)))\n" "svint64_t svldff1sh_gather_u64base_s64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32index_u32)))\n" "svuint32_t svldff1sh_gather_s32index_u32(svbool_t, int16_t const *, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32index_s32)))\n" "svint32_t svldff1sh_gather_s32index_s32(svbool_t, int16_t const *, svint32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32index_u32)))\n" "svuint32_t svldff1sh_gather_u32index_u32(svbool_t, int16_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32index_s32)))\n" "svint32_t svldff1sh_gather_u32index_s32(svbool_t, int16_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64index_u64)))\n" "svuint64_t svldff1sh_gather_s64index_u64(svbool_t, int16_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64index_s64)))\n" "svint64_t svldff1sh_gather_s64index_s64(svbool_t, int16_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64index_u64)))\n" "svuint64_t svldff1sh_gather_u64index_u64(svbool_t, int16_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64index_s64)))\n" "svint64_t svldff1sh_gather_u64index_s64(svbool_t, int16_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32offset_u32)))\n" "svuint32_t svldff1sh_gather_s32offset_u32(svbool_t, int16_t const *, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32offset_s32)))\n" "svint32_t svldff1sh_gather_s32offset_s32(svbool_t, int16_t const *, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32offset_u32)))\n" "svuint32_t svldff1sh_gather_u32offset_u32(svbool_t, int16_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32offset_s32)))\n" "svint32_t svldff1sh_gather_u32offset_s32(svbool_t, int16_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64offset_u64)))\n" "svuint64_t svldff1sh_gather_s64offset_u64(svbool_t, int16_t const *, svint64_t);\n" 
"__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64offset_s64)))\n" "svint64_t svldff1sh_gather_s64offset_s64(svbool_t, int16_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64offset_u64)))\n" "svuint64_t svldff1sh_gather_u64offset_u64(svbool_t, int16_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64offset_s64)))\n" "svint64_t svldff1sh_gather_u64offset_s64(svbool_t, int16_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_vnum_u32)))\n" "svuint32_t svldff1sh_vnum_u32(svbool_t, int16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_vnum_u64)))\n" "svuint64_t svldff1sh_vnum_u64(svbool_t, int16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_vnum_s32)))\n" "svint32_t svldff1sh_vnum_s32(svbool_t, int16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_vnum_s64)))\n" "svint64_t svldff1sh_vnum_s64(svbool_t, int16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_u32)))\n" "svuint32_t svldff1sh_u32(svbool_t, int16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_u64)))\n" "svuint64_t svldff1sh_u64(svbool_t, int16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_s32)))\n" "svint32_t svldff1sh_s32(svbool_t, int16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_s64)))\n" "svint64_t svldff1sh_s64(svbool_t, int16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_index_u64)))\n" "svuint64_t svldff1sw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_index_s64)))\n" "svint64_t svldff1sw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_offset_u64)))\n" "svuint64_t svldff1sw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_offset_s64)))\n" "svint64_t svldff1sw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_u64)))\n" "svuint64_t svldff1sw_gather_u64base_u64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_s64)))\n" "svint64_t svldff1sw_gather_u64base_s64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64index_u64)))\n" "svuint64_t svldff1sw_gather_s64index_u64(svbool_t, int32_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64index_s64)))\n" "svint64_t svldff1sw_gather_s64index_s64(svbool_t, int32_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64index_u64)))\n" "svuint64_t svldff1sw_gather_u64index_u64(svbool_t, int32_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64index_s64)))\n" "svint64_t svldff1sw_gather_u64index_s64(svbool_t, int32_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64offset_u64)))\n" "svuint64_t svldff1sw_gather_s64offset_u64(svbool_t, int32_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64offset_s64)))\n" "svint64_t svldff1sw_gather_s64offset_s64(svbool_t, int32_t const *, svint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64offset_u64)))\n" "svuint64_t svldff1sw_gather_u64offset_u64(svbool_t, int32_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64offset_s64)))\n" "svint64_t svldff1sw_gather_u64offset_s64(svbool_t, int32_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_vnum_u64)))\n" "svuint64_t svldff1sw_vnum_u64(svbool_t, int32_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_vnum_s64)))\n" "svint64_t svldff1sw_vnum_s64(svbool_t, int32_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_u64)))\n" "svuint64_t svldff1sw_u64(svbool_t, int32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_s64)))\n" "svint64_t svldff1sw_s64(svbool_t, int32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_offset_u32)))\n" "svuint32_t svldff1ub_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_offset_u64)))\n" "svuint64_t svldff1ub_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_offset_s32)))\n" "svint32_t svldff1ub_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_offset_s64)))\n" "svint64_t svldff1ub_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_u32)))\n" "svuint32_t svldff1ub_gather_u32base_u32(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_u64)))\n" "svuint64_t 
svldff1ub_gather_u64base_u64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_s32)))\n" "svint32_t svldff1ub_gather_u32base_s32(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_s64)))\n" "svint64_t svldff1ub_gather_u64base_s64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s32offset_u32)))\n" "svuint32_t svldff1ub_gather_s32offset_u32(svbool_t, uint8_t const *, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s32offset_s32)))\n" "svint32_t svldff1ub_gather_s32offset_s32(svbool_t, uint8_t const *, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32offset_u32)))\n" "svuint32_t svldff1ub_gather_u32offset_u32(svbool_t, uint8_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32offset_s32)))\n" "svint32_t svldff1ub_gather_u32offset_s32(svbool_t, uint8_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s64offset_u64)))\n" "svuint64_t svldff1ub_gather_s64offset_u64(svbool_t, uint8_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s64offset_s64)))\n" "svint64_t svldff1ub_gather_s64offset_s64(svbool_t, uint8_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64offset_u64)))\n" "svuint64_t svldff1ub_gather_u64offset_u64(svbool_t, uint8_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64offset_s64)))\n" "svint64_t svldff1ub_gather_u64offset_s64(svbool_t, uint8_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_u32)))\n" "svuint32_t svldff1ub_vnum_u32(svbool_t, uint8_t const *, int64_t);\n" 
"__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_u64)))\n" "svuint64_t svldff1ub_vnum_u64(svbool_t, uint8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_u16)))\n" "svuint16_t svldff1ub_vnum_u16(svbool_t, uint8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_s32)))\n" "svint32_t svldff1ub_vnum_s32(svbool_t, uint8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_s64)))\n" "svint64_t svldff1ub_vnum_s64(svbool_t, uint8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_s16)))\n" "svint16_t svldff1ub_vnum_s16(svbool_t, uint8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_u32)))\n" "svuint32_t svldff1ub_u32(svbool_t, uint8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_u64)))\n" "svuint64_t svldff1ub_u64(svbool_t, uint8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_u16)))\n" "svuint16_t svldff1ub_u16(svbool_t, uint8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_s32)))\n" "svint32_t svldff1ub_s32(svbool_t, uint8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_s64)))\n" "svint64_t svldff1ub_s64(svbool_t, uint8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_s16)))\n" "svint16_t svldff1ub_s16(svbool_t, uint8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_index_u32)))\n" "svuint32_t svldff1uh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_index_u64)))\n" "svuint64_t svldff1uh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_index_s32)))\n" "svint32_t svldff1uh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_index_s64)))\n" "svint64_t svldff1uh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_offset_u32)))\n" "svuint32_t svldff1uh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_offset_u64)))\n" "svuint64_t svldff1uh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_offset_s32)))\n" "svint32_t svldff1uh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_offset_s64)))\n" "svint64_t svldff1uh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_u32)))\n" "svuint32_t svldff1uh_gather_u32base_u32(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_u64)))\n" "svuint64_t svldff1uh_gather_u64base_u64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_s32)))\n" "svint32_t svldff1uh_gather_u32base_s32(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_s64)))\n" "svint64_t svldff1uh_gather_u64base_s64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32index_u32)))\n" "svuint32_t svldff1uh_gather_s32index_u32(svbool_t, uint16_t const *, svint32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32index_s32)))\n" "svint32_t svldff1uh_gather_s32index_s32(svbool_t, uint16_t const *, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32index_u32)))\n" "svuint32_t svldff1uh_gather_u32index_u32(svbool_t, uint16_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32index_s32)))\n" "svint32_t svldff1uh_gather_u32index_s32(svbool_t, uint16_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64index_u64)))\n" "svuint64_t svldff1uh_gather_s64index_u64(svbool_t, uint16_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64index_s64)))\n" "svint64_t svldff1uh_gather_s64index_s64(svbool_t, uint16_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64index_u64)))\n" "svuint64_t svldff1uh_gather_u64index_u64(svbool_t, uint16_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64index_s64)))\n" "svint64_t svldff1uh_gather_u64index_s64(svbool_t, uint16_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32offset_u32)))\n" "svuint32_t svldff1uh_gather_s32offset_u32(svbool_t, uint16_t const *, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32offset_s32)))\n" "svint32_t svldff1uh_gather_s32offset_s32(svbool_t, uint16_t const *, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32offset_u32)))\n" "svuint32_t svldff1uh_gather_u32offset_u32(svbool_t, uint16_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32offset_s32)))\n" "svint32_t svldff1uh_gather_u32offset_s32(svbool_t, uint16_t const *, 
svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64offset_u64)))\n" "svuint64_t svldff1uh_gather_s64offset_u64(svbool_t, uint16_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64offset_s64)))\n" "svint64_t svldff1uh_gather_s64offset_s64(svbool_t, uint16_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64offset_u64)))\n" "svuint64_t svldff1uh_gather_u64offset_u64(svbool_t, uint16_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64offset_s64)))\n" "svint64_t svldff1uh_gather_u64offset_s64(svbool_t, uint16_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_vnum_u32)))\n" "svuint32_t svldff1uh_vnum_u32(svbool_t, uint16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_vnum_u64)))\n" "svuint64_t svldff1uh_vnum_u64(svbool_t, uint16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_vnum_s32)))\n" "svint32_t svldff1uh_vnum_s32(svbool_t, uint16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_vnum_s64)))\n" "svint64_t svldff1uh_vnum_s64(svbool_t, uint16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_u32)))\n" "svuint32_t svldff1uh_u32(svbool_t, uint16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_u64)))\n" "svuint64_t svldff1uh_u64(svbool_t, uint16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_s32)))\n" "svint32_t svldff1uh_s32(svbool_t, uint16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_s64)))\n" "svint64_t svldff1uh_s64(svbool_t, uint16_t const *);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_index_u64)))\n" "svuint64_t svldff1uw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_index_s64)))\n" "svint64_t svldff1uw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_offset_u64)))\n" "svuint64_t svldff1uw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_offset_s64)))\n" "svint64_t svldff1uw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_u64)))\n" "svuint64_t svldff1uw_gather_u64base_u64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_s64)))\n" "svint64_t svldff1uw_gather_u64base_s64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64index_u64)))\n" "svuint64_t svldff1uw_gather_s64index_u64(svbool_t, uint32_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64index_s64)))\n" "svint64_t svldff1uw_gather_s64index_s64(svbool_t, uint32_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64index_u64)))\n" "svuint64_t svldff1uw_gather_u64index_u64(svbool_t, uint32_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64index_s64)))\n" "svint64_t svldff1uw_gather_u64index_s64(svbool_t, uint32_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64offset_u64)))\n" "svuint64_t svldff1uw_gather_s64offset_u64(svbool_t, uint32_t const *, svint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64offset_s64)))\n" "svint64_t svldff1uw_gather_s64offset_s64(svbool_t, uint32_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64offset_u64)))\n" "svuint64_t svldff1uw_gather_u64offset_u64(svbool_t, uint32_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64offset_s64)))\n" "svint64_t svldff1uw_gather_u64offset_s64(svbool_t, uint32_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_vnum_u64)))\n" "svuint64_t svldff1uw_vnum_u64(svbool_t, uint32_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_vnum_s64)))\n" "svint64_t svldff1uw_vnum_s64(svbool_t, uint32_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_u64)))\n" "svuint64_t svldff1uw_u64(svbool_t, uint32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_s64)))\n" "svint64_t svldff1uw_s64(svbool_t, uint32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u8)))\n" "svuint8_t svldnf1_u8(svbool_t, uint8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u32)))\n" "svuint32_t svldnf1_u32(svbool_t, uint32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u64)))\n" "svuint64_t svldnf1_u64(svbool_t, uint64_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u16)))\n" "svuint16_t svldnf1_u16(svbool_t, uint16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s8)))\n" "svint8_t svldnf1_s8(svbool_t, int8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f64)))\n" "svfloat64_t svldnf1_f64(svbool_t, float64_t const *);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f32)))\n" "svfloat32_t svldnf1_f32(svbool_t, float32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f16)))\n" "svfloat16_t svldnf1_f16(svbool_t, float16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s32)))\n" "svint32_t svldnf1_s32(svbool_t, int32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s64)))\n" "svint64_t svldnf1_s64(svbool_t, int64_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s16)))\n" "svint16_t svldnf1_s16(svbool_t, int16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u8)))\n" "svuint8_t svldnf1_vnum_u8(svbool_t, uint8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u32)))\n" "svuint32_t svldnf1_vnum_u32(svbool_t, uint32_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u64)))\n" "svuint64_t svldnf1_vnum_u64(svbool_t, uint64_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u16)))\n" "svuint16_t svldnf1_vnum_u16(svbool_t, uint16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s8)))\n" "svint8_t svldnf1_vnum_s8(svbool_t, int8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f64)))\n" "svfloat64_t svldnf1_vnum_f64(svbool_t, float64_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f32)))\n" "svfloat32_t svldnf1_vnum_f32(svbool_t, float32_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f16)))\n" "svfloat16_t svldnf1_vnum_f16(svbool_t, float16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s32)))\n" "svint32_t 
svldnf1_vnum_s32(svbool_t, int32_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s64)))\n" "svint64_t svldnf1_vnum_s64(svbool_t, int64_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s16)))\n" "svint16_t svldnf1_vnum_s16(svbool_t, int16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_u32)))\n" "svuint32_t svldnf1sb_vnum_u32(svbool_t, int8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_u64)))\n" "svuint64_t svldnf1sb_vnum_u64(svbool_t, int8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_u16)))\n" "svuint16_t svldnf1sb_vnum_u16(svbool_t, int8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_s32)))\n" "svint32_t svldnf1sb_vnum_s32(svbool_t, int8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_s64)))\n" "svint64_t svldnf1sb_vnum_s64(svbool_t, int8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_s16)))\n" "svint16_t svldnf1sb_vnum_s16(svbool_t, int8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_u32)))\n" "svuint32_t svldnf1sb_u32(svbool_t, int8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_u64)))\n" "svuint64_t svldnf1sb_u64(svbool_t, int8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_u16)))\n" "svuint16_t svldnf1sb_u16(svbool_t, int8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_s32)))\n" "svint32_t svldnf1sb_s32(svbool_t, int8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_s64)))\n" "svint64_t svldnf1sb_s64(svbool_t, int8_t const *);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_s16)))\n" "svint16_t svldnf1sb_s16(svbool_t, int8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_vnum_u32)))\n" "svuint32_t svldnf1sh_vnum_u32(svbool_t, int16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_vnum_u64)))\n" "svuint64_t svldnf1sh_vnum_u64(svbool_t, int16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_vnum_s32)))\n" "svint32_t svldnf1sh_vnum_s32(svbool_t, int16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_vnum_s64)))\n" "svint64_t svldnf1sh_vnum_s64(svbool_t, int16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_u32)))\n" "svuint32_t svldnf1sh_u32(svbool_t, int16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_u64)))\n" "svuint64_t svldnf1sh_u64(svbool_t, int16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_s32)))\n" "svint32_t svldnf1sh_s32(svbool_t, int16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_s64)))\n" "svint64_t svldnf1sh_s64(svbool_t, int16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sw_vnum_u64)))\n" "svuint64_t svldnf1sw_vnum_u64(svbool_t, int32_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sw_vnum_s64)))\n" "svint64_t svldnf1sw_vnum_s64(svbool_t, int32_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sw_u64)))\n" "svuint64_t svldnf1sw_u64(svbool_t, int32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sw_s64)))\n" "svint64_t svldnf1sw_s64(svbool_t, int32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_u32)))\n" "svuint32_t 
svldnf1ub_vnum_u32(svbool_t, uint8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_u64)))\n" "svuint64_t svldnf1ub_vnum_u64(svbool_t, uint8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_u16)))\n" "svuint16_t svldnf1ub_vnum_u16(svbool_t, uint8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_s32)))\n" "svint32_t svldnf1ub_vnum_s32(svbool_t, uint8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_s64)))\n" "svint64_t svldnf1ub_vnum_s64(svbool_t, uint8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_s16)))\n" "svint16_t svldnf1ub_vnum_s16(svbool_t, uint8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_u32)))\n" "svuint32_t svldnf1ub_u32(svbool_t, uint8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_u64)))\n" "svuint64_t svldnf1ub_u64(svbool_t, uint8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_u16)))\n" "svuint16_t svldnf1ub_u16(svbool_t, uint8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_s32)))\n" "svint32_t svldnf1ub_s32(svbool_t, uint8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_s64)))\n" "svint64_t svldnf1ub_s64(svbool_t, uint8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_s16)))\n" "svint16_t svldnf1ub_s16(svbool_t, uint8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_vnum_u32)))\n" "svuint32_t svldnf1uh_vnum_u32(svbool_t, uint16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_vnum_u64)))\n" "svuint64_t svldnf1uh_vnum_u64(svbool_t, uint16_t const *, int64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_vnum_s32)))\n" "svint32_t svldnf1uh_vnum_s32(svbool_t, uint16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_vnum_s64)))\n" "svint64_t svldnf1uh_vnum_s64(svbool_t, uint16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_u32)))\n" "svuint32_t svldnf1uh_u32(svbool_t, uint16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_u64)))\n" "svuint64_t svldnf1uh_u64(svbool_t, uint16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_s32)))\n" "svint32_t svldnf1uh_s32(svbool_t, uint16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_s64)))\n" "svint64_t svldnf1uh_s64(svbool_t, uint16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uw_vnum_u64)))\n" "svuint64_t svldnf1uw_vnum_u64(svbool_t, uint32_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uw_vnum_s64)))\n" "svint64_t svldnf1uw_vnum_s64(svbool_t, uint32_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uw_u64)))\n" "svuint64_t svldnf1uw_u64(svbool_t, uint32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uw_s64)))\n" "svint64_t svldnf1uw_s64(svbool_t, uint32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u8)))\n" "svuint8_t svldnt1_u8(svbool_t, uint8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u32)))\n" "svuint32_t svldnt1_u32(svbool_t, uint32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u64)))\n" "svuint64_t svldnt1_u64(svbool_t, uint64_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u16)))\n" "svuint16_t svldnt1_u16(svbool_t, uint16_t const *);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s8)))\n" "svint8_t svldnt1_s8(svbool_t, int8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f64)))\n" "svfloat64_t svldnt1_f64(svbool_t, float64_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f32)))\n" "svfloat32_t svldnt1_f32(svbool_t, float32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f16)))\n" "svfloat16_t svldnt1_f16(svbool_t, float16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s32)))\n" "svint32_t svldnt1_s32(svbool_t, int32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s64)))\n" "svint64_t svldnt1_s64(svbool_t, int64_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s16)))\n" "svint16_t svldnt1_s16(svbool_t, int16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u8)))\n" "svuint8_t svldnt1_vnum_u8(svbool_t, uint8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u32)))\n" "svuint32_t svldnt1_vnum_u32(svbool_t, uint32_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u64)))\n" "svuint64_t svldnt1_vnum_u64(svbool_t, uint64_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u16)))\n" "svuint16_t svldnt1_vnum_u16(svbool_t, uint16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s8)))\n" "svint8_t svldnt1_vnum_s8(svbool_t, int8_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f64)))\n" "svfloat64_t svldnt1_vnum_f64(svbool_t, float64_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f32)))\n" "svfloat32_t svldnt1_vnum_f32(svbool_t, float32_t const *, 
int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f16)))\n" "svfloat16_t svldnt1_vnum_f16(svbool_t, float16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s32)))\n" "svint32_t svldnt1_vnum_s32(svbool_t, int32_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s64)))\n" "svint64_t svldnt1_vnum_s64(svbool_t, int64_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s16)))\n" "svint16_t svldnt1_vnum_s16(svbool_t, int16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u8)))\n" "uint64_t svlen_u8(svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u32)))\n" "uint64_t svlen_u32(svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u64)))\n" "uint64_t svlen_u64(svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u16)))\n" "uint64_t svlen_u16(svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s8)))\n" "uint64_t svlen_s8(svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f64)))\n" "uint64_t svlen_f64(svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f32)))\n" "uint64_t svlen_f32(svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f16)))\n" "uint64_t svlen_f16(svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s32)))\n" "uint64_t svlen_s32(svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s64)))\n" "uint64_t svlen_s64(svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s16)))\n" "uint64_t svlen_s16(svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_m)))\n" "svuint8_t svlsl_n_u8_m(svbool_t, svuint8_t, uint8_t);\n" 
"__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_m)))\n" "svuint32_t svlsl_n_u32_m(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_m)))\n" "svuint64_t svlsl_n_u64_m(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_m)))\n" "svuint16_t svlsl_n_u16_m(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_m)))\n" "svint8_t svlsl_n_s8_m(svbool_t, svint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_m)))\n" "svint32_t svlsl_n_s32_m(svbool_t, svint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_m)))\n" "svint64_t svlsl_n_s64_m(svbool_t, svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_m)))\n" "svint16_t svlsl_n_s16_m(svbool_t, svint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_x)))\n" "svuint8_t svlsl_n_u8_x(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_x)))\n" "svuint32_t svlsl_n_u32_x(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_x)))\n" "svuint64_t svlsl_n_u64_x(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_x)))\n" "svuint16_t svlsl_n_u16_x(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_x)))\n" "svint8_t svlsl_n_s8_x(svbool_t, svint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_x)))\n" "svint32_t svlsl_n_s32_x(svbool_t, svint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_x)))\n" "svint64_t svlsl_n_s64_x(svbool_t, svint64_t, uint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_x)))\n" "svint16_t svlsl_n_s16_x(svbool_t, svint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_z)))\n" "svuint8_t svlsl_n_u8_z(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_z)))\n" "svuint32_t svlsl_n_u32_z(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_z)))\n" "svuint64_t svlsl_n_u64_z(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_z)))\n" "svuint16_t svlsl_n_u16_z(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_z)))\n" "svint8_t svlsl_n_s8_z(svbool_t, svint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_z)))\n" "svint32_t svlsl_n_s32_z(svbool_t, svint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_z)))\n" "svint64_t svlsl_n_s64_z(svbool_t, svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_z)))\n" "svint16_t svlsl_n_s16_z(svbool_t, svint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_m)))\n" "svuint8_t svlsl_u8_m(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_m)))\n" "svuint32_t svlsl_u32_m(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_m)))\n" "svuint64_t svlsl_u64_m(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_m)))\n" "svuint16_t svlsl_u16_m(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_m)))\n" "svint8_t svlsl_s8_m(svbool_t, svint8_t, svuint8_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_m)))\n" "svint32_t svlsl_s32_m(svbool_t, svint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_m)))\n" "svint64_t svlsl_s64_m(svbool_t, svint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_m)))\n" "svint16_t svlsl_s16_m(svbool_t, svint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_x)))\n" "svuint8_t svlsl_u8_x(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_x)))\n" "svuint32_t svlsl_u32_x(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_x)))\n" "svuint64_t svlsl_u64_x(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_x)))\n" "svuint16_t svlsl_u16_x(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_x)))\n" "svint8_t svlsl_s8_x(svbool_t, svint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_x)))\n" "svint32_t svlsl_s32_x(svbool_t, svint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_x)))\n" "svint64_t svlsl_s64_x(svbool_t, svint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_x)))\n" "svint16_t svlsl_s16_x(svbool_t, svint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_z)))\n" "svuint8_t svlsl_u8_z(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_z)))\n" "svuint32_t svlsl_u32_z(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_z)))\n" "svuint64_t svlsl_u64_z(svbool_t, svuint64_t, svuint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_z)))\n" "svuint16_t svlsl_u16_z(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_z)))\n" "svint8_t svlsl_s8_z(svbool_t, svint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_z)))\n" "svint32_t svlsl_s32_z(svbool_t, svint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_z)))\n" "svint64_t svlsl_s64_z(svbool_t, svint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_z)))\n" "svint16_t svlsl_s16_z(svbool_t, svint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_m)))\n" "svuint8_t svlsl_wide_n_u8_m(svbool_t, svuint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_m)))\n" "svuint32_t svlsl_wide_n_u32_m(svbool_t, svuint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_m)))\n" "svuint16_t svlsl_wide_n_u16_m(svbool_t, svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_m)))\n" "svint8_t svlsl_wide_n_s8_m(svbool_t, svint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_m)))\n" "svint32_t svlsl_wide_n_s32_m(svbool_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_m)))\n" "svint16_t svlsl_wide_n_s16_m(svbool_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_x)))\n" "svuint8_t svlsl_wide_n_u8_x(svbool_t, svuint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_x)))\n" "svuint32_t svlsl_wide_n_u32_x(svbool_t, svuint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_x)))\n" "svuint16_t 
svlsl_wide_n_u16_x(svbool_t, svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_x)))\n" "svint8_t svlsl_wide_n_s8_x(svbool_t, svint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_x)))\n" "svint32_t svlsl_wide_n_s32_x(svbool_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_x)))\n" "svint16_t svlsl_wide_n_s16_x(svbool_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_z)))\n" "svuint8_t svlsl_wide_n_u8_z(svbool_t, svuint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_z)))\n" "svuint32_t svlsl_wide_n_u32_z(svbool_t, svuint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_z)))\n" "svuint16_t svlsl_wide_n_u16_z(svbool_t, svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_z)))\n" "svint8_t svlsl_wide_n_s8_z(svbool_t, svint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_z)))\n" "svint32_t svlsl_wide_n_s32_z(svbool_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_z)))\n" "svint16_t svlsl_wide_n_s16_z(svbool_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_m)))\n" "svuint8_t svlsl_wide_u8_m(svbool_t, svuint8_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_m)))\n" "svuint32_t svlsl_wide_u32_m(svbool_t, svuint32_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_m)))\n" "svuint16_t svlsl_wide_u16_m(svbool_t, svuint16_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_m)))\n" "svint8_t svlsl_wide_s8_m(svbool_t, svint8_t, 
svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_m)))\n" "svint32_t svlsl_wide_s32_m(svbool_t, svint32_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_m)))\n" "svint16_t svlsl_wide_s16_m(svbool_t, svint16_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_x)))\n" "svuint8_t svlsl_wide_u8_x(svbool_t, svuint8_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_x)))\n" "svuint32_t svlsl_wide_u32_x(svbool_t, svuint32_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_x)))\n" "svuint16_t svlsl_wide_u16_x(svbool_t, svuint16_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_x)))\n" "svint8_t svlsl_wide_s8_x(svbool_t, svint8_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_x)))\n" "svint32_t svlsl_wide_s32_x(svbool_t, svint32_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_x)))\n" "svint16_t svlsl_wide_s16_x(svbool_t, svint16_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_z)))\n" "svuint8_t svlsl_wide_u8_z(svbool_t, svuint8_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_z)))\n" "svuint32_t svlsl_wide_u32_z(svbool_t, svuint32_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_z)))\n" "svuint16_t svlsl_wide_u16_z(svbool_t, svuint16_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_z)))\n" "svint8_t svlsl_wide_s8_z(svbool_t, svint8_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_z)))\n" "svint32_t svlsl_wide_s32_z(svbool_t, svint32_t, svuint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_z)))\n" "svint16_t svlsl_wide_s16_z(svbool_t, svint16_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_m)))\n" "svuint8_t svlsr_n_u8_m(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_m)))\n" "svuint32_t svlsr_n_u32_m(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_m)))\n" "svuint64_t svlsr_n_u64_m(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_m)))\n" "svuint16_t svlsr_n_u16_m(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_x)))\n" "svuint8_t svlsr_n_u8_x(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_x)))\n" "svuint32_t svlsr_n_u32_x(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_x)))\n" "svuint64_t svlsr_n_u64_x(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_x)))\n" "svuint16_t svlsr_n_u16_x(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_z)))\n" "svuint8_t svlsr_n_u8_z(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_z)))\n" "svuint32_t svlsr_n_u32_z(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_z)))\n" "svuint64_t svlsr_n_u64_z(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_z)))\n" "svuint16_t svlsr_n_u16_z(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_m)))\n" "svuint8_t svlsr_u8_m(svbool_t, svuint8_t, svuint8_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_m)))\n" "svuint32_t svlsr_u32_m(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_m)))\n" "svuint64_t svlsr_u64_m(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_m)))\n" "svuint16_t svlsr_u16_m(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_x)))\n" "svuint8_t svlsr_u8_x(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_x)))\n" "svuint32_t svlsr_u32_x(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_x)))\n" "svuint64_t svlsr_u64_x(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_x)))\n" "svuint16_t svlsr_u16_x(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_z)))\n" "svuint8_t svlsr_u8_z(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_z)))\n" "svuint32_t svlsr_u32_z(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_z)))\n" "svuint64_t svlsr_u64_z(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_z)))\n" "svuint16_t svlsr_u16_z(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_m)))\n" "svuint8_t svlsr_wide_n_u8_m(svbool_t, svuint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_m)))\n" "svuint32_t svlsr_wide_n_u32_m(svbool_t, svuint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_m)))\n" "svuint16_t svlsr_wide_n_u16_m(svbool_t, svuint16_t, uint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_x)))\n" "svuint8_t svlsr_wide_n_u8_x(svbool_t, svuint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_x)))\n" "svuint32_t svlsr_wide_n_u32_x(svbool_t, svuint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_x)))\n" "svuint16_t svlsr_wide_n_u16_x(svbool_t, svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_z)))\n" "svuint8_t svlsr_wide_n_u8_z(svbool_t, svuint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_z)))\n" "svuint32_t svlsr_wide_n_u32_z(svbool_t, svuint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_z)))\n" "svuint16_t svlsr_wide_n_u16_z(svbool_t, svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_m)))\n" "svuint8_t svlsr_wide_u8_m(svbool_t, svuint8_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_m)))\n" "svuint32_t svlsr_wide_u32_m(svbool_t, svuint32_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_m)))\n" "svuint16_t svlsr_wide_u16_m(svbool_t, svuint16_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_x)))\n" "svuint8_t svlsr_wide_u8_x(svbool_t, svuint8_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_x)))\n" "svuint32_t svlsr_wide_u32_x(svbool_t, svuint32_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_x)))\n" "svuint16_t svlsr_wide_u16_x(svbool_t, svuint16_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_z)))\n" "svuint8_t svlsr_wide_u8_z(svbool_t, svuint8_t, svuint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_z)))\n" "svuint32_t svlsr_wide_u32_z(svbool_t, svuint32_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_z)))\n" "svuint16_t svlsr_wide_u16_z(svbool_t, svuint16_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_m)))\n" "svfloat64_t svmad_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_m)))\n" "svfloat32_t svmad_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_m)))\n" "svfloat16_t svmad_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_x)))\n" "svfloat64_t svmad_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_x)))\n" "svfloat32_t svmad_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_x)))\n" "svfloat16_t svmad_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_z)))\n" "svfloat64_t svmad_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_z)))\n" "svfloat32_t svmad_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_z)))\n" "svfloat16_t svmad_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_m)))\n" "svuint8_t svmad_n_u8_m(svbool_t, svuint8_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_m)))\n" "svuint32_t svmad_n_u32_m(svbool_t, 
svuint32_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_m)))\n" "svuint64_t svmad_n_u64_m(svbool_t, svuint64_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_m)))\n" "svuint16_t svmad_n_u16_m(svbool_t, svuint16_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_m)))\n" "svint8_t svmad_n_s8_m(svbool_t, svint8_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_m)))\n" "svint32_t svmad_n_s32_m(svbool_t, svint32_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_m)))\n" "svint64_t svmad_n_s64_m(svbool_t, svint64_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_m)))\n" "svint16_t svmad_n_s16_m(svbool_t, svint16_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_x)))\n" "svuint8_t svmad_n_u8_x(svbool_t, svuint8_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_x)))\n" "svuint32_t svmad_n_u32_x(svbool_t, svuint32_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_x)))\n" "svuint64_t svmad_n_u64_x(svbool_t, svuint64_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_x)))\n" "svuint16_t svmad_n_u16_x(svbool_t, svuint16_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_x)))\n" "svint8_t svmad_n_s8_x(svbool_t, svint8_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_x)))\n" "svint32_t svmad_n_s32_x(svbool_t, svint32_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_x)))\n" "svint64_t svmad_n_s64_x(svbool_t, svint64_t, svint64_t, 
int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_x)))\n" "svint16_t svmad_n_s16_x(svbool_t, svint16_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_z)))\n" "svuint8_t svmad_n_u8_z(svbool_t, svuint8_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_z)))\n" "svuint32_t svmad_n_u32_z(svbool_t, svuint32_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_z)))\n" "svuint64_t svmad_n_u64_z(svbool_t, svuint64_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_z)))\n" "svuint16_t svmad_n_u16_z(svbool_t, svuint16_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_z)))\n" "svint8_t svmad_n_s8_z(svbool_t, svint8_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_z)))\n" "svint32_t svmad_n_s32_z(svbool_t, svint32_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_z)))\n" "svint64_t svmad_n_s64_z(svbool_t, svint64_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_z)))\n" "svint16_t svmad_n_s16_z(svbool_t, svint16_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_m)))\n" "svfloat64_t svmad_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_m)))\n" "svfloat32_t svmad_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_m)))\n" "svfloat16_t svmad_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_x)))\n" "svfloat64_t svmad_f64_x(svbool_t, svfloat64_t, svfloat64_t, 
svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_x)))\n" "svfloat32_t svmad_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_x)))\n" "svfloat16_t svmad_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_z)))\n" "svfloat64_t svmad_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_z)))\n" "svfloat32_t svmad_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_z)))\n" "svfloat16_t svmad_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_m)))\n" "svuint8_t svmad_u8_m(svbool_t, svuint8_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_m)))\n" "svuint32_t svmad_u32_m(svbool_t, svuint32_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_m)))\n" "svuint64_t svmad_u64_m(svbool_t, svuint64_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_m)))\n" "svuint16_t svmad_u16_m(svbool_t, svuint16_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_m)))\n" "svint8_t svmad_s8_m(svbool_t, svint8_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_m)))\n" "svint32_t svmad_s32_m(svbool_t, svint32_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_m)))\n" "svint64_t svmad_s64_m(svbool_t, svint64_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_m)))\n" "svint16_t svmad_s16_m(svbool_t, svint16_t, svint16_t, svint16_t);\n" 
"__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_x)))\n" "svuint8_t svmad_u8_x(svbool_t, svuint8_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_x)))\n" "svuint32_t svmad_u32_x(svbool_t, svuint32_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_x)))\n" "svuint64_t svmad_u64_x(svbool_t, svuint64_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_x)))\n" "svuint16_t svmad_u16_x(svbool_t, svuint16_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_x)))\n" "svint8_t svmad_s8_x(svbool_t, svint8_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_x)))\n" "svint32_t svmad_s32_x(svbool_t, svint32_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_x)))\n" "svint64_t svmad_s64_x(svbool_t, svint64_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_x)))\n" "svint16_t svmad_s16_x(svbool_t, svint16_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_z)))\n" "svuint8_t svmad_u8_z(svbool_t, svuint8_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_z)))\n" "svuint32_t svmad_u32_z(svbool_t, svuint32_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_z)))\n" "svuint64_t svmad_u64_z(svbool_t, svuint64_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_z)))\n" "svuint16_t svmad_u16_z(svbool_t, svuint16_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_z)))\n" "svint8_t svmad_s8_z(svbool_t, svint8_t, svint8_t, svint8_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_z)))\n" "svint32_t svmad_s32_z(svbool_t, svint32_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_z)))\n" "svint64_t svmad_s64_z(svbool_t, svint64_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_z)))\n" "svint16_t svmad_s16_z(svbool_t, svint16_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_m)))\n" "svfloat64_t svmax_n_f64_m(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_m)))\n" "svfloat32_t svmax_n_f32_m(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_m)))\n" "svfloat16_t svmax_n_f16_m(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_x)))\n" "svfloat64_t svmax_n_f64_x(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_x)))\n" "svfloat32_t svmax_n_f32_x(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_x)))\n" "svfloat16_t svmax_n_f16_x(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_z)))\n" "svfloat64_t svmax_n_f64_z(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_z)))\n" "svfloat32_t svmax_n_f32_z(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_z)))\n" "svfloat16_t svmax_n_f16_z(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_m)))\n" "svint8_t svmax_n_s8_m(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_m)))\n" "svint32_t 
svmax_n_s32_m(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_m)))\n" "svint64_t svmax_n_s64_m(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_m)))\n" "svint16_t svmax_n_s16_m(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_x)))\n" "svint8_t svmax_n_s8_x(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_x)))\n" "svint32_t svmax_n_s32_x(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_x)))\n" "svint64_t svmax_n_s64_x(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_x)))\n" "svint16_t svmax_n_s16_x(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_z)))\n" "svint8_t svmax_n_s8_z(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_z)))\n" "svint32_t svmax_n_s32_z(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_z)))\n" "svint64_t svmax_n_s64_z(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_z)))\n" "svint16_t svmax_n_s16_z(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_m)))\n" "svuint8_t svmax_n_u8_m(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_m)))\n" "svuint32_t svmax_n_u32_m(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_m)))\n" "svuint64_t svmax_n_u64_m(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_m)))\n" "svuint16_t svmax_n_u16_m(svbool_t, svuint16_t, 
uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_x)))\n" "svuint8_t svmax_n_u8_x(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_x)))\n" "svuint32_t svmax_n_u32_x(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_x)))\n" "svuint64_t svmax_n_u64_x(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_x)))\n" "svuint16_t svmax_n_u16_x(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_z)))\n" "svuint8_t svmax_n_u8_z(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_z)))\n" "svuint32_t svmax_n_u32_z(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_z)))\n" "svuint64_t svmax_n_u64_z(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_z)))\n" "svuint16_t svmax_n_u16_z(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_m)))\n" "svfloat64_t svmax_f64_m(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_m)))\n" "svfloat32_t svmax_f32_m(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_m)))\n" "svfloat16_t svmax_f16_m(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_x)))\n" "svfloat64_t svmax_f64_x(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_x)))\n" "svfloat32_t svmax_f32_x(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_x)))\n" "svfloat16_t svmax_f16_x(svbool_t, svfloat16_t, 
svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_z)))\n" "svfloat64_t svmax_f64_z(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_z)))\n" "svfloat32_t svmax_f32_z(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_z)))\n" "svfloat16_t svmax_f16_z(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_m)))\n" "svint8_t svmax_s8_m(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_m)))\n" "svint32_t svmax_s32_m(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_m)))\n" "svint64_t svmax_s64_m(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_m)))\n" "svint16_t svmax_s16_m(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_x)))\n" "svint8_t svmax_s8_x(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_x)))\n" "svint32_t svmax_s32_x(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_x)))\n" "svint64_t svmax_s64_x(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_x)))\n" "svint16_t svmax_s16_x(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_z)))\n" "svint8_t svmax_s8_z(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_z)))\n" "svint32_t svmax_s32_z(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_z)))\n" "svint64_t svmax_s64_z(svbool_t, svint64_t, svint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_z)))\n" "svint16_t svmax_s16_z(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_m)))\n" "svuint8_t svmax_u8_m(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_m)))\n" "svuint32_t svmax_u32_m(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_m)))\n" "svuint64_t svmax_u64_m(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_m)))\n" "svuint16_t svmax_u16_m(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_x)))\n" "svuint8_t svmax_u8_x(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_x)))\n" "svuint32_t svmax_u32_x(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_x)))\n" "svuint64_t svmax_u64_x(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_x)))\n" "svuint16_t svmax_u16_x(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_z)))\n" "svuint8_t svmax_u8_z(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_z)))\n" "svuint32_t svmax_u32_z(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_z)))\n" "svuint64_t svmax_u64_z(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_z)))\n" "svuint16_t svmax_u16_z(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_m)))\n" "svfloat64_t svmaxnm_n_f64_m(svbool_t, svfloat64_t, float64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_m)))\n" "svfloat32_t svmaxnm_n_f32_m(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_m)))\n" "svfloat16_t svmaxnm_n_f16_m(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_x)))\n" "svfloat64_t svmaxnm_n_f64_x(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_x)))\n" "svfloat32_t svmaxnm_n_f32_x(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_x)))\n" "svfloat16_t svmaxnm_n_f16_x(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_z)))\n" "svfloat64_t svmaxnm_n_f64_z(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_z)))\n" "svfloat32_t svmaxnm_n_f32_z(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_z)))\n" "svfloat16_t svmaxnm_n_f16_z(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_m)))\n" "svfloat64_t svmaxnm_f64_m(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_m)))\n" "svfloat32_t svmaxnm_f32_m(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_m)))\n" "svfloat16_t svmaxnm_f16_m(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_x)))\n" "svfloat64_t svmaxnm_f64_x(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_x)))\n" "svfloat32_t svmaxnm_f32_x(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_x)))\n" "svfloat16_t svmaxnm_f16_x(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_z)))\n" "svfloat64_t svmaxnm_f64_z(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_z)))\n" "svfloat32_t svmaxnm_f32_z(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_z)))\n" "svfloat16_t svmaxnm_f16_z(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f64)))\n" "float64_t svmaxnmv_f64(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f32)))\n" "float32_t svmaxnmv_f32(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f16)))\n" "float16_t svmaxnmv_f16(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f64)))\n" "float64_t svmaxv_f64(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f32)))\n" "float32_t svmaxv_f32(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f16)))\n" "float16_t svmaxv_f16(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s8)))\n" "int8_t svmaxv_s8(svbool_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s32)))\n" "int32_t svmaxv_s32(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s64)))\n" "int64_t svmaxv_s64(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s16)))\n" "int16_t svmaxv_s16(svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u8)))\n" "uint8_t svmaxv_u8(svbool_t, svuint8_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u32)))\n" "uint32_t svmaxv_u32(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u64)))\n" "uint64_t svmaxv_u64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u16)))\n" "uint16_t svmaxv_u16(svbool_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_m)))\n" "svfloat64_t svmin_n_f64_m(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_m)))\n" "svfloat32_t svmin_n_f32_m(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_m)))\n" "svfloat16_t svmin_n_f16_m(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_x)))\n" "svfloat64_t svmin_n_f64_x(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_x)))\n" "svfloat32_t svmin_n_f32_x(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_x)))\n" "svfloat16_t svmin_n_f16_x(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_z)))\n" "svfloat64_t svmin_n_f64_z(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_z)))\n" "svfloat32_t svmin_n_f32_z(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_z)))\n" "svfloat16_t svmin_n_f16_z(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_m)))\n" "svint8_t svmin_n_s8_m(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_m)))\n" "svint32_t svmin_n_s32_m(svbool_t, svint32_t, int32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_m)))\n" "svint64_t svmin_n_s64_m(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_m)))\n" "svint16_t svmin_n_s16_m(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_x)))\n" "svint8_t svmin_n_s8_x(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_x)))\n" "svint32_t svmin_n_s32_x(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_x)))\n" "svint64_t svmin_n_s64_x(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_x)))\n" "svint16_t svmin_n_s16_x(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_z)))\n" "svint8_t svmin_n_s8_z(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_z)))\n" "svint32_t svmin_n_s32_z(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_z)))\n" "svint64_t svmin_n_s64_z(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_z)))\n" "svint16_t svmin_n_s16_z(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_m)))\n" "svuint8_t svmin_n_u8_m(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_m)))\n" "svuint32_t svmin_n_u32_m(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_m)))\n" "svuint64_t svmin_n_u64_m(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_m)))\n" "svuint16_t svmin_n_u16_m(svbool_t, svuint16_t, uint16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_x)))\n" "svuint8_t svmin_n_u8_x(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_x)))\n" "svuint32_t svmin_n_u32_x(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_x)))\n" "svuint64_t svmin_n_u64_x(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_x)))\n" "svuint16_t svmin_n_u16_x(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_z)))\n" "svuint8_t svmin_n_u8_z(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_z)))\n" "svuint32_t svmin_n_u32_z(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_z)))\n" "svuint64_t svmin_n_u64_z(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_z)))\n" "svuint16_t svmin_n_u16_z(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_m)))\n" "svfloat64_t svmin_f64_m(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_m)))\n" "svfloat32_t svmin_f32_m(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_m)))\n" "svfloat16_t svmin_f16_m(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_x)))\n" "svfloat64_t svmin_f64_x(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_x)))\n" "svfloat32_t svmin_f32_x(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_x)))\n" "svfloat16_t svmin_f16_x(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_z)))\n" "svfloat64_t svmin_f64_z(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_z)))\n" "svfloat32_t svmin_f32_z(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_z)))\n" "svfloat16_t svmin_f16_z(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_m)))\n" "svint8_t svmin_s8_m(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_m)))\n" "svint32_t svmin_s32_m(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_m)))\n" "svint64_t svmin_s64_m(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_m)))\n" "svint16_t svmin_s16_m(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_x)))\n" "svint8_t svmin_s8_x(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_x)))\n" "svint32_t svmin_s32_x(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_x)))\n" "svint64_t svmin_s64_x(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_x)))\n" "svint16_t svmin_s16_x(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_z)))\n" "svint8_t svmin_s8_z(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_z)))\n" "svint32_t svmin_s32_z(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_z)))\n" "svint64_t svmin_s64_z(svbool_t, svint64_t, svint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_z)))\n" "svint16_t svmin_s16_z(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_m)))\n" "svuint8_t svmin_u8_m(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_m)))\n" "svuint32_t svmin_u32_m(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_m)))\n" "svuint64_t svmin_u64_m(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_m)))\n" "svuint16_t svmin_u16_m(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_x)))\n" "svuint8_t svmin_u8_x(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_x)))\n" "svuint32_t svmin_u32_x(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_x)))\n" "svuint64_t svmin_u64_x(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_x)))\n" "svuint16_t svmin_u16_x(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_z)))\n" "svuint8_t svmin_u8_z(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_z)))\n" "svuint32_t svmin_u32_z(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_z)))\n" "svuint64_t svmin_u64_z(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_z)))\n" "svuint16_t svmin_u16_z(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_m)))\n" "svfloat64_t svminnm_n_f64_m(svbool_t, svfloat64_t, float64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_m)))\n" "svfloat32_t svminnm_n_f32_m(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_m)))\n" "svfloat16_t svminnm_n_f16_m(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_x)))\n" "svfloat64_t svminnm_n_f64_x(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_x)))\n" "svfloat32_t svminnm_n_f32_x(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_x)))\n" "svfloat16_t svminnm_n_f16_x(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_z)))\n" "svfloat64_t svminnm_n_f64_z(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_z)))\n" "svfloat32_t svminnm_n_f32_z(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_z)))\n" "svfloat16_t svminnm_n_f16_z(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_m)))\n" "svfloat64_t svminnm_f64_m(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_m)))\n" "svfloat32_t svminnm_f32_m(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_m)))\n" "svfloat16_t svminnm_f16_m(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_x)))\n" "svfloat64_t svminnm_f64_x(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_x)))\n" "svfloat32_t svminnm_f32_x(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_x)))\n" "svfloat16_t svminnm_f16_x(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_z)))\n" "svfloat64_t svminnm_f64_z(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_z)))\n" "svfloat32_t svminnm_f32_z(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_z)))\n" "svfloat16_t svminnm_f16_z(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f64)))\n" "float64_t svminnmv_f64(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f32)))\n" "float32_t svminnmv_f32(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f16)))\n" "float16_t svminnmv_f16(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f64)))\n" "float64_t svminv_f64(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f32)))\n" "float32_t svminv_f32(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f16)))\n" "float16_t svminv_f16(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s8)))\n" "int8_t svminv_s8(svbool_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s32)))\n" "int32_t svminv_s32(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s64)))\n" "int64_t svminv_s64(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s16)))\n" "int16_t svminv_s16(svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u8)))\n" "uint8_t svminv_u8(svbool_t, svuint8_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u32)))\n" "uint32_t svminv_u32(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u64)))\n" "uint64_t svminv_u64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u16)))\n" "uint16_t svminv_u16(svbool_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_m)))\n" "svfloat64_t svmla_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_m)))\n" "svfloat32_t svmla_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_m)))\n" "svfloat16_t svmla_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_x)))\n" "svfloat64_t svmla_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_x)))\n" "svfloat32_t svmla_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_x)))\n" "svfloat16_t svmla_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_z)))\n" "svfloat64_t svmla_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_z)))\n" "svfloat32_t svmla_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_z)))\n" "svfloat16_t svmla_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_m)))\n" "svuint8_t svmla_n_u8_m(svbool_t, svuint8_t, svuint8_t, uint8_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_m)))\n" "svuint32_t svmla_n_u32_m(svbool_t, svuint32_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_m)))\n" "svuint64_t svmla_n_u64_m(svbool_t, svuint64_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_m)))\n" "svuint16_t svmla_n_u16_m(svbool_t, svuint16_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_m)))\n" "svint8_t svmla_n_s8_m(svbool_t, svint8_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_m)))\n" "svint32_t svmla_n_s32_m(svbool_t, svint32_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_m)))\n" "svint64_t svmla_n_s64_m(svbool_t, svint64_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_m)))\n" "svint16_t svmla_n_s16_m(svbool_t, svint16_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_x)))\n" "svuint8_t svmla_n_u8_x(svbool_t, svuint8_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_x)))\n" "svuint32_t svmla_n_u32_x(svbool_t, svuint32_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_x)))\n" "svuint64_t svmla_n_u64_x(svbool_t, svuint64_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_x)))\n" "svuint16_t svmla_n_u16_x(svbool_t, svuint16_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_x)))\n" "svint8_t svmla_n_s8_x(svbool_t, svint8_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_x)))\n" "svint32_t svmla_n_s32_x(svbool_t, svint32_t, svint32_t, int32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_x)))\n" "svint64_t svmla_n_s64_x(svbool_t, svint64_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_x)))\n" "svint16_t svmla_n_s16_x(svbool_t, svint16_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_z)))\n" "svuint8_t svmla_n_u8_z(svbool_t, svuint8_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_z)))\n" "svuint32_t svmla_n_u32_z(svbool_t, svuint32_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_z)))\n" "svuint64_t svmla_n_u64_z(svbool_t, svuint64_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_z)))\n" "svuint16_t svmla_n_u16_z(svbool_t, svuint16_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_z)))\n" "svint8_t svmla_n_s8_z(svbool_t, svint8_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_z)))\n" "svint32_t svmla_n_s32_z(svbool_t, svint32_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_z)))\n" "svint64_t svmla_n_s64_z(svbool_t, svint64_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_z)))\n" "svint16_t svmla_n_s16_z(svbool_t, svint16_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_m)))\n" "svfloat64_t svmla_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_m)))\n" "svfloat32_t svmla_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_m)))\n" "svfloat16_t svmla_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_x)))\n" "svfloat64_t svmla_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_x)))\n" "svfloat32_t svmla_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_x)))\n" "svfloat16_t svmla_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_z)))\n" "svfloat64_t svmla_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_z)))\n" "svfloat32_t svmla_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_z)))\n" "svfloat16_t svmla_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_m)))\n" "svuint8_t svmla_u8_m(svbool_t, svuint8_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_m)))\n" "svuint32_t svmla_u32_m(svbool_t, svuint32_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_m)))\n" "svuint64_t svmla_u64_m(svbool_t, svuint64_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_m)))\n" "svuint16_t svmla_u16_m(svbool_t, svuint16_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_m)))\n" "svint8_t svmla_s8_m(svbool_t, svint8_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_m)))\n" "svint32_t svmla_s32_m(svbool_t, svint32_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_m)))\n" "svint64_t svmla_s64_m(svbool_t, svint64_t, svint64_t, svint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_m)))\n" "svint16_t svmla_s16_m(svbool_t, svint16_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_x)))\n" "svuint8_t svmla_u8_x(svbool_t, svuint8_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_x)))\n" "svuint32_t svmla_u32_x(svbool_t, svuint32_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_x)))\n" "svuint64_t svmla_u64_x(svbool_t, svuint64_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_x)))\n" "svuint16_t svmla_u16_x(svbool_t, svuint16_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_x)))\n" "svint8_t svmla_s8_x(svbool_t, svint8_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_x)))\n" "svint32_t svmla_s32_x(svbool_t, svint32_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_x)))\n" "svint64_t svmla_s64_x(svbool_t, svint64_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_x)))\n" "svint16_t svmla_s16_x(svbool_t, svint16_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_z)))\n" "svuint8_t svmla_u8_z(svbool_t, svuint8_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_z)))\n" "svuint32_t svmla_u32_z(svbool_t, svuint32_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_z)))\n" "svuint64_t svmla_u64_z(svbool_t, svuint64_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_z)))\n" "svuint16_t svmla_u16_z(svbool_t, svuint16_t, svuint16_t, svuint16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_z)))\n" "svint8_t svmla_s8_z(svbool_t, svint8_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_z)))\n" "svint32_t svmla_s32_z(svbool_t, svint32_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_z)))\n" "svint64_t svmla_s64_z(svbool_t, svint64_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_z)))\n" "svint16_t svmla_s16_z(svbool_t, svint16_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f64)))\n" "svfloat64_t svmla_lane_f64(svfloat64_t, svfloat64_t, svfloat64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f32)))\n" "svfloat32_t svmla_lane_f32(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f16)))\n" "svfloat16_t svmla_lane_f16(svfloat16_t, svfloat16_t, svfloat16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_m)))\n" "svfloat64_t svmls_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_m)))\n" "svfloat32_t svmls_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_m)))\n" "svfloat16_t svmls_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_x)))\n" "svfloat64_t svmls_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_x)))\n" "svfloat32_t svmls_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_x)))\n" "svfloat16_t svmls_n_f16_x(svbool_t, svfloat16_t, 
svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_z)))\n" "svfloat64_t svmls_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_z)))\n" "svfloat32_t svmls_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_z)))\n" "svfloat16_t svmls_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_m)))\n" "svuint8_t svmls_n_u8_m(svbool_t, svuint8_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_m)))\n" "svuint32_t svmls_n_u32_m(svbool_t, svuint32_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_m)))\n" "svuint64_t svmls_n_u64_m(svbool_t, svuint64_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_m)))\n" "svuint16_t svmls_n_u16_m(svbool_t, svuint16_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_m)))\n" "svint8_t svmls_n_s8_m(svbool_t, svint8_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_m)))\n" "svint32_t svmls_n_s32_m(svbool_t, svint32_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_m)))\n" "svint64_t svmls_n_s64_m(svbool_t, svint64_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_m)))\n" "svint16_t svmls_n_s16_m(svbool_t, svint16_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_x)))\n" "svuint8_t svmls_n_u8_x(svbool_t, svuint8_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_x)))\n" "svuint32_t svmls_n_u32_x(svbool_t, svuint32_t, 
svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_x)))\n" "svuint64_t svmls_n_u64_x(svbool_t, svuint64_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_x)))\n" "svuint16_t svmls_n_u16_x(svbool_t, svuint16_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_x)))\n" "svint8_t svmls_n_s8_x(svbool_t, svint8_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_x)))\n" "svint32_t svmls_n_s32_x(svbool_t, svint32_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_x)))\n" "svint64_t svmls_n_s64_x(svbool_t, svint64_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_x)))\n" "svint16_t svmls_n_s16_x(svbool_t, svint16_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_z)))\n" "svuint8_t svmls_n_u8_z(svbool_t, svuint8_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_z)))\n" "svuint32_t svmls_n_u32_z(svbool_t, svuint32_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_z)))\n" "svuint64_t svmls_n_u64_z(svbool_t, svuint64_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_z)))\n" "svuint16_t svmls_n_u16_z(svbool_t, svuint16_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_z)))\n" "svint8_t svmls_n_s8_z(svbool_t, svint8_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_z)))\n" "svint32_t svmls_n_s32_z(svbool_t, svint32_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_z)))\n" "svint64_t svmls_n_s64_z(svbool_t, svint64_t, svint64_t, int64_t);\n" 
"__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_z)))\n" "svint16_t svmls_n_s16_z(svbool_t, svint16_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_m)))\n" "svfloat64_t svmls_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_m)))\n" "svfloat32_t svmls_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_m)))\n" "svfloat16_t svmls_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_x)))\n" "svfloat64_t svmls_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_x)))\n" "svfloat32_t svmls_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_x)))\n" "svfloat16_t svmls_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_z)))\n" "svfloat64_t svmls_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_z)))\n" "svfloat32_t svmls_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_z)))\n" "svfloat16_t svmls_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_m)))\n" "svuint8_t svmls_u8_m(svbool_t, svuint8_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_m)))\n" "svuint32_t svmls_u32_m(svbool_t, svuint32_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_m)))\n" "svuint64_t svmls_u64_m(svbool_t, svuint64_t, 
svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_m)))\n" "svuint16_t svmls_u16_m(svbool_t, svuint16_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_m)))\n" "svint8_t svmls_s8_m(svbool_t, svint8_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_m)))\n" "svint32_t svmls_s32_m(svbool_t, svint32_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_m)))\n" "svint64_t svmls_s64_m(svbool_t, svint64_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_m)))\n" "svint16_t svmls_s16_m(svbool_t, svint16_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_x)))\n" "svuint8_t svmls_u8_x(svbool_t, svuint8_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_x)))\n" "svuint32_t svmls_u32_x(svbool_t, svuint32_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_x)))\n" "svuint64_t svmls_u64_x(svbool_t, svuint64_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_x)))\n" "svuint16_t svmls_u16_x(svbool_t, svuint16_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_x)))\n" "svint8_t svmls_s8_x(svbool_t, svint8_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_x)))\n" "svint32_t svmls_s32_x(svbool_t, svint32_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_x)))\n" "svint64_t svmls_s64_x(svbool_t, svint64_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_x)))\n" "svint16_t svmls_s16_x(svbool_t, svint16_t, svint16_t, svint16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_z)))\n" "svuint8_t svmls_u8_z(svbool_t, svuint8_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_z)))\n" "svuint32_t svmls_u32_z(svbool_t, svuint32_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_z)))\n" "svuint64_t svmls_u64_z(svbool_t, svuint64_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_z)))\n" "svuint16_t svmls_u16_z(svbool_t, svuint16_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_z)))\n" "svint8_t svmls_s8_z(svbool_t, svint8_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_z)))\n" "svint32_t svmls_s32_z(svbool_t, svint32_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_z)))\n" "svint64_t svmls_s64_z(svbool_t, svint64_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_z)))\n" "svint16_t svmls_s16_z(svbool_t, svint16_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f64)))\n" "svfloat64_t svmls_lane_f64(svfloat64_t, svfloat64_t, svfloat64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f32)))\n" "svfloat32_t svmls_lane_f32(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f16)))\n" "svfloat16_t svmls_lane_f16(svfloat16_t, svfloat16_t, svfloat16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmov_b_z)))\n" "svbool_t svmov_b_z(svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_m)))\n" "svfloat64_t svmsb_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_m)))\n" "svfloat32_t svmsb_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_m)))\n" "svfloat16_t svmsb_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_x)))\n" "svfloat64_t svmsb_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_x)))\n" "svfloat32_t svmsb_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_x)))\n" "svfloat16_t svmsb_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_z)))\n" "svfloat64_t svmsb_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_z)))\n" "svfloat32_t svmsb_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_z)))\n" "svfloat16_t svmsb_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_m)))\n" "svuint8_t svmsb_n_u8_m(svbool_t, svuint8_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_m)))\n" "svuint32_t svmsb_n_u32_m(svbool_t, svuint32_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_m)))\n" "svuint64_t svmsb_n_u64_m(svbool_t, svuint64_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_m)))\n" "svuint16_t svmsb_n_u16_m(svbool_t, svuint16_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_m)))\n" "svint8_t svmsb_n_s8_m(svbool_t, 
svint8_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_m)))\n" "svint32_t svmsb_n_s32_m(svbool_t, svint32_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_m)))\n" "svint64_t svmsb_n_s64_m(svbool_t, svint64_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_m)))\n" "svint16_t svmsb_n_s16_m(svbool_t, svint16_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_x)))\n" "svuint8_t svmsb_n_u8_x(svbool_t, svuint8_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_x)))\n" "svuint32_t svmsb_n_u32_x(svbool_t, svuint32_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_x)))\n" "svuint64_t svmsb_n_u64_x(svbool_t, svuint64_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_x)))\n" "svuint16_t svmsb_n_u16_x(svbool_t, svuint16_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_x)))\n" "svint8_t svmsb_n_s8_x(svbool_t, svint8_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_x)))\n" "svint32_t svmsb_n_s32_x(svbool_t, svint32_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_x)))\n" "svint64_t svmsb_n_s64_x(svbool_t, svint64_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_x)))\n" "svint16_t svmsb_n_s16_x(svbool_t, svint16_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_z)))\n" "svuint8_t svmsb_n_u8_z(svbool_t, svuint8_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_z)))\n" "svuint32_t svmsb_n_u32_z(svbool_t, svuint32_t, svuint32_t, 
uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_z)))\n" "svuint64_t svmsb_n_u64_z(svbool_t, svuint64_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_z)))\n" "svuint16_t svmsb_n_u16_z(svbool_t, svuint16_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_z)))\n" "svint8_t svmsb_n_s8_z(svbool_t, svint8_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_z)))\n" "svint32_t svmsb_n_s32_z(svbool_t, svint32_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_z)))\n" "svint64_t svmsb_n_s64_z(svbool_t, svint64_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_z)))\n" "svint16_t svmsb_n_s16_z(svbool_t, svint16_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_m)))\n" "svfloat64_t svmsb_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_m)))\n" "svfloat32_t svmsb_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_m)))\n" "svfloat16_t svmsb_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_x)))\n" "svfloat64_t svmsb_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_x)))\n" "svfloat32_t svmsb_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_x)))\n" "svfloat16_t svmsb_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_z)))\n" "svfloat64_t svmsb_f64_z(svbool_t, svfloat64_t, 
svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_z)))\n" "svfloat32_t svmsb_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_z)))\n" "svfloat16_t svmsb_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_m)))\n" "svuint8_t svmsb_u8_m(svbool_t, svuint8_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_m)))\n" "svuint32_t svmsb_u32_m(svbool_t, svuint32_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_m)))\n" "svuint64_t svmsb_u64_m(svbool_t, svuint64_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_m)))\n" "svuint16_t svmsb_u16_m(svbool_t, svuint16_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_m)))\n" "svint8_t svmsb_s8_m(svbool_t, svint8_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_m)))\n" "svint32_t svmsb_s32_m(svbool_t, svint32_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_m)))\n" "svint64_t svmsb_s64_m(svbool_t, svint64_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_m)))\n" "svint16_t svmsb_s16_m(svbool_t, svint16_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_x)))\n" "svuint8_t svmsb_u8_x(svbool_t, svuint8_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_x)))\n" "svuint32_t svmsb_u32_x(svbool_t, svuint32_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_x)))\n" "svuint64_t svmsb_u64_x(svbool_t, svuint64_t, svuint64_t, svuint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_x)))\n" "svuint16_t svmsb_u16_x(svbool_t, svuint16_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_x)))\n" "svint8_t svmsb_s8_x(svbool_t, svint8_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_x)))\n" "svint32_t svmsb_s32_x(svbool_t, svint32_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_x)))\n" "svint64_t svmsb_s64_x(svbool_t, svint64_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_x)))\n" "svint16_t svmsb_s16_x(svbool_t, svint16_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_z)))\n" "svuint8_t svmsb_u8_z(svbool_t, svuint8_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_z)))\n" "svuint32_t svmsb_u32_z(svbool_t, svuint32_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_z)))\n" "svuint64_t svmsb_u64_z(svbool_t, svuint64_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_z)))\n" "svuint16_t svmsb_u16_z(svbool_t, svuint16_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_z)))\n" "svint8_t svmsb_s8_z(svbool_t, svint8_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_z)))\n" "svint32_t svmsb_s32_z(svbool_t, svint32_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_z)))\n" "svint64_t svmsb_s64_z(svbool_t, svint64_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_z)))\n" "svint16_t svmsb_s16_z(svbool_t, svint16_t, svint16_t, svint16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_m)))\n" "svfloat64_t svmul_n_f64_m(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_m)))\n" "svfloat32_t svmul_n_f32_m(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_m)))\n" "svfloat16_t svmul_n_f16_m(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_x)))\n" "svfloat64_t svmul_n_f64_x(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_x)))\n" "svfloat32_t svmul_n_f32_x(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_x)))\n" "svfloat16_t svmul_n_f16_x(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_z)))\n" "svfloat64_t svmul_n_f64_z(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_z)))\n" "svfloat32_t svmul_n_f32_z(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_z)))\n" "svfloat16_t svmul_n_f16_z(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_m)))\n" "svuint8_t svmul_n_u8_m(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_m)))\n" "svuint32_t svmul_n_u32_m(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_m)))\n" "svuint64_t svmul_n_u64_m(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_m)))\n" "svuint16_t svmul_n_u16_m(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_m)))\n" "svint8_t svmul_n_s8_m(svbool_t, svint8_t, 
int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_m)))\n" "svint32_t svmul_n_s32_m(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_m)))\n" "svint64_t svmul_n_s64_m(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_m)))\n" "svint16_t svmul_n_s16_m(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_x)))\n" "svuint8_t svmul_n_u8_x(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_x)))\n" "svuint32_t svmul_n_u32_x(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_x)))\n" "svuint64_t svmul_n_u64_x(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_x)))\n" "svuint16_t svmul_n_u16_x(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_x)))\n" "svint8_t svmul_n_s8_x(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_x)))\n" "svint32_t svmul_n_s32_x(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_x)))\n" "svint64_t svmul_n_s64_x(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_x)))\n" "svint16_t svmul_n_s16_x(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_z)))\n" "svuint8_t svmul_n_u8_z(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_z)))\n" "svuint32_t svmul_n_u32_z(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_z)))\n" "svuint64_t svmul_n_u64_z(svbool_t, svuint64_t, uint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_z)))\n" "svuint16_t svmul_n_u16_z(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_z)))\n" "svint8_t svmul_n_s8_z(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_z)))\n" "svint32_t svmul_n_s32_z(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_z)))\n" "svint64_t svmul_n_s64_z(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_z)))\n" "svint16_t svmul_n_s16_z(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_m)))\n" "svfloat64_t svmul_f64_m(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_m)))\n" "svfloat32_t svmul_f32_m(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_m)))\n" "svfloat16_t svmul_f16_m(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_x)))\n" "svfloat64_t svmul_f64_x(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_x)))\n" "svfloat32_t svmul_f32_x(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_x)))\n" "svfloat16_t svmul_f16_x(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_z)))\n" "svfloat64_t svmul_f64_z(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_z)))\n" "svfloat32_t svmul_f32_z(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_z)))\n" "svfloat16_t svmul_f16_z(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_m)))\n" "svuint8_t svmul_u8_m(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_m)))\n" "svuint32_t svmul_u32_m(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_m)))\n" "svuint64_t svmul_u64_m(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_m)))\n" "svuint16_t svmul_u16_m(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_m)))\n" "svint8_t svmul_s8_m(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_m)))\n" "svint32_t svmul_s32_m(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_m)))\n" "svint64_t svmul_s64_m(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_m)))\n" "svint16_t svmul_s16_m(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_x)))\n" "svuint8_t svmul_u8_x(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_x)))\n" "svuint32_t svmul_u32_x(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_x)))\n" "svuint64_t svmul_u64_x(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_x)))\n" "svuint16_t svmul_u16_x(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_x)))\n" "svint8_t svmul_s8_x(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_x)))\n" "svint32_t svmul_s32_x(svbool_t, svint32_t, svint32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_x)))\n" "svint64_t svmul_s64_x(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_x)))\n" "svint16_t svmul_s16_x(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_z)))\n" "svuint8_t svmul_u8_z(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_z)))\n" "svuint32_t svmul_u32_z(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_z)))\n" "svuint64_t svmul_u64_z(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_z)))\n" "svuint16_t svmul_u16_z(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_z)))\n" "svint8_t svmul_s8_z(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_z)))\n" "svint32_t svmul_s32_z(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_z)))\n" "svint64_t svmul_s64_z(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_z)))\n" "svint16_t svmul_s16_z(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f64)))\n" "svfloat64_t svmul_lane_f64(svfloat64_t, svfloat64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f32)))\n" "svfloat32_t svmul_lane_f32(svfloat32_t, svfloat32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f16)))\n" "svfloat16_t svmul_lane_f16(svfloat16_t, svfloat16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_m)))\n" "svint8_t svmulh_n_s8_m(svbool_t, svint8_t, int8_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_m)))\n" "svint32_t svmulh_n_s32_m(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_m)))\n" "svint64_t svmulh_n_s64_m(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_m)))\n" "svint16_t svmulh_n_s16_m(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_x)))\n" "svint8_t svmulh_n_s8_x(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_x)))\n" "svint32_t svmulh_n_s32_x(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_x)))\n" "svint64_t svmulh_n_s64_x(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_x)))\n" "svint16_t svmulh_n_s16_x(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_z)))\n" "svint8_t svmulh_n_s8_z(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_z)))\n" "svint32_t svmulh_n_s32_z(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_z)))\n" "svint64_t svmulh_n_s64_z(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_z)))\n" "svint16_t svmulh_n_s16_z(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_m)))\n" "svuint8_t svmulh_n_u8_m(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_m)))\n" "svuint32_t svmulh_n_u32_m(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_m)))\n" "svuint64_t svmulh_n_u64_m(svbool_t, svuint64_t, uint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_m)))\n" "svuint16_t svmulh_n_u16_m(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_x)))\n" "svuint8_t svmulh_n_u8_x(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_x)))\n" "svuint32_t svmulh_n_u32_x(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_x)))\n" "svuint64_t svmulh_n_u64_x(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_x)))\n" "svuint16_t svmulh_n_u16_x(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_z)))\n" "svuint8_t svmulh_n_u8_z(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_z)))\n" "svuint32_t svmulh_n_u32_z(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_z)))\n" "svuint64_t svmulh_n_u64_z(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_z)))\n" "svuint16_t svmulh_n_u16_z(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_m)))\n" "svint8_t svmulh_s8_m(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_m)))\n" "svint32_t svmulh_s32_m(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_m)))\n" "svint64_t svmulh_s64_m(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_m)))\n" "svint16_t svmulh_s16_m(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_x)))\n" "svint8_t svmulh_s8_x(svbool_t, svint8_t, svint8_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_x)))\n" "svint32_t svmulh_s32_x(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_x)))\n" "svint64_t svmulh_s64_x(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_x)))\n" "svint16_t svmulh_s16_x(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_z)))\n" "svint8_t svmulh_s8_z(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_z)))\n" "svint32_t svmulh_s32_z(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_z)))\n" "svint64_t svmulh_s64_z(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_z)))\n" "svint16_t svmulh_s16_z(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_m)))\n" "svuint8_t svmulh_u8_m(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_m)))\n" "svuint32_t svmulh_u32_m(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_m)))\n" "svuint64_t svmulh_u64_m(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_m)))\n" "svuint16_t svmulh_u16_m(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_x)))\n" "svuint8_t svmulh_u8_x(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_x)))\n" "svuint32_t svmulh_u32_x(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_x)))\n" "svuint64_t svmulh_u64_x(svbool_t, svuint64_t, svuint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_x)))\n" "svuint16_t svmulh_u16_x(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_z)))\n" "svuint8_t svmulh_u8_z(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_z)))\n" "svuint32_t svmulh_u32_z(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_z)))\n" "svuint64_t svmulh_u64_z(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_z)))\n" "svuint16_t svmulh_u16_z(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_m)))\n" "svfloat64_t svmulx_n_f64_m(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_m)))\n" "svfloat32_t svmulx_n_f32_m(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_m)))\n" "svfloat16_t svmulx_n_f16_m(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_x)))\n" "svfloat64_t svmulx_n_f64_x(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_x)))\n" "svfloat32_t svmulx_n_f32_x(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_x)))\n" "svfloat16_t svmulx_n_f16_x(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_z)))\n" "svfloat64_t svmulx_n_f64_z(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_z)))\n" "svfloat32_t svmulx_n_f32_z(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_z)))\n" "svfloat16_t 
svmulx_n_f16_z(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_m)))\n" "svfloat64_t svmulx_f64_m(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_m)))\n" "svfloat32_t svmulx_f32_m(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_m)))\n" "svfloat16_t svmulx_f16_m(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_x)))\n" "svfloat64_t svmulx_f64_x(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_x)))\n" "svfloat32_t svmulx_f32_x(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_x)))\n" "svfloat16_t svmulx_f16_x(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_z)))\n" "svfloat64_t svmulx_f64_z(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_z)))\n" "svfloat32_t svmulx_f32_z(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_z)))\n" "svfloat16_t svmulx_f16_z(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnand_b_z)))\n" "svbool_t svnand_b_z(svbool_t, svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_m)))\n" "svfloat64_t svneg_f64_m(svfloat64_t, svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_m)))\n" "svfloat32_t svneg_f32_m(svfloat32_t, svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_m)))\n" "svfloat16_t svneg_f16_m(svfloat16_t, svbool_t, svfloat16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_x)))\n" "svfloat64_t svneg_f64_x(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_x)))\n" "svfloat32_t svneg_f32_x(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_x)))\n" "svfloat16_t svneg_f16_x(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_z)))\n" "svfloat64_t svneg_f64_z(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_z)))\n" "svfloat32_t svneg_f32_z(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_z)))\n" "svfloat16_t svneg_f16_z(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_m)))\n" "svint8_t svneg_s8_m(svint8_t, svbool_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_m)))\n" "svint32_t svneg_s32_m(svint32_t, svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_m)))\n" "svint64_t svneg_s64_m(svint64_t, svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_m)))\n" "svint16_t svneg_s16_m(svint16_t, svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_x)))\n" "svint8_t svneg_s8_x(svbool_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_x)))\n" "svint32_t svneg_s32_x(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_x)))\n" "svint64_t svneg_s64_x(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_x)))\n" "svint16_t svneg_s16_x(svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_z)))\n" "svint8_t svneg_s8_z(svbool_t, svint8_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_z)))\n" "svint32_t svneg_s32_z(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_z)))\n" "svint64_t svneg_s64_z(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_z)))\n" "svint16_t svneg_s16_z(svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_m)))\n" "svfloat64_t svnmad_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_m)))\n" "svfloat32_t svnmad_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_m)))\n" "svfloat16_t svnmad_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_x)))\n" "svfloat64_t svnmad_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_x)))\n" "svfloat32_t svnmad_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_x)))\n" "svfloat16_t svnmad_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_z)))\n" "svfloat64_t svnmad_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_z)))\n" "svfloat32_t svnmad_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_z)))\n" "svfloat16_t svnmad_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_m)))\n" "svfloat64_t svnmad_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_m)))\n" "svfloat32_t svnmad_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_m)))\n" "svfloat16_t svnmad_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_x)))\n" "svfloat64_t svnmad_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_x)))\n" "svfloat32_t svnmad_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_x)))\n" "svfloat16_t svnmad_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_z)))\n" "svfloat64_t svnmad_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_z)))\n" "svfloat32_t svnmad_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_z)))\n" "svfloat16_t svnmad_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_m)))\n" "svfloat64_t svnmla_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_m)))\n" "svfloat32_t svnmla_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_m)))\n" "svfloat16_t svnmla_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_x)))\n" "svfloat64_t svnmla_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_x)))\n" 
"svfloat32_t svnmla_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_x)))\n" "svfloat16_t svnmla_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_z)))\n" "svfloat64_t svnmla_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_z)))\n" "svfloat32_t svnmla_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_z)))\n" "svfloat16_t svnmla_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_m)))\n" "svfloat64_t svnmla_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_m)))\n" "svfloat32_t svnmla_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_m)))\n" "svfloat16_t svnmla_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_x)))\n" "svfloat64_t svnmla_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_x)))\n" "svfloat32_t svnmla_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_x)))\n" "svfloat16_t svnmla_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_z)))\n" "svfloat64_t svnmla_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_z)))\n" "svfloat32_t svnmla_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" 
"__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_z)))\n" "svfloat16_t svnmla_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_m)))\n" "svfloat64_t svnmls_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_m)))\n" "svfloat32_t svnmls_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_m)))\n" "svfloat16_t svnmls_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_x)))\n" "svfloat64_t svnmls_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_x)))\n" "svfloat32_t svnmls_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_x)))\n" "svfloat16_t svnmls_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_z)))\n" "svfloat64_t svnmls_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_z)))\n" "svfloat32_t svnmls_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_z)))\n" "svfloat16_t svnmls_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_m)))\n" "svfloat64_t svnmls_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_m)))\n" "svfloat32_t svnmls_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_m)))\n" "svfloat16_t svnmls_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_x)))\n" "svfloat64_t svnmls_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_x)))\n" "svfloat32_t svnmls_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_x)))\n" "svfloat16_t svnmls_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_z)))\n" "svfloat64_t svnmls_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_z)))\n" "svfloat32_t svnmls_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_z)))\n" "svfloat16_t svnmls_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_m)))\n" "svfloat64_t svnmsb_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_m)))\n" "svfloat32_t svnmsb_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_m)))\n" "svfloat16_t svnmsb_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_x)))\n" "svfloat64_t svnmsb_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_x)))\n" "svfloat32_t svnmsb_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_x)))\n" 
"svfloat16_t svnmsb_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_z)))\n" "svfloat64_t svnmsb_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_z)))\n" "svfloat32_t svnmsb_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_z)))\n" "svfloat16_t svnmsb_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_m)))\n" "svfloat64_t svnmsb_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_m)))\n" "svfloat32_t svnmsb_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_m)))\n" "svfloat16_t svnmsb_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_x)))\n" "svfloat64_t svnmsb_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_x)))\n" "svfloat32_t svnmsb_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_x)))\n" "svfloat16_t svnmsb_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_z)))\n" "svfloat64_t svnmsb_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_z)))\n" "svfloat32_t svnmsb_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_z)))\n" "svfloat16_t svnmsb_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svnor_b_z)))\n" "svbool_t svnor_b_z(svbool_t, svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_b_z)))\n" "svbool_t svnot_b_z(svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_m)))\n" "svuint8_t svnot_u8_m(svuint8_t, svbool_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_m)))\n" "svuint32_t svnot_u32_m(svuint32_t, svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_m)))\n" "svuint64_t svnot_u64_m(svuint64_t, svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_m)))\n" "svuint16_t svnot_u16_m(svuint16_t, svbool_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_m)))\n" "svint8_t svnot_s8_m(svint8_t, svbool_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_m)))\n" "svint32_t svnot_s32_m(svint32_t, svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_m)))\n" "svint64_t svnot_s64_m(svint64_t, svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_m)))\n" "svint16_t svnot_s16_m(svint16_t, svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_x)))\n" "svuint8_t svnot_u8_x(svbool_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_x)))\n" "svuint32_t svnot_u32_x(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_x)))\n" "svuint64_t svnot_u64_x(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_x)))\n" "svuint16_t svnot_u16_x(svbool_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_x)))\n" "svint8_t svnot_s8_x(svbool_t, svint8_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_x)))\n" "svint32_t svnot_s32_x(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_x)))\n" "svint64_t svnot_s64_x(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_x)))\n" "svint16_t svnot_s16_x(svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_z)))\n" "svuint8_t svnot_u8_z(svbool_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_z)))\n" "svuint32_t svnot_u32_z(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_z)))\n" "svuint64_t svnot_u64_z(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_z)))\n" "svuint16_t svnot_u16_z(svbool_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_z)))\n" "svint8_t svnot_s8_z(svbool_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_z)))\n" "svint32_t svnot_s32_z(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_z)))\n" "svint64_t svnot_s64_z(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_z)))\n" "svint16_t svnot_s16_z(svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorn_b_z)))\n" "svbool_t svorn_b_z(svbool_t, svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_b_z)))\n" "svbool_t svorr_b_z(svbool_t, svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_m)))\n" "svuint8_t svorr_n_u8_m(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_m)))\n" "svuint32_t svorr_n_u32_m(svbool_t, svuint32_t, uint32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_m)))\n" "svuint64_t svorr_n_u64_m(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_m)))\n" "svuint16_t svorr_n_u16_m(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_m)))\n" "svint8_t svorr_n_s8_m(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_m)))\n" "svint32_t svorr_n_s32_m(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_m)))\n" "svint64_t svorr_n_s64_m(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_m)))\n" "svint16_t svorr_n_s16_m(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_x)))\n" "svuint8_t svorr_n_u8_x(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_x)))\n" "svuint32_t svorr_n_u32_x(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_x)))\n" "svuint64_t svorr_n_u64_x(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_x)))\n" "svuint16_t svorr_n_u16_x(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_x)))\n" "svint8_t svorr_n_s8_x(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_x)))\n" "svint32_t svorr_n_s32_x(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_x)))\n" "svint64_t svorr_n_s64_x(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_x)))\n" "svint16_t svorr_n_s16_x(svbool_t, svint16_t, int16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_z)))\n" "svuint8_t svorr_n_u8_z(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_z)))\n" "svuint32_t svorr_n_u32_z(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_z)))\n" "svuint64_t svorr_n_u64_z(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_z)))\n" "svuint16_t svorr_n_u16_z(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_z)))\n" "svint8_t svorr_n_s8_z(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_z)))\n" "svint32_t svorr_n_s32_z(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_z)))\n" "svint64_t svorr_n_s64_z(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_z)))\n" "svint16_t svorr_n_s16_z(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_m)))\n" "svuint8_t svorr_u8_m(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_m)))\n" "svuint32_t svorr_u32_m(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_m)))\n" "svuint64_t svorr_u64_m(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_m)))\n" "svuint16_t svorr_u16_m(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_m)))\n" "svint8_t svorr_s8_m(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_m)))\n" "svint32_t svorr_s32_m(svbool_t, svint32_t, svint32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_m)))\n" "svint64_t svorr_s64_m(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_m)))\n" "svint16_t svorr_s16_m(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_x)))\n" "svuint8_t svorr_u8_x(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_x)))\n" "svuint32_t svorr_u32_x(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_x)))\n" "svuint64_t svorr_u64_x(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_x)))\n" "svuint16_t svorr_u16_x(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_x)))\n" "svint8_t svorr_s8_x(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_x)))\n" "svint32_t svorr_s32_x(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_x)))\n" "svint64_t svorr_s64_x(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_x)))\n" "svint16_t svorr_s16_x(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_z)))\n" "svuint8_t svorr_u8_z(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_z)))\n" "svuint32_t svorr_u32_z(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_z)))\n" "svuint64_t svorr_u64_z(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_z)))\n" "svuint16_t svorr_u16_z(svbool_t, svuint16_t, svuint16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_z)))\n" "svint8_t svorr_s8_z(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_z)))\n" "svint32_t svorr_s32_z(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_z)))\n" "svint64_t svorr_s64_z(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_z)))\n" "svint16_t svorr_s16_z(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u8)))\n" "uint8_t svorv_u8(svbool_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u32)))\n" "uint32_t svorv_u32(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u64)))\n" "uint64_t svorv_u64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u16)))\n" "uint16_t svorv_u16(svbool_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s8)))\n" "int8_t svorv_s8(svbool_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s32)))\n" "int32_t svorv_s32(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s64)))\n" "int64_t svorv_s64(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s16)))\n" "int16_t svorv_s16(svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpfalse_b)))\n" "svbool_t svpfalse_b(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpfirst_b)))\n" "svbool_t svpfirst_b(svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpnext_b8)))\n" "svbool_t svpnext_b8(svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpnext_b32)))\n" "svbool_t svpnext_b32(svbool_t, svbool_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svpnext_b64)))\n" "svbool_t svpnext_b64(svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpnext_b16)))\n" "svbool_t svpnext_b16(svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb)))\n" "void svprfb(svbool_t, void const *, enum svprfop);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32base)))\n" "void svprfb_gather_u32base(svbool_t, svuint32_t, enum svprfop);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64base)))\n" "void svprfb_gather_u64base(svbool_t, svuint64_t, enum svprfop);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32base_offset)))\n" "void svprfb_gather_u32base_offset(svbool_t, svuint32_t, int64_t, enum svprfop);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64base_offset)))\n" "void svprfb_gather_u64base_offset(svbool_t, svuint64_t, int64_t, enum svprfop);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_s32offset)))\n" "void svprfb_gather_s32offset(svbool_t, void const *, svint32_t, enum svprfop);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32offset)))\n" "void svprfb_gather_u32offset(svbool_t, void const *, svuint32_t, enum svprfop);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_s64offset)))\n" "void svprfb_gather_s64offset(svbool_t, void const *, svint64_t, enum svprfop);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64offset)))\n" "void svprfb_gather_u64offset(svbool_t, void const *, svuint64_t, enum svprfop);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_vnum)))\n" "void svprfb_vnum(svbool_t, void const *, int64_t, enum svprfop);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd)))\n" "void svprfd(svbool_t, void 
const *, enum svprfop);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32base)))\n" "void svprfd_gather_u32base(svbool_t, svuint32_t, enum svprfop);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64base)))\n" "void svprfd_gather_u64base(svbool_t, svuint64_t, enum svprfop);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32base_index)))\n" "void svprfd_gather_u32base_index(svbool_t, svuint32_t, int64_t, enum svprfop);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64base_index)))\n" "void svprfd_gather_u64base_index(svbool_t, svuint64_t, int64_t, enum svprfop);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_s32index)))\n" "void svprfd_gather_s32index(svbool_t, void const *, svint32_t, enum svprfop);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32index)))\n" "void svprfd_gather_u32index(svbool_t, void const *, svuint32_t, enum svprfop);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_s64index)))\n" "void svprfd_gather_s64index(svbool_t, void const *, svint64_t, enum svprfop);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64index)))\n" "void svprfd_gather_u64index(svbool_t, void const *, svuint64_t, enum svprfop);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_vnum)))\n" "void svprfd_vnum(svbool_t, void const *, int64_t, enum svprfop);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh)))\n" "void svprfh(svbool_t, void const *, enum svprfop);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32base)))\n" "void svprfh_gather_u32base(svbool_t, svuint32_t, enum svprfop);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64base)))\n" "void svprfh_gather_u64base(svbool_t, svuint64_t, enum svprfop);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32base_index)))\n" "void svprfh_gather_u32base_index(svbool_t, svuint32_t, int64_t, enum svprfop);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64base_index)))\n" "void svprfh_gather_u64base_index(svbool_t, svuint64_t, int64_t, enum svprfop);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_s32index)))\n" "void svprfh_gather_s32index(svbool_t, void const *, svint32_t, enum svprfop);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32index)))\n" "void svprfh_gather_u32index(svbool_t, void const *, svuint32_t, enum svprfop);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_s64index)))\n" "void svprfh_gather_s64index(svbool_t, void const *, svint64_t, enum svprfop);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64index)))\n" "void svprfh_gather_u64index(svbool_t, void const *, svuint64_t, enum svprfop);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_vnum)))\n" "void svprfh_vnum(svbool_t, void const *, int64_t, enum svprfop);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw)))\n" "void svprfw(svbool_t, void const *, enum svprfop);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32base)))\n" "void svprfw_gather_u32base(svbool_t, svuint32_t, enum svprfop);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64base)))\n" "void svprfw_gather_u64base(svbool_t, svuint64_t, enum svprfop);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32base_index)))\n" "void svprfw_gather_u32base_index(svbool_t, svuint32_t, int64_t, enum svprfop);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64base_index)))\n" "void svprfw_gather_u64base_index(svbool_t, svuint64_t, int64_t, enum svprfop);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_s32index)))\n" "void svprfw_gather_s32index(svbool_t, void const *, svint32_t, enum svprfop);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32index)))\n" "void svprfw_gather_u32index(svbool_t, void const *, svuint32_t, enum svprfop);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_s64index)))\n" "void svprfw_gather_s64index(svbool_t, void const *, svint64_t, enum svprfop);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64index)))\n" "void svprfw_gather_u64index(svbool_t, void const *, svuint64_t, enum svprfop);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_vnum)))\n" "void svprfw_vnum(svbool_t, void const *, int64_t, enum svprfop);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptest_any)))\n" "bool svptest_any(svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptest_first)))\n" "bool svptest_first(svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptest_last)))\n" "bool svptest_last(svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_pat_b8)))\n" "svbool_t svptrue_pat_b8(enum svpattern);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_pat_b32)))\n" "svbool_t svptrue_pat_b32(enum svpattern);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_pat_b64)))\n" "svbool_t svptrue_pat_b64(enum svpattern);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_pat_b16)))\n" "svbool_t svptrue_pat_b16(enum svpattern);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_b8)))\n" "svbool_t svptrue_b8(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_b32)))\n" "svbool_t svptrue_b32(void);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_b64)))\n" "svbool_t svptrue_b64(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_b16)))\n" "svbool_t svptrue_b16(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8)))\n" "svint8_t svqadd_n_s8(svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32)))\n" "svint32_t svqadd_n_s32(svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64)))\n" "svint64_t svqadd_n_s64(svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16)))\n" "svint16_t svqadd_n_s16(svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8)))\n" "svuint8_t svqadd_n_u8(svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32)))\n" "svuint32_t svqadd_n_u32(svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64)))\n" "svuint64_t svqadd_n_u64(svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16)))\n" "svuint16_t svqadd_n_u16(svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8)))\n" "svint8_t svqadd_s8(svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32)))\n" "svint32_t svqadd_s32(svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64)))\n" "svint64_t svqadd_s64(svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16)))\n" "svint16_t svqadd_s16(svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8)))\n" "svuint8_t svqadd_u8(svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32)))\n" "svuint32_t svqadd_u32(svuint32_t, svuint32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64)))\n" "svuint64_t svqadd_u64(svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16)))\n" "svuint16_t svqadd_u16(svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_s32)))\n" "int32_t svqdecb_n_s32(int32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_s64)))\n" "int64_t svqdecb_n_s64(int64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_u32)))\n" "uint32_t svqdecb_n_u32(uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_u64)))\n" "uint64_t svqdecb_n_u64(uint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_s32)))\n" "int32_t svqdecb_pat_n_s32(int32_t, enum svpattern, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_s64)))\n" "int64_t svqdecb_pat_n_s64(int64_t, enum svpattern, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_u32)))\n" "uint32_t svqdecb_pat_n_u32(uint32_t, enum svpattern, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_u64)))\n" "uint64_t svqdecb_pat_n_u64(uint64_t, enum svpattern, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_s32)))\n" "int32_t svqdecd_n_s32(int32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_s64)))\n" "int64_t svqdecd_n_s64(int64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_u32)))\n" "uint32_t svqdecd_n_u32(uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_u64)))\n" "uint64_t svqdecd_n_u64(uint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_s64)))\n" "svint64_t svqdecd_s64(svint64_t, 
uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_u64)))\n" "svuint64_t svqdecd_u64(svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_s32)))\n" "int32_t svqdecd_pat_n_s32(int32_t, enum svpattern, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_s64)))\n" "int64_t svqdecd_pat_n_s64(int64_t, enum svpattern, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_u32)))\n" "uint32_t svqdecd_pat_n_u32(uint32_t, enum svpattern, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_u64)))\n" "uint64_t svqdecd_pat_n_u64(uint64_t, enum svpattern, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_s64)))\n" "svint64_t svqdecd_pat_s64(svint64_t, enum svpattern, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_u64)))\n" "svuint64_t svqdecd_pat_u64(svuint64_t, enum svpattern, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_s32)))\n" "int32_t svqdech_n_s32(int32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_s64)))\n" "int64_t svqdech_n_s64(int64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_u32)))\n" "uint32_t svqdech_n_u32(uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_u64)))\n" "uint64_t svqdech_n_u64(uint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_s16)))\n" "svint16_t svqdech_s16(svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_u16)))\n" "svuint16_t svqdech_u16(svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_s32)))\n" "int32_t svqdech_pat_n_s32(int32_t, enum svpattern, uint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_s64)))\n" "int64_t svqdech_pat_n_s64(int64_t, enum svpattern, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_u32)))\n" "uint32_t svqdech_pat_n_u32(uint32_t, enum svpattern, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_u64)))\n" "uint64_t svqdech_pat_n_u64(uint64_t, enum svpattern, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_s16)))\n" "svint16_t svqdech_pat_s16(svint16_t, enum svpattern, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_u16)))\n" "svuint16_t svqdech_pat_u16(svuint16_t, enum svpattern, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b8)))\n" "int32_t svqdecp_n_s32_b8(int32_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b32)))\n" "int32_t svqdecp_n_s32_b32(int32_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b64)))\n" "int32_t svqdecp_n_s32_b64(int32_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b16)))\n" "int32_t svqdecp_n_s32_b16(int32_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b8)))\n" "int64_t svqdecp_n_s64_b8(int64_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b32)))\n" "int64_t svqdecp_n_s64_b32(int64_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b64)))\n" "int64_t svqdecp_n_s64_b64(int64_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b16)))\n" "int64_t svqdecp_n_s64_b16(int64_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b8)))\n" "uint32_t svqdecp_n_u32_b8(uint32_t, svbool_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b32)))\n" "uint32_t svqdecp_n_u32_b32(uint32_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b64)))\n" "uint32_t svqdecp_n_u32_b64(uint32_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b16)))\n" "uint32_t svqdecp_n_u32_b16(uint32_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b8)))\n" "uint64_t svqdecp_n_u64_b8(uint64_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b32)))\n" "uint64_t svqdecp_n_u64_b32(uint64_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b64)))\n" "uint64_t svqdecp_n_u64_b64(uint64_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b16)))\n" "uint64_t svqdecp_n_u64_b16(uint64_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s32)))\n" "svint32_t svqdecp_s32(svint32_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s64)))\n" "svint64_t svqdecp_s64(svint64_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s16)))\n" "svint16_t svqdecp_s16(svint16_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u32)))\n" "svuint32_t svqdecp_u32(svuint32_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u64)))\n" "svuint64_t svqdecp_u64(svuint64_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u16)))\n" "svuint16_t svqdecp_u16(svuint16_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_s32)))\n" "int32_t svqdecw_n_s32(int32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_s64)))\n" "int64_t svqdecw_n_s64(int64_t, uint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_u32)))\n" "uint32_t svqdecw_n_u32(uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_u64)))\n" "uint64_t svqdecw_n_u64(uint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_s32)))\n" "svint32_t svqdecw_s32(svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_u32)))\n" "svuint32_t svqdecw_u32(svuint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_s32)))\n" "int32_t svqdecw_pat_n_s32(int32_t, enum svpattern, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_s64)))\n" "int64_t svqdecw_pat_n_s64(int64_t, enum svpattern, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_u32)))\n" "uint32_t svqdecw_pat_n_u32(uint32_t, enum svpattern, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_u64)))\n" "uint64_t svqdecw_pat_n_u64(uint64_t, enum svpattern, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_s32)))\n" "svint32_t svqdecw_pat_s32(svint32_t, enum svpattern, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_u32)))\n" "svuint32_t svqdecw_pat_u32(svuint32_t, enum svpattern, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_s32)))\n" "int32_t svqincb_n_s32(int32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_s64)))\n" "int64_t svqincb_n_s64(int64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_u32)))\n" "uint32_t svqincb_n_u32(uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_u64)))\n" "uint64_t svqincb_n_u64(uint64_t, uint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_s32)))\n" "int32_t svqincb_pat_n_s32(int32_t, enum svpattern, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_s64)))\n" "int64_t svqincb_pat_n_s64(int64_t, enum svpattern, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_u32)))\n" "uint32_t svqincb_pat_n_u32(uint32_t, enum svpattern, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_u64)))\n" "uint64_t svqincb_pat_n_u64(uint64_t, enum svpattern, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_s32)))\n" "int32_t svqincd_n_s32(int32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_s64)))\n" "int64_t svqincd_n_s64(int64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_u32)))\n" "uint32_t svqincd_n_u32(uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_u64)))\n" "uint64_t svqincd_n_u64(uint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_s64)))\n" "svint64_t svqincd_s64(svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_u64)))\n" "svuint64_t svqincd_u64(svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_s32)))\n" "int32_t svqincd_pat_n_s32(int32_t, enum svpattern, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_s64)))\n" "int64_t svqincd_pat_n_s64(int64_t, enum svpattern, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_u32)))\n" "uint32_t svqincd_pat_n_u32(uint32_t, enum svpattern, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_u64)))\n" "uint64_t svqincd_pat_n_u64(uint64_t, enum svpattern, uint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_s64)))\n" "svint64_t svqincd_pat_s64(svint64_t, enum svpattern, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_u64)))\n" "svuint64_t svqincd_pat_u64(svuint64_t, enum svpattern, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_s32)))\n" "int32_t svqinch_n_s32(int32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_s64)))\n" "int64_t svqinch_n_s64(int64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_u32)))\n" "uint32_t svqinch_n_u32(uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_u64)))\n" "uint64_t svqinch_n_u64(uint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_s16)))\n" "svint16_t svqinch_s16(svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_u16)))\n" "svuint16_t svqinch_u16(svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_s32)))\n" "int32_t svqinch_pat_n_s32(int32_t, enum svpattern, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_s64)))\n" "int64_t svqinch_pat_n_s64(int64_t, enum svpattern, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_u32)))\n" "uint32_t svqinch_pat_n_u32(uint32_t, enum svpattern, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_u64)))\n" "uint64_t svqinch_pat_n_u64(uint64_t, enum svpattern, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_s16)))\n" "svint16_t svqinch_pat_s16(svint16_t, enum svpattern, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_u16)))\n" "svuint16_t svqinch_pat_u16(svuint16_t, enum svpattern, uint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b8)))\n" "int32_t svqincp_n_s32_b8(int32_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b32)))\n" "int32_t svqincp_n_s32_b32(int32_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b64)))\n" "int32_t svqincp_n_s32_b64(int32_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b16)))\n" "int32_t svqincp_n_s32_b16(int32_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b8)))\n" "int64_t svqincp_n_s64_b8(int64_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b32)))\n" "int64_t svqincp_n_s64_b32(int64_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b64)))\n" "int64_t svqincp_n_s64_b64(int64_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b16)))\n" "int64_t svqincp_n_s64_b16(int64_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b8)))\n" "uint32_t svqincp_n_u32_b8(uint32_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b32)))\n" "uint32_t svqincp_n_u32_b32(uint32_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b64)))\n" "uint32_t svqincp_n_u32_b64(uint32_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b16)))\n" "uint32_t svqincp_n_u32_b16(uint32_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b8)))\n" "uint64_t svqincp_n_u64_b8(uint64_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b32)))\n" "uint64_t svqincp_n_u64_b32(uint64_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b64)))\n" "uint64_t 
svqincp_n_u64_b64(uint64_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b16)))\n" "uint64_t svqincp_n_u64_b16(uint64_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s32)))\n" "svint32_t svqincp_s32(svint32_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s64)))\n" "svint64_t svqincp_s64(svint64_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s16)))\n" "svint16_t svqincp_s16(svint16_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u32)))\n" "svuint32_t svqincp_u32(svuint32_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u64)))\n" "svuint64_t svqincp_u64(svuint64_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u16)))\n" "svuint16_t svqincp_u16(svuint16_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_s32)))\n" "int32_t svqincw_n_s32(int32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_s64)))\n" "int64_t svqincw_n_s64(int64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_u32)))\n" "uint32_t svqincw_n_u32(uint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_u64)))\n" "uint64_t svqincw_n_u64(uint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_s32)))\n" "svint32_t svqincw_s32(svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_u32)))\n" "svuint32_t svqincw_u32(svuint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_s32)))\n" "int32_t svqincw_pat_n_s32(int32_t, enum svpattern, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_s64)))\n" "int64_t svqincw_pat_n_s64(int64_t, enum 
svpattern, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_u32)))\n" "uint32_t svqincw_pat_n_u32(uint32_t, enum svpattern, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_u64)))\n" "uint64_t svqincw_pat_n_u64(uint64_t, enum svpattern, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_s32)))\n" "svint32_t svqincw_pat_s32(svint32_t, enum svpattern, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_u32)))\n" "svuint32_t svqincw_pat_u32(svuint32_t, enum svpattern, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8)))\n" "svint8_t svqsub_n_s8(svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32)))\n" "svint32_t svqsub_n_s32(svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64)))\n" "svint64_t svqsub_n_s64(svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16)))\n" "svint16_t svqsub_n_s16(svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8)))\n" "svuint8_t svqsub_n_u8(svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32)))\n" "svuint32_t svqsub_n_u32(svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64)))\n" "svuint64_t svqsub_n_u64(svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16)))\n" "svuint16_t svqsub_n_u16(svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8)))\n" "svint8_t svqsub_s8(svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32)))\n" "svint32_t svqsub_s32(svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64)))\n" "svint64_t 
svqsub_s64(svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16)))\n" "svint16_t svqsub_s16(svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8)))\n" "svuint8_t svqsub_u8(svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32)))\n" "svuint32_t svqsub_u32(svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64)))\n" "svuint64_t svqsub_u64(svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16)))\n" "svuint16_t svqsub_u16(svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_m)))\n" "svuint8_t svrbit_u8_m(svuint8_t, svbool_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_m)))\n" "svuint32_t svrbit_u32_m(svuint32_t, svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_m)))\n" "svuint64_t svrbit_u64_m(svuint64_t, svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_m)))\n" "svuint16_t svrbit_u16_m(svuint16_t, svbool_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_m)))\n" "svint8_t svrbit_s8_m(svint8_t, svbool_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_m)))\n" "svint32_t svrbit_s32_m(svint32_t, svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_m)))\n" "svint64_t svrbit_s64_m(svint64_t, svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_m)))\n" "svint16_t svrbit_s16_m(svint16_t, svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_x)))\n" "svuint8_t svrbit_u8_x(svbool_t, svuint8_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_x)))\n" "svuint32_t svrbit_u32_x(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_x)))\n" "svuint64_t svrbit_u64_x(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_x)))\n" "svuint16_t svrbit_u16_x(svbool_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_x)))\n" "svint8_t svrbit_s8_x(svbool_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_x)))\n" "svint32_t svrbit_s32_x(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_x)))\n" "svint64_t svrbit_s64_x(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_x)))\n" "svint16_t svrbit_s16_x(svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_z)))\n" "svuint8_t svrbit_u8_z(svbool_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_z)))\n" "svuint32_t svrbit_u32_z(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_z)))\n" "svuint64_t svrbit_u64_z(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_z)))\n" "svuint16_t svrbit_u16_z(svbool_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_z)))\n" "svint8_t svrbit_s8_z(svbool_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_z)))\n" "svint32_t svrbit_s32_z(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_z)))\n" "svint64_t svrbit_s64_z(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_z)))\n" "svint16_t svrbit_s16_z(svbool_t, svint16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrdffr)))\n" "svbool_t svrdffr(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrdffr_z)))\n" "svbool_t svrdffr_z(svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f64)))\n" "svfloat64_t svrecpe_f64(svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f32)))\n" "svfloat32_t svrecpe_f32(svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f16)))\n" "svfloat16_t svrecpe_f16(svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f64)))\n" "svfloat64_t svrecps_f64(svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f32)))\n" "svfloat32_t svrecps_f32(svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f16)))\n" "svfloat16_t svrecps_f16(svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_m)))\n" "svfloat64_t svrecpx_f64_m(svfloat64_t, svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_m)))\n" "svfloat32_t svrecpx_f32_m(svfloat32_t, svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_m)))\n" "svfloat16_t svrecpx_f16_m(svfloat16_t, svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_x)))\n" "svfloat64_t svrecpx_f64_x(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_x)))\n" "svfloat32_t svrecpx_f32_x(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_x)))\n" "svfloat16_t svrecpx_f16_x(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_z)))\n" "svfloat64_t svrecpx_f64_z(svbool_t, svfloat64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_z)))\n" "svfloat32_t svrecpx_f32_z(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_z)))\n" "svfloat16_t svrecpx_f16_z(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u8)))\n" "svuint8_t svrev_u8(svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u32)))\n" "svuint32_t svrev_u32(svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u64)))\n" "svuint64_t svrev_u64(svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u16)))\n" "svuint16_t svrev_u16(svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s8)))\n" "svint8_t svrev_s8(svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f64)))\n" "svfloat64_t svrev_f64(svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f32)))\n" "svfloat32_t svrev_f32(svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f16)))\n" "svfloat16_t svrev_f16(svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s32)))\n" "svint32_t svrev_s32(svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s64)))\n" "svint64_t svrev_s64(svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s16)))\n" "svint16_t svrev_s16(svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_b16)))\n" "svbool_t svrev_b16(svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_b32)))\n" "svbool_t svrev_b32(svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_b64)))\n" "svbool_t svrev_b64(svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_b8)))\n" "svbool_t svrev_b8(svbool_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_m)))\n" "svuint32_t svrevb_u32_m(svuint32_t, svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_m)))\n" "svuint64_t svrevb_u64_m(svuint64_t, svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_m)))\n" "svuint16_t svrevb_u16_m(svuint16_t, svbool_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_m)))\n" "svint32_t svrevb_s32_m(svint32_t, svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_m)))\n" "svint64_t svrevb_s64_m(svint64_t, svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_m)))\n" "svint16_t svrevb_s16_m(svint16_t, svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_x)))\n" "svuint32_t svrevb_u32_x(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_x)))\n" "svuint64_t svrevb_u64_x(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_x)))\n" "svuint16_t svrevb_u16_x(svbool_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_x)))\n" "svint32_t svrevb_s32_x(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_x)))\n" "svint64_t svrevb_s64_x(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_x)))\n" "svint16_t svrevb_s16_x(svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_z)))\n" "svuint32_t svrevb_u32_z(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_z)))\n" "svuint64_t svrevb_u64_z(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_z)))\n" "svuint16_t svrevb_u16_z(svbool_t, 
svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_z)))\n" "svint32_t svrevb_s32_z(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_z)))\n" "svint64_t svrevb_s64_z(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_z)))\n" "svint16_t svrevb_s16_z(svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_m)))\n" "svuint32_t svrevh_u32_m(svuint32_t, svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_m)))\n" "svuint64_t svrevh_u64_m(svuint64_t, svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_m)))\n" "svint32_t svrevh_s32_m(svint32_t, svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_m)))\n" "svint64_t svrevh_s64_m(svint64_t, svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_x)))\n" "svuint32_t svrevh_u32_x(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_x)))\n" "svuint64_t svrevh_u64_x(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_x)))\n" "svint32_t svrevh_s32_x(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_x)))\n" "svint64_t svrevh_s64_x(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_z)))\n" "svuint32_t svrevh_u32_z(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_z)))\n" "svuint64_t svrevh_u64_z(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_z)))\n" "svint32_t svrevh_s32_z(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_z)))\n" "svint64_t svrevh_s64_z(svbool_t, 
svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_m)))\n" "svuint64_t svrevw_u64_m(svuint64_t, svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_m)))\n" "svint64_t svrevw_s64_m(svint64_t, svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_x)))\n" "svuint64_t svrevw_u64_x(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_x)))\n" "svint64_t svrevw_s64_x(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_z)))\n" "svuint64_t svrevw_u64_z(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_z)))\n" "svint64_t svrevw_s64_z(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_m)))\n" "svfloat64_t svrinta_f64_m(svfloat64_t, svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_m)))\n" "svfloat32_t svrinta_f32_m(svfloat32_t, svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_m)))\n" "svfloat16_t svrinta_f16_m(svfloat16_t, svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_x)))\n" "svfloat64_t svrinta_f64_x(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_x)))\n" "svfloat32_t svrinta_f32_x(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_x)))\n" "svfloat16_t svrinta_f16_x(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_z)))\n" "svfloat64_t svrinta_f64_z(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_z)))\n" "svfloat32_t svrinta_f32_z(svbool_t, svfloat32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_z)))\n" "svfloat16_t svrinta_f16_z(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_m)))\n" "svfloat64_t svrinti_f64_m(svfloat64_t, svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_m)))\n" "svfloat32_t svrinti_f32_m(svfloat32_t, svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_m)))\n" "svfloat16_t svrinti_f16_m(svfloat16_t, svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_x)))\n" "svfloat64_t svrinti_f64_x(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_x)))\n" "svfloat32_t svrinti_f32_x(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_x)))\n" "svfloat16_t svrinti_f16_x(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_z)))\n" "svfloat64_t svrinti_f64_z(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_z)))\n" "svfloat32_t svrinti_f32_z(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_z)))\n" "svfloat16_t svrinti_f16_z(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_m)))\n" "svfloat64_t svrintm_f64_m(svfloat64_t, svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_m)))\n" "svfloat32_t svrintm_f32_m(svfloat32_t, svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_m)))\n" "svfloat16_t svrintm_f16_m(svfloat16_t, svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_x)))\n" "svfloat64_t svrintm_f64_x(svbool_t, svfloat64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_x)))\n" "svfloat32_t svrintm_f32_x(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_x)))\n" "svfloat16_t svrintm_f16_x(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_z)))\n" "svfloat64_t svrintm_f64_z(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_z)))\n" "svfloat32_t svrintm_f32_z(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_z)))\n" "svfloat16_t svrintm_f16_z(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_m)))\n" "svfloat64_t svrintn_f64_m(svfloat64_t, svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_m)))\n" "svfloat32_t svrintn_f32_m(svfloat32_t, svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_m)))\n" "svfloat16_t svrintn_f16_m(svfloat16_t, svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_x)))\n" "svfloat64_t svrintn_f64_x(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_x)))\n" "svfloat32_t svrintn_f32_x(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_x)))\n" "svfloat16_t svrintn_f16_x(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_z)))\n" "svfloat64_t svrintn_f64_z(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_z)))\n" "svfloat32_t svrintn_f32_z(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_z)))\n" "svfloat16_t svrintn_f16_z(svbool_t, svfloat16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_m)))\n" "svfloat64_t svrintp_f64_m(svfloat64_t, svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_m)))\n" "svfloat32_t svrintp_f32_m(svfloat32_t, svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_m)))\n" "svfloat16_t svrintp_f16_m(svfloat16_t, svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_x)))\n" "svfloat64_t svrintp_f64_x(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_x)))\n" "svfloat32_t svrintp_f32_x(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_x)))\n" "svfloat16_t svrintp_f16_x(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_z)))\n" "svfloat64_t svrintp_f64_z(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_z)))\n" "svfloat32_t svrintp_f32_z(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_z)))\n" "svfloat16_t svrintp_f16_z(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_m)))\n" "svfloat64_t svrintx_f64_m(svfloat64_t, svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_m)))\n" "svfloat32_t svrintx_f32_m(svfloat32_t, svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_m)))\n" "svfloat16_t svrintx_f16_m(svfloat16_t, svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_x)))\n" "svfloat64_t svrintx_f64_x(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_x)))\n" "svfloat32_t svrintx_f32_x(svbool_t, svfloat32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_x)))\n" "svfloat16_t svrintx_f16_x(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_z)))\n" "svfloat64_t svrintx_f64_z(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_z)))\n" "svfloat32_t svrintx_f32_z(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_z)))\n" "svfloat16_t svrintx_f16_z(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_m)))\n" "svfloat64_t svrintz_f64_m(svfloat64_t, svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_m)))\n" "svfloat32_t svrintz_f32_m(svfloat32_t, svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_m)))\n" "svfloat16_t svrintz_f16_m(svfloat16_t, svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_x)))\n" "svfloat64_t svrintz_f64_x(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_x)))\n" "svfloat32_t svrintz_f32_x(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_x)))\n" "svfloat16_t svrintz_f16_x(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_z)))\n" "svfloat64_t svrintz_f64_z(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_z)))\n" "svfloat32_t svrintz_f32_z(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_z)))\n" "svfloat16_t svrintz_f16_z(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f64)))\n" "svfloat64_t svrsqrte_f64(svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f32)))\n" 
"svfloat32_t svrsqrte_f32(svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f16)))\n" "svfloat16_t svrsqrte_f16(svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f64)))\n" "svfloat64_t svrsqrts_f64(svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f32)))\n" "svfloat32_t svrsqrts_f32(svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f16)))\n" "svfloat16_t svrsqrts_f16(svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_m)))\n" "svfloat64_t svscale_n_f64_m(svbool_t, svfloat64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_m)))\n" "svfloat32_t svscale_n_f32_m(svbool_t, svfloat32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_m)))\n" "svfloat16_t svscale_n_f16_m(svbool_t, svfloat16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_x)))\n" "svfloat64_t svscale_n_f64_x(svbool_t, svfloat64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_x)))\n" "svfloat32_t svscale_n_f32_x(svbool_t, svfloat32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_x)))\n" "svfloat16_t svscale_n_f16_x(svbool_t, svfloat16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_z)))\n" "svfloat64_t svscale_n_f64_z(svbool_t, svfloat64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_z)))\n" "svfloat32_t svscale_n_f32_z(svbool_t, svfloat32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_z)))\n" "svfloat16_t svscale_n_f16_z(svbool_t, svfloat16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_m)))\n" "svfloat64_t 
svscale_f64_m(svbool_t, svfloat64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_m)))\n" "svfloat32_t svscale_f32_m(svbool_t, svfloat32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_m)))\n" "svfloat16_t svscale_f16_m(svbool_t, svfloat16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_x)))\n" "svfloat64_t svscale_f64_x(svbool_t, svfloat64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_x)))\n" "svfloat32_t svscale_f32_x(svbool_t, svfloat32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_x)))\n" "svfloat16_t svscale_f16_x(svbool_t, svfloat16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_z)))\n" "svfloat64_t svscale_f64_z(svbool_t, svfloat64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_z)))\n" "svfloat32_t svscale_f32_z(svbool_t, svfloat32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_z)))\n" "svfloat16_t svscale_f16_z(svbool_t, svfloat16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_b)))\n" "svbool_t svsel_b(svbool_t, svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u8)))\n" "svuint8_t svsel_u8(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u32)))\n" "svuint32_t svsel_u32(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u64)))\n" "svuint64_t svsel_u64(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u16)))\n" "svuint16_t svsel_u16(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s8)))\n" "svint8_t svsel_s8(svbool_t, 
svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f64)))\n" "svfloat64_t svsel_f64(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f32)))\n" "svfloat32_t svsel_f32(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f16)))\n" "svfloat16_t svsel_f16(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s32)))\n" "svint32_t svsel_s32(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s64)))\n" "svint64_t svsel_s64(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s16)))\n" "svint16_t svsel_s16(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u8)))\n" "svuint8x2_t svset2_u8(svuint8x2_t, uint64_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u32)))\n" "svuint32x2_t svset2_u32(svuint32x2_t, uint64_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u64)))\n" "svuint64x2_t svset2_u64(svuint64x2_t, uint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u16)))\n" "svuint16x2_t svset2_u16(svuint16x2_t, uint64_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s8)))\n" "svint8x2_t svset2_s8(svint8x2_t, uint64_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f64)))\n" "svfloat64x2_t svset2_f64(svfloat64x2_t, uint64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f32)))\n" "svfloat32x2_t svset2_f32(svfloat32x2_t, uint64_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f16)))\n" "svfloat16x2_t svset2_f16(svfloat16x2_t, uint64_t, svfloat16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s32)))\n" "svint32x2_t svset2_s32(svint32x2_t, uint64_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s64)))\n" "svint64x2_t svset2_s64(svint64x2_t, uint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s16)))\n" "svint16x2_t svset2_s16(svint16x2_t, uint64_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u8)))\n" "svuint8x3_t svset3_u8(svuint8x3_t, uint64_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u32)))\n" "svuint32x3_t svset3_u32(svuint32x3_t, uint64_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u64)))\n" "svuint64x3_t svset3_u64(svuint64x3_t, uint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u16)))\n" "svuint16x3_t svset3_u16(svuint16x3_t, uint64_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s8)))\n" "svint8x3_t svset3_s8(svint8x3_t, uint64_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f64)))\n" "svfloat64x3_t svset3_f64(svfloat64x3_t, uint64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f32)))\n" "svfloat32x3_t svset3_f32(svfloat32x3_t, uint64_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f16)))\n" "svfloat16x3_t svset3_f16(svfloat16x3_t, uint64_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s32)))\n" "svint32x3_t svset3_s32(svint32x3_t, uint64_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s64)))\n" "svint64x3_t svset3_s64(svint64x3_t, uint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s16)))\n" "svint16x3_t svset3_s16(svint16x3_t, uint64_t, svint16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u8)))\n" "svuint8x4_t svset4_u8(svuint8x4_t, uint64_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u32)))\n" "svuint32x4_t svset4_u32(svuint32x4_t, uint64_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u64)))\n" "svuint64x4_t svset4_u64(svuint64x4_t, uint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u16)))\n" "svuint16x4_t svset4_u16(svuint16x4_t, uint64_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s8)))\n" "svint8x4_t svset4_s8(svint8x4_t, uint64_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f64)))\n" "svfloat64x4_t svset4_f64(svfloat64x4_t, uint64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f32)))\n" "svfloat32x4_t svset4_f32(svfloat32x4_t, uint64_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f16)))\n" "svfloat16x4_t svset4_f16(svfloat16x4_t, uint64_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s32)))\n" "svint32x4_t svset4_s32(svint32x4_t, uint64_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s64)))\n" "svint64x4_t svset4_s64(svint64x4_t, uint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s16)))\n" "svint16x4_t svset4_s16(svint16x4_t, uint64_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsetffr)))\n" "void svsetffr(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u8)))\n" "svuint8_t svsplice_u8(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u32)))\n" "svuint32_t svsplice_u32(svbool_t, svuint32_t, svuint32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u64)))\n" "svuint64_t svsplice_u64(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u16)))\n" "svuint16_t svsplice_u16(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s8)))\n" "svint8_t svsplice_s8(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f64)))\n" "svfloat64_t svsplice_f64(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f32)))\n" "svfloat32_t svsplice_f32(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f16)))\n" "svfloat16_t svsplice_f16(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s32)))\n" "svint32_t svsplice_s32(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s64)))\n" "svint64_t svsplice_s64(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s16)))\n" "svint16_t svsplice_s16(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_m)))\n" "svfloat64_t svsqrt_f64_m(svfloat64_t, svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_m)))\n" "svfloat32_t svsqrt_f32_m(svfloat32_t, svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_m)))\n" "svfloat16_t svsqrt_f16_m(svfloat16_t, svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_x)))\n" "svfloat64_t svsqrt_f64_x(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_x)))\n" "svfloat32_t svsqrt_f32_x(svbool_t, svfloat32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_x)))\n" "svfloat16_t svsqrt_f16_x(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_z)))\n" "svfloat64_t svsqrt_f64_z(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_z)))\n" "svfloat32_t svsqrt_f32_z(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_z)))\n" "svfloat16_t svsqrt_f16_z(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u8)))\n" "void svst1_u8(svbool_t, uint8_t *, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u32)))\n" "void svst1_u32(svbool_t, uint32_t *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u64)))\n" "void svst1_u64(svbool_t, uint64_t *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u16)))\n" "void svst1_u16(svbool_t, uint16_t *, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s8)))\n" "void svst1_s8(svbool_t, int8_t *, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f64)))\n" "void svst1_f64(svbool_t, float64_t *, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f32)))\n" "void svst1_f32(svbool_t, float32_t *, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f16)))\n" "void svst1_f16(svbool_t, float16_t *, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s32)))\n" "void svst1_s32(svbool_t, int32_t *, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s64)))\n" "void svst1_s64(svbool_t, int64_t *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s16)))\n" "void svst1_s16(svbool_t, int16_t *, svint16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_u32)))\n" "void svst1_scatter_u32base_index_u32(svbool_t, svuint32_t, int64_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_u64)))\n" "void svst1_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_f64)))\n" "void svst1_scatter_u64base_index_f64(svbool_t, svuint64_t, int64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_f32)))\n" "void svst1_scatter_u32base_index_f32(svbool_t, svuint32_t, int64_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_s32)))\n" "void svst1_scatter_u32base_index_s32(svbool_t, svuint32_t, int64_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_s64)))\n" "void svst1_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_u32)))\n" "void svst1_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_u64)))\n" "void svst1_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_f64)))\n" "void svst1_scatter_u64base_offset_f64(svbool_t, svuint64_t, int64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_f32)))\n" "void svst1_scatter_u32base_offset_f32(svbool_t, svuint32_t, int64_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_s32)))\n" "void svst1_scatter_u32base_offset_s32(svbool_t, 
svuint32_t, int64_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_s64)))\n" "void svst1_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_u32)))\n" "void svst1_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_u64)))\n" "void svst1_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_f64)))\n" "void svst1_scatter_u64base_f64(svbool_t, svuint64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_f32)))\n" "void svst1_scatter_u32base_f32(svbool_t, svuint32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_s32)))\n" "void svst1_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_s64)))\n" "void svst1_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_u32)))\n" "void svst1_scatter_s32index_u32(svbool_t, uint32_t *, svint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_f32)))\n" "void svst1_scatter_s32index_f32(svbool_t, float32_t *, svint32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_s32)))\n" "void svst1_scatter_s32index_s32(svbool_t, int32_t *, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_u32)))\n" "void svst1_scatter_u32index_u32(svbool_t, uint32_t *, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_f32)))\n" 
"void svst1_scatter_u32index_f32(svbool_t, float32_t *, svuint32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_s32)))\n" "void svst1_scatter_u32index_s32(svbool_t, int32_t *, svuint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_u64)))\n" "void svst1_scatter_s64index_u64(svbool_t, uint64_t *, svint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_f64)))\n" "void svst1_scatter_s64index_f64(svbool_t, float64_t *, svint64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_s64)))\n" "void svst1_scatter_s64index_s64(svbool_t, int64_t *, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_u64)))\n" "void svst1_scatter_u64index_u64(svbool_t, uint64_t *, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_f64)))\n" "void svst1_scatter_u64index_f64(svbool_t, float64_t *, svuint64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_s64)))\n" "void svst1_scatter_u64index_s64(svbool_t, int64_t *, svuint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_u32)))\n" "void svst1_scatter_s32offset_u32(svbool_t, uint32_t *, svint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_f32)))\n" "void svst1_scatter_s32offset_f32(svbool_t, float32_t *, svint32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_s32)))\n" "void svst1_scatter_s32offset_s32(svbool_t, int32_t *, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_u32)))\n" "void svst1_scatter_u32offset_u32(svbool_t, 
uint32_t *, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_f32)))\n" "void svst1_scatter_u32offset_f32(svbool_t, float32_t *, svuint32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_s32)))\n" "void svst1_scatter_u32offset_s32(svbool_t, int32_t *, svuint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_u64)))\n" "void svst1_scatter_s64offset_u64(svbool_t, uint64_t *, svint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_f64)))\n" "void svst1_scatter_s64offset_f64(svbool_t, float64_t *, svint64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_s64)))\n" "void svst1_scatter_s64offset_s64(svbool_t, int64_t *, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_u64)))\n" "void svst1_scatter_u64offset_u64(svbool_t, uint64_t *, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_f64)))\n" "void svst1_scatter_u64offset_f64(svbool_t, float64_t *, svuint64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_s64)))\n" "void svst1_scatter_u64offset_s64(svbool_t, int64_t *, svuint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u8)))\n" "void svst1_vnum_u8(svbool_t, uint8_t *, int64_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u32)))\n" "void svst1_vnum_u32(svbool_t, uint32_t *, int64_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u64)))\n" "void svst1_vnum_u64(svbool_t, uint64_t *, int64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u16)))\n" 
"void svst1_vnum_u16(svbool_t, uint16_t *, int64_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s8)))\n" "void svst1_vnum_s8(svbool_t, int8_t *, int64_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f64)))\n" "void svst1_vnum_f64(svbool_t, float64_t *, int64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f32)))\n" "void svst1_vnum_f32(svbool_t, float32_t *, int64_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f16)))\n" "void svst1_vnum_f16(svbool_t, float16_t *, int64_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s32)))\n" "void svst1_vnum_s32(svbool_t, int32_t *, int64_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s64)))\n" "void svst1_vnum_s64(svbool_t, int64_t *, int64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s16)))\n" "void svst1_vnum_s16(svbool_t, int16_t *, int64_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s32)))\n" "void svst1b_s32(svbool_t, int8_t *, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s64)))\n" "void svst1b_s64(svbool_t, int8_t *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s16)))\n" "void svst1b_s16(svbool_t, int8_t *, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u32)))\n" "void svst1b_u32(svbool_t, uint8_t *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u64)))\n" "void svst1b_u64(svbool_t, uint8_t *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u16)))\n" "void svst1b_u16(svbool_t, uint8_t *, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_offset_u32)))\n" 
"void svst1b_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_offset_u64)))\n" "void svst1b_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_offset_s32)))\n" "void svst1b_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_offset_s64)))\n" "void svst1b_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_u32)))\n" "void svst1b_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_u64)))\n" "void svst1b_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_s32)))\n" "void svst1b_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_s64)))\n" "void svst1b_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s32offset_s32)))\n" "void svst1b_scatter_s32offset_s32(svbool_t, int8_t *, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s32offset_u32)))\n" "void svst1b_scatter_s32offset_u32(svbool_t, uint8_t *, svint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32offset_s32)))\n" "void svst1b_scatter_u32offset_s32(svbool_t, int8_t *, svuint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32offset_u32)))\n" "void svst1b_scatter_u32offset_u32(svbool_t, uint8_t *, 
svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s64offset_s64)))\n" "void svst1b_scatter_s64offset_s64(svbool_t, int8_t *, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s64offset_u64)))\n" "void svst1b_scatter_s64offset_u64(svbool_t, uint8_t *, svint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64offset_s64)))\n" "void svst1b_scatter_u64offset_s64(svbool_t, int8_t *, svuint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64offset_u64)))\n" "void svst1b_scatter_u64offset_u64(svbool_t, uint8_t *, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s32)))\n" "void svst1b_vnum_s32(svbool_t, int8_t *, int64_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s64)))\n" "void svst1b_vnum_s64(svbool_t, int8_t *, int64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s16)))\n" "void svst1b_vnum_s16(svbool_t, int8_t *, int64_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u32)))\n" "void svst1b_vnum_u32(svbool_t, uint8_t *, int64_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u64)))\n" "void svst1b_vnum_u64(svbool_t, uint8_t *, int64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u16)))\n" "void svst1b_vnum_u16(svbool_t, uint8_t *, int64_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_s32)))\n" "void svst1h_s32(svbool_t, int16_t *, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_s64)))\n" "void svst1h_s64(svbool_t, int16_t *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_u32)))\n" "void 
svst1h_u32(svbool_t, uint16_t *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_u64)))\n" "void svst1h_u64(svbool_t, uint16_t *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_index_u32)))\n" "void svst1h_scatter_u32base_index_u32(svbool_t, svuint32_t, int64_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_index_u64)))\n" "void svst1h_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_index_s32)))\n" "void svst1h_scatter_u32base_index_s32(svbool_t, svuint32_t, int64_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_index_s64)))\n" "void svst1h_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_offset_u32)))\n" "void svst1h_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_offset_u64)))\n" "void svst1h_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_offset_s32)))\n" "void svst1h_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_offset_s64)))\n" "void svst1h_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_u32)))\n" "void svst1h_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_u64)))\n" "void svst1h_scatter_u64base_u64(svbool_t, svuint64_t, 
svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_s32)))\n" "void svst1h_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_s64)))\n" "void svst1h_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32index_s32)))\n" "void svst1h_scatter_s32index_s32(svbool_t, int16_t *, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32index_u32)))\n" "void svst1h_scatter_s32index_u32(svbool_t, uint16_t *, svint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32index_s32)))\n" "void svst1h_scatter_u32index_s32(svbool_t, int16_t *, svuint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32index_u32)))\n" "void svst1h_scatter_u32index_u32(svbool_t, uint16_t *, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64index_s64)))\n" "void svst1h_scatter_s64index_s64(svbool_t, int16_t *, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64index_u64)))\n" "void svst1h_scatter_s64index_u64(svbool_t, uint16_t *, svint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64index_s64)))\n" "void svst1h_scatter_u64index_s64(svbool_t, int16_t *, svuint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64index_u64)))\n" "void svst1h_scatter_u64index_u64(svbool_t, uint16_t *, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32offset_s32)))\n" "void svst1h_scatter_s32offset_s32(svbool_t, int16_t *, svint32_t, svint32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32offset_u32)))\n" "void svst1h_scatter_s32offset_u32(svbool_t, uint16_t *, svint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32offset_s32)))\n" "void svst1h_scatter_u32offset_s32(svbool_t, int16_t *, svuint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32offset_u32)))\n" "void svst1h_scatter_u32offset_u32(svbool_t, uint16_t *, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64offset_s64)))\n" "void svst1h_scatter_s64offset_s64(svbool_t, int16_t *, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64offset_u64)))\n" "void svst1h_scatter_s64offset_u64(svbool_t, uint16_t *, svint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64offset_s64)))\n" "void svst1h_scatter_u64offset_s64(svbool_t, int16_t *, svuint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64offset_u64)))\n" "void svst1h_scatter_u64offset_u64(svbool_t, uint16_t *, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_s32)))\n" "void svst1h_vnum_s32(svbool_t, int16_t *, int64_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_s64)))\n" "void svst1h_vnum_s64(svbool_t, int16_t *, int64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_u32)))\n" "void svst1h_vnum_u32(svbool_t, uint16_t *, int64_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_u64)))\n" "void svst1h_vnum_u64(svbool_t, uint16_t *, int64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_s64)))\n" "void svst1w_s64(svbool_t, int32_t *, svint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_u64)))\n" "void svst1w_u64(svbool_t, uint32_t *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_index_u64)))\n" "void svst1w_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_index_s64)))\n" "void svst1w_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_offset_u64)))\n" "void svst1w_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_offset_s64)))\n" "void svst1w_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_u64)))\n" "void svst1w_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_s64)))\n" "void svst1w_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64index_s64)))\n" "void svst1w_scatter_s64index_s64(svbool_t, int32_t *, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64index_u64)))\n" "void svst1w_scatter_s64index_u64(svbool_t, uint32_t *, svint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64index_s64)))\n" "void svst1w_scatter_u64index_s64(svbool_t, int32_t *, svuint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64index_u64)))\n" "void svst1w_scatter_u64index_u64(svbool_t, uint32_t *, svuint64_t, svuint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64offset_s64)))\n" "void svst1w_scatter_s64offset_s64(svbool_t, int32_t *, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64offset_u64)))\n" "void svst1w_scatter_s64offset_u64(svbool_t, uint32_t *, svint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64offset_s64)))\n" "void svst1w_scatter_u64offset_s64(svbool_t, int32_t *, svuint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64offset_u64)))\n" "void svst1w_scatter_u64offset_u64(svbool_t, uint32_t *, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_vnum_s64)))\n" "void svst1w_vnum_s64(svbool_t, int32_t *, int64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_vnum_u64)))\n" "void svst1w_vnum_u64(svbool_t, uint32_t *, int64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u8)))\n" "void svst2_u8(svbool_t, uint8_t *, svuint8x2_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u32)))\n" "void svst2_u32(svbool_t, uint32_t *, svuint32x2_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u64)))\n" "void svst2_u64(svbool_t, uint64_t *, svuint64x2_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u16)))\n" "void svst2_u16(svbool_t, uint16_t *, svuint16x2_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s8)))\n" "void svst2_s8(svbool_t, int8_t *, svint8x2_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f64)))\n" "void svst2_f64(svbool_t, float64_t *, svfloat64x2_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f32)))\n" "void svst2_f32(svbool_t, float32_t *, svfloat32x2_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f16)))\n" "void svst2_f16(svbool_t, float16_t *, svfloat16x2_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s32)))\n" "void svst2_s32(svbool_t, int32_t *, svint32x2_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s64)))\n" "void svst2_s64(svbool_t, int64_t *, svint64x2_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s16)))\n" "void svst2_s16(svbool_t, int16_t *, svint16x2_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u8)))\n" "void svst2_vnum_u8(svbool_t, uint8_t *, int64_t, svuint8x2_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u32)))\n" "void svst2_vnum_u32(svbool_t, uint32_t *, int64_t, svuint32x2_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u64)))\n" "void svst2_vnum_u64(svbool_t, uint64_t *, int64_t, svuint64x2_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u16)))\n" "void svst2_vnum_u16(svbool_t, uint16_t *, int64_t, svuint16x2_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s8)))\n" "void svst2_vnum_s8(svbool_t, int8_t *, int64_t, svint8x2_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f64)))\n" "void svst2_vnum_f64(svbool_t, float64_t *, int64_t, svfloat64x2_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f32)))\n" "void svst2_vnum_f32(svbool_t, float32_t *, int64_t, svfloat32x2_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f16)))\n" "void svst2_vnum_f16(svbool_t, float16_t *, int64_t, svfloat16x2_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s32)))\n" "void svst2_vnum_s32(svbool_t, int32_t *, int64_t, svint32x2_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s64)))\n" "void svst2_vnum_s64(svbool_t, 
int64_t *, int64_t, svint64x2_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s16)))\n" "void svst2_vnum_s16(svbool_t, int16_t *, int64_t, svint16x2_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u8)))\n" "void svst3_u8(svbool_t, uint8_t *, svuint8x3_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u32)))\n" "void svst3_u32(svbool_t, uint32_t *, svuint32x3_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u64)))\n" "void svst3_u64(svbool_t, uint64_t *, svuint64x3_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u16)))\n" "void svst3_u16(svbool_t, uint16_t *, svuint16x3_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s8)))\n" "void svst3_s8(svbool_t, int8_t *, svint8x3_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f64)))\n" "void svst3_f64(svbool_t, float64_t *, svfloat64x3_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f32)))\n" "void svst3_f32(svbool_t, float32_t *, svfloat32x3_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f16)))\n" "void svst3_f16(svbool_t, float16_t *, svfloat16x3_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s32)))\n" "void svst3_s32(svbool_t, int32_t *, svint32x3_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s64)))\n" "void svst3_s64(svbool_t, int64_t *, svint64x3_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s16)))\n" "void svst3_s16(svbool_t, int16_t *, svint16x3_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u8)))\n" "void svst3_vnum_u8(svbool_t, uint8_t *, int64_t, svuint8x3_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u32)))\n" "void svst3_vnum_u32(svbool_t, uint32_t *, int64_t, svuint32x3_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u64)))\n" "void svst3_vnum_u64(svbool_t, uint64_t *, int64_t, svuint64x3_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u16)))\n" "void svst3_vnum_u16(svbool_t, uint16_t *, int64_t, svuint16x3_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s8)))\n" "void svst3_vnum_s8(svbool_t, int8_t *, int64_t, svint8x3_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f64)))\n" "void svst3_vnum_f64(svbool_t, float64_t *, int64_t, svfloat64x3_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f32)))\n" "void svst3_vnum_f32(svbool_t, float32_t *, int64_t, svfloat32x3_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f16)))\n" "void svst3_vnum_f16(svbool_t, float16_t *, int64_t, svfloat16x3_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s32)))\n" "void svst3_vnum_s32(svbool_t, int32_t *, int64_t, svint32x3_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s64)))\n" "void svst3_vnum_s64(svbool_t, int64_t *, int64_t, svint64x3_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s16)))\n" "void svst3_vnum_s16(svbool_t, int16_t *, int64_t, svint16x3_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u8)))\n" "void svst4_u8(svbool_t, uint8_t *, svuint8x4_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u32)))\n" "void svst4_u32(svbool_t, uint32_t *, svuint32x4_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u64)))\n" "void svst4_u64(svbool_t, uint64_t *, svuint64x4_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u16)))\n" "void svst4_u16(svbool_t, uint16_t *, svuint16x4_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s8)))\n" "void svst4_s8(svbool_t, int8_t *, 
svint8x4_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f64)))\n" "void svst4_f64(svbool_t, float64_t *, svfloat64x4_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f32)))\n" "void svst4_f32(svbool_t, float32_t *, svfloat32x4_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f16)))\n" "void svst4_f16(svbool_t, float16_t *, svfloat16x4_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s32)))\n" "void svst4_s32(svbool_t, int32_t *, svint32x4_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s64)))\n" "void svst4_s64(svbool_t, int64_t *, svint64x4_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s16)))\n" "void svst4_s16(svbool_t, int16_t *, svint16x4_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u8)))\n" "void svst4_vnum_u8(svbool_t, uint8_t *, int64_t, svuint8x4_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u32)))\n" "void svst4_vnum_u32(svbool_t, uint32_t *, int64_t, svuint32x4_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u64)))\n" "void svst4_vnum_u64(svbool_t, uint64_t *, int64_t, svuint64x4_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u16)))\n" "void svst4_vnum_u16(svbool_t, uint16_t *, int64_t, svuint16x4_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s8)))\n" "void svst4_vnum_s8(svbool_t, int8_t *, int64_t, svint8x4_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f64)))\n" "void svst4_vnum_f64(svbool_t, float64_t *, int64_t, svfloat64x4_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f32)))\n" "void svst4_vnum_f32(svbool_t, float32_t *, int64_t, svfloat32x4_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f16)))\n" "void svst4_vnum_f16(svbool_t, float16_t *, 
int64_t, svfloat16x4_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s32)))\n" "void svst4_vnum_s32(svbool_t, int32_t *, int64_t, svint32x4_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s64)))\n" "void svst4_vnum_s64(svbool_t, int64_t *, int64_t, svint64x4_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s16)))\n" "void svst4_vnum_s16(svbool_t, int16_t *, int64_t, svint16x4_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u8)))\n" "void svstnt1_u8(svbool_t, uint8_t *, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u32)))\n" "void svstnt1_u32(svbool_t, uint32_t *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u64)))\n" "void svstnt1_u64(svbool_t, uint64_t *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u16)))\n" "void svstnt1_u16(svbool_t, uint16_t *, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s8)))\n" "void svstnt1_s8(svbool_t, int8_t *, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f64)))\n" "void svstnt1_f64(svbool_t, float64_t *, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f32)))\n" "void svstnt1_f32(svbool_t, float32_t *, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f16)))\n" "void svstnt1_f16(svbool_t, float16_t *, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s32)))\n" "void svstnt1_s32(svbool_t, int32_t *, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s64)))\n" "void svstnt1_s64(svbool_t, int64_t *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s16)))\n" "void svstnt1_s16(svbool_t, int16_t *, svint16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u8)))\n" "void svstnt1_vnum_u8(svbool_t, uint8_t *, int64_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u32)))\n" "void svstnt1_vnum_u32(svbool_t, uint32_t *, int64_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u64)))\n" "void svstnt1_vnum_u64(svbool_t, uint64_t *, int64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u16)))\n" "void svstnt1_vnum_u16(svbool_t, uint16_t *, int64_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s8)))\n" "void svstnt1_vnum_s8(svbool_t, int8_t *, int64_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f64)))\n" "void svstnt1_vnum_f64(svbool_t, float64_t *, int64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f32)))\n" "void svstnt1_vnum_f32(svbool_t, float32_t *, int64_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f16)))\n" "void svstnt1_vnum_f16(svbool_t, float16_t *, int64_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s32)))\n" "void svstnt1_vnum_s32(svbool_t, int32_t *, int64_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s64)))\n" "void svstnt1_vnum_s64(svbool_t, int64_t *, int64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s16)))\n" "void svstnt1_vnum_s16(svbool_t, int16_t *, int64_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_m)))\n" "svfloat64_t svsub_n_f64_m(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_m)))\n" "svfloat32_t svsub_n_f32_m(svbool_t, svfloat32_t, float32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_m)))\n" "svfloat16_t svsub_n_f16_m(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_x)))\n" "svfloat64_t svsub_n_f64_x(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_x)))\n" "svfloat32_t svsub_n_f32_x(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_x)))\n" "svfloat16_t svsub_n_f16_x(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_z)))\n" "svfloat64_t svsub_n_f64_z(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_z)))\n" "svfloat32_t svsub_n_f32_z(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_z)))\n" "svfloat16_t svsub_n_f16_z(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_m)))\n" "svuint8_t svsub_n_u8_m(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_m)))\n" "svuint32_t svsub_n_u32_m(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_m)))\n" "svuint64_t svsub_n_u64_m(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_m)))\n" "svuint16_t svsub_n_u16_m(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_m)))\n" "svint8_t svsub_n_s8_m(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_m)))\n" "svint32_t svsub_n_s32_m(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_m)))\n" "svint64_t svsub_n_s64_m(svbool_t, svint64_t, int64_t);\n" 
"__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_m)))\n" "svint16_t svsub_n_s16_m(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_x)))\n" "svuint8_t svsub_n_u8_x(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_x)))\n" "svuint32_t svsub_n_u32_x(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_x)))\n" "svuint64_t svsub_n_u64_x(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_x)))\n" "svuint16_t svsub_n_u16_x(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_x)))\n" "svint8_t svsub_n_s8_x(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_x)))\n" "svint32_t svsub_n_s32_x(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_x)))\n" "svint64_t svsub_n_s64_x(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_x)))\n" "svint16_t svsub_n_s16_x(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_z)))\n" "svuint8_t svsub_n_u8_z(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_z)))\n" "svuint32_t svsub_n_u32_z(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_z)))\n" "svuint64_t svsub_n_u64_z(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_z)))\n" "svuint16_t svsub_n_u16_z(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_z)))\n" "svint8_t svsub_n_s8_z(svbool_t, svint8_t, int8_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_z)))\n" "svint32_t svsub_n_s32_z(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_z)))\n" "svint64_t svsub_n_s64_z(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_z)))\n" "svint16_t svsub_n_s16_z(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_m)))\n" "svfloat64_t svsub_f64_m(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_m)))\n" "svfloat32_t svsub_f32_m(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_m)))\n" "svfloat16_t svsub_f16_m(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_x)))\n" "svfloat64_t svsub_f64_x(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_x)))\n" "svfloat32_t svsub_f32_x(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_x)))\n" "svfloat16_t svsub_f16_x(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_z)))\n" "svfloat64_t svsub_f64_z(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_z)))\n" "svfloat32_t svsub_f32_z(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_z)))\n" "svfloat16_t svsub_f16_z(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_m)))\n" "svuint8_t svsub_u8_m(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_m)))\n" "svuint32_t svsub_u32_m(svbool_t, svuint32_t, svuint32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_m)))\n" "svuint64_t svsub_u64_m(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_m)))\n" "svuint16_t svsub_u16_m(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_m)))\n" "svint8_t svsub_s8_m(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_m)))\n" "svint32_t svsub_s32_m(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_m)))\n" "svint64_t svsub_s64_m(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_m)))\n" "svint16_t svsub_s16_m(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_x)))\n" "svuint8_t svsub_u8_x(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_x)))\n" "svuint32_t svsub_u32_x(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_x)))\n" "svuint64_t svsub_u64_x(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_x)))\n" "svuint16_t svsub_u16_x(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_x)))\n" "svint8_t svsub_s8_x(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_x)))\n" "svint32_t svsub_s32_x(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_x)))\n" "svint64_t svsub_s64_x(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_x)))\n" "svint16_t svsub_s16_x(svbool_t, svint16_t, svint16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_z)))\n" "svuint8_t svsub_u8_z(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_z)))\n" "svuint32_t svsub_u32_z(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_z)))\n" "svuint64_t svsub_u64_z(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_z)))\n" "svuint16_t svsub_u16_z(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_z)))\n" "svint8_t svsub_s8_z(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_z)))\n" "svint32_t svsub_s32_z(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_z)))\n" "svint64_t svsub_s64_z(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_z)))\n" "svint16_t svsub_s16_z(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_m)))\n" "svfloat64_t svsubr_n_f64_m(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_m)))\n" "svfloat32_t svsubr_n_f32_m(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_m)))\n" "svfloat16_t svsubr_n_f16_m(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_x)))\n" "svfloat64_t svsubr_n_f64_x(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_x)))\n" "svfloat32_t svsubr_n_f32_x(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_x)))\n" "svfloat16_t svsubr_n_f16_x(svbool_t, svfloat16_t, float16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_z)))\n" "svfloat64_t svsubr_n_f64_z(svbool_t, svfloat64_t, float64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_z)))\n" "svfloat32_t svsubr_n_f32_z(svbool_t, svfloat32_t, float32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_z)))\n" "svfloat16_t svsubr_n_f16_z(svbool_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_m)))\n" "svuint8_t svsubr_n_u8_m(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_m)))\n" "svuint32_t svsubr_n_u32_m(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_m)))\n" "svuint64_t svsubr_n_u64_m(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_m)))\n" "svuint16_t svsubr_n_u16_m(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_m)))\n" "svint8_t svsubr_n_s8_m(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_m)))\n" "svint32_t svsubr_n_s32_m(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_m)))\n" "svint64_t svsubr_n_s64_m(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_m)))\n" "svint16_t svsubr_n_s16_m(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_x)))\n" "svuint8_t svsubr_n_u8_x(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_x)))\n" "svuint32_t svsubr_n_u32_x(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_x)))\n" "svuint64_t svsubr_n_u64_x(svbool_t, svuint64_t, 
uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_x)))\n" "svuint16_t svsubr_n_u16_x(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_x)))\n" "svint8_t svsubr_n_s8_x(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_x)))\n" "svint32_t svsubr_n_s32_x(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_x)))\n" "svint64_t svsubr_n_s64_x(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_x)))\n" "svint16_t svsubr_n_s16_x(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_z)))\n" "svuint8_t svsubr_n_u8_z(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_z)))\n" "svuint32_t svsubr_n_u32_z(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_z)))\n" "svuint64_t svsubr_n_u64_z(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_z)))\n" "svuint16_t svsubr_n_u16_z(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_z)))\n" "svint8_t svsubr_n_s8_z(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_z)))\n" "svint32_t svsubr_n_s32_z(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_z)))\n" "svint64_t svsubr_n_s64_z(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_z)))\n" "svint16_t svsubr_n_s16_z(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_m)))\n" "svfloat64_t svsubr_f64_m(svbool_t, svfloat64_t, 
svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_m)))\n" "svfloat32_t svsubr_f32_m(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_m)))\n" "svfloat16_t svsubr_f16_m(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_x)))\n" "svfloat64_t svsubr_f64_x(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_x)))\n" "svfloat32_t svsubr_f32_x(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_x)))\n" "svfloat16_t svsubr_f16_x(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_z)))\n" "svfloat64_t svsubr_f64_z(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_z)))\n" "svfloat32_t svsubr_f32_z(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_z)))\n" "svfloat16_t svsubr_f16_z(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_m)))\n" "svuint8_t svsubr_u8_m(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_m)))\n" "svuint32_t svsubr_u32_m(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_m)))\n" "svuint64_t svsubr_u64_m(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_m)))\n" "svuint16_t svsubr_u16_m(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_m)))\n" "svint8_t svsubr_s8_m(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_m)))\n" "svint32_t 
svsubr_s32_m(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_m)))\n" "svint64_t svsubr_s64_m(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_m)))\n" "svint16_t svsubr_s16_m(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_x)))\n" "svuint8_t svsubr_u8_x(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_x)))\n" "svuint32_t svsubr_u32_x(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_x)))\n" "svuint64_t svsubr_u64_x(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_x)))\n" "svuint16_t svsubr_u16_x(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_x)))\n" "svint8_t svsubr_s8_x(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_x)))\n" "svint32_t svsubr_s32_x(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_x)))\n" "svint64_t svsubr_s64_x(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_x)))\n" "svint16_t svsubr_s16_x(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_z)))\n" "svuint8_t svsubr_u8_z(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_z)))\n" "svuint32_t svsubr_u32_z(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_z)))\n" "svuint64_t svsubr_u64_z(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_z)))\n" "svuint16_t svsubr_u16_z(svbool_t, 
svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_z)))\n" "svint8_t svsubr_s8_z(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_z)))\n" "svint32_t svsubr_s32_z(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_z)))\n" "svint64_t svsubr_s64_z(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_z)))\n" "svint16_t svsubr_s16_z(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u8)))\n" "svuint8_t svtbl_u8(svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u32)))\n" "svuint32_t svtbl_u32(svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u64)))\n" "svuint64_t svtbl_u64(svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u16)))\n" "svuint16_t svtbl_u16(svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s8)))\n" "svint8_t svtbl_s8(svint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f64)))\n" "svfloat64_t svtbl_f64(svfloat64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f32)))\n" "svfloat32_t svtbl_f32(svfloat32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f16)))\n" "svfloat16_t svtbl_f16(svfloat16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s32)))\n" "svint32_t svtbl_s32(svint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s64)))\n" "svint64_t svtbl_s64(svint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s16)))\n" "svint16_t svtbl_s16(svint16_t, svuint16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f64)))\n" "svfloat64_t svtmad_f64(svfloat64_t, svfloat64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f32)))\n" "svfloat32_t svtmad_f32(svfloat32_t, svfloat32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f16)))\n" "svfloat16_t svtmad_f16(svfloat16_t, svfloat16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u8)))\n" "svuint8_t svtrn1_u8(svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u32)))\n" "svuint32_t svtrn1_u32(svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u64)))\n" "svuint64_t svtrn1_u64(svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u16)))\n" "svuint16_t svtrn1_u16(svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s8)))\n" "svint8_t svtrn1_s8(svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f64)))\n" "svfloat64_t svtrn1_f64(svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f32)))\n" "svfloat32_t svtrn1_f32(svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f16)))\n" "svfloat16_t svtrn1_f16(svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s32)))\n" "svint32_t svtrn1_s32(svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s64)))\n" "svint64_t svtrn1_s64(svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s16)))\n" "svint16_t svtrn1_s16(svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_b16)))\n" "svbool_t svtrn1_b16(svbool_t, svbool_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_b32)))\n" "svbool_t svtrn1_b32(svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_b64)))\n" "svbool_t svtrn1_b64(svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_b8)))\n" "svbool_t svtrn1_b8(svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u8)))\n" "svuint8_t svtrn2_u8(svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u32)))\n" "svuint32_t svtrn2_u32(svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u64)))\n" "svuint64_t svtrn2_u64(svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u16)))\n" "svuint16_t svtrn2_u16(svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s8)))\n" "svint8_t svtrn2_s8(svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f64)))\n" "svfloat64_t svtrn2_f64(svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f32)))\n" "svfloat32_t svtrn2_f32(svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f16)))\n" "svfloat16_t svtrn2_f16(svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s32)))\n" "svint32_t svtrn2_s32(svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s64)))\n" "svint64_t svtrn2_s64(svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s16)))\n" "svint16_t svtrn2_s16(svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_b16)))\n" "svbool_t svtrn2_b16(svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_b32)))\n" "svbool_t svtrn2_b32(svbool_t, 
svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_b64)))\n" "svbool_t svtrn2_b64(svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_b8)))\n" "svbool_t svtrn2_b8(svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f64)))\n" "svfloat64_t svtsmul_f64(svfloat64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f32)))\n" "svfloat32_t svtsmul_f32(svfloat32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f16)))\n" "svfloat16_t svtsmul_f16(svfloat16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f64)))\n" "svfloat64_t svtssel_f64(svfloat64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f32)))\n" "svfloat32_t svtssel_f32(svfloat32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f16)))\n" "svfloat16_t svtssel_f16(svfloat16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_u8)))\n" "svuint8x2_t svundef2_u8(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_u32)))\n" "svuint32x2_t svundef2_u32(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_u64)))\n" "svuint64x2_t svundef2_u64(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_u16)))\n" "svuint16x2_t svundef2_u16(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_s8)))\n" "svint8x2_t svundef2_s8(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_f64)))\n" "svfloat64x2_t svundef2_f64(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_f32)))\n" "svfloat32x2_t svundef2_f32(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_f16)))\n" "svfloat16x2_t svundef2_f16(void);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_s32)))\n" "svint32x2_t svundef2_s32(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_s64)))\n" "svint64x2_t svundef2_s64(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_s16)))\n" "svint16x2_t svundef2_s16(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_u8)))\n" "svuint8x3_t svundef3_u8(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_u32)))\n" "svuint32x3_t svundef3_u32(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_u64)))\n" "svuint64x3_t svundef3_u64(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_u16)))\n" "svuint16x3_t svundef3_u16(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_s8)))\n" "svint8x3_t svundef3_s8(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_f64)))\n" "svfloat64x3_t svundef3_f64(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_f32)))\n" "svfloat32x3_t svundef3_f32(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_f16)))\n" "svfloat16x3_t svundef3_f16(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_s32)))\n" "svint32x3_t svundef3_s32(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_s64)))\n" "svint64x3_t svundef3_s64(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_s16)))\n" "svint16x3_t svundef3_s16(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_u8)))\n" "svuint8x4_t svundef4_u8(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_u32)))\n" "svuint32x4_t svundef4_u32(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_u64)))\n" "svuint64x4_t svundef4_u64(void);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_u16)))\n" "svuint16x4_t svundef4_u16(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_s8)))\n" "svint8x4_t svundef4_s8(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_f64)))\n" "svfloat64x4_t svundef4_f64(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_f32)))\n" "svfloat32x4_t svundef4_f32(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_f16)))\n" "svfloat16x4_t svundef4_f16(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_s32)))\n" "svint32x4_t svundef4_s32(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_s64)))\n" "svint64x4_t svundef4_s64(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_s16)))\n" "svint16x4_t svundef4_s16(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_u8)))\n" "svuint8_t svundef_u8(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_u32)))\n" "svuint32_t svundef_u32(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_u64)))\n" "svuint64_t svundef_u64(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_u16)))\n" "svuint16_t svundef_u16(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_s8)))\n" "svint8_t svundef_s8(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_f64)))\n" "svfloat64_t svundef_f64(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_f32)))\n" "svfloat32_t svundef_f32(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_f16)))\n" "svfloat16_t svundef_f16(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_s32)))\n" "svint32_t svundef_s32(void);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_s64)))\n" "svint64_t svundef_s64(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_s16)))\n" "svint16_t svundef_s16(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_b)))\n" "svbool_t svunpkhi_b(svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s32)))\n" "svint32_t svunpkhi_s32(svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s64)))\n" "svint64_t svunpkhi_s64(svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s16)))\n" "svint16_t svunpkhi_s16(svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u32)))\n" "svuint32_t svunpkhi_u32(svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u64)))\n" "svuint64_t svunpkhi_u64(svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u16)))\n" "svuint16_t svunpkhi_u16(svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_b)))\n" "svbool_t svunpklo_b(svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s32)))\n" "svint32_t svunpklo_s32(svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s64)))\n" "svint64_t svunpklo_s64(svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s16)))\n" "svint16_t svunpklo_s16(svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u32)))\n" "svuint32_t svunpklo_u32(svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u64)))\n" "svuint64_t svunpklo_u64(svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u16)))\n" "svuint16_t svunpklo_u16(svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u8)))\n" "svuint8_t svuzp1_u8(svuint8_t, 
svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u32)))\n" "svuint32_t svuzp1_u32(svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u64)))\n" "svuint64_t svuzp1_u64(svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u16)))\n" "svuint16_t svuzp1_u16(svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s8)))\n" "svint8_t svuzp1_s8(svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f64)))\n" "svfloat64_t svuzp1_f64(svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f32)))\n" "svfloat32_t svuzp1_f32(svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f16)))\n" "svfloat16_t svuzp1_f16(svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s32)))\n" "svint32_t svuzp1_s32(svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s64)))\n" "svint64_t svuzp1_s64(svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s16)))\n" "svint16_t svuzp1_s16(svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_b16)))\n" "svbool_t svuzp1_b16(svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_b32)))\n" "svbool_t svuzp1_b32(svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_b64)))\n" "svbool_t svuzp1_b64(svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_b8)))\n" "svbool_t svuzp1_b8(svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u8)))\n" "svuint8_t svuzp2_u8(svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u32)))\n" 
"svuint32_t svuzp2_u32(svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u64)))\n" "svuint64_t svuzp2_u64(svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u16)))\n" "svuint16_t svuzp2_u16(svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s8)))\n" "svint8_t svuzp2_s8(svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f64)))\n" "svfloat64_t svuzp2_f64(svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f32)))\n" "svfloat32_t svuzp2_f32(svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f16)))\n" "svfloat16_t svuzp2_f16(svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s32)))\n" "svint32_t svuzp2_s32(svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s64)))\n" "svint64_t svuzp2_s64(svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s16)))\n" "svint16_t svuzp2_s16(svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_b16)))\n" "svbool_t svuzp2_b16(svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_b32)))\n" "svbool_t svuzp2_b32(svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_b64)))\n" "svbool_t svuzp2_b64(svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_b8)))\n" "svbool_t svuzp2_b8(svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_s32)))\n" "svbool_t svwhilele_b8_s32(int32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_s32)))\n" "svbool_t svwhilele_b32_s32(int32_t, int32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_s32)))\n" "svbool_t svwhilele_b64_s32(int32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_s32)))\n" "svbool_t svwhilele_b16_s32(int32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_s64)))\n" "svbool_t svwhilele_b8_s64(int64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_s64)))\n" "svbool_t svwhilele_b32_s64(int64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_s64)))\n" "svbool_t svwhilele_b64_s64(int64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_s64)))\n" "svbool_t svwhilele_b16_s64(int64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_u32)))\n" "svbool_t svwhilele_b8_u32(uint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_u32)))\n" "svbool_t svwhilele_b32_u32(uint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_u32)))\n" "svbool_t svwhilele_b64_u32(uint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_u32)))\n" "svbool_t svwhilele_b16_u32(uint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_u64)))\n" "svbool_t svwhilele_b8_u64(uint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_u64)))\n" "svbool_t svwhilele_b32_u64(uint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_u64)))\n" "svbool_t svwhilele_b64_u64(uint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_u64)))\n" "svbool_t svwhilele_b16_u64(uint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_u32)))\n" "svbool_t 
svwhilelt_b8_u32(uint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_u32)))\n" "svbool_t svwhilelt_b32_u32(uint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_u32)))\n" "svbool_t svwhilelt_b64_u32(uint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_u32)))\n" "svbool_t svwhilelt_b16_u32(uint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_u64)))\n" "svbool_t svwhilelt_b8_u64(uint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_u64)))\n" "svbool_t svwhilelt_b32_u64(uint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_u64)))\n" "svbool_t svwhilelt_b64_u64(uint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_u64)))\n" "svbool_t svwhilelt_b16_u64(uint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_s32)))\n" "svbool_t svwhilelt_b8_s32(int32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_s32)))\n" "svbool_t svwhilelt_b32_s32(int32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_s32)))\n" "svbool_t svwhilelt_b64_s32(int32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_s32)))\n" "svbool_t svwhilelt_b16_s32(int32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_s64)))\n" "svbool_t svwhilelt_b8_s64(int64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_s64)))\n" "svbool_t svwhilelt_b32_s64(int64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_s64)))\n" "svbool_t svwhilelt_b64_s64(int64_t, int64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_s64)))\n" "svbool_t svwhilelt_b16_s64(int64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwrffr)))\n" "void svwrffr(svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u8)))\n" "svuint8_t svzip1_u8(svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u32)))\n" "svuint32_t svzip1_u32(svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u64)))\n" "svuint64_t svzip1_u64(svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u16)))\n" "svuint16_t svzip1_u16(svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s8)))\n" "svint8_t svzip1_s8(svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f64)))\n" "svfloat64_t svzip1_f64(svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f32)))\n" "svfloat32_t svzip1_f32(svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f16)))\n" "svfloat16_t svzip1_f16(svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s32)))\n" "svint32_t svzip1_s32(svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s64)))\n" "svint64_t svzip1_s64(svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s16)))\n" "svint16_t svzip1_s16(svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_b16)))\n" "svbool_t svzip1_b16(svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_b32)))\n" "svbool_t svzip1_b32(svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_b64)))\n" "svbool_t svzip1_b64(svbool_t, 
svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_b8)))\n" "svbool_t svzip1_b8(svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u8)))\n" "svuint8_t svzip2_u8(svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u32)))\n" "svuint32_t svzip2_u32(svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u64)))\n" "svuint64_t svzip2_u64(svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u16)))\n" "svuint16_t svzip2_u16(svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s8)))\n" "svint8_t svzip2_s8(svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f64)))\n" "svfloat64_t svzip2_f64(svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f32)))\n" "svfloat32_t svzip2_f32(svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f16)))\n" "svfloat16_t svzip2_f16(svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s32)))\n" "svint32_t svzip2_s32(svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s64)))\n" "svint64_t svzip2_s64(svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s16)))\n" "svint16_t svzip2_s16(svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_b16)))\n" "svbool_t svzip2_b16(svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_b32)))\n" "svbool_t svzip2_b32(svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_b64)))\n" "svbool_t svzip2_b64(svbool_t, svbool_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_b8)))\n" "svbool_t 
svzip2_b8(svbool_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_m)))\n" "svfloat64_t svabd_m(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_m)))\n" "svfloat32_t svabd_m(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_m)))\n" "svfloat16_t svabd_m(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_x)))\n" "svfloat64_t svabd_x(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_x)))\n" "svfloat32_t svabd_x(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_x)))\n" "svfloat16_t svabd_x(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_z)))\n" "svfloat64_t svabd_z(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_z)))\n" "svfloat32_t svabd_z(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_z)))\n" "svfloat16_t svabd_z(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_m)))\n" "svint8_t svabd_m(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_m)))\n" "svint32_t svabd_m(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_m)))\n" "svint64_t svabd_m(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_m)))\n" "svint16_t svabd_m(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_x)))\n" "svint8_t svabd_x(svbool_t, svint8_t, int8_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_x)))\n" "svint32_t svabd_x(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_x)))\n" "svint64_t svabd_x(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_x)))\n" "svint16_t svabd_x(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_z)))\n" "svint8_t svabd_z(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_z)))\n" "svint32_t svabd_z(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_z)))\n" "svint64_t svabd_z(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_z)))\n" "svint16_t svabd_z(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_m)))\n" "svuint8_t svabd_m(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_m)))\n" "svuint32_t svabd_m(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_m)))\n" "svuint64_t svabd_m(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_m)))\n" "svuint16_t svabd_m(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_x)))\n" "svuint8_t svabd_x(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_x)))\n" "svuint32_t svabd_x(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_x)))\n" "svuint64_t svabd_x(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_x)))\n" "svuint16_t 
svabd_x(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_z)))\n" "svuint8_t svabd_z(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_z)))\n" "svuint32_t svabd_z(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_z)))\n" "svuint64_t svabd_z(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_z)))\n" "svuint16_t svabd_z(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_m)))\n" "svfloat64_t svabd_m(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_m)))\n" "svfloat32_t svabd_m(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_m)))\n" "svfloat16_t svabd_m(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_x)))\n" "svfloat64_t svabd_x(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_x)))\n" "svfloat32_t svabd_x(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_x)))\n" "svfloat16_t svabd_x(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_z)))\n" "svfloat64_t svabd_z(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_z)))\n" "svfloat32_t svabd_z(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_z)))\n" "svfloat16_t svabd_z(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_m)))\n" "svint8_t svabd_m(svbool_t, svint8_t, svint8_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_m)))\n" "svint32_t svabd_m(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_m)))\n" "svint64_t svabd_m(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_m)))\n" "svint16_t svabd_m(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_x)))\n" "svint8_t svabd_x(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_x)))\n" "svint32_t svabd_x(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_x)))\n" "svint64_t svabd_x(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_x)))\n" "svint16_t svabd_x(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_z)))\n" "svint8_t svabd_z(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_z)))\n" "svint32_t svabd_z(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_z)))\n" "svint64_t svabd_z(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_z)))\n" "svint16_t svabd_z(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_m)))\n" "svuint8_t svabd_m(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_m)))\n" "svuint32_t svabd_m(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_m)))\n" "svuint64_t svabd_m(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_m)))\n" "svuint16_t svabd_m(svbool_t, 
svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_x)))\n" "svuint8_t svabd_x(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_x)))\n" "svuint32_t svabd_x(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_x)))\n" "svuint64_t svabd_x(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_x)))\n" "svuint16_t svabd_x(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_z)))\n" "svuint8_t svabd_z(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_z)))\n" "svuint32_t svabd_z(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_z)))\n" "svuint64_t svabd_z(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_z)))\n" "svuint16_t svabd_z(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_m)))\n" "svfloat64_t svabs_m(svfloat64_t, svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_m)))\n" "svfloat32_t svabs_m(svfloat32_t, svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_m)))\n" "svfloat16_t svabs_m(svfloat16_t, svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_x)))\n" "svfloat64_t svabs_x(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_x)))\n" "svfloat32_t svabs_x(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_x)))\n" "svfloat16_t svabs_x(svbool_t, svfloat16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_z)))\n" "svfloat64_t svabs_z(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_z)))\n" "svfloat32_t svabs_z(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_z)))\n" "svfloat16_t svabs_z(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_m)))\n" "svint8_t svabs_m(svint8_t, svbool_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_m)))\n" "svint32_t svabs_m(svint32_t, svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_m)))\n" "svint64_t svabs_m(svint64_t, svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_m)))\n" "svint16_t svabs_m(svint16_t, svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_x)))\n" "svint8_t svabs_x(svbool_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_x)))\n" "svint32_t svabs_x(svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_x)))\n" "svint64_t svabs_x(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_x)))\n" "svint16_t svabs_x(svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_z)))\n" "svint8_t svabs_z(svbool_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_z)))\n" "svint32_t svabs_z(svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_z)))\n" "svint64_t svabs_z(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_z)))\n" "svint16_t svabs_z(svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f64)))\n" "svbool_t 
svacge(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f32)))\n" "svbool_t svacge(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f16)))\n" "svbool_t svacge(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f64)))\n" "svbool_t svacge(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f32)))\n" "svbool_t svacge(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f16)))\n" "svbool_t svacge(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f64)))\n" "svbool_t svacgt(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f32)))\n" "svbool_t svacgt(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f16)))\n" "svbool_t svacgt(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f64)))\n" "svbool_t svacgt(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f32)))\n" "svbool_t svacgt(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f16)))\n" "svbool_t svacgt(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f64)))\n" "svbool_t svacle(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f32)))\n" "svbool_t svacle(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f16)))\n" "svbool_t svacle(svbool_t, svfloat16_t, float16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f64)))\n" "svbool_t svacle(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f32)))\n" "svbool_t svacle(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f16)))\n" "svbool_t svacle(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f64)))\n" "svbool_t svaclt(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f32)))\n" "svbool_t svaclt(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f16)))\n" "svbool_t svaclt(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f64)))\n" "svbool_t svaclt(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f32)))\n" "svbool_t svaclt(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f16)))\n" "svbool_t svaclt(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_m)))\n" "svfloat64_t svadd_m(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_m)))\n" "svfloat32_t svadd_m(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_m)))\n" "svfloat16_t svadd_m(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_x)))\n" "svfloat64_t svadd_x(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_x)))\n" "svfloat32_t svadd_x(svbool_t, svfloat32_t, float32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_x)))\n" "svfloat16_t svadd_x(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_z)))\n" "svfloat64_t svadd_z(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_z)))\n" "svfloat32_t svadd_z(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_z)))\n" "svfloat16_t svadd_z(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_m)))\n" "svuint8_t svadd_m(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_m)))\n" "svuint32_t svadd_m(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_m)))\n" "svuint64_t svadd_m(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_m)))\n" "svuint16_t svadd_m(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_m)))\n" "svint8_t svadd_m(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_m)))\n" "svint32_t svadd_m(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_m)))\n" "svint64_t svadd_m(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_m)))\n" "svint16_t svadd_m(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_x)))\n" "svuint8_t svadd_x(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_x)))\n" "svuint32_t svadd_x(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_x)))\n" 
"svuint64_t svadd_x(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_x)))\n" "svuint16_t svadd_x(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_x)))\n" "svint8_t svadd_x(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_x)))\n" "svint32_t svadd_x(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_x)))\n" "svint64_t svadd_x(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_x)))\n" "svint16_t svadd_x(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_z)))\n" "svuint8_t svadd_z(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_z)))\n" "svuint32_t svadd_z(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_z)))\n" "svuint64_t svadd_z(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_z)))\n" "svuint16_t svadd_z(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_z)))\n" "svint8_t svadd_z(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_z)))\n" "svint32_t svadd_z(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_z)))\n" "svint64_t svadd_z(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_z)))\n" "svint16_t svadd_z(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_m)))\n" "svfloat64_t svadd_m(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_m)))\n" "svfloat32_t svadd_m(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_m)))\n" "svfloat16_t svadd_m(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_x)))\n" "svfloat64_t svadd_x(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_x)))\n" "svfloat32_t svadd_x(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_x)))\n" "svfloat16_t svadd_x(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_z)))\n" "svfloat64_t svadd_z(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_z)))\n" "svfloat32_t svadd_z(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_z)))\n" "svfloat16_t svadd_z(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_m)))\n" "svuint8_t svadd_m(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_m)))\n" "svuint32_t svadd_m(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_m)))\n" "svuint64_t svadd_m(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_m)))\n" "svuint16_t svadd_m(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_m)))\n" "svint8_t svadd_m(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_m)))\n" "svint32_t svadd_m(svbool_t, svint32_t, svint32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_m)))\n" "svint64_t svadd_m(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_m)))\n" "svint16_t svadd_m(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_x)))\n" "svuint8_t svadd_x(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_x)))\n" "svuint32_t svadd_x(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_x)))\n" "svuint64_t svadd_x(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_x)))\n" "svuint16_t svadd_x(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_x)))\n" "svint8_t svadd_x(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_x)))\n" "svint32_t svadd_x(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_x)))\n" "svint64_t svadd_x(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_x)))\n" "svint16_t svadd_x(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_z)))\n" "svuint8_t svadd_z(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_z)))\n" "svuint32_t svadd_z(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_z)))\n" "svuint64_t svadd_z(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_z)))\n" "svuint16_t svadd_z(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_z)))\n" "svint8_t 
svadd_z(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_z)))\n" "svint32_t svadd_z(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_z)))\n" "svint64_t svadd_z(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_z)))\n" "svint16_t svadd_z(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f64)))\n" "float64_t svadda(svbool_t, float64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f32)))\n" "float32_t svadda(svbool_t, float32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f16)))\n" "float16_t svadda(svbool_t, float16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s8)))\n" "int64_t svaddv(svbool_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s32)))\n" "int64_t svaddv(svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s64)))\n" "int64_t svaddv(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s16)))\n" "int64_t svaddv(svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u8)))\n" "uint64_t svaddv(svbool_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u32)))\n" "uint64_t svaddv(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u64)))\n" "uint64_t svaddv(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u16)))\n" "uint64_t svaddv(svbool_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f64)))\n" "float64_t svaddv(svbool_t, svfloat64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f32)))\n" "float32_t svaddv(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f16)))\n" "float16_t svaddv(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u32base_u32offset)))\n" "svuint32_t svadrb_offset(svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u64base_u64offset)))\n" "svuint64_t svadrb_offset(svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u32base_s32offset)))\n" "svuint32_t svadrb_offset(svuint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u64base_s64offset)))\n" "svuint64_t svadrb_offset(svuint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u32base_u32index)))\n" "svuint32_t svadrd_index(svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u64base_u64index)))\n" "svuint64_t svadrd_index(svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u32base_s32index)))\n" "svuint32_t svadrd_index(svuint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u64base_s64index)))\n" "svuint64_t svadrd_index(svuint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u32base_u32index)))\n" "svuint32_t svadrh_index(svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u64base_u64index)))\n" "svuint64_t svadrh_index(svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u32base_s32index)))\n" "svuint32_t svadrh_index(svuint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u64base_s64index)))\n" "svuint64_t svadrh_index(svuint64_t, svint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u32base_u32index)))\n" "svuint32_t svadrw_index(svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u64base_u64index)))\n" "svuint64_t svadrw_index(svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u32base_s32index)))\n" "svuint32_t svadrw_index(svuint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u64base_s64index)))\n" "svuint64_t svadrw_index(svuint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_b_z)))\n" "svbool_t svand_z(svbool_t, svbool_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_m)))\n" "svuint8_t svand_m(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_m)))\n" "svuint32_t svand_m(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_m)))\n" "svuint64_t svand_m(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_m)))\n" "svuint16_t svand_m(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_m)))\n" "svint8_t svand_m(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_m)))\n" "svint32_t svand_m(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_m)))\n" "svint64_t svand_m(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_m)))\n" "svint16_t svand_m(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_x)))\n" "svuint8_t svand_x(svbool_t, svuint8_t, uint8_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_x)))\n" "svuint32_t svand_x(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_x)))\n" "svuint64_t svand_x(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_x)))\n" "svuint16_t svand_x(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_x)))\n" "svint8_t svand_x(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_x)))\n" "svint32_t svand_x(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_x)))\n" "svint64_t svand_x(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_x)))\n" "svint16_t svand_x(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_z)))\n" "svuint8_t svand_z(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_z)))\n" "svuint32_t svand_z(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_z)))\n" "svuint64_t svand_z(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_z)))\n" "svuint16_t svand_z(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_z)))\n" "svint8_t svand_z(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_z)))\n" "svint32_t svand_z(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_z)))\n" "svint64_t svand_z(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_z)))\n" "svint16_t 
svand_z(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_m)))\n" "svuint8_t svand_m(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_m)))\n" "svuint32_t svand_m(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_m)))\n" "svuint64_t svand_m(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_m)))\n" "svuint16_t svand_m(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_m)))\n" "svint8_t svand_m(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_m)))\n" "svint32_t svand_m(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_m)))\n" "svint64_t svand_m(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_m)))\n" "svint16_t svand_m(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_x)))\n" "svuint8_t svand_x(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_x)))\n" "svuint32_t svand_x(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_x)))\n" "svuint64_t svand_x(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_x)))\n" "svuint16_t svand_x(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_x)))\n" "svint8_t svand_x(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_x)))\n" "svint32_t svand_x(svbool_t, svint32_t, svint32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_x)))\n" "svint64_t svand_x(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_x)))\n" "svint16_t svand_x(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_z)))\n" "svuint8_t svand_z(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_z)))\n" "svuint32_t svand_z(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_z)))\n" "svuint64_t svand_z(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_z)))\n" "svuint16_t svand_z(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_z)))\n" "svint8_t svand_z(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_z)))\n" "svint32_t svand_z(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_z)))\n" "svint64_t svand_z(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_z)))\n" "svint16_t svand_z(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u8)))\n" "uint8_t svandv(svbool_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u32)))\n" "uint32_t svandv(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u64)))\n" "uint64_t svandv(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u16)))\n" "uint16_t svandv(svbool_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s8)))\n" "int8_t svandv(svbool_t, svint8_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s32)))\n" "int32_t svandv(svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s64)))\n" "int64_t svandv(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s16)))\n" "int16_t svandv(svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_m)))\n" "svint8_t svasr_m(svbool_t, svint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_m)))\n" "svint32_t svasr_m(svbool_t, svint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_m)))\n" "svint64_t svasr_m(svbool_t, svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_m)))\n" "svint16_t svasr_m(svbool_t, svint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_x)))\n" "svint8_t svasr_x(svbool_t, svint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_x)))\n" "svint32_t svasr_x(svbool_t, svint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_x)))\n" "svint64_t svasr_x(svbool_t, svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_x)))\n" "svint16_t svasr_x(svbool_t, svint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_z)))\n" "svint8_t svasr_z(svbool_t, svint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_z)))\n" "svint32_t svasr_z(svbool_t, svint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_z)))\n" "svint64_t svasr_z(svbool_t, svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_z)))\n" "svint16_t svasr_z(svbool_t, svint16_t, uint16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_m)))\n" "svint8_t svasr_m(svbool_t, svint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_m)))\n" "svint32_t svasr_m(svbool_t, svint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_m)))\n" "svint64_t svasr_m(svbool_t, svint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_m)))\n" "svint16_t svasr_m(svbool_t, svint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_x)))\n" "svint8_t svasr_x(svbool_t, svint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_x)))\n" "svint32_t svasr_x(svbool_t, svint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_x)))\n" "svint64_t svasr_x(svbool_t, svint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_x)))\n" "svint16_t svasr_x(svbool_t, svint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_z)))\n" "svint8_t svasr_z(svbool_t, svint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_z)))\n" "svint32_t svasr_z(svbool_t, svint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_z)))\n" "svint64_t svasr_z(svbool_t, svint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_z)))\n" "svint16_t svasr_z(svbool_t, svint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_m)))\n" "svint8_t svasr_wide_m(svbool_t, svint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_m)))\n" "svint32_t svasr_wide_m(svbool_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_m)))\n" "svint16_t 
svasr_wide_m(svbool_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_x)))\n" "svint8_t svasr_wide_x(svbool_t, svint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_x)))\n" "svint32_t svasr_wide_x(svbool_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_x)))\n" "svint16_t svasr_wide_x(svbool_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_z)))\n" "svint8_t svasr_wide_z(svbool_t, svint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_z)))\n" "svint32_t svasr_wide_z(svbool_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_z)))\n" "svint16_t svasr_wide_z(svbool_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_m)))\n" "svint8_t svasr_wide_m(svbool_t, svint8_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_m)))\n" "svint32_t svasr_wide_m(svbool_t, svint32_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_m)))\n" "svint16_t svasr_wide_m(svbool_t, svint16_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_x)))\n" "svint8_t svasr_wide_x(svbool_t, svint8_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_x)))\n" "svint32_t svasr_wide_x(svbool_t, svint32_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_x)))\n" "svint16_t svasr_wide_x(svbool_t, svint16_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_z)))\n" "svint8_t svasr_wide_z(svbool_t, svint8_t, svuint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_z)))\n" "svint32_t svasr_wide_z(svbool_t, svint32_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_z)))\n" "svint16_t svasr_wide_z(svbool_t, svint16_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_m)))\n" "svint8_t svasrd_m(svbool_t, svint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_m)))\n" "svint32_t svasrd_m(svbool_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_m)))\n" "svint64_t svasrd_m(svbool_t, svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_m)))\n" "svint16_t svasrd_m(svbool_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_x)))\n" "svint8_t svasrd_x(svbool_t, svint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_x)))\n" "svint32_t svasrd_x(svbool_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_x)))\n" "svint64_t svasrd_x(svbool_t, svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_x)))\n" "svint16_t svasrd_x(svbool_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_z)))\n" "svint8_t svasrd_z(svbool_t, svint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_z)))\n" "svint32_t svasrd_z(svbool_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_z)))\n" "svint64_t svasrd_z(svbool_t, svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_z)))\n" "svint16_t svasrd_z(svbool_t, svint16_t, uint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_b_z)))\n" "svbool_t svbic_z(svbool_t, svbool_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_m)))\n" "svuint8_t svbic_m(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_m)))\n" "svuint32_t svbic_m(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_m)))\n" "svuint64_t svbic_m(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_m)))\n" "svuint16_t svbic_m(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_m)))\n" "svint8_t svbic_m(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_m)))\n" "svint32_t svbic_m(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_m)))\n" "svint64_t svbic_m(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_m)))\n" "svint16_t svbic_m(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_x)))\n" "svuint8_t svbic_x(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_x)))\n" "svuint32_t svbic_x(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_x)))\n" "svuint64_t svbic_x(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_x)))\n" "svuint16_t svbic_x(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_x)))\n" "svint8_t svbic_x(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_x)))\n" "svint32_t svbic_x(svbool_t, 
svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_x)))\n" "svint64_t svbic_x(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_x)))\n" "svint16_t svbic_x(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_z)))\n" "svuint8_t svbic_z(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_z)))\n" "svuint32_t svbic_z(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_z)))\n" "svuint64_t svbic_z(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_z)))\n" "svuint16_t svbic_z(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_z)))\n" "svint8_t svbic_z(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_z)))\n" "svint32_t svbic_z(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_z)))\n" "svint64_t svbic_z(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_z)))\n" "svint16_t svbic_z(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_m)))\n" "svuint8_t svbic_m(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_m)))\n" "svuint32_t svbic_m(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_m)))\n" "svuint64_t svbic_m(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_m)))\n" "svuint16_t svbic_m(svbool_t, svuint16_t, svuint16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_m)))\n" "svint8_t svbic_m(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_m)))\n" "svint32_t svbic_m(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_m)))\n" "svint64_t svbic_m(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_m)))\n" "svint16_t svbic_m(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_x)))\n" "svuint8_t svbic_x(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_x)))\n" "svuint32_t svbic_x(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_x)))\n" "svuint64_t svbic_x(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_x)))\n" "svuint16_t svbic_x(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_x)))\n" "svint8_t svbic_x(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_x)))\n" "svint32_t svbic_x(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_x)))\n" "svint64_t svbic_x(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_x)))\n" "svint16_t svbic_x(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_z)))\n" "svuint8_t svbic_z(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_z)))\n" "svuint32_t svbic_z(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_z)))\n" "svuint64_t svbic_z(svbool_t, 
svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_z)))\n" "svuint16_t svbic_z(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_z)))\n" "svint8_t svbic_z(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_z)))\n" "svint32_t svbic_z(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_z)))\n" "svint64_t svbic_z(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_z)))\n" "svint16_t svbic_z(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrka_b_m)))\n" "svbool_t svbrka_m(svbool_t, svbool_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrka_b_z)))\n" "svbool_t svbrka_z(svbool_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkb_b_m)))\n" "svbool_t svbrkb_m(svbool_t, svbool_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkb_b_z)))\n" "svbool_t svbrkb_z(svbool_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkn_b_z)))\n" "svbool_t svbrkn_z(svbool_t, svbool_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkpa_b_z)))\n" "svbool_t svbrkpa_z(svbool_t, svbool_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkpb_b_z)))\n" "svbool_t svbrkpb_z(svbool_t, svbool_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_m)))\n" "svfloat64_t svcadd_m(svbool_t, svfloat64_t, svfloat64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_m)))\n" "svfloat32_t svcadd_m(svbool_t, svfloat32_t, svfloat32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_m)))\n" 
"svfloat16_t svcadd_m(svbool_t, svfloat16_t, svfloat16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_x)))\n" "svfloat64_t svcadd_x(svbool_t, svfloat64_t, svfloat64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_x)))\n" "svfloat32_t svcadd_x(svbool_t, svfloat32_t, svfloat32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_x)))\n" "svfloat16_t svcadd_x(svbool_t, svfloat16_t, svfloat16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_z)))\n" "svfloat64_t svcadd_z(svbool_t, svfloat64_t, svfloat64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_z)))\n" "svfloat32_t svcadd_z(svbool_t, svfloat32_t, svfloat32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_z)))\n" "svfloat16_t svcadd_z(svbool_t, svfloat16_t, svfloat16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u8)))\n" "uint8_t svclasta(svbool_t, uint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u32)))\n" "uint32_t svclasta(svbool_t, uint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u64)))\n" "uint64_t svclasta(svbool_t, uint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u16)))\n" "uint16_t svclasta(svbool_t, uint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s8)))\n" "int8_t svclasta(svbool_t, int8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f64)))\n" "float64_t svclasta(svbool_t, float64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f32)))\n" "float32_t svclasta(svbool_t, float32_t, svfloat32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f16)))\n" "float16_t svclasta(svbool_t, float16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s32)))\n" "int32_t svclasta(svbool_t, int32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s64)))\n" "int64_t svclasta(svbool_t, int64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s16)))\n" "int16_t svclasta(svbool_t, int16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u8)))\n" "svuint8_t svclasta(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u32)))\n" "svuint32_t svclasta(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u64)))\n" "svuint64_t svclasta(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u16)))\n" "svuint16_t svclasta(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s8)))\n" "svint8_t svclasta(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f64)))\n" "svfloat64_t svclasta(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f32)))\n" "svfloat32_t svclasta(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f16)))\n" "svfloat16_t svclasta(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s32)))\n" "svint32_t svclasta(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s64)))\n" "svint64_t svclasta(svbool_t, svint64_t, svint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s16)))\n" "svint16_t svclasta(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u8)))\n" "uint8_t svclastb(svbool_t, uint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u32)))\n" "uint32_t svclastb(svbool_t, uint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u64)))\n" "uint64_t svclastb(svbool_t, uint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u16)))\n" "uint16_t svclastb(svbool_t, uint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s8)))\n" "int8_t svclastb(svbool_t, int8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f64)))\n" "float64_t svclastb(svbool_t, float64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f32)))\n" "float32_t svclastb(svbool_t, float32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f16)))\n" "float16_t svclastb(svbool_t, float16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s32)))\n" "int32_t svclastb(svbool_t, int32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s64)))\n" "int64_t svclastb(svbool_t, int64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s16)))\n" "int16_t svclastb(svbool_t, int16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u8)))\n" "svuint8_t svclastb(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u32)))\n" "svuint32_t svclastb(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u64)))\n" 
"svuint64_t svclastb(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u16)))\n" "svuint16_t svclastb(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s8)))\n" "svint8_t svclastb(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f64)))\n" "svfloat64_t svclastb(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f32)))\n" "svfloat32_t svclastb(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f16)))\n" "svfloat16_t svclastb(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s32)))\n" "svint32_t svclastb(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s64)))\n" "svint64_t svclastb(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s16)))\n" "svint16_t svclastb(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_m)))\n" "svuint8_t svcls_m(svuint8_t, svbool_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_m)))\n" "svuint32_t svcls_m(svuint32_t, svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_m)))\n" "svuint64_t svcls_m(svuint64_t, svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_m)))\n" "svuint16_t svcls_m(svuint16_t, svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_x)))\n" "svuint8_t svcls_x(svbool_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_x)))\n" "svuint32_t svcls_x(svbool_t, svint32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_x)))\n" "svuint64_t svcls_x(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_x)))\n" "svuint16_t svcls_x(svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_z)))\n" "svuint8_t svcls_z(svbool_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_z)))\n" "svuint32_t svcls_z(svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_z)))\n" "svuint64_t svcls_z(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_z)))\n" "svuint16_t svcls_z(svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_m)))\n" "svuint8_t svclz_m(svuint8_t, svbool_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_m)))\n" "svuint32_t svclz_m(svuint32_t, svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_m)))\n" "svuint64_t svclz_m(svuint64_t, svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_m)))\n" "svuint16_t svclz_m(svuint16_t, svbool_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_m)))\n" "svuint8_t svclz_m(svuint8_t, svbool_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_m)))\n" "svuint32_t svclz_m(svuint32_t, svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_m)))\n" "svuint64_t svclz_m(svuint64_t, svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_m)))\n" "svuint16_t svclz_m(svuint16_t, svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_x)))\n" "svuint8_t svclz_x(svbool_t, svuint8_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_x)))\n" "svuint32_t svclz_x(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_x)))\n" "svuint64_t svclz_x(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_x)))\n" "svuint16_t svclz_x(svbool_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_x)))\n" "svuint8_t svclz_x(svbool_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_x)))\n" "svuint32_t svclz_x(svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_x)))\n" "svuint64_t svclz_x(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_x)))\n" "svuint16_t svclz_x(svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_z)))\n" "svuint8_t svclz_z(svbool_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_z)))\n" "svuint32_t svclz_z(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_z)))\n" "svuint64_t svclz_z(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_z)))\n" "svuint16_t svclz_z(svbool_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_z)))\n" "svuint8_t svclz_z(svbool_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_z)))\n" "svuint32_t svclz_z(svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_z)))\n" "svuint64_t svclz_z(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_z)))\n" "svuint16_t svclz_z(svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_m)))\n" "svfloat64_t svcmla_m(svbool_t, svfloat64_t, 
svfloat64_t, svfloat64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_m)))\n" "svfloat32_t svcmla_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_m)))\n" "svfloat16_t svcmla_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_x)))\n" "svfloat64_t svcmla_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_x)))\n" "svfloat32_t svcmla_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_x)))\n" "svfloat16_t svcmla_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_z)))\n" "svfloat64_t svcmla_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_z)))\n" "svfloat32_t svcmla_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_z)))\n" "svfloat16_t svcmla_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_f32)))\n" "svfloat32_t svcmla_lane(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_f16)))\n" "svfloat16_t svcmla_lane(svfloat16_t, svfloat16_t, svfloat16_t, uint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f64)))\n" "svbool_t svcmpeq(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f32)))\n" "svbool_t svcmpeq(svbool_t, svfloat32_t, float32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f16)))\n" "svbool_t svcmpeq(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u8)))\n" "svbool_t svcmpeq(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u32)))\n" "svbool_t svcmpeq(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u64)))\n" "svbool_t svcmpeq(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u16)))\n" "svbool_t svcmpeq(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s8)))\n" "svbool_t svcmpeq(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s32)))\n" "svbool_t svcmpeq(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s64)))\n" "svbool_t svcmpeq(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s16)))\n" "svbool_t svcmpeq(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u8)))\n" "svbool_t svcmpeq(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u32)))\n" "svbool_t svcmpeq(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u64)))\n" "svbool_t svcmpeq(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u16)))\n" "svbool_t svcmpeq(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s8)))\n" "svbool_t svcmpeq(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s32)))\n" "svbool_t svcmpeq(svbool_t, svint32_t, 
svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s64)))\n" "svbool_t svcmpeq(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s16)))\n" "svbool_t svcmpeq(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f64)))\n" "svbool_t svcmpeq(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f32)))\n" "svbool_t svcmpeq(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f16)))\n" "svbool_t svcmpeq(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s8)))\n" "svbool_t svcmpeq_wide(svbool_t, svint8_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s32)))\n" "svbool_t svcmpeq_wide(svbool_t, svint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s16)))\n" "svbool_t svcmpeq_wide(svbool_t, svint16_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s8)))\n" "svbool_t svcmpeq_wide(svbool_t, svint8_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s32)))\n" "svbool_t svcmpeq_wide(svbool_t, svint32_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s16)))\n" "svbool_t svcmpeq_wide(svbool_t, svint16_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f64)))\n" "svbool_t svcmpge(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f32)))\n" "svbool_t svcmpge(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f16)))\n" "svbool_t svcmpge(svbool_t, svfloat16_t, float16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s8)))\n" "svbool_t svcmpge(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s32)))\n" "svbool_t svcmpge(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s64)))\n" "svbool_t svcmpge(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s16)))\n" "svbool_t svcmpge(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u8)))\n" "svbool_t svcmpge(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u32)))\n" "svbool_t svcmpge(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u64)))\n" "svbool_t svcmpge(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u16)))\n" "svbool_t svcmpge(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s8)))\n" "svbool_t svcmpge(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s32)))\n" "svbool_t svcmpge(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s64)))\n" "svbool_t svcmpge(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s16)))\n" "svbool_t svcmpge(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f64)))\n" "svbool_t svcmpge(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f32)))\n" "svbool_t svcmpge(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f16)))\n" "svbool_t svcmpge(svbool_t, svfloat16_t, 
svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u8)))\n" "svbool_t svcmpge(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u32)))\n" "svbool_t svcmpge(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u64)))\n" "svbool_t svcmpge(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u16)))\n" "svbool_t svcmpge(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s8)))\n" "svbool_t svcmpge_wide(svbool_t, svint8_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s32)))\n" "svbool_t svcmpge_wide(svbool_t, svint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s16)))\n" "svbool_t svcmpge_wide(svbool_t, svint16_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u8)))\n" "svbool_t svcmpge_wide(svbool_t, svuint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u32)))\n" "svbool_t svcmpge_wide(svbool_t, svuint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u16)))\n" "svbool_t svcmpge_wide(svbool_t, svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s8)))\n" "svbool_t svcmpge_wide(svbool_t, svint8_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s32)))\n" "svbool_t svcmpge_wide(svbool_t, svint32_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s16)))\n" "svbool_t svcmpge_wide(svbool_t, svint16_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u8)))\n" "svbool_t svcmpge_wide(svbool_t, svuint8_t, svuint64_t);\n" 
"__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u32)))\n" "svbool_t svcmpge_wide(svbool_t, svuint32_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u16)))\n" "svbool_t svcmpge_wide(svbool_t, svuint16_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f64)))\n" "svbool_t svcmpgt(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f32)))\n" "svbool_t svcmpgt(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f16)))\n" "svbool_t svcmpgt(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s8)))\n" "svbool_t svcmpgt(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s32)))\n" "svbool_t svcmpgt(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s64)))\n" "svbool_t svcmpgt(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s16)))\n" "svbool_t svcmpgt(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u8)))\n" "svbool_t svcmpgt(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u32)))\n" "svbool_t svcmpgt(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u64)))\n" "svbool_t svcmpgt(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u16)))\n" "svbool_t svcmpgt(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s8)))\n" "svbool_t svcmpgt(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s32)))\n" 
"svbool_t svcmpgt(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s64)))\n" "svbool_t svcmpgt(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s16)))\n" "svbool_t svcmpgt(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f64)))\n" "svbool_t svcmpgt(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f32)))\n" "svbool_t svcmpgt(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f16)))\n" "svbool_t svcmpgt(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u8)))\n" "svbool_t svcmpgt(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u32)))\n" "svbool_t svcmpgt(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u64)))\n" "svbool_t svcmpgt(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u16)))\n" "svbool_t svcmpgt(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s8)))\n" "svbool_t svcmpgt_wide(svbool_t, svint8_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s32)))\n" "svbool_t svcmpgt_wide(svbool_t, svint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s16)))\n" "svbool_t svcmpgt_wide(svbool_t, svint16_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u8)))\n" "svbool_t svcmpgt_wide(svbool_t, svuint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u32)))\n" "svbool_t svcmpgt_wide(svbool_t, svuint32_t, uint64_t);\n" 
"__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u16)))\n" "svbool_t svcmpgt_wide(svbool_t, svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s8)))\n" "svbool_t svcmpgt_wide(svbool_t, svint8_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s32)))\n" "svbool_t svcmpgt_wide(svbool_t, svint32_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s16)))\n" "svbool_t svcmpgt_wide(svbool_t, svint16_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u8)))\n" "svbool_t svcmpgt_wide(svbool_t, svuint8_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u32)))\n" "svbool_t svcmpgt_wide(svbool_t, svuint32_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u16)))\n" "svbool_t svcmpgt_wide(svbool_t, svuint16_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f64)))\n" "svbool_t svcmple(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f32)))\n" "svbool_t svcmple(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f16)))\n" "svbool_t svcmple(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s8)))\n" "svbool_t svcmple(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s32)))\n" "svbool_t svcmple(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s64)))\n" "svbool_t svcmple(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s16)))\n" "svbool_t svcmple(svbool_t, svint16_t, int16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u8)))\n" "svbool_t svcmple(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u32)))\n" "svbool_t svcmple(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u64)))\n" "svbool_t svcmple(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u16)))\n" "svbool_t svcmple(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s8)))\n" "svbool_t svcmple(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s32)))\n" "svbool_t svcmple(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s64)))\n" "svbool_t svcmple(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s16)))\n" "svbool_t svcmple(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f64)))\n" "svbool_t svcmple(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f32)))\n" "svbool_t svcmple(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f16)))\n" "svbool_t svcmple(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u8)))\n" "svbool_t svcmple(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u32)))\n" "svbool_t svcmple(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u64)))\n" "svbool_t svcmple(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u16)))\n" "svbool_t svcmple(svbool_t, 
svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s8)))\n" "svbool_t svcmple_wide(svbool_t, svint8_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s32)))\n" "svbool_t svcmple_wide(svbool_t, svint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s16)))\n" "svbool_t svcmple_wide(svbool_t, svint16_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u8)))\n" "svbool_t svcmple_wide(svbool_t, svuint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u32)))\n" "svbool_t svcmple_wide(svbool_t, svuint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u16)))\n" "svbool_t svcmple_wide(svbool_t, svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s8)))\n" "svbool_t svcmple_wide(svbool_t, svint8_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s32)))\n" "svbool_t svcmple_wide(svbool_t, svint32_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s16)))\n" "svbool_t svcmple_wide(svbool_t, svint16_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u8)))\n" "svbool_t svcmple_wide(svbool_t, svuint8_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u32)))\n" "svbool_t svcmple_wide(svbool_t, svuint32_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u16)))\n" "svbool_t svcmple_wide(svbool_t, svuint16_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u8)))\n" "svbool_t svcmplt(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u32)))\n" "svbool_t 
svcmplt(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u64)))\n" "svbool_t svcmplt(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u16)))\n" "svbool_t svcmplt(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f64)))\n" "svbool_t svcmplt(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f32)))\n" "svbool_t svcmplt(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f16)))\n" "svbool_t svcmplt(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s8)))\n" "svbool_t svcmplt(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s32)))\n" "svbool_t svcmplt(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s64)))\n" "svbool_t svcmplt(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s16)))\n" "svbool_t svcmplt(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u8)))\n" "svbool_t svcmplt(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u32)))\n" "svbool_t svcmplt(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u64)))\n" "svbool_t svcmplt(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u16)))\n" "svbool_t svcmplt(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s8)))\n" "svbool_t svcmplt(svbool_t, svint8_t, svint8_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s32)))\n" "svbool_t svcmplt(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s64)))\n" "svbool_t svcmplt(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s16)))\n" "svbool_t svcmplt(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f64)))\n" "svbool_t svcmplt(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f32)))\n" "svbool_t svcmplt(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f16)))\n" "svbool_t svcmplt(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u8)))\n" "svbool_t svcmplt_wide(svbool_t, svuint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u32)))\n" "svbool_t svcmplt_wide(svbool_t, svuint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u16)))\n" "svbool_t svcmplt_wide(svbool_t, svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s8)))\n" "svbool_t svcmplt_wide(svbool_t, svint8_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s32)))\n" "svbool_t svcmplt_wide(svbool_t, svint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s16)))\n" "svbool_t svcmplt_wide(svbool_t, svint16_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u8)))\n" "svbool_t svcmplt_wide(svbool_t, svuint8_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u32)))\n" "svbool_t svcmplt_wide(svbool_t, svuint32_t, svuint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u16)))\n" "svbool_t svcmplt_wide(svbool_t, svuint16_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s8)))\n" "svbool_t svcmplt_wide(svbool_t, svint8_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s32)))\n" "svbool_t svcmplt_wide(svbool_t, svint32_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s16)))\n" "svbool_t svcmplt_wide(svbool_t, svint16_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f64)))\n" "svbool_t svcmpne(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f32)))\n" "svbool_t svcmpne(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f16)))\n" "svbool_t svcmpne(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u8)))\n" "svbool_t svcmpne(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u32)))\n" "svbool_t svcmpne(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u64)))\n" "svbool_t svcmpne(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u16)))\n" "svbool_t svcmpne(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s8)))\n" "svbool_t svcmpne(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s32)))\n" "svbool_t svcmpne(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s64)))\n" "svbool_t svcmpne(svbool_t, svint64_t, int64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s16)))\n" "svbool_t svcmpne(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u8)))\n" "svbool_t svcmpne(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u32)))\n" "svbool_t svcmpne(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u64)))\n" "svbool_t svcmpne(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u16)))\n" "svbool_t svcmpne(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s8)))\n" "svbool_t svcmpne(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s32)))\n" "svbool_t svcmpne(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s64)))\n" "svbool_t svcmpne(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s16)))\n" "svbool_t svcmpne(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f64)))\n" "svbool_t svcmpne(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f32)))\n" "svbool_t svcmpne(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f16)))\n" "svbool_t svcmpne(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s8)))\n" "svbool_t svcmpne_wide(svbool_t, svint8_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s32)))\n" "svbool_t svcmpne_wide(svbool_t, svint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s16)))\n" "svbool_t 
svcmpne_wide(svbool_t, svint16_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s8)))\n" "svbool_t svcmpne_wide(svbool_t, svint8_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s32)))\n" "svbool_t svcmpne_wide(svbool_t, svint32_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s16)))\n" "svbool_t svcmpne_wide(svbool_t, svint16_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f64)))\n" "svbool_t svcmpuo(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f32)))\n" "svbool_t svcmpuo(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f16)))\n" "svbool_t svcmpuo(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f64)))\n" "svbool_t svcmpuo(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f32)))\n" "svbool_t svcmpuo(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f16)))\n" "svbool_t svcmpuo(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_m)))\n" "svuint8_t svcnot_m(svuint8_t, svbool_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_m)))\n" "svuint32_t svcnot_m(svuint32_t, svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_m)))\n" "svuint64_t svcnot_m(svuint64_t, svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_m)))\n" "svuint16_t svcnot_m(svuint16_t, svbool_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_m)))\n" "svint8_t svcnot_m(svint8_t, svbool_t, svint8_t);\n" 
"__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_m)))\n" "svint32_t svcnot_m(svint32_t, svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_m)))\n" "svint64_t svcnot_m(svint64_t, svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_m)))\n" "svint16_t svcnot_m(svint16_t, svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_x)))\n" "svuint8_t svcnot_x(svbool_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_x)))\n" "svuint32_t svcnot_x(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_x)))\n" "svuint64_t svcnot_x(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_x)))\n" "svuint16_t svcnot_x(svbool_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_x)))\n" "svint8_t svcnot_x(svbool_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_x)))\n" "svint32_t svcnot_x(svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_x)))\n" "svint64_t svcnot_x(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_x)))\n" "svint16_t svcnot_x(svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_z)))\n" "svuint8_t svcnot_z(svbool_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_z)))\n" "svuint32_t svcnot_z(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_z)))\n" "svuint64_t svcnot_z(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_z)))\n" "svuint16_t svcnot_z(svbool_t, svuint16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_z)))\n" "svint8_t svcnot_z(svbool_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_z)))\n" "svint32_t svcnot_z(svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_z)))\n" "svint64_t svcnot_z(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_z)))\n" "svint16_t svcnot_z(svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_m)))\n" "svuint8_t svcnt_m(svuint8_t, svbool_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_m)))\n" "svuint32_t svcnt_m(svuint32_t, svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_m)))\n" "svuint64_t svcnt_m(svuint64_t, svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_m)))\n" "svuint16_t svcnt_m(svuint16_t, svbool_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_m)))\n" "svuint8_t svcnt_m(svuint8_t, svbool_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_m)))\n" "svuint64_t svcnt_m(svuint64_t, svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_m)))\n" "svuint32_t svcnt_m(svuint32_t, svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_m)))\n" "svuint16_t svcnt_m(svuint16_t, svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_m)))\n" "svuint32_t svcnt_m(svuint32_t, svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_m)))\n" "svuint64_t svcnt_m(svuint64_t, svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_m)))\n" "svuint16_t svcnt_m(svuint16_t, svbool_t, 
svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_x)))\n" "svuint8_t svcnt_x(svbool_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_x)))\n" "svuint32_t svcnt_x(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_x)))\n" "svuint64_t svcnt_x(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_x)))\n" "svuint16_t svcnt_x(svbool_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_x)))\n" "svuint8_t svcnt_x(svbool_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_x)))\n" "svuint64_t svcnt_x(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_x)))\n" "svuint32_t svcnt_x(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_x)))\n" "svuint16_t svcnt_x(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_x)))\n" "svuint32_t svcnt_x(svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_x)))\n" "svuint64_t svcnt_x(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_x)))\n" "svuint16_t svcnt_x(svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_z)))\n" "svuint8_t svcnt_z(svbool_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_z)))\n" "svuint32_t svcnt_z(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_z)))\n" "svuint64_t svcnt_z(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_z)))\n" "svuint16_t svcnt_z(svbool_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_z)))\n" "svuint8_t 
svcnt_z(svbool_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_z)))\n" "svuint64_t svcnt_z(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_z)))\n" "svuint32_t svcnt_z(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_z)))\n" "svuint16_t svcnt_z(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_z)))\n" "svuint32_t svcnt_z(svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_z)))\n" "svuint64_t svcnt_z(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_z)))\n" "svuint16_t svcnt_z(svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_u32)))\n" "svuint32_t svcompact(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_u64)))\n" "svuint64_t svcompact(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_f64)))\n" "svfloat64_t svcompact(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_f32)))\n" "svfloat32_t svcompact(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_s32)))\n" "svint32_t svcompact(svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_s64)))\n" "svint64_t svcompact(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u8)))\n" "svuint8x2_t svcreate2(svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u32)))\n" "svuint32x2_t svcreate2(svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u64)))\n" "svuint64x2_t svcreate2(svuint64_t, svuint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u16)))\n" "svuint16x2_t svcreate2(svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s8)))\n" "svint8x2_t svcreate2(svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f64)))\n" "svfloat64x2_t svcreate2(svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f32)))\n" "svfloat32x2_t svcreate2(svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f16)))\n" "svfloat16x2_t svcreate2(svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s32)))\n" "svint32x2_t svcreate2(svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s64)))\n" "svint64x2_t svcreate2(svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s16)))\n" "svint16x2_t svcreate2(svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u8)))\n" "svuint8x3_t svcreate3(svuint8_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u32)))\n" "svuint32x3_t svcreate3(svuint32_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u64)))\n" "svuint64x3_t svcreate3(svuint64_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u16)))\n" "svuint16x3_t svcreate3(svuint16_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s8)))\n" "svint8x3_t svcreate3(svint8_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f64)))\n" "svfloat64x3_t svcreate3(svfloat64_t, svfloat64_t, svfloat64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f32)))\n" "svfloat32x3_t svcreate3(svfloat32_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f16)))\n" "svfloat16x3_t svcreate3(svfloat16_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s32)))\n" "svint32x3_t svcreate3(svint32_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s64)))\n" "svint64x3_t svcreate3(svint64_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s16)))\n" "svint16x3_t svcreate3(svint16_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u8)))\n" "svuint8x4_t svcreate4(svuint8_t, svuint8_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u32)))\n" "svuint32x4_t svcreate4(svuint32_t, svuint32_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u64)))\n" "svuint64x4_t svcreate4(svuint64_t, svuint64_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u16)))\n" "svuint16x4_t svcreate4(svuint16_t, svuint16_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s8)))\n" "svint8x4_t svcreate4(svint8_t, svint8_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f64)))\n" "svfloat64x4_t svcreate4(svfloat64_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f32)))\n" "svfloat32x4_t svcreate4(svfloat32_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f16)))\n" "svfloat16x4_t svcreate4(svfloat16_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s32)))\n" "svint32x4_t svcreate4(svint32_t, svint32_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s64)))\n" "svint64x4_t svcreate4(svint64_t, svint64_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s16)))\n" "svint16x4_t svcreate4(svint16_t, svint16_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_m)))\n" "svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_x)))\n" "svfloat16_t svcvt_f16_x(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_z)))\n" "svfloat16_t svcvt_f16_z(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_m)))\n" "svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_x)))\n" "svfloat16_t svcvt_f16_x(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_z)))\n" "svfloat16_t svcvt_f16_z(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_m)))\n" "svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_x)))\n" "svfloat16_t svcvt_f16_x(svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_z)))\n" "svfloat16_t svcvt_f16_z(svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_m)))\n" "svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_x)))\n" "svfloat16_t svcvt_f16_x(svbool_t, svint32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_z)))\n" "svfloat16_t svcvt_f16_z(svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_m)))\n" "svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_x)))\n" "svfloat16_t svcvt_f16_x(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_z)))\n" "svfloat16_t svcvt_f16_z(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_m)))\n" "svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_x)))\n" "svfloat16_t svcvt_f16_x(svbool_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_z)))\n" "svfloat16_t svcvt_f16_z(svbool_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_m)))\n" "svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_x)))\n" "svfloat16_t svcvt_f16_x(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_z)))\n" "svfloat16_t svcvt_f16_z(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_m)))\n" "svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_x)))\n" "svfloat16_t svcvt_f16_x(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_z)))\n" "svfloat16_t svcvt_f16_z(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_m)))\n" "svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svfloat16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_x)))\n" "svfloat32_t svcvt_f32_x(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_z)))\n" "svfloat32_t svcvt_f32_z(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_m)))\n" "svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_x)))\n" "svfloat32_t svcvt_f32_x(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_z)))\n" "svfloat32_t svcvt_f32_z(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_m)))\n" "svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_x)))\n" "svfloat32_t svcvt_f32_x(svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_z)))\n" "svfloat32_t svcvt_f32_z(svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_m)))\n" "svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_x)))\n" "svfloat32_t svcvt_f32_x(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_z)))\n" "svfloat32_t svcvt_f32_z(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_m)))\n" "svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_x)))\n" "svfloat32_t svcvt_f32_x(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_z)))\n" "svfloat32_t svcvt_f32_z(svbool_t, svuint32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_m)))\n" "svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_x)))\n" "svfloat32_t svcvt_f32_x(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_z)))\n" "svfloat32_t svcvt_f32_z(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_m)))\n" "svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_x)))\n" "svfloat64_t svcvt_f64_x(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_z)))\n" "svfloat64_t svcvt_f64_z(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_m)))\n" "svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_x)))\n" "svfloat64_t svcvt_f64_x(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_z)))\n" "svfloat64_t svcvt_f64_z(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_m)))\n" "svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_x)))\n" "svfloat64_t svcvt_f64_x(svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_z)))\n" "svfloat64_t svcvt_f64_z(svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_m)))\n" "svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_x)))\n" "svfloat64_t svcvt_f64_x(svbool_t, svint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_z)))\n" "svfloat64_t svcvt_f64_z(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_m)))\n" "svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_x)))\n" "svfloat64_t svcvt_f64_x(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_z)))\n" "svfloat64_t svcvt_f64_z(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_m)))\n" "svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_x)))\n" "svfloat64_t svcvt_f64_x(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_z)))\n" "svfloat64_t svcvt_f64_z(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_m)))\n" "svint16_t svcvt_s16_m(svint16_t, svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_x)))\n" "svint16_t svcvt_s16_x(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_z)))\n" "svint16_t svcvt_s16_z(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_m)))\n" "svint32_t svcvt_s32_m(svint32_t, svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_x)))\n" "svint32_t svcvt_s32_x(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_z)))\n" "svint32_t svcvt_s32_z(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_m)))\n" "svint32_t svcvt_s32_m(svint32_t, svbool_t, svfloat32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_x)))\n" "svint32_t svcvt_s32_x(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_z)))\n" "svint32_t svcvt_s32_z(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_m)))\n" "svint32_t svcvt_s32_m(svint32_t, svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_x)))\n" "svint32_t svcvt_s32_x(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_z)))\n" "svint32_t svcvt_s32_z(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_m)))\n" "svint64_t svcvt_s64_m(svint64_t, svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_x)))\n" "svint64_t svcvt_s64_x(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_z)))\n" "svint64_t svcvt_s64_z(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_m)))\n" "svint64_t svcvt_s64_m(svint64_t, svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_x)))\n" "svint64_t svcvt_s64_x(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_z)))\n" "svint64_t svcvt_s64_z(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_m)))\n" "svint64_t svcvt_s64_m(svint64_t, svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_x)))\n" "svint64_t svcvt_s64_x(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_z)))\n" "svint64_t svcvt_s64_z(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_m)))\n" 
"svuint16_t svcvt_u16_m(svuint16_t, svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_x)))\n" "svuint16_t svcvt_u16_x(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_z)))\n" "svuint16_t svcvt_u16_z(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_m)))\n" "svuint32_t svcvt_u32_m(svuint32_t, svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_x)))\n" "svuint32_t svcvt_u32_x(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_z)))\n" "svuint32_t svcvt_u32_z(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_m)))\n" "svuint32_t svcvt_u32_m(svuint32_t, svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_x)))\n" "svuint32_t svcvt_u32_x(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_z)))\n" "svuint32_t svcvt_u32_z(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_m)))\n" "svuint32_t svcvt_u32_m(svuint32_t, svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_x)))\n" "svuint32_t svcvt_u32_x(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_z)))\n" "svuint32_t svcvt_u32_z(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_m)))\n" "svuint64_t svcvt_u64_m(svuint64_t, svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_x)))\n" "svuint64_t svcvt_u64_x(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_z)))\n" "svuint64_t svcvt_u64_z(svbool_t, 
svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_m)))\n" "svuint64_t svcvt_u64_m(svuint64_t, svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_x)))\n" "svuint64_t svcvt_u64_x(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_z)))\n" "svuint64_t svcvt_u64_z(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_m)))\n" "svuint64_t svcvt_u64_m(svuint64_t, svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_x)))\n" "svuint64_t svcvt_u64_x(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_z)))\n" "svuint64_t svcvt_u64_z(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_m)))\n" "svfloat64_t svdiv_m(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_m)))\n" "svfloat32_t svdiv_m(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_m)))\n" "svfloat16_t svdiv_m(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_x)))\n" "svfloat64_t svdiv_x(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_x)))\n" "svfloat32_t svdiv_x(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_x)))\n" "svfloat16_t svdiv_x(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_z)))\n" "svfloat64_t svdiv_z(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_z)))\n" "svfloat32_t svdiv_z(svbool_t, svfloat32_t, float32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_z)))\n" "svfloat16_t svdiv_z(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_m)))\n" "svint32_t svdiv_m(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_m)))\n" "svint64_t svdiv_m(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_x)))\n" "svint32_t svdiv_x(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_x)))\n" "svint64_t svdiv_x(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_z)))\n" "svint32_t svdiv_z(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_z)))\n" "svint64_t svdiv_z(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_m)))\n" "svuint32_t svdiv_m(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_m)))\n" "svuint64_t svdiv_m(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_x)))\n" "svuint32_t svdiv_x(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_x)))\n" "svuint64_t svdiv_x(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_z)))\n" "svuint32_t svdiv_z(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_z)))\n" "svuint64_t svdiv_z(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_m)))\n" "svfloat64_t svdiv_m(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_m)))\n" 
"svfloat32_t svdiv_m(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_m)))\n" "svfloat16_t svdiv_m(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_x)))\n" "svfloat64_t svdiv_x(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_x)))\n" "svfloat32_t svdiv_x(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_x)))\n" "svfloat16_t svdiv_x(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_z)))\n" "svfloat64_t svdiv_z(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_z)))\n" "svfloat32_t svdiv_z(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_z)))\n" "svfloat16_t svdiv_z(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_m)))\n" "svint32_t svdiv_m(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_m)))\n" "svint64_t svdiv_m(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_x)))\n" "svint32_t svdiv_x(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_x)))\n" "svint64_t svdiv_x(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_z)))\n" "svint32_t svdiv_z(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_z)))\n" "svint64_t svdiv_z(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_m)))\n" "svuint32_t svdiv_m(svbool_t, svuint32_t, svuint32_t);\n" 
"__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_m)))\n" "svuint64_t svdiv_m(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_x)))\n" "svuint32_t svdiv_x(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_x)))\n" "svuint64_t svdiv_x(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_z)))\n" "svuint32_t svdiv_z(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_z)))\n" "svuint64_t svdiv_z(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_m)))\n" "svfloat64_t svdivr_m(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_m)))\n" "svfloat32_t svdivr_m(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_m)))\n" "svfloat16_t svdivr_m(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_x)))\n" "svfloat64_t svdivr_x(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_x)))\n" "svfloat32_t svdivr_x(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_x)))\n" "svfloat16_t svdivr_x(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_z)))\n" "svfloat64_t svdivr_z(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_z)))\n" "svfloat32_t svdivr_z(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_z)))\n" "svfloat16_t svdivr_z(svbool_t, svfloat16_t, float16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_m)))\n" "svint32_t svdivr_m(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_m)))\n" "svint64_t svdivr_m(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_x)))\n" "svint32_t svdivr_x(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_x)))\n" "svint64_t svdivr_x(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_z)))\n" "svint32_t svdivr_z(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_z)))\n" "svint64_t svdivr_z(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_m)))\n" "svuint32_t svdivr_m(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_m)))\n" "svuint64_t svdivr_m(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_x)))\n" "svuint32_t svdivr_x(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_x)))\n" "svuint64_t svdivr_x(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_z)))\n" "svuint32_t svdivr_z(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_z)))\n" "svuint64_t svdivr_z(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_m)))\n" "svfloat64_t svdivr_m(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_m)))\n" "svfloat32_t svdivr_m(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_m)))\n" "svfloat16_t svdivr_m(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_x)))\n" "svfloat64_t svdivr_x(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_x)))\n" "svfloat32_t svdivr_x(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_x)))\n" "svfloat16_t svdivr_x(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_z)))\n" "svfloat64_t svdivr_z(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_z)))\n" "svfloat32_t svdivr_z(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_z)))\n" "svfloat16_t svdivr_z(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_m)))\n" "svint32_t svdivr_m(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_m)))\n" "svint64_t svdivr_m(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_x)))\n" "svint32_t svdivr_x(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_x)))\n" "svint64_t svdivr_x(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_z)))\n" "svint32_t svdivr_z(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_z)))\n" "svint64_t svdivr_z(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_m)))\n" "svuint32_t svdivr_m(svbool_t, svuint32_t, svuint32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_m)))\n" "svuint64_t svdivr_m(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_x)))\n" "svuint32_t svdivr_x(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_x)))\n" "svuint64_t svdivr_x(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_z)))\n" "svuint32_t svdivr_z(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_z)))\n" "svuint64_t svdivr_z(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_s32)))\n" "svint32_t svdot(svint32_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_s64)))\n" "svint64_t svdot(svint64_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_u32)))\n" "svuint32_t svdot(svuint32_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_u64)))\n" "svuint64_t svdot(svuint64_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_s32)))\n" "svint32_t svdot(svint32_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_s64)))\n" "svint64_t svdot(svint64_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_u32)))\n" "svuint32_t svdot(svuint32_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_u64)))\n" "svuint64_t svdot(svuint64_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_s32)))\n" "svint32_t svdot_lane(svint32_t, svint8_t, svint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_s64)))\n" 
"svint64_t svdot_lane(svint64_t, svint16_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_u32)))\n" "svuint32_t svdot_lane(svuint32_t, svuint8_t, svuint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_u64)))\n" "svuint64_t svdot_lane(svuint64_t, svuint16_t, svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8)))\n" "svuint8_t svdup_u8(uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32)))\n" "svuint32_t svdup_u32(uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64)))\n" "svuint64_t svdup_u64(uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16)))\n" "svuint16_t svdup_u16(uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8)))\n" "svint8_t svdup_s8(int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64)))\n" "svfloat64_t svdup_f64(float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32)))\n" "svfloat32_t svdup_f32(float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16)))\n" "svfloat16_t svdup_f16(float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32)))\n" "svint32_t svdup_s32(int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64)))\n" "svint64_t svdup_s64(int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16)))\n" "svint16_t svdup_s16(int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_m)))\n" "svuint8_t svdup_u8_m(svuint8_t, svbool_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_m)))\n" "svuint32_t svdup_u32_m(svuint32_t, svbool_t, uint32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_m)))\n" "svuint64_t svdup_u64_m(svuint64_t, svbool_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_m)))\n" "svuint16_t svdup_u16_m(svuint16_t, svbool_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_m)))\n" "svint8_t svdup_s8_m(svint8_t, svbool_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_m)))\n" "svfloat64_t svdup_f64_m(svfloat64_t, svbool_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_m)))\n" "svfloat32_t svdup_f32_m(svfloat32_t, svbool_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_m)))\n" "svfloat16_t svdup_f16_m(svfloat16_t, svbool_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_m)))\n" "svint32_t svdup_s32_m(svint32_t, svbool_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_m)))\n" "svint64_t svdup_s64_m(svint64_t, svbool_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_m)))\n" "svint16_t svdup_s16_m(svint16_t, svbool_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b8)))\n" "svbool_t svdup_b8(bool);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b32)))\n" "svbool_t svdup_b32(bool);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b64)))\n" "svbool_t svdup_b64(bool);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b16)))\n" "svbool_t svdup_b16(bool);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_x)))\n" "svuint8_t svdup_u8_x(svbool_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_x)))\n" "svuint32_t svdup_u32_x(svbool_t, uint32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_x)))\n" "svuint64_t svdup_u64_x(svbool_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_x)))\n" "svuint16_t svdup_u16_x(svbool_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_x)))\n" "svint8_t svdup_s8_x(svbool_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_x)))\n" "svfloat64_t svdup_f64_x(svbool_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_x)))\n" "svfloat32_t svdup_f32_x(svbool_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_x)))\n" "svfloat16_t svdup_f16_x(svbool_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_x)))\n" "svint32_t svdup_s32_x(svbool_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_x)))\n" "svint64_t svdup_s64_x(svbool_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_x)))\n" "svint16_t svdup_s16_x(svbool_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_z)))\n" "svuint8_t svdup_u8_z(svbool_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_z)))\n" "svuint32_t svdup_u32_z(svbool_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_z)))\n" "svuint64_t svdup_u64_z(svbool_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_z)))\n" "svuint16_t svdup_u16_z(svbool_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_z)))\n" "svint8_t svdup_s8_z(svbool_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_z)))\n" "svfloat64_t svdup_f64_z(svbool_t, float64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_z)))\n" "svfloat32_t svdup_f32_z(svbool_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_z)))\n" "svfloat16_t svdup_f16_z(svbool_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_z)))\n" "svint32_t svdup_s32_z(svbool_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_z)))\n" "svint64_t svdup_s64_z(svbool_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_z)))\n" "svint16_t svdup_s16_z(svbool_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u8)))\n" "svuint8_t svdup_lane(svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u32)))\n" "svuint32_t svdup_lane(svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u64)))\n" "svuint64_t svdup_lane(svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u16)))\n" "svuint16_t svdup_lane(svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s8)))\n" "svint8_t svdup_lane(svint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f64)))\n" "svfloat64_t svdup_lane(svfloat64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f32)))\n" "svfloat32_t svdup_lane(svfloat32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f16)))\n" "svfloat16_t svdup_lane(svfloat16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s32)))\n" "svint32_t svdup_lane(svint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s64)))\n" "svint64_t svdup_lane(svint64_t, uint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s16)))\n" "svint16_t svdup_lane(svint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u8)))\n" "svuint8_t svdupq_u8(uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s8)))\n" "svint8_t svdupq_s8(int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u16)))\n" "svuint16_t svdupq_u16(uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f16)))\n" "svfloat16_t svdupq_f16(float16_t, float16_t, float16_t, float16_t, float16_t, float16_t, float16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s16)))\n" "svint16_t svdupq_s16(int16_t, int16_t, int16_t, int16_t, int16_t, int16_t, int16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u32)))\n" "svuint32_t svdupq_u32(uint32_t, uint32_t, uint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f32)))\n" "svfloat32_t svdupq_f32(float32_t, float32_t, float32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s32)))\n" "svint32_t svdupq_s32(int32_t, int32_t, int32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u64)))\n" "svuint64_t svdupq_u64(uint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f64)))\n" "svfloat64_t svdupq_f64(float64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s64)))\n" "svint64_t svdupq_s64(int64_t, int64_t);\n" 
"__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b8)))\n" "svbool_t svdupq_b8(bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b16)))\n" "svbool_t svdupq_b16(bool, bool, bool, bool, bool, bool, bool, bool);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b32)))\n" "svbool_t svdupq_b32(bool, bool, bool, bool);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b64)))\n" "svbool_t svdupq_b64(bool, bool);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u8)))\n" "svuint8_t svdupq_lane(svuint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u32)))\n" "svuint32_t svdupq_lane(svuint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u64)))\n" "svuint64_t svdupq_lane(svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u16)))\n" "svuint16_t svdupq_lane(svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s8)))\n" "svint8_t svdupq_lane(svint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f64)))\n" "svfloat64_t svdupq_lane(svfloat64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f32)))\n" "svfloat32_t svdupq_lane(svfloat32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f16)))\n" "svfloat16_t svdupq_lane(svfloat16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s32)))\n" "svint32_t svdupq_lane(svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s64)))\n" "svint64_t svdupq_lane(svint64_t, uint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s16)))\n" "svint16_t svdupq_lane(svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_b_z)))\n" "svbool_t sveor_z(svbool_t, svbool_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_m)))\n" "svuint8_t sveor_m(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_m)))\n" "svuint32_t sveor_m(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_m)))\n" "svuint64_t sveor_m(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_m)))\n" "svuint16_t sveor_m(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_m)))\n" "svint8_t sveor_m(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_m)))\n" "svint32_t sveor_m(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_m)))\n" "svint64_t sveor_m(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_m)))\n" "svint16_t sveor_m(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_x)))\n" "svuint8_t sveor_x(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_x)))\n" "svuint32_t sveor_x(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_x)))\n" "svuint64_t sveor_x(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_x)))\n" "svuint16_t sveor_x(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_x)))\n" "svint8_t sveor_x(svbool_t, 
svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_x)))\n" "svint32_t sveor_x(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_x)))\n" "svint64_t sveor_x(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_x)))\n" "svint16_t sveor_x(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_z)))\n" "svuint8_t sveor_z(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_z)))\n" "svuint32_t sveor_z(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_z)))\n" "svuint64_t sveor_z(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_z)))\n" "svuint16_t sveor_z(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_z)))\n" "svint8_t sveor_z(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_z)))\n" "svint32_t sveor_z(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_z)))\n" "svint64_t sveor_z(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_z)))\n" "svint16_t sveor_z(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_m)))\n" "svuint8_t sveor_m(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_m)))\n" "svuint32_t sveor_m(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_m)))\n" "svuint64_t sveor_m(svbool_t, svuint64_t, svuint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_m)))\n" "svuint16_t sveor_m(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_m)))\n" "svint8_t sveor_m(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_m)))\n" "svint32_t sveor_m(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_m)))\n" "svint64_t sveor_m(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_m)))\n" "svint16_t sveor_m(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_x)))\n" "svuint8_t sveor_x(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_x)))\n" "svuint32_t sveor_x(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_x)))\n" "svuint64_t sveor_x(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_x)))\n" "svuint16_t sveor_x(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_x)))\n" "svint8_t sveor_x(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_x)))\n" "svint32_t sveor_x(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_x)))\n" "svint64_t sveor_x(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_x)))\n" "svint16_t sveor_x(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_z)))\n" "svuint8_t sveor_z(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_z)))\n" "svuint32_t sveor_z(svbool_t, 
svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_z)))\n" "svuint64_t sveor_z(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_z)))\n" "svuint16_t sveor_z(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_z)))\n" "svint8_t sveor_z(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_z)))\n" "svint32_t sveor_z(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_z)))\n" "svint64_t sveor_z(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_z)))\n" "svint16_t sveor_z(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u8)))\n" "uint8_t sveorv(svbool_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u32)))\n" "uint32_t sveorv(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u64)))\n" "uint64_t sveorv(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u16)))\n" "uint16_t sveorv(svbool_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s8)))\n" "int8_t sveorv(svbool_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s32)))\n" "int32_t sveorv(svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s64)))\n" "int64_t sveorv(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s16)))\n" "int16_t sveorv(svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f64)))\n" "svfloat64_t svexpa(svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f32)))\n" 
"svfloat32_t svexpa(svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f16)))\n" "svfloat16_t svexpa(svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u8)))\n" "svuint8_t svext(svuint8_t, svuint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u32)))\n" "svuint32_t svext(svuint32_t, svuint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u64)))\n" "svuint64_t svext(svuint64_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u16)))\n" "svuint16_t svext(svuint16_t, svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s8)))\n" "svint8_t svext(svint8_t, svint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f64)))\n" "svfloat64_t svext(svfloat64_t, svfloat64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f32)))\n" "svfloat32_t svext(svfloat32_t, svfloat32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f16)))\n" "svfloat16_t svext(svfloat16_t, svfloat16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s32)))\n" "svint32_t svext(svint32_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s64)))\n" "svint64_t svext(svint64_t, svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s16)))\n" "svint16_t svext(svint16_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_m)))\n" "svint32_t svextb_m(svint32_t, svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_m)))\n" "svint64_t svextb_m(svint64_t, svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_m)))\n" "svint16_t svextb_m(svint16_t, 
svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_x)))\n" "svint32_t svextb_x(svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_x)))\n" "svint64_t svextb_x(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_x)))\n" "svint16_t svextb_x(svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_z)))\n" "svint32_t svextb_z(svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_z)))\n" "svint64_t svextb_z(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_z)))\n" "svint16_t svextb_z(svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_m)))\n" "svuint32_t svextb_m(svuint32_t, svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_m)))\n" "svuint64_t svextb_m(svuint64_t, svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_m)))\n" "svuint16_t svextb_m(svuint16_t, svbool_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_x)))\n" "svuint32_t svextb_x(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_x)))\n" "svuint64_t svextb_x(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_x)))\n" "svuint16_t svextb_x(svbool_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_z)))\n" "svuint32_t svextb_z(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_z)))\n" "svuint64_t svextb_z(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_z)))\n" "svuint16_t svextb_z(svbool_t, svuint16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_m)))\n" "svint32_t svexth_m(svint32_t, svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_m)))\n" "svint64_t svexth_m(svint64_t, svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_x)))\n" "svint32_t svexth_x(svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_x)))\n" "svint64_t svexth_x(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_z)))\n" "svint32_t svexth_z(svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_z)))\n" "svint64_t svexth_z(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_m)))\n" "svuint32_t svexth_m(svuint32_t, svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_m)))\n" "svuint64_t svexth_m(svuint64_t, svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_x)))\n" "svuint32_t svexth_x(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_x)))\n" "svuint64_t svexth_x(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_z)))\n" "svuint32_t svexth_z(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_z)))\n" "svuint64_t svexth_z(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_m)))\n" "svint64_t svextw_m(svint64_t, svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_x)))\n" "svint64_t svextw_x(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_z)))\n" "svint64_t svextw_z(svbool_t, svint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_m)))\n" "svuint64_t svextw_m(svuint64_t, svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_x)))\n" "svuint64_t svextw_x(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_z)))\n" "svuint64_t svextw_z(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u8)))\n" "svuint8_t svget2(svuint8x2_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u32)))\n" "svuint32_t svget2(svuint32x2_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u64)))\n" "svuint64_t svget2(svuint64x2_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u16)))\n" "svuint16_t svget2(svuint16x2_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s8)))\n" "svint8_t svget2(svint8x2_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f64)))\n" "svfloat64_t svget2(svfloat64x2_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f32)))\n" "svfloat32_t svget2(svfloat32x2_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f16)))\n" "svfloat16_t svget2(svfloat16x2_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s32)))\n" "svint32_t svget2(svint32x2_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s64)))\n" "svint64_t svget2(svint64x2_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s16)))\n" "svint16_t svget2(svint16x2_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u8)))\n" "svuint8_t svget3(svuint8x3_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u32)))\n" "svuint32_t 
svget3(svuint32x3_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u64)))\n" "svuint64_t svget3(svuint64x3_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u16)))\n" "svuint16_t svget3(svuint16x3_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s8)))\n" "svint8_t svget3(svint8x3_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f64)))\n" "svfloat64_t svget3(svfloat64x3_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f32)))\n" "svfloat32_t svget3(svfloat32x3_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f16)))\n" "svfloat16_t svget3(svfloat16x3_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s32)))\n" "svint32_t svget3(svint32x3_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s64)))\n" "svint64_t svget3(svint64x3_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s16)))\n" "svint16_t svget3(svint16x3_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u8)))\n" "svuint8_t svget4(svuint8x4_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u32)))\n" "svuint32_t svget4(svuint32x4_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u64)))\n" "svuint64_t svget4(svuint64x4_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u16)))\n" "svuint16_t svget4(svuint16x4_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s8)))\n" "svint8_t svget4(svint8x4_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f64)))\n" "svfloat64_t svget4(svfloat64x4_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f32)))\n" 
"svfloat32_t svget4(svfloat32x4_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f16)))\n" "svfloat16_t svget4(svfloat16x4_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s32)))\n" "svint32_t svget4(svint32x4_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s64)))\n" "svint64_t svget4(svint64x4_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s16)))\n" "svint16_t svget4(svint16x4_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u8)))\n" "svuint8_t svinsr(svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u32)))\n" "svuint32_t svinsr(svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u64)))\n" "svuint64_t svinsr(svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u16)))\n" "svuint16_t svinsr(svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s8)))\n" "svint8_t svinsr(svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f64)))\n" "svfloat64_t svinsr(svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f32)))\n" "svfloat32_t svinsr(svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f16)))\n" "svfloat16_t svinsr(svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s32)))\n" "svint32_t svinsr(svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s64)))\n" "svint64_t svinsr(svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s16)))\n" "svint16_t svinsr(svint16_t, int16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u8)))\n" "uint8_t svlasta(svbool_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u32)))\n" "uint32_t svlasta(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u64)))\n" "uint64_t svlasta(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u16)))\n" "uint16_t svlasta(svbool_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s8)))\n" "int8_t svlasta(svbool_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f64)))\n" "float64_t svlasta(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f32)))\n" "float32_t svlasta(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f16)))\n" "float16_t svlasta(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s32)))\n" "int32_t svlasta(svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s64)))\n" "int64_t svlasta(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s16)))\n" "int16_t svlasta(svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u8)))\n" "uint8_t svlastb(svbool_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u32)))\n" "uint32_t svlastb(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u64)))\n" "uint64_t svlastb(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u16)))\n" "uint16_t svlastb(svbool_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s8)))\n" "int8_t svlastb(svbool_t, svint8_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f64)))\n" "float64_t svlastb(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f32)))\n" "float32_t svlastb(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f16)))\n" "float16_t svlastb(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s32)))\n" "int32_t svlastb(svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s64)))\n" "int64_t svlastb(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s16)))\n" "int16_t svlastb(svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u8)))\n" "svuint8_t svld1(svbool_t, uint8_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u32)))\n" "svuint32_t svld1(svbool_t, uint32_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u64)))\n" "svuint64_t svld1(svbool_t, uint64_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u16)))\n" "svuint16_t svld1(svbool_t, uint16_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s8)))\n" "svint8_t svld1(svbool_t, int8_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f64)))\n" "svfloat64_t svld1(svbool_t, float64_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f32)))\n" "svfloat32_t svld1(svbool_t, float32_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f16)))\n" "svfloat16_t svld1(svbool_t, float16_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s32)))\n" "svint32_t svld1(svbool_t, int32_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s64)))\n" "svint64_t svld1(svbool_t, int64_t 
const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s16)))\n" "svint16_t svld1(svbool_t, int16_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_u32)))\n" "svuint32_t svld1_gather_index_u32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_u64)))\n" "svuint64_t svld1_gather_index_u64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_f64)))\n" "svfloat64_t svld1_gather_index_f64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_f32)))\n" "svfloat32_t svld1_gather_index_f32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_s32)))\n" "svint32_t svld1_gather_index_s32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_s64)))\n" "svint64_t svld1_gather_index_s64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_u32)))\n" "svuint32_t svld1_gather_offset_u32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_u64)))\n" "svuint64_t svld1_gather_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_f64)))\n" "svfloat64_t svld1_gather_offset_f64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_f32)))\n" "svfloat32_t svld1_gather_offset_f32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_s32)))\n" "svint32_t svld1_gather_offset_s32(svbool_t, svuint32_t, 
int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_s64)))\n" "svint64_t svld1_gather_offset_s64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_u32)))\n" "svuint32_t svld1_gather_u32(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_u64)))\n" "svuint64_t svld1_gather_u64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_f64)))\n" "svfloat64_t svld1_gather_f64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_f32)))\n" "svfloat32_t svld1_gather_f32(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_s32)))\n" "svint32_t svld1_gather_s32(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_s64)))\n" "svint64_t svld1_gather_s64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_u32)))\n" "svuint32_t svld1_gather_index(svbool_t, uint32_t const *, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_f32)))\n" "svfloat32_t svld1_gather_index(svbool_t, float32_t const *, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_s32)))\n" "svint32_t svld1_gather_index(svbool_t, int32_t const *, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_u32)))\n" "svuint32_t svld1_gather_index(svbool_t, uint32_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_f32)))\n" "svfloat32_t svld1_gather_index(svbool_t, float32_t const *, svuint32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_s32)))\n" "svint32_t svld1_gather_index(svbool_t, int32_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_u64)))\n" "svuint64_t svld1_gather_index(svbool_t, uint64_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_f64)))\n" "svfloat64_t svld1_gather_index(svbool_t, float64_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_s64)))\n" "svint64_t svld1_gather_index(svbool_t, int64_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_u64)))\n" "svuint64_t svld1_gather_index(svbool_t, uint64_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_f64)))\n" "svfloat64_t svld1_gather_index(svbool_t, float64_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_s64)))\n" "svint64_t svld1_gather_index(svbool_t, int64_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_u32)))\n" "svuint32_t svld1_gather_offset(svbool_t, uint32_t const *, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_f32)))\n" "svfloat32_t svld1_gather_offset(svbool_t, float32_t const *, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_s32)))\n" "svint32_t svld1_gather_offset(svbool_t, int32_t const *, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_u32)))\n" "svuint32_t svld1_gather_offset(svbool_t, uint32_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_f32)))\n" "svfloat32_t svld1_gather_offset(svbool_t, float32_t 
const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_s32)))\n" "svint32_t svld1_gather_offset(svbool_t, int32_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_u64)))\n" "svuint64_t svld1_gather_offset(svbool_t, uint64_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_f64)))\n" "svfloat64_t svld1_gather_offset(svbool_t, float64_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_s64)))\n" "svint64_t svld1_gather_offset(svbool_t, int64_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_u64)))\n" "svuint64_t svld1_gather_offset(svbool_t, uint64_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_f64)))\n" "svfloat64_t svld1_gather_offset(svbool_t, float64_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_s64)))\n" "svint64_t svld1_gather_offset(svbool_t, int64_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u8)))\n" "svuint8_t svld1_vnum(svbool_t, uint8_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u32)))\n" "svuint32_t svld1_vnum(svbool_t, uint32_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u64)))\n" "svuint64_t svld1_vnum(svbool_t, uint64_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u16)))\n" "svuint16_t svld1_vnum(svbool_t, uint16_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s8)))\n" "svint8_t svld1_vnum(svbool_t, int8_t const *, int64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f64)))\n" "svfloat64_t svld1_vnum(svbool_t, float64_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f32)))\n" "svfloat32_t svld1_vnum(svbool_t, float32_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f16)))\n" "svfloat16_t svld1_vnum(svbool_t, float16_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s32)))\n" "svint32_t svld1_vnum(svbool_t, int32_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s64)))\n" "svint64_t svld1_vnum(svbool_t, int64_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s16)))\n" "svint16_t svld1_vnum(svbool_t, int16_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u8)))\n" "svuint8_t svld1rq(svbool_t, uint8_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u32)))\n" "svuint32_t svld1rq(svbool_t, uint32_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u64)))\n" "svuint64_t svld1rq(svbool_t, uint64_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u16)))\n" "svuint16_t svld1rq(svbool_t, uint16_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s8)))\n" "svint8_t svld1rq(svbool_t, int8_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f64)))\n" "svfloat64_t svld1rq(svbool_t, float64_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f32)))\n" "svfloat32_t svld1rq(svbool_t, float32_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f16)))\n" "svfloat16_t svld1rq(svbool_t, float16_t const *);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s32)))\n" "svint32_t svld1rq(svbool_t, int32_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s64)))\n" "svint64_t svld1rq(svbool_t, int64_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s16)))\n" "svint16_t svld1rq(svbool_t, int16_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_offset_u32)))\n" "svuint32_t svld1sb_gather_offset_u32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_offset_u64)))\n" "svuint64_t svld1sb_gather_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_offset_s32)))\n" "svint32_t svld1sb_gather_offset_s32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_offset_s64)))\n" "svint64_t svld1sb_gather_offset_s64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_u32)))\n" "svuint32_t svld1sb_gather_u32(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_u64)))\n" "svuint64_t svld1sb_gather_u64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_s32)))\n" "svint32_t svld1sb_gather_s32(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_s64)))\n" "svint64_t svld1sb_gather_s64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s32offset_u32)))\n" "svuint32_t svld1sb_gather_offset_u32(svbool_t, int8_t const *, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s32offset_s32)))\n" "svint32_t 
svld1sb_gather_offset_s32(svbool_t, int8_t const *, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32offset_u32)))\n" "svuint32_t svld1sb_gather_offset_u32(svbool_t, int8_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32offset_s32)))\n" "svint32_t svld1sb_gather_offset_s32(svbool_t, int8_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s64offset_u64)))\n" "svuint64_t svld1sb_gather_offset_u64(svbool_t, int8_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s64offset_s64)))\n" "svint64_t svld1sb_gather_offset_s64(svbool_t, int8_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64offset_u64)))\n" "svuint64_t svld1sb_gather_offset_u64(svbool_t, int8_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64offset_s64)))\n" "svint64_t svld1sb_gather_offset_s64(svbool_t, int8_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_index_u32)))\n" "svuint32_t svld1sh_gather_index_u32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_index_u64)))\n" "svuint64_t svld1sh_gather_index_u64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_index_s32)))\n" "svint32_t svld1sh_gather_index_s32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_index_s64)))\n" "svint64_t svld1sh_gather_index_s64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_offset_u32)))\n" "svuint32_t svld1sh_gather_offset_u32(svbool_t, svuint32_t, int64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_offset_u64)))\n" "svuint64_t svld1sh_gather_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_offset_s32)))\n" "svint32_t svld1sh_gather_offset_s32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_offset_s64)))\n" "svint64_t svld1sh_gather_offset_s64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_u32)))\n" "svuint32_t svld1sh_gather_u32(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_u64)))\n" "svuint64_t svld1sh_gather_u64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_s32)))\n" "svint32_t svld1sh_gather_s32(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_s64)))\n" "svint64_t svld1sh_gather_s64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32index_u32)))\n" "svuint32_t svld1sh_gather_index_u32(svbool_t, int16_t const *, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32index_s32)))\n" "svint32_t svld1sh_gather_index_s32(svbool_t, int16_t const *, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32index_u32)))\n" "svuint32_t svld1sh_gather_index_u32(svbool_t, int16_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32index_s32)))\n" "svint32_t svld1sh_gather_index_s32(svbool_t, int16_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64index_u64)))\n" "svuint64_t svld1sh_gather_index_u64(svbool_t, int16_t const *, svint64_t);\n" 
"__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64index_s64)))\n" "svint64_t svld1sh_gather_index_s64(svbool_t, int16_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64index_u64)))\n" "svuint64_t svld1sh_gather_index_u64(svbool_t, int16_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64index_s64)))\n" "svint64_t svld1sh_gather_index_s64(svbool_t, int16_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32offset_u32)))\n" "svuint32_t svld1sh_gather_offset_u32(svbool_t, int16_t const *, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32offset_s32)))\n" "svint32_t svld1sh_gather_offset_s32(svbool_t, int16_t const *, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32offset_u32)))\n" "svuint32_t svld1sh_gather_offset_u32(svbool_t, int16_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32offset_s32)))\n" "svint32_t svld1sh_gather_offset_s32(svbool_t, int16_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64offset_u64)))\n" "svuint64_t svld1sh_gather_offset_u64(svbool_t, int16_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64offset_s64)))\n" "svint64_t svld1sh_gather_offset_s64(svbool_t, int16_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64offset_u64)))\n" "svuint64_t svld1sh_gather_offset_u64(svbool_t, int16_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64offset_s64)))\n" "svint64_t svld1sh_gather_offset_s64(svbool_t, int16_t const *, svuint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_index_u64)))\n" "svuint64_t svld1sw_gather_index_u64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_index_s64)))\n" "svint64_t svld1sw_gather_index_s64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_offset_u64)))\n" "svuint64_t svld1sw_gather_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_offset_s64)))\n" "svint64_t svld1sw_gather_offset_s64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_u64)))\n" "svuint64_t svld1sw_gather_u64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_s64)))\n" "svint64_t svld1sw_gather_s64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64index_u64)))\n" "svuint64_t svld1sw_gather_index_u64(svbool_t, int32_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64index_s64)))\n" "svint64_t svld1sw_gather_index_s64(svbool_t, int32_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64index_u64)))\n" "svuint64_t svld1sw_gather_index_u64(svbool_t, int32_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64index_s64)))\n" "svint64_t svld1sw_gather_index_s64(svbool_t, int32_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64offset_u64)))\n" "svuint64_t svld1sw_gather_offset_u64(svbool_t, int32_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64offset_s64)))\n" "svint64_t 
svld1sw_gather_offset_s64(svbool_t, int32_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64offset_u64)))\n" "svuint64_t svld1sw_gather_offset_u64(svbool_t, int32_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64offset_s64)))\n" "svint64_t svld1sw_gather_offset_s64(svbool_t, int32_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_offset_u32)))\n" "svuint32_t svld1ub_gather_offset_u32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_offset_u64)))\n" "svuint64_t svld1ub_gather_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_offset_s32)))\n" "svint32_t svld1ub_gather_offset_s32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_offset_s64)))\n" "svint64_t svld1ub_gather_offset_s64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_u32)))\n" "svuint32_t svld1ub_gather_u32(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_u64)))\n" "svuint64_t svld1ub_gather_u64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_s32)))\n" "svint32_t svld1ub_gather_s32(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_s64)))\n" "svint64_t svld1ub_gather_s64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s32offset_u32)))\n" "svuint32_t svld1ub_gather_offset_u32(svbool_t, uint8_t const *, svint32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s32offset_s32)))\n" "svint32_t svld1ub_gather_offset_s32(svbool_t, uint8_t const *, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32offset_u32)))\n" "svuint32_t svld1ub_gather_offset_u32(svbool_t, uint8_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32offset_s32)))\n" "svint32_t svld1ub_gather_offset_s32(svbool_t, uint8_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s64offset_u64)))\n" "svuint64_t svld1ub_gather_offset_u64(svbool_t, uint8_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s64offset_s64)))\n" "svint64_t svld1ub_gather_offset_s64(svbool_t, uint8_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64offset_u64)))\n" "svuint64_t svld1ub_gather_offset_u64(svbool_t, uint8_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64offset_s64)))\n" "svint64_t svld1ub_gather_offset_s64(svbool_t, uint8_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_index_u32)))\n" "svuint32_t svld1uh_gather_index_u32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_index_u64)))\n" "svuint64_t svld1uh_gather_index_u64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_index_s32)))\n" "svint32_t svld1uh_gather_index_s32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_index_s64)))\n" "svint64_t svld1uh_gather_index_s64(svbool_t, svuint64_t, int64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_offset_u32)))\n" "svuint32_t svld1uh_gather_offset_u32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_offset_u64)))\n" "svuint64_t svld1uh_gather_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_offset_s32)))\n" "svint32_t svld1uh_gather_offset_s32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_offset_s64)))\n" "svint64_t svld1uh_gather_offset_s64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_u32)))\n" "svuint32_t svld1uh_gather_u32(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_u64)))\n" "svuint64_t svld1uh_gather_u64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_s32)))\n" "svint32_t svld1uh_gather_s32(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_s64)))\n" "svint64_t svld1uh_gather_s64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32index_u32)))\n" "svuint32_t svld1uh_gather_index_u32(svbool_t, uint16_t const *, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32index_s32)))\n" "svint32_t svld1uh_gather_index_s32(svbool_t, uint16_t const *, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32index_u32)))\n" "svuint32_t svld1uh_gather_index_u32(svbool_t, uint16_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32index_s32)))\n" "svint32_t svld1uh_gather_index_s32(svbool_t, uint16_t const *, 
svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64index_u64)))\n" "svuint64_t svld1uh_gather_index_u64(svbool_t, uint16_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64index_s64)))\n" "svint64_t svld1uh_gather_index_s64(svbool_t, uint16_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64index_u64)))\n" "svuint64_t svld1uh_gather_index_u64(svbool_t, uint16_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64index_s64)))\n" "svint64_t svld1uh_gather_index_s64(svbool_t, uint16_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32offset_u32)))\n" "svuint32_t svld1uh_gather_offset_u32(svbool_t, uint16_t const *, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32offset_s32)))\n" "svint32_t svld1uh_gather_offset_s32(svbool_t, uint16_t const *, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32offset_u32)))\n" "svuint32_t svld1uh_gather_offset_u32(svbool_t, uint16_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32offset_s32)))\n" "svint32_t svld1uh_gather_offset_s32(svbool_t, uint16_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64offset_u64)))\n" "svuint64_t svld1uh_gather_offset_u64(svbool_t, uint16_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64offset_s64)))\n" "svint64_t svld1uh_gather_offset_s64(svbool_t, uint16_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64offset_u64)))\n" "svuint64_t svld1uh_gather_offset_u64(svbool_t, uint16_t const *, svuint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64offset_s64)))\n" "svint64_t svld1uh_gather_offset_s64(svbool_t, uint16_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_index_u64)))\n" "svuint64_t svld1uw_gather_index_u64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_index_s64)))\n" "svint64_t svld1uw_gather_index_s64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_offset_u64)))\n" "svuint64_t svld1uw_gather_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_offset_s64)))\n" "svint64_t svld1uw_gather_offset_s64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_u64)))\n" "svuint64_t svld1uw_gather_u64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_s64)))\n" "svint64_t svld1uw_gather_s64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64index_u64)))\n" "svuint64_t svld1uw_gather_index_u64(svbool_t, uint32_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64index_s64)))\n" "svint64_t svld1uw_gather_index_s64(svbool_t, uint32_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64index_u64)))\n" "svuint64_t svld1uw_gather_index_u64(svbool_t, uint32_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64index_s64)))\n" "svint64_t svld1uw_gather_index_s64(svbool_t, uint32_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64offset_u64)))\n" "svuint64_t 
svld1uw_gather_offset_u64(svbool_t, uint32_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64offset_s64)))\n" "svint64_t svld1uw_gather_offset_s64(svbool_t, uint32_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64offset_u64)))\n" "svuint64_t svld1uw_gather_offset_u64(svbool_t, uint32_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64offset_s64)))\n" "svint64_t svld1uw_gather_offset_s64(svbool_t, uint32_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u8)))\n" "svuint8x2_t svld2(svbool_t, uint8_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u32)))\n" "svuint32x2_t svld2(svbool_t, uint32_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u64)))\n" "svuint64x2_t svld2(svbool_t, uint64_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u16)))\n" "svuint16x2_t svld2(svbool_t, uint16_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s8)))\n" "svint8x2_t svld2(svbool_t, int8_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f64)))\n" "svfloat64x2_t svld2(svbool_t, float64_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f32)))\n" "svfloat32x2_t svld2(svbool_t, float32_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f16)))\n" "svfloat16x2_t svld2(svbool_t, float16_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s32)))\n" "svint32x2_t svld2(svbool_t, int32_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s64)))\n" "svint64x2_t svld2(svbool_t, int64_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s16)))\n" "svint16x2_t 
svld2(svbool_t, int16_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u8)))\n" "svuint8x2_t svld2_vnum(svbool_t, uint8_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u32)))\n" "svuint32x2_t svld2_vnum(svbool_t, uint32_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u64)))\n" "svuint64x2_t svld2_vnum(svbool_t, uint64_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u16)))\n" "svuint16x2_t svld2_vnum(svbool_t, uint16_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s8)))\n" "svint8x2_t svld2_vnum(svbool_t, int8_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f64)))\n" "svfloat64x2_t svld2_vnum(svbool_t, float64_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f32)))\n" "svfloat32x2_t svld2_vnum(svbool_t, float32_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f16)))\n" "svfloat16x2_t svld2_vnum(svbool_t, float16_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s32)))\n" "svint32x2_t svld2_vnum(svbool_t, int32_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s64)))\n" "svint64x2_t svld2_vnum(svbool_t, int64_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s16)))\n" "svint16x2_t svld2_vnum(svbool_t, int16_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u8)))\n" "svuint8x3_t svld3(svbool_t, uint8_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u32)))\n" "svuint32x3_t svld3(svbool_t, uint32_t const *);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u64)))\n" "svuint64x3_t svld3(svbool_t, uint64_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u16)))\n" "svuint16x3_t svld3(svbool_t, uint16_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s8)))\n" "svint8x3_t svld3(svbool_t, int8_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f64)))\n" "svfloat64x3_t svld3(svbool_t, float64_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f32)))\n" "svfloat32x3_t svld3(svbool_t, float32_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f16)))\n" "svfloat16x3_t svld3(svbool_t, float16_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s32)))\n" "svint32x3_t svld3(svbool_t, int32_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s64)))\n" "svint64x3_t svld3(svbool_t, int64_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s16)))\n" "svint16x3_t svld3(svbool_t, int16_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u8)))\n" "svuint8x3_t svld3_vnum(svbool_t, uint8_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u32)))\n" "svuint32x3_t svld3_vnum(svbool_t, uint32_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u64)))\n" "svuint64x3_t svld3_vnum(svbool_t, uint64_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u16)))\n" "svuint16x3_t svld3_vnum(svbool_t, uint16_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s8)))\n" "svint8x3_t svld3_vnum(svbool_t, int8_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f64)))\n" "svfloat64x3_t 
svld3_vnum(svbool_t, float64_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f32)))\n" "svfloat32x3_t svld3_vnum(svbool_t, float32_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f16)))\n" "svfloat16x3_t svld3_vnum(svbool_t, float16_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s32)))\n" "svint32x3_t svld3_vnum(svbool_t, int32_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s64)))\n" "svint64x3_t svld3_vnum(svbool_t, int64_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s16)))\n" "svint16x3_t svld3_vnum(svbool_t, int16_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u8)))\n" "svuint8x4_t svld4(svbool_t, uint8_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u32)))\n" "svuint32x4_t svld4(svbool_t, uint32_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u64)))\n" "svuint64x4_t svld4(svbool_t, uint64_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u16)))\n" "svuint16x4_t svld4(svbool_t, uint16_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s8)))\n" "svint8x4_t svld4(svbool_t, int8_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f64)))\n" "svfloat64x4_t svld4(svbool_t, float64_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f32)))\n" "svfloat32x4_t svld4(svbool_t, float32_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f16)))\n" "svfloat16x4_t svld4(svbool_t, float16_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s32)))\n" "svint32x4_t svld4(svbool_t, int32_t const *);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s64)))\n" "svint64x4_t svld4(svbool_t, int64_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s16)))\n" "svint16x4_t svld4(svbool_t, int16_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u8)))\n" "svuint8x4_t svld4_vnum(svbool_t, uint8_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u32)))\n" "svuint32x4_t svld4_vnum(svbool_t, uint32_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u64)))\n" "svuint64x4_t svld4_vnum(svbool_t, uint64_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u16)))\n" "svuint16x4_t svld4_vnum(svbool_t, uint16_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s8)))\n" "svint8x4_t svld4_vnum(svbool_t, int8_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f64)))\n" "svfloat64x4_t svld4_vnum(svbool_t, float64_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f32)))\n" "svfloat32x4_t svld4_vnum(svbool_t, float32_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f16)))\n" "svfloat16x4_t svld4_vnum(svbool_t, float16_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s32)))\n" "svint32x4_t svld4_vnum(svbool_t, int32_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s64)))\n" "svint64x4_t svld4_vnum(svbool_t, int64_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s16)))\n" "svint16x4_t svld4_vnum(svbool_t, int16_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u8)))\n" "svuint8_t svldff1(svbool_t, uint8_t 
const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u32)))\n" "svuint32_t svldff1(svbool_t, uint32_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u64)))\n" "svuint64_t svldff1(svbool_t, uint64_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u16)))\n" "svuint16_t svldff1(svbool_t, uint16_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s8)))\n" "svint8_t svldff1(svbool_t, int8_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f64)))\n" "svfloat64_t svldff1(svbool_t, float64_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f32)))\n" "svfloat32_t svldff1(svbool_t, float32_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f16)))\n" "svfloat16_t svldff1(svbool_t, float16_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s32)))\n" "svint32_t svldff1(svbool_t, int32_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s64)))\n" "svint64_t svldff1(svbool_t, int64_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s16)))\n" "svint16_t svldff1(svbool_t, int16_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_u32)))\n" "svuint32_t svldff1_gather_index_u32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_u64)))\n" "svuint64_t svldff1_gather_index_u64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_f64)))\n" "svfloat64_t svldff1_gather_index_f64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_f32)))\n" "svfloat32_t svldff1_gather_index_f32(svbool_t, 
svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_s32)))\n" "svint32_t svldff1_gather_index_s32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_s64)))\n" "svint64_t svldff1_gather_index_s64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_u32)))\n" "svuint32_t svldff1_gather_offset_u32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_u64)))\n" "svuint64_t svldff1_gather_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_f64)))\n" "svfloat64_t svldff1_gather_offset_f64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_f32)))\n" "svfloat32_t svldff1_gather_offset_f32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_s32)))\n" "svint32_t svldff1_gather_offset_s32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_s64)))\n" "svint64_t svldff1_gather_offset_s64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_u32)))\n" "svuint32_t svldff1_gather_u32(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_u64)))\n" "svuint64_t svldff1_gather_u64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_f64)))\n" "svfloat64_t svldff1_gather_f64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_f32)))\n" "svfloat32_t 
svldff1_gather_f32(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_s32)))\n" "svint32_t svldff1_gather_s32(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_s64)))\n" "svint64_t svldff1_gather_s64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_u32)))\n" "svuint32_t svldff1_gather_index(svbool_t, uint32_t const *, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_f32)))\n" "svfloat32_t svldff1_gather_index(svbool_t, float32_t const *, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_s32)))\n" "svint32_t svldff1_gather_index(svbool_t, int32_t const *, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_u32)))\n" "svuint32_t svldff1_gather_index(svbool_t, uint32_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_f32)))\n" "svfloat32_t svldff1_gather_index(svbool_t, float32_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_s32)))\n" "svint32_t svldff1_gather_index(svbool_t, int32_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_u64)))\n" "svuint64_t svldff1_gather_index(svbool_t, uint64_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_f64)))\n" "svfloat64_t svldff1_gather_index(svbool_t, float64_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_s64)))\n" "svint64_t svldff1_gather_index(svbool_t, int64_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_u64)))\n" 
"svuint64_t svldff1_gather_index(svbool_t, uint64_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_f64)))\n" "svfloat64_t svldff1_gather_index(svbool_t, float64_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_s64)))\n" "svint64_t svldff1_gather_index(svbool_t, int64_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_u32)))\n" "svuint32_t svldff1_gather_offset(svbool_t, uint32_t const *, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_f32)))\n" "svfloat32_t svldff1_gather_offset(svbool_t, float32_t const *, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_s32)))\n" "svint32_t svldff1_gather_offset(svbool_t, int32_t const *, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_u32)))\n" "svuint32_t svldff1_gather_offset(svbool_t, uint32_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_f32)))\n" "svfloat32_t svldff1_gather_offset(svbool_t, float32_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_s32)))\n" "svint32_t svldff1_gather_offset(svbool_t, int32_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_u64)))\n" "svuint64_t svldff1_gather_offset(svbool_t, uint64_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_f64)))\n" "svfloat64_t svldff1_gather_offset(svbool_t, float64_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_s64)))\n" "svint64_t svldff1_gather_offset(svbool_t, int64_t const *, svint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_u64)))\n" "svuint64_t svldff1_gather_offset(svbool_t, uint64_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_f64)))\n" "svfloat64_t svldff1_gather_offset(svbool_t, float64_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_s64)))\n" "svint64_t svldff1_gather_offset(svbool_t, int64_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u8)))\n" "svuint8_t svldff1_vnum(svbool_t, uint8_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u32)))\n" "svuint32_t svldff1_vnum(svbool_t, uint32_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u64)))\n" "svuint64_t svldff1_vnum(svbool_t, uint64_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u16)))\n" "svuint16_t svldff1_vnum(svbool_t, uint16_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s8)))\n" "svint8_t svldff1_vnum(svbool_t, int8_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f64)))\n" "svfloat64_t svldff1_vnum(svbool_t, float64_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f32)))\n" "svfloat32_t svldff1_vnum(svbool_t, float32_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f16)))\n" "svfloat16_t svldff1_vnum(svbool_t, float16_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s32)))\n" "svint32_t svldff1_vnum(svbool_t, int32_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s64)))\n" "svint64_t svldff1_vnum(svbool_t, int64_t 
const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s16)))\n" "svint16_t svldff1_vnum(svbool_t, int16_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_offset_u32)))\n" "svuint32_t svldff1sb_gather_offset_u32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_offset_u64)))\n" "svuint64_t svldff1sb_gather_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_offset_s32)))\n" "svint32_t svldff1sb_gather_offset_s32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_offset_s64)))\n" "svint64_t svldff1sb_gather_offset_s64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_u32)))\n" "svuint32_t svldff1sb_gather_u32(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_u64)))\n" "svuint64_t svldff1sb_gather_u64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_s32)))\n" "svint32_t svldff1sb_gather_s32(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_s64)))\n" "svint64_t svldff1sb_gather_s64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s32offset_u32)))\n" "svuint32_t svldff1sb_gather_offset_u32(svbool_t, int8_t const *, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s32offset_s32)))\n" "svint32_t svldff1sb_gather_offset_s32(svbool_t, int8_t const *, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32offset_u32)))\n" "svuint32_t 
svldff1sb_gather_offset_u32(svbool_t, int8_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32offset_s32)))\n" "svint32_t svldff1sb_gather_offset_s32(svbool_t, int8_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s64offset_u64)))\n" "svuint64_t svldff1sb_gather_offset_u64(svbool_t, int8_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s64offset_s64)))\n" "svint64_t svldff1sb_gather_offset_s64(svbool_t, int8_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64offset_u64)))\n" "svuint64_t svldff1sb_gather_offset_u64(svbool_t, int8_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64offset_s64)))\n" "svint64_t svldff1sb_gather_offset_s64(svbool_t, int8_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_index_u32)))\n" "svuint32_t svldff1sh_gather_index_u32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_index_u64)))\n" "svuint64_t svldff1sh_gather_index_u64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_index_s32)))\n" "svint32_t svldff1sh_gather_index_s32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_index_s64)))\n" "svint64_t svldff1sh_gather_index_s64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_offset_u32)))\n" "svuint32_t svldff1sh_gather_offset_u32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_offset_u64)))\n" "svuint64_t 
svldff1sh_gather_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_offset_s32)))\n" "svint32_t svldff1sh_gather_offset_s32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_offset_s64)))\n" "svint64_t svldff1sh_gather_offset_s64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_u32)))\n" "svuint32_t svldff1sh_gather_u32(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_u64)))\n" "svuint64_t svldff1sh_gather_u64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_s32)))\n" "svint32_t svldff1sh_gather_s32(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_s64)))\n" "svint64_t svldff1sh_gather_s64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32index_u32)))\n" "svuint32_t svldff1sh_gather_index_u32(svbool_t, int16_t const *, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32index_s32)))\n" "svint32_t svldff1sh_gather_index_s32(svbool_t, int16_t const *, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32index_u32)))\n" "svuint32_t svldff1sh_gather_index_u32(svbool_t, int16_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32index_s32)))\n" "svint32_t svldff1sh_gather_index_s32(svbool_t, int16_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64index_u64)))\n" "svuint64_t svldff1sh_gather_index_u64(svbool_t, int16_t const *, svint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64index_s64)))\n" "svint64_t svldff1sh_gather_index_s64(svbool_t, int16_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64index_u64)))\n" "svuint64_t svldff1sh_gather_index_u64(svbool_t, int16_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64index_s64)))\n" "svint64_t svldff1sh_gather_index_s64(svbool_t, int16_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32offset_u32)))\n" "svuint32_t svldff1sh_gather_offset_u32(svbool_t, int16_t const *, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32offset_s32)))\n" "svint32_t svldff1sh_gather_offset_s32(svbool_t, int16_t const *, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32offset_u32)))\n" "svuint32_t svldff1sh_gather_offset_u32(svbool_t, int16_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32offset_s32)))\n" "svint32_t svldff1sh_gather_offset_s32(svbool_t, int16_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64offset_u64)))\n" "svuint64_t svldff1sh_gather_offset_u64(svbool_t, int16_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64offset_s64)))\n" "svint64_t svldff1sh_gather_offset_s64(svbool_t, int16_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64offset_u64)))\n" "svuint64_t svldff1sh_gather_offset_u64(svbool_t, int16_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64offset_s64)))\n" "svint64_t svldff1sh_gather_offset_s64(svbool_t, int16_t const *, svuint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_index_u64)))\n" "svuint64_t svldff1sw_gather_index_u64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_index_s64)))\n" "svint64_t svldff1sw_gather_index_s64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_offset_u64)))\n" "svuint64_t svldff1sw_gather_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_offset_s64)))\n" "svint64_t svldff1sw_gather_offset_s64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_u64)))\n" "svuint64_t svldff1sw_gather_u64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_s64)))\n" "svint64_t svldff1sw_gather_s64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64index_u64)))\n" "svuint64_t svldff1sw_gather_index_u64(svbool_t, int32_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64index_s64)))\n" "svint64_t svldff1sw_gather_index_s64(svbool_t, int32_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64index_u64)))\n" "svuint64_t svldff1sw_gather_index_u64(svbool_t, int32_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64index_s64)))\n" "svint64_t svldff1sw_gather_index_s64(svbool_t, int32_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64offset_u64)))\n" "svuint64_t svldff1sw_gather_offset_u64(svbool_t, int32_t const *, svint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64offset_s64)))\n" "svint64_t svldff1sw_gather_offset_s64(svbool_t, int32_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64offset_u64)))\n" "svuint64_t svldff1sw_gather_offset_u64(svbool_t, int32_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64offset_s64)))\n" "svint64_t svldff1sw_gather_offset_s64(svbool_t, int32_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_offset_u32)))\n" "svuint32_t svldff1ub_gather_offset_u32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_offset_u64)))\n" "svuint64_t svldff1ub_gather_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_offset_s32)))\n" "svint32_t svldff1ub_gather_offset_s32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_offset_s64)))\n" "svint64_t svldff1ub_gather_offset_s64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_u32)))\n" "svuint32_t svldff1ub_gather_u32(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_u64)))\n" "svuint64_t svldff1ub_gather_u64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_s32)))\n" "svint32_t svldff1ub_gather_s32(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_s64)))\n" "svint64_t svldff1ub_gather_s64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s32offset_u32)))\n" "svuint32_t 
svldff1ub_gather_offset_u32(svbool_t, uint8_t const *, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s32offset_s32)))\n" "svint32_t svldff1ub_gather_offset_s32(svbool_t, uint8_t const *, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32offset_u32)))\n" "svuint32_t svldff1ub_gather_offset_u32(svbool_t, uint8_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32offset_s32)))\n" "svint32_t svldff1ub_gather_offset_s32(svbool_t, uint8_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s64offset_u64)))\n" "svuint64_t svldff1ub_gather_offset_u64(svbool_t, uint8_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s64offset_s64)))\n" "svint64_t svldff1ub_gather_offset_s64(svbool_t, uint8_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64offset_u64)))\n" "svuint64_t svldff1ub_gather_offset_u64(svbool_t, uint8_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64offset_s64)))\n" "svint64_t svldff1ub_gather_offset_s64(svbool_t, uint8_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_index_u32)))\n" "svuint32_t svldff1uh_gather_index_u32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_index_u64)))\n" "svuint64_t svldff1uh_gather_index_u64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_index_s32)))\n" "svint32_t svldff1uh_gather_index_s32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_index_s64)))\n" "svint64_t 
svldff1uh_gather_index_s64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_offset_u32)))\n" "svuint32_t svldff1uh_gather_offset_u32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_offset_u64)))\n" "svuint64_t svldff1uh_gather_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_offset_s32)))\n" "svint32_t svldff1uh_gather_offset_s32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_offset_s64)))\n" "svint64_t svldff1uh_gather_offset_s64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_u32)))\n" "svuint32_t svldff1uh_gather_u32(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_u64)))\n" "svuint64_t svldff1uh_gather_u64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_s32)))\n" "svint32_t svldff1uh_gather_s32(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_s64)))\n" "svint64_t svldff1uh_gather_s64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32index_u32)))\n" "svuint32_t svldff1uh_gather_index_u32(svbool_t, uint16_t const *, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32index_s32)))\n" "svint32_t svldff1uh_gather_index_s32(svbool_t, uint16_t const *, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32index_u32)))\n" "svuint32_t svldff1uh_gather_index_u32(svbool_t, uint16_t const *, svuint32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32index_s32)))\n" "svint32_t svldff1uh_gather_index_s32(svbool_t, uint16_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64index_u64)))\n" "svuint64_t svldff1uh_gather_index_u64(svbool_t, uint16_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64index_s64)))\n" "svint64_t svldff1uh_gather_index_s64(svbool_t, uint16_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64index_u64)))\n" "svuint64_t svldff1uh_gather_index_u64(svbool_t, uint16_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64index_s64)))\n" "svint64_t svldff1uh_gather_index_s64(svbool_t, uint16_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32offset_u32)))\n" "svuint32_t svldff1uh_gather_offset_u32(svbool_t, uint16_t const *, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32offset_s32)))\n" "svint32_t svldff1uh_gather_offset_s32(svbool_t, uint16_t const *, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32offset_u32)))\n" "svuint32_t svldff1uh_gather_offset_u32(svbool_t, uint16_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32offset_s32)))\n" "svint32_t svldff1uh_gather_offset_s32(svbool_t, uint16_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64offset_u64)))\n" "svuint64_t svldff1uh_gather_offset_u64(svbool_t, uint16_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64offset_s64)))\n" "svint64_t svldff1uh_gather_offset_s64(svbool_t, uint16_t const *, svint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64offset_u64)))\n" "svuint64_t svldff1uh_gather_offset_u64(svbool_t, uint16_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64offset_s64)))\n" "svint64_t svldff1uh_gather_offset_s64(svbool_t, uint16_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_index_u64)))\n" "svuint64_t svldff1uw_gather_index_u64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_index_s64)))\n" "svint64_t svldff1uw_gather_index_s64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_offset_u64)))\n" "svuint64_t svldff1uw_gather_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_offset_s64)))\n" "svint64_t svldff1uw_gather_offset_s64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_u64)))\n" "svuint64_t svldff1uw_gather_u64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_s64)))\n" "svint64_t svldff1uw_gather_s64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64index_u64)))\n" "svuint64_t svldff1uw_gather_index_u64(svbool_t, uint32_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64index_s64)))\n" "svint64_t svldff1uw_gather_index_s64(svbool_t, uint32_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64index_u64)))\n" "svuint64_t svldff1uw_gather_index_u64(svbool_t, uint32_t const *, svuint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64index_s64)))\n" "svint64_t svldff1uw_gather_index_s64(svbool_t, uint32_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64offset_u64)))\n" "svuint64_t svldff1uw_gather_offset_u64(svbool_t, uint32_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64offset_s64)))\n" "svint64_t svldff1uw_gather_offset_s64(svbool_t, uint32_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64offset_u64)))\n" "svuint64_t svldff1uw_gather_offset_u64(svbool_t, uint32_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64offset_s64)))\n" "svint64_t svldff1uw_gather_offset_s64(svbool_t, uint32_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u8)))\n" "svuint8_t svldnf1(svbool_t, uint8_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u32)))\n" "svuint32_t svldnf1(svbool_t, uint32_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u64)))\n" "svuint64_t svldnf1(svbool_t, uint64_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u16)))\n" "svuint16_t svldnf1(svbool_t, uint16_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s8)))\n" "svint8_t svldnf1(svbool_t, int8_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f64)))\n" "svfloat64_t svldnf1(svbool_t, float64_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f32)))\n" "svfloat32_t svldnf1(svbool_t, float32_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f16)))\n" "svfloat16_t svldnf1(svbool_t, float16_t const *);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s32)))\n" "svint32_t svldnf1(svbool_t, int32_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s64)))\n" "svint64_t svldnf1(svbool_t, int64_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s16)))\n" "svint16_t svldnf1(svbool_t, int16_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u8)))\n" "svuint8_t svldnf1_vnum(svbool_t, uint8_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u32)))\n" "svuint32_t svldnf1_vnum(svbool_t, uint32_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u64)))\n" "svuint64_t svldnf1_vnum(svbool_t, uint64_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u16)))\n" "svuint16_t svldnf1_vnum(svbool_t, uint16_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s8)))\n" "svint8_t svldnf1_vnum(svbool_t, int8_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f64)))\n" "svfloat64_t svldnf1_vnum(svbool_t, float64_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f32)))\n" "svfloat32_t svldnf1_vnum(svbool_t, float32_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f16)))\n" "svfloat16_t svldnf1_vnum(svbool_t, float16_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s32)))\n" "svint32_t svldnf1_vnum(svbool_t, int32_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s64)))\n" "svint64_t svldnf1_vnum(svbool_t, int64_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s16)))\n" "svint16_t 
svldnf1_vnum(svbool_t, int16_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u8)))\n" "svuint8_t svldnt1(svbool_t, uint8_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u32)))\n" "svuint32_t svldnt1(svbool_t, uint32_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u64)))\n" "svuint64_t svldnt1(svbool_t, uint64_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u16)))\n" "svuint16_t svldnt1(svbool_t, uint16_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s8)))\n" "svint8_t svldnt1(svbool_t, int8_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f64)))\n" "svfloat64_t svldnt1(svbool_t, float64_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f32)))\n" "svfloat32_t svldnt1(svbool_t, float32_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f16)))\n" "svfloat16_t svldnt1(svbool_t, float16_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s32)))\n" "svint32_t svldnt1(svbool_t, int32_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s64)))\n" "svint64_t svldnt1(svbool_t, int64_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s16)))\n" "svint16_t svldnt1(svbool_t, int16_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u8)))\n" "svuint8_t svldnt1_vnum(svbool_t, uint8_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u32)))\n" "svuint32_t svldnt1_vnum(svbool_t, uint32_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u64)))\n" "svuint64_t svldnt1_vnum(svbool_t, uint64_t const *, int64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u16)))\n" "svuint16_t svldnt1_vnum(svbool_t, uint16_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s8)))\n" "svint8_t svldnt1_vnum(svbool_t, int8_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f64)))\n" "svfloat64_t svldnt1_vnum(svbool_t, float64_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f32)))\n" "svfloat32_t svldnt1_vnum(svbool_t, float32_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f16)))\n" "svfloat16_t svldnt1_vnum(svbool_t, float16_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s32)))\n" "svint32_t svldnt1_vnum(svbool_t, int32_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s64)))\n" "svint64_t svldnt1_vnum(svbool_t, int64_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s16)))\n" "svint16_t svldnt1_vnum(svbool_t, int16_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u8)))\n" "uint64_t svlen(svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u32)))\n" "uint64_t svlen(svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u64)))\n" "uint64_t svlen(svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u16)))\n" "uint64_t svlen(svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s8)))\n" "uint64_t svlen(svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f64)))\n" "uint64_t svlen(svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f32)))\n" "uint64_t svlen(svfloat32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f16)))\n" "uint64_t svlen(svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s32)))\n" "uint64_t svlen(svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s64)))\n" "uint64_t svlen(svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s16)))\n" "uint64_t svlen(svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_m)))\n" "svuint8_t svlsl_m(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_m)))\n" "svuint32_t svlsl_m(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_m)))\n" "svuint64_t svlsl_m(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_m)))\n" "svuint16_t svlsl_m(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_m)))\n" "svint8_t svlsl_m(svbool_t, svint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_m)))\n" "svint32_t svlsl_m(svbool_t, svint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_m)))\n" "svint64_t svlsl_m(svbool_t, svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_m)))\n" "svint16_t svlsl_m(svbool_t, svint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_x)))\n" "svuint8_t svlsl_x(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_x)))\n" "svuint32_t svlsl_x(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_x)))\n" "svuint64_t svlsl_x(svbool_t, svuint64_t, uint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_x)))\n" "svuint16_t svlsl_x(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_x)))\n" "svint8_t svlsl_x(svbool_t, svint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_x)))\n" "svint32_t svlsl_x(svbool_t, svint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_x)))\n" "svint64_t svlsl_x(svbool_t, svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_x)))\n" "svint16_t svlsl_x(svbool_t, svint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_z)))\n" "svuint8_t svlsl_z(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_z)))\n" "svuint32_t svlsl_z(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_z)))\n" "svuint64_t svlsl_z(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_z)))\n" "svuint16_t svlsl_z(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_z)))\n" "svint8_t svlsl_z(svbool_t, svint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_z)))\n" "svint32_t svlsl_z(svbool_t, svint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_z)))\n" "svint64_t svlsl_z(svbool_t, svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_z)))\n" "svint16_t svlsl_z(svbool_t, svint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_m)))\n" "svuint8_t svlsl_m(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_m)))\n" "svuint32_t 
svlsl_m(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_m)))\n" "svuint64_t svlsl_m(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_m)))\n" "svuint16_t svlsl_m(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_m)))\n" "svint8_t svlsl_m(svbool_t, svint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_m)))\n" "svint32_t svlsl_m(svbool_t, svint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_m)))\n" "svint64_t svlsl_m(svbool_t, svint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_m)))\n" "svint16_t svlsl_m(svbool_t, svint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_x)))\n" "svuint8_t svlsl_x(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_x)))\n" "svuint32_t svlsl_x(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_x)))\n" "svuint64_t svlsl_x(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_x)))\n" "svuint16_t svlsl_x(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_x)))\n" "svint8_t svlsl_x(svbool_t, svint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_x)))\n" "svint32_t svlsl_x(svbool_t, svint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_x)))\n" "svint64_t svlsl_x(svbool_t, svint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_x)))\n" "svint16_t svlsl_x(svbool_t, svint16_t, svuint16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_z)))\n" "svuint8_t svlsl_z(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_z)))\n" "svuint32_t svlsl_z(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_z)))\n" "svuint64_t svlsl_z(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_z)))\n" "svuint16_t svlsl_z(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_z)))\n" "svint8_t svlsl_z(svbool_t, svint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_z)))\n" "svint32_t svlsl_z(svbool_t, svint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_z)))\n" "svint64_t svlsl_z(svbool_t, svint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_z)))\n" "svint16_t svlsl_z(svbool_t, svint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_m)))\n" "svuint8_t svlsl_wide_m(svbool_t, svuint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_m)))\n" "svuint32_t svlsl_wide_m(svbool_t, svuint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_m)))\n" "svuint16_t svlsl_wide_m(svbool_t, svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_m)))\n" "svint8_t svlsl_wide_m(svbool_t, svint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_m)))\n" "svint32_t svlsl_wide_m(svbool_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_m)))\n" "svint16_t svlsl_wide_m(svbool_t, svint16_t, uint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_x)))\n" "svuint8_t svlsl_wide_x(svbool_t, svuint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_x)))\n" "svuint32_t svlsl_wide_x(svbool_t, svuint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_x)))\n" "svuint16_t svlsl_wide_x(svbool_t, svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_x)))\n" "svint8_t svlsl_wide_x(svbool_t, svint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_x)))\n" "svint32_t svlsl_wide_x(svbool_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_x)))\n" "svint16_t svlsl_wide_x(svbool_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_z)))\n" "svuint8_t svlsl_wide_z(svbool_t, svuint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_z)))\n" "svuint32_t svlsl_wide_z(svbool_t, svuint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_z)))\n" "svuint16_t svlsl_wide_z(svbool_t, svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_z)))\n" "svint8_t svlsl_wide_z(svbool_t, svint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_z)))\n" "svint32_t svlsl_wide_z(svbool_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_z)))\n" "svint16_t svlsl_wide_z(svbool_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_m)))\n" "svuint8_t svlsl_wide_m(svbool_t, svuint8_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_m)))\n" "svuint32_t 
svlsl_wide_m(svbool_t, svuint32_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_m)))\n" "svuint16_t svlsl_wide_m(svbool_t, svuint16_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_m)))\n" "svint8_t svlsl_wide_m(svbool_t, svint8_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_m)))\n" "svint32_t svlsl_wide_m(svbool_t, svint32_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_m)))\n" "svint16_t svlsl_wide_m(svbool_t, svint16_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_x)))\n" "svuint8_t svlsl_wide_x(svbool_t, svuint8_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_x)))\n" "svuint32_t svlsl_wide_x(svbool_t, svuint32_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_x)))\n" "svuint16_t svlsl_wide_x(svbool_t, svuint16_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_x)))\n" "svint8_t svlsl_wide_x(svbool_t, svint8_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_x)))\n" "svint32_t svlsl_wide_x(svbool_t, svint32_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_x)))\n" "svint16_t svlsl_wide_x(svbool_t, svint16_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_z)))\n" "svuint8_t svlsl_wide_z(svbool_t, svuint8_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_z)))\n" "svuint32_t svlsl_wide_z(svbool_t, svuint32_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_z)))\n" "svuint16_t svlsl_wide_z(svbool_t, svuint16_t, svuint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_z)))\n" "svint8_t svlsl_wide_z(svbool_t, svint8_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_z)))\n" "svint32_t svlsl_wide_z(svbool_t, svint32_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_z)))\n" "svint16_t svlsl_wide_z(svbool_t, svint16_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_m)))\n" "svuint8_t svlsr_m(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_m)))\n" "svuint32_t svlsr_m(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_m)))\n" "svuint64_t svlsr_m(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_m)))\n" "svuint16_t svlsr_m(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_x)))\n" "svuint8_t svlsr_x(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_x)))\n" "svuint32_t svlsr_x(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_x)))\n" "svuint64_t svlsr_x(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_x)))\n" "svuint16_t svlsr_x(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_z)))\n" "svuint8_t svlsr_z(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_z)))\n" "svuint32_t svlsr_z(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_z)))\n" "svuint64_t svlsr_z(svbool_t, svuint64_t, uint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_z)))\n" "svuint16_t svlsr_z(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_m)))\n" "svuint8_t svlsr_m(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_m)))\n" "svuint32_t svlsr_m(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_m)))\n" "svuint64_t svlsr_m(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_m)))\n" "svuint16_t svlsr_m(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_x)))\n" "svuint8_t svlsr_x(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_x)))\n" "svuint32_t svlsr_x(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_x)))\n" "svuint64_t svlsr_x(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_x)))\n" "svuint16_t svlsr_x(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_z)))\n" "svuint8_t svlsr_z(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_z)))\n" "svuint32_t svlsr_z(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_z)))\n" "svuint64_t svlsr_z(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_z)))\n" "svuint16_t svlsr_z(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_m)))\n" "svuint8_t svlsr_wide_m(svbool_t, svuint8_t, uint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_m)))\n" "svuint32_t svlsr_wide_m(svbool_t, svuint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_m)))\n" "svuint16_t svlsr_wide_m(svbool_t, svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_x)))\n" "svuint8_t svlsr_wide_x(svbool_t, svuint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_x)))\n" "svuint32_t svlsr_wide_x(svbool_t, svuint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_x)))\n" "svuint16_t svlsr_wide_x(svbool_t, svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_z)))\n" "svuint8_t svlsr_wide_z(svbool_t, svuint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_z)))\n" "svuint32_t svlsr_wide_z(svbool_t, svuint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_z)))\n" "svuint16_t svlsr_wide_z(svbool_t, svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_m)))\n" "svuint8_t svlsr_wide_m(svbool_t, svuint8_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_m)))\n" "svuint32_t svlsr_wide_m(svbool_t, svuint32_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_m)))\n" "svuint16_t svlsr_wide_m(svbool_t, svuint16_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_x)))\n" "svuint8_t svlsr_wide_x(svbool_t, svuint8_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_x)))\n" "svuint32_t svlsr_wide_x(svbool_t, svuint32_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_x)))\n" 
"svuint16_t svlsr_wide_x(svbool_t, svuint16_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_z)))\n" "svuint8_t svlsr_wide_z(svbool_t, svuint8_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_z)))\n" "svuint32_t svlsr_wide_z(svbool_t, svuint32_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_z)))\n" "svuint16_t svlsr_wide_z(svbool_t, svuint16_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_m)))\n" "svfloat64_t svmad_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_m)))\n" "svfloat32_t svmad_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_m)))\n" "svfloat16_t svmad_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_x)))\n" "svfloat64_t svmad_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_x)))\n" "svfloat32_t svmad_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_x)))\n" "svfloat16_t svmad_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_z)))\n" "svfloat64_t svmad_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_z)))\n" "svfloat32_t svmad_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_z)))\n" "svfloat16_t svmad_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_m)))\n" "svuint8_t svmad_m(svbool_t, 
svuint8_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_m)))\n" "svuint32_t svmad_m(svbool_t, svuint32_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_m)))\n" "svuint64_t svmad_m(svbool_t, svuint64_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_m)))\n" "svuint16_t svmad_m(svbool_t, svuint16_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_m)))\n" "svint8_t svmad_m(svbool_t, svint8_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_m)))\n" "svint32_t svmad_m(svbool_t, svint32_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_m)))\n" "svint64_t svmad_m(svbool_t, svint64_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_m)))\n" "svint16_t svmad_m(svbool_t, svint16_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_x)))\n" "svuint8_t svmad_x(svbool_t, svuint8_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_x)))\n" "svuint32_t svmad_x(svbool_t, svuint32_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_x)))\n" "svuint64_t svmad_x(svbool_t, svuint64_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_x)))\n" "svuint16_t svmad_x(svbool_t, svuint16_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_x)))\n" "svint8_t svmad_x(svbool_t, svint8_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_x)))\n" "svint32_t svmad_x(svbool_t, svint32_t, svint32_t, int32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_x)))\n" "svint64_t svmad_x(svbool_t, svint64_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_x)))\n" "svint16_t svmad_x(svbool_t, svint16_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_z)))\n" "svuint8_t svmad_z(svbool_t, svuint8_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_z)))\n" "svuint32_t svmad_z(svbool_t, svuint32_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_z)))\n" "svuint64_t svmad_z(svbool_t, svuint64_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_z)))\n" "svuint16_t svmad_z(svbool_t, svuint16_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_z)))\n" "svint8_t svmad_z(svbool_t, svint8_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_z)))\n" "svint32_t svmad_z(svbool_t, svint32_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_z)))\n" "svint64_t svmad_z(svbool_t, svint64_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_z)))\n" "svint16_t svmad_z(svbool_t, svint16_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_m)))\n" "svfloat64_t svmad_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_m)))\n" "svfloat32_t svmad_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_m)))\n" "svfloat16_t svmad_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_x)))\n" "svfloat64_t svmad_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_x)))\n" "svfloat32_t svmad_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_x)))\n" "svfloat16_t svmad_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_z)))\n" "svfloat64_t svmad_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_z)))\n" "svfloat32_t svmad_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_z)))\n" "svfloat16_t svmad_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_m)))\n" "svuint8_t svmad_m(svbool_t, svuint8_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_m)))\n" "svuint32_t svmad_m(svbool_t, svuint32_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_m)))\n" "svuint64_t svmad_m(svbool_t, svuint64_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_m)))\n" "svuint16_t svmad_m(svbool_t, svuint16_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_m)))\n" "svint8_t svmad_m(svbool_t, svint8_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_m)))\n" "svint32_t svmad_m(svbool_t, svint32_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_m)))\n" "svint64_t svmad_m(svbool_t, svint64_t, svint64_t, svint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_m)))\n" "svint16_t svmad_m(svbool_t, svint16_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_x)))\n" "svuint8_t svmad_x(svbool_t, svuint8_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_x)))\n" "svuint32_t svmad_x(svbool_t, svuint32_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_x)))\n" "svuint64_t svmad_x(svbool_t, svuint64_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_x)))\n" "svuint16_t svmad_x(svbool_t, svuint16_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_x)))\n" "svint8_t svmad_x(svbool_t, svint8_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_x)))\n" "svint32_t svmad_x(svbool_t, svint32_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_x)))\n" "svint64_t svmad_x(svbool_t, svint64_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_x)))\n" "svint16_t svmad_x(svbool_t, svint16_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_z)))\n" "svuint8_t svmad_z(svbool_t, svuint8_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_z)))\n" "svuint32_t svmad_z(svbool_t, svuint32_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_z)))\n" "svuint64_t svmad_z(svbool_t, svuint64_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_z)))\n" "svuint16_t svmad_z(svbool_t, svuint16_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_z)))\n" "svint8_t 
svmad_z(svbool_t, svint8_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_z)))\n" "svint32_t svmad_z(svbool_t, svint32_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_z)))\n" "svint64_t svmad_z(svbool_t, svint64_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_z)))\n" "svint16_t svmad_z(svbool_t, svint16_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_m)))\n" "svfloat64_t svmax_m(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_m)))\n" "svfloat32_t svmax_m(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_m)))\n" "svfloat16_t svmax_m(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_x)))\n" "svfloat64_t svmax_x(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_x)))\n" "svfloat32_t svmax_x(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_x)))\n" "svfloat16_t svmax_x(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_z)))\n" "svfloat64_t svmax_z(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_z)))\n" "svfloat32_t svmax_z(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_z)))\n" "svfloat16_t svmax_z(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_m)))\n" "svint8_t svmax_m(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_m)))\n" "svint32_t 
svmax_m(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_m)))\n" "svint64_t svmax_m(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_m)))\n" "svint16_t svmax_m(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_x)))\n" "svint8_t svmax_x(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_x)))\n" "svint32_t svmax_x(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_x)))\n" "svint64_t svmax_x(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_x)))\n" "svint16_t svmax_x(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_z)))\n" "svint8_t svmax_z(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_z)))\n" "svint32_t svmax_z(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_z)))\n" "svint64_t svmax_z(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_z)))\n" "svint16_t svmax_z(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_m)))\n" "svuint8_t svmax_m(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_m)))\n" "svuint32_t svmax_m(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_m)))\n" "svuint64_t svmax_m(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_m)))\n" "svuint16_t svmax_m(svbool_t, svuint16_t, uint16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_x)))\n" "svuint8_t svmax_x(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_x)))\n" "svuint32_t svmax_x(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_x)))\n" "svuint64_t svmax_x(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_x)))\n" "svuint16_t svmax_x(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_z)))\n" "svuint8_t svmax_z(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_z)))\n" "svuint32_t svmax_z(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_z)))\n" "svuint64_t svmax_z(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_z)))\n" "svuint16_t svmax_z(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_m)))\n" "svfloat64_t svmax_m(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_m)))\n" "svfloat32_t svmax_m(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_m)))\n" "svfloat16_t svmax_m(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_x)))\n" "svfloat64_t svmax_x(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_x)))\n" "svfloat32_t svmax_x(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_x)))\n" "svfloat16_t svmax_x(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_z)))\n" "svfloat64_t svmax_z(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_z)))\n" "svfloat32_t svmax_z(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_z)))\n" "svfloat16_t svmax_z(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_m)))\n" "svint8_t svmax_m(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_m)))\n" "svint32_t svmax_m(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_m)))\n" "svint64_t svmax_m(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_m)))\n" "svint16_t svmax_m(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_x)))\n" "svint8_t svmax_x(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_x)))\n" "svint32_t svmax_x(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_x)))\n" "svint64_t svmax_x(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_x)))\n" "svint16_t svmax_x(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_z)))\n" "svint8_t svmax_z(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_z)))\n" "svint32_t svmax_z(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_z)))\n" "svint64_t svmax_z(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_z)))\n" "svint16_t svmax_z(svbool_t, 
svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_m)))\n" "svuint8_t svmax_m(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_m)))\n" "svuint32_t svmax_m(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_m)))\n" "svuint64_t svmax_m(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_m)))\n" "svuint16_t svmax_m(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_x)))\n" "svuint8_t svmax_x(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_x)))\n" "svuint32_t svmax_x(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_x)))\n" "svuint64_t svmax_x(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_x)))\n" "svuint16_t svmax_x(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_z)))\n" "svuint8_t svmax_z(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_z)))\n" "svuint32_t svmax_z(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_z)))\n" "svuint64_t svmax_z(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_z)))\n" "svuint16_t svmax_z(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_m)))\n" "svfloat64_t svmaxnm_m(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_m)))\n" "svfloat32_t svmaxnm_m(svbool_t, svfloat32_t, float32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_m)))\n" "svfloat16_t svmaxnm_m(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_x)))\n" "svfloat64_t svmaxnm_x(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_x)))\n" "svfloat32_t svmaxnm_x(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_x)))\n" "svfloat16_t svmaxnm_x(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_z)))\n" "svfloat64_t svmaxnm_z(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_z)))\n" "svfloat32_t svmaxnm_z(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_z)))\n" "svfloat16_t svmaxnm_z(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_m)))\n" "svfloat64_t svmaxnm_m(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_m)))\n" "svfloat32_t svmaxnm_m(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_m)))\n" "svfloat16_t svmaxnm_m(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_x)))\n" "svfloat64_t svmaxnm_x(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_x)))\n" "svfloat32_t svmaxnm_x(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_x)))\n" "svfloat16_t svmaxnm_x(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_z)))\n" "svfloat64_t svmaxnm_z(svbool_t, 
svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_z)))\n" "svfloat32_t svmaxnm_z(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_z)))\n" "svfloat16_t svmaxnm_z(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f64)))\n" "float64_t svmaxnmv(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f32)))\n" "float32_t svmaxnmv(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f16)))\n" "float16_t svmaxnmv(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f64)))\n" "float64_t svmaxv(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f32)))\n" "float32_t svmaxv(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f16)))\n" "float16_t svmaxv(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s8)))\n" "int8_t svmaxv(svbool_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s32)))\n" "int32_t svmaxv(svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s64)))\n" "int64_t svmaxv(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s16)))\n" "int16_t svmaxv(svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u8)))\n" "uint8_t svmaxv(svbool_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u32)))\n" "uint32_t svmaxv(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u64)))\n" "uint64_t svmaxv(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u16)))\n" 
"uint16_t svmaxv(svbool_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_m)))\n" "svfloat64_t svmin_m(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_m)))\n" "svfloat32_t svmin_m(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_m)))\n" "svfloat16_t svmin_m(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_x)))\n" "svfloat64_t svmin_x(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_x)))\n" "svfloat32_t svmin_x(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_x)))\n" "svfloat16_t svmin_x(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_z)))\n" "svfloat64_t svmin_z(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_z)))\n" "svfloat32_t svmin_z(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_z)))\n" "svfloat16_t svmin_z(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_m)))\n" "svint8_t svmin_m(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_m)))\n" "svint32_t svmin_m(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_m)))\n" "svint64_t svmin_m(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_m)))\n" "svint16_t svmin_m(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_x)))\n" "svint8_t svmin_x(svbool_t, svint8_t, int8_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_x)))\n" "svint32_t svmin_x(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_x)))\n" "svint64_t svmin_x(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_x)))\n" "svint16_t svmin_x(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_z)))\n" "svint8_t svmin_z(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_z)))\n" "svint32_t svmin_z(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_z)))\n" "svint64_t svmin_z(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_z)))\n" "svint16_t svmin_z(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_m)))\n" "svuint8_t svmin_m(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_m)))\n" "svuint32_t svmin_m(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_m)))\n" "svuint64_t svmin_m(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_m)))\n" "svuint16_t svmin_m(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_x)))\n" "svuint8_t svmin_x(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_x)))\n" "svuint32_t svmin_x(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_x)))\n" "svuint64_t svmin_x(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_x)))\n" "svuint16_t 
svmin_x(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_z)))\n" "svuint8_t svmin_z(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_z)))\n" "svuint32_t svmin_z(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_z)))\n" "svuint64_t svmin_z(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_z)))\n" "svuint16_t svmin_z(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_m)))\n" "svfloat64_t svmin_m(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_m)))\n" "svfloat32_t svmin_m(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_m)))\n" "svfloat16_t svmin_m(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_x)))\n" "svfloat64_t svmin_x(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_x)))\n" "svfloat32_t svmin_x(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_x)))\n" "svfloat16_t svmin_x(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_z)))\n" "svfloat64_t svmin_z(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_z)))\n" "svfloat32_t svmin_z(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_z)))\n" "svfloat16_t svmin_z(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_m)))\n" "svint8_t svmin_m(svbool_t, svint8_t, svint8_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_m)))\n" "svint32_t svmin_m(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_m)))\n" "svint64_t svmin_m(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_m)))\n" "svint16_t svmin_m(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_x)))\n" "svint8_t svmin_x(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_x)))\n" "svint32_t svmin_x(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_x)))\n" "svint64_t svmin_x(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_x)))\n" "svint16_t svmin_x(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_z)))\n" "svint8_t svmin_z(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_z)))\n" "svint32_t svmin_z(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_z)))\n" "svint64_t svmin_z(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_z)))\n" "svint16_t svmin_z(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_m)))\n" "svuint8_t svmin_m(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_m)))\n" "svuint32_t svmin_m(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_m)))\n" "svuint64_t svmin_m(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_m)))\n" "svuint16_t svmin_m(svbool_t, 
svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_x)))\n" "svuint8_t svmin_x(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_x)))\n" "svuint32_t svmin_x(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_x)))\n" "svuint64_t svmin_x(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_x)))\n" "svuint16_t svmin_x(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_z)))\n" "svuint8_t svmin_z(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_z)))\n" "svuint32_t svmin_z(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_z)))\n" "svuint64_t svmin_z(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_z)))\n" "svuint16_t svmin_z(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_m)))\n" "svfloat64_t svminnm_m(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_m)))\n" "svfloat32_t svminnm_m(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_m)))\n" "svfloat16_t svminnm_m(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_x)))\n" "svfloat64_t svminnm_x(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_x)))\n" "svfloat32_t svminnm_x(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_x)))\n" "svfloat16_t svminnm_x(svbool_t, svfloat16_t, float16_t);\n" 
"__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_z)))\n" "svfloat64_t svminnm_z(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_z)))\n" "svfloat32_t svminnm_z(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_z)))\n" "svfloat16_t svminnm_z(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_m)))\n" "svfloat64_t svminnm_m(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_m)))\n" "svfloat32_t svminnm_m(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_m)))\n" "svfloat16_t svminnm_m(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_x)))\n" "svfloat64_t svminnm_x(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_x)))\n" "svfloat32_t svminnm_x(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_x)))\n" "svfloat16_t svminnm_x(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_z)))\n" "svfloat64_t svminnm_z(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_z)))\n" "svfloat32_t svminnm_z(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_z)))\n" "svfloat16_t svminnm_z(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f64)))\n" "float64_t svminnmv(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f32)))\n" "float32_t svminnmv(svbool_t, svfloat32_t);\n" 
"__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f16)))\n" "float16_t svminnmv(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f64)))\n" "float64_t svminv(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f32)))\n" "float32_t svminv(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f16)))\n" "float16_t svminv(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s8)))\n" "int8_t svminv(svbool_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s32)))\n" "int32_t svminv(svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s64)))\n" "int64_t svminv(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s16)))\n" "int16_t svminv(svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u8)))\n" "uint8_t svminv(svbool_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u32)))\n" "uint32_t svminv(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u64)))\n" "uint64_t svminv(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u16)))\n" "uint16_t svminv(svbool_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_m)))\n" "svfloat64_t svmla_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_m)))\n" "svfloat32_t svmla_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_m)))\n" "svfloat16_t svmla_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_x)))\n" "svfloat64_t svmla_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_x)))\n" "svfloat32_t svmla_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_x)))\n" "svfloat16_t svmla_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_z)))\n" "svfloat64_t svmla_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_z)))\n" "svfloat32_t svmla_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_z)))\n" "svfloat16_t svmla_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_m)))\n" "svuint8_t svmla_m(svbool_t, svuint8_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_m)))\n" "svuint32_t svmla_m(svbool_t, svuint32_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_m)))\n" "svuint64_t svmla_m(svbool_t, svuint64_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_m)))\n" "svuint16_t svmla_m(svbool_t, svuint16_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_m)))\n" "svint8_t svmla_m(svbool_t, svint8_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_m)))\n" "svint32_t svmla_m(svbool_t, svint32_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_m)))\n" "svint64_t svmla_m(svbool_t, svint64_t, svint64_t, int64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_m)))\n" "svint16_t svmla_m(svbool_t, svint16_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_x)))\n" "svuint8_t svmla_x(svbool_t, svuint8_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_x)))\n" "svuint32_t svmla_x(svbool_t, svuint32_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_x)))\n" "svuint64_t svmla_x(svbool_t, svuint64_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_x)))\n" "svuint16_t svmla_x(svbool_t, svuint16_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_x)))\n" "svint8_t svmla_x(svbool_t, svint8_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_x)))\n" "svint32_t svmla_x(svbool_t, svint32_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_x)))\n" "svint64_t svmla_x(svbool_t, svint64_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_x)))\n" "svint16_t svmla_x(svbool_t, svint16_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_z)))\n" "svuint8_t svmla_z(svbool_t, svuint8_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_z)))\n" "svuint32_t svmla_z(svbool_t, svuint32_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_z)))\n" "svuint64_t svmla_z(svbool_t, svuint64_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_z)))\n" "svuint16_t svmla_z(svbool_t, svuint16_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_z)))\n" "svint8_t 
svmla_z(svbool_t, svint8_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_z)))\n" "svint32_t svmla_z(svbool_t, svint32_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_z)))\n" "svint64_t svmla_z(svbool_t, svint64_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_z)))\n" "svint16_t svmla_z(svbool_t, svint16_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_m)))\n" "svfloat64_t svmla_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_m)))\n" "svfloat32_t svmla_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_m)))\n" "svfloat16_t svmla_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_x)))\n" "svfloat64_t svmla_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_x)))\n" "svfloat32_t svmla_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_x)))\n" "svfloat16_t svmla_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_z)))\n" "svfloat64_t svmla_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_z)))\n" "svfloat32_t svmla_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_z)))\n" "svfloat16_t svmla_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_m)))\n" "svuint8_t svmla_m(svbool_t, svuint8_t, 
svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_m)))\n" "svuint32_t svmla_m(svbool_t, svuint32_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_m)))\n" "svuint64_t svmla_m(svbool_t, svuint64_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_m)))\n" "svuint16_t svmla_m(svbool_t, svuint16_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_m)))\n" "svint8_t svmla_m(svbool_t, svint8_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_m)))\n" "svint32_t svmla_m(svbool_t, svint32_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_m)))\n" "svint64_t svmla_m(svbool_t, svint64_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_m)))\n" "svint16_t svmla_m(svbool_t, svint16_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_x)))\n" "svuint8_t svmla_x(svbool_t, svuint8_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_x)))\n" "svuint32_t svmla_x(svbool_t, svuint32_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_x)))\n" "svuint64_t svmla_x(svbool_t, svuint64_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_x)))\n" "svuint16_t svmla_x(svbool_t, svuint16_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_x)))\n" "svint8_t svmla_x(svbool_t, svint8_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_x)))\n" "svint32_t svmla_x(svbool_t, svint32_t, svint32_t, svint32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_x)))\n" "svint64_t svmla_x(svbool_t, svint64_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_x)))\n" "svint16_t svmla_x(svbool_t, svint16_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_z)))\n" "svuint8_t svmla_z(svbool_t, svuint8_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_z)))\n" "svuint32_t svmla_z(svbool_t, svuint32_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_z)))\n" "svuint64_t svmla_z(svbool_t, svuint64_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_z)))\n" "svuint16_t svmla_z(svbool_t, svuint16_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_z)))\n" "svint8_t svmla_z(svbool_t, svint8_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_z)))\n" "svint32_t svmla_z(svbool_t, svint32_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_z)))\n" "svint64_t svmla_z(svbool_t, svint64_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_z)))\n" "svint16_t svmla_z(svbool_t, svint16_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f64)))\n" "svfloat64_t svmla_lane(svfloat64_t, svfloat64_t, svfloat64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f32)))\n" "svfloat32_t svmla_lane(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f16)))\n" "svfloat16_t svmla_lane(svfloat16_t, svfloat16_t, svfloat16_t, uint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_m)))\n" "svfloat64_t svmls_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_m)))\n" "svfloat32_t svmls_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_m)))\n" "svfloat16_t svmls_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_x)))\n" "svfloat64_t svmls_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_x)))\n" "svfloat32_t svmls_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_x)))\n" "svfloat16_t svmls_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_z)))\n" "svfloat64_t svmls_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_z)))\n" "svfloat32_t svmls_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_z)))\n" "svfloat16_t svmls_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_m)))\n" "svuint8_t svmls_m(svbool_t, svuint8_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_m)))\n" "svuint32_t svmls_m(svbool_t, svuint32_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_m)))\n" "svuint64_t svmls_m(svbool_t, svuint64_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_m)))\n" "svuint16_t svmls_m(svbool_t, svuint16_t, svuint16_t, uint16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_m)))\n" "svint8_t svmls_m(svbool_t, svint8_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_m)))\n" "svint32_t svmls_m(svbool_t, svint32_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_m)))\n" "svint64_t svmls_m(svbool_t, svint64_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_m)))\n" "svint16_t svmls_m(svbool_t, svint16_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_x)))\n" "svuint8_t svmls_x(svbool_t, svuint8_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_x)))\n" "svuint32_t svmls_x(svbool_t, svuint32_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_x)))\n" "svuint64_t svmls_x(svbool_t, svuint64_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_x)))\n" "svuint16_t svmls_x(svbool_t, svuint16_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_x)))\n" "svint8_t svmls_x(svbool_t, svint8_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_x)))\n" "svint32_t svmls_x(svbool_t, svint32_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_x)))\n" "svint64_t svmls_x(svbool_t, svint64_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_x)))\n" "svint16_t svmls_x(svbool_t, svint16_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_z)))\n" "svuint8_t svmls_z(svbool_t, svuint8_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_z)))\n" "svuint32_t 
svmls_z(svbool_t, svuint32_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_z)))\n" "svuint64_t svmls_z(svbool_t, svuint64_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_z)))\n" "svuint16_t svmls_z(svbool_t, svuint16_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_z)))\n" "svint8_t svmls_z(svbool_t, svint8_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_z)))\n" "svint32_t svmls_z(svbool_t, svint32_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_z)))\n" "svint64_t svmls_z(svbool_t, svint64_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_z)))\n" "svint16_t svmls_z(svbool_t, svint16_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_m)))\n" "svfloat64_t svmls_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_m)))\n" "svfloat32_t svmls_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_m)))\n" "svfloat16_t svmls_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_x)))\n" "svfloat64_t svmls_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_x)))\n" "svfloat32_t svmls_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_x)))\n" "svfloat16_t svmls_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_z)))\n" "svfloat64_t svmls_z(svbool_t, svfloat64_t, svfloat64_t, 
svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_z)))\n" "svfloat32_t svmls_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_z)))\n" "svfloat16_t svmls_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_m)))\n" "svuint8_t svmls_m(svbool_t, svuint8_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_m)))\n" "svuint32_t svmls_m(svbool_t, svuint32_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_m)))\n" "svuint64_t svmls_m(svbool_t, svuint64_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_m)))\n" "svuint16_t svmls_m(svbool_t, svuint16_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_m)))\n" "svint8_t svmls_m(svbool_t, svint8_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_m)))\n" "svint32_t svmls_m(svbool_t, svint32_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_m)))\n" "svint64_t svmls_m(svbool_t, svint64_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_m)))\n" "svint16_t svmls_m(svbool_t, svint16_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_x)))\n" "svuint8_t svmls_x(svbool_t, svuint8_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_x)))\n" "svuint32_t svmls_x(svbool_t, svuint32_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_x)))\n" "svuint64_t svmls_x(svbool_t, svuint64_t, svuint64_t, svuint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_x)))\n" "svuint16_t svmls_x(svbool_t, svuint16_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_x)))\n" "svint8_t svmls_x(svbool_t, svint8_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_x)))\n" "svint32_t svmls_x(svbool_t, svint32_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_x)))\n" "svint64_t svmls_x(svbool_t, svint64_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_x)))\n" "svint16_t svmls_x(svbool_t, svint16_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_z)))\n" "svuint8_t svmls_z(svbool_t, svuint8_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_z)))\n" "svuint32_t svmls_z(svbool_t, svuint32_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_z)))\n" "svuint64_t svmls_z(svbool_t, svuint64_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_z)))\n" "svuint16_t svmls_z(svbool_t, svuint16_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_z)))\n" "svint8_t svmls_z(svbool_t, svint8_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_z)))\n" "svint32_t svmls_z(svbool_t, svint32_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_z)))\n" "svint64_t svmls_z(svbool_t, svint64_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_z)))\n" "svint16_t svmls_z(svbool_t, svint16_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f64)))\n" "svfloat64_t 
svmls_lane(svfloat64_t, svfloat64_t, svfloat64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f32)))\n" "svfloat32_t svmls_lane(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f16)))\n" "svfloat16_t svmls_lane(svfloat16_t, svfloat16_t, svfloat16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmov_b_z)))\n" "svbool_t svmov_z(svbool_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_m)))\n" "svfloat64_t svmsb_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_m)))\n" "svfloat32_t svmsb_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_m)))\n" "svfloat16_t svmsb_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_x)))\n" "svfloat64_t svmsb_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_x)))\n" "svfloat32_t svmsb_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_x)))\n" "svfloat16_t svmsb_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_z)))\n" "svfloat64_t svmsb_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_z)))\n" "svfloat32_t svmsb_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_z)))\n" "svfloat16_t svmsb_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_m)))\n" "svuint8_t svmsb_m(svbool_t, 
svuint8_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_m)))\n" "svuint32_t svmsb_m(svbool_t, svuint32_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_m)))\n" "svuint64_t svmsb_m(svbool_t, svuint64_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_m)))\n" "svuint16_t svmsb_m(svbool_t, svuint16_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_m)))\n" "svint8_t svmsb_m(svbool_t, svint8_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_m)))\n" "svint32_t svmsb_m(svbool_t, svint32_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_m)))\n" "svint64_t svmsb_m(svbool_t, svint64_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_m)))\n" "svint16_t svmsb_m(svbool_t, svint16_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_x)))\n" "svuint8_t svmsb_x(svbool_t, svuint8_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_x)))\n" "svuint32_t svmsb_x(svbool_t, svuint32_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_x)))\n" "svuint64_t svmsb_x(svbool_t, svuint64_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_x)))\n" "svuint16_t svmsb_x(svbool_t, svuint16_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_x)))\n" "svint8_t svmsb_x(svbool_t, svint8_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_x)))\n" "svint32_t svmsb_x(svbool_t, svint32_t, svint32_t, int32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_x)))\n" "svint64_t svmsb_x(svbool_t, svint64_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_x)))\n" "svint16_t svmsb_x(svbool_t, svint16_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_z)))\n" "svuint8_t svmsb_z(svbool_t, svuint8_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_z)))\n" "svuint32_t svmsb_z(svbool_t, svuint32_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_z)))\n" "svuint64_t svmsb_z(svbool_t, svuint64_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_z)))\n" "svuint16_t svmsb_z(svbool_t, svuint16_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_z)))\n" "svint8_t svmsb_z(svbool_t, svint8_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_z)))\n" "svint32_t svmsb_z(svbool_t, svint32_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_z)))\n" "svint64_t svmsb_z(svbool_t, svint64_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_z)))\n" "svint16_t svmsb_z(svbool_t, svint16_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_m)))\n" "svfloat64_t svmsb_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_m)))\n" "svfloat32_t svmsb_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_m)))\n" "svfloat16_t svmsb_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_x)))\n" "svfloat64_t svmsb_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_x)))\n" "svfloat32_t svmsb_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_x)))\n" "svfloat16_t svmsb_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_z)))\n" "svfloat64_t svmsb_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_z)))\n" "svfloat32_t svmsb_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_z)))\n" "svfloat16_t svmsb_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_m)))\n" "svuint8_t svmsb_m(svbool_t, svuint8_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_m)))\n" "svuint32_t svmsb_m(svbool_t, svuint32_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_m)))\n" "svuint64_t svmsb_m(svbool_t, svuint64_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_m)))\n" "svuint16_t svmsb_m(svbool_t, svuint16_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_m)))\n" "svint8_t svmsb_m(svbool_t, svint8_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_m)))\n" "svint32_t svmsb_m(svbool_t, svint32_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_m)))\n" "svint64_t svmsb_m(svbool_t, svint64_t, svint64_t, svint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_m)))\n" "svint16_t svmsb_m(svbool_t, svint16_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_x)))\n" "svuint8_t svmsb_x(svbool_t, svuint8_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_x)))\n" "svuint32_t svmsb_x(svbool_t, svuint32_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_x)))\n" "svuint64_t svmsb_x(svbool_t, svuint64_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_x)))\n" "svuint16_t svmsb_x(svbool_t, svuint16_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_x)))\n" "svint8_t svmsb_x(svbool_t, svint8_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_x)))\n" "svint32_t svmsb_x(svbool_t, svint32_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_x)))\n" "svint64_t svmsb_x(svbool_t, svint64_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_x)))\n" "svint16_t svmsb_x(svbool_t, svint16_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_z)))\n" "svuint8_t svmsb_z(svbool_t, svuint8_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_z)))\n" "svuint32_t svmsb_z(svbool_t, svuint32_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_z)))\n" "svuint64_t svmsb_z(svbool_t, svuint64_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_z)))\n" "svuint16_t svmsb_z(svbool_t, svuint16_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_z)))\n" "svint8_t 
svmsb_z(svbool_t, svint8_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_z)))\n" "svint32_t svmsb_z(svbool_t, svint32_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_z)))\n" "svint64_t svmsb_z(svbool_t, svint64_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_z)))\n" "svint16_t svmsb_z(svbool_t, svint16_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_m)))\n" "svfloat64_t svmul_m(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_m)))\n" "svfloat32_t svmul_m(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_m)))\n" "svfloat16_t svmul_m(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_x)))\n" "svfloat64_t svmul_x(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_x)))\n" "svfloat32_t svmul_x(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_x)))\n" "svfloat16_t svmul_x(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_z)))\n" "svfloat64_t svmul_z(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_z)))\n" "svfloat32_t svmul_z(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_z)))\n" "svfloat16_t svmul_z(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_m)))\n" "svuint8_t svmul_m(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_m)))\n" "svuint32_t 
svmul_m(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_m)))\n" "svuint64_t svmul_m(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_m)))\n" "svuint16_t svmul_m(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_m)))\n" "svint8_t svmul_m(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_m)))\n" "svint32_t svmul_m(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_m)))\n" "svint64_t svmul_m(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_m)))\n" "svint16_t svmul_m(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_x)))\n" "svuint8_t svmul_x(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_x)))\n" "svuint32_t svmul_x(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_x)))\n" "svuint64_t svmul_x(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_x)))\n" "svuint16_t svmul_x(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_x)))\n" "svint8_t svmul_x(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_x)))\n" "svint32_t svmul_x(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_x)))\n" "svint64_t svmul_x(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_x)))\n" "svint16_t svmul_x(svbool_t, svint16_t, int16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_z)))\n" "svuint8_t svmul_z(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_z)))\n" "svuint32_t svmul_z(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_z)))\n" "svuint64_t svmul_z(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_z)))\n" "svuint16_t svmul_z(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_z)))\n" "svint8_t svmul_z(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_z)))\n" "svint32_t svmul_z(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_z)))\n" "svint64_t svmul_z(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_z)))\n" "svint16_t svmul_z(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_m)))\n" "svfloat64_t svmul_m(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_m)))\n" "svfloat32_t svmul_m(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_m)))\n" "svfloat16_t svmul_m(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_x)))\n" "svfloat64_t svmul_x(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_x)))\n" "svfloat32_t svmul_x(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_x)))\n" "svfloat16_t svmul_x(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_z)))\n" "svfloat64_t svmul_z(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_z)))\n" "svfloat32_t svmul_z(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_z)))\n" "svfloat16_t svmul_z(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_m)))\n" "svuint8_t svmul_m(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_m)))\n" "svuint32_t svmul_m(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_m)))\n" "svuint64_t svmul_m(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_m)))\n" "svuint16_t svmul_m(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_m)))\n" "svint8_t svmul_m(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_m)))\n" "svint32_t svmul_m(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_m)))\n" "svint64_t svmul_m(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_m)))\n" "svint16_t svmul_m(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_x)))\n" "svuint8_t svmul_x(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_x)))\n" "svuint32_t svmul_x(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_x)))\n" "svuint64_t svmul_x(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_x)))\n" 
"svuint16_t svmul_x(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_x)))\n" "svint8_t svmul_x(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_x)))\n" "svint32_t svmul_x(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_x)))\n" "svint64_t svmul_x(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_x)))\n" "svint16_t svmul_x(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_z)))\n" "svuint8_t svmul_z(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_z)))\n" "svuint32_t svmul_z(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_z)))\n" "svuint64_t svmul_z(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_z)))\n" "svuint16_t svmul_z(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_z)))\n" "svint8_t svmul_z(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_z)))\n" "svint32_t svmul_z(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_z)))\n" "svint64_t svmul_z(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_z)))\n" "svint16_t svmul_z(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f64)))\n" "svfloat64_t svmul_lane(svfloat64_t, svfloat64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f32)))\n" "svfloat32_t svmul_lane(svfloat32_t, svfloat32_t, uint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f16)))\n" "svfloat16_t svmul_lane(svfloat16_t, svfloat16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_m)))\n" "svint8_t svmulh_m(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_m)))\n" "svint32_t svmulh_m(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_m)))\n" "svint64_t svmulh_m(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_m)))\n" "svint16_t svmulh_m(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_x)))\n" "svint8_t svmulh_x(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_x)))\n" "svint32_t svmulh_x(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_x)))\n" "svint64_t svmulh_x(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_x)))\n" "svint16_t svmulh_x(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_z)))\n" "svint8_t svmulh_z(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_z)))\n" "svint32_t svmulh_z(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_z)))\n" "svint64_t svmulh_z(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_z)))\n" "svint16_t svmulh_z(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_m)))\n" "svuint8_t svmulh_m(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_m)))\n" 
"svuint32_t svmulh_m(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_m)))\n" "svuint64_t svmulh_m(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_m)))\n" "svuint16_t svmulh_m(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_x)))\n" "svuint8_t svmulh_x(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_x)))\n" "svuint32_t svmulh_x(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_x)))\n" "svuint64_t svmulh_x(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_x)))\n" "svuint16_t svmulh_x(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_z)))\n" "svuint8_t svmulh_z(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_z)))\n" "svuint32_t svmulh_z(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_z)))\n" "svuint64_t svmulh_z(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_z)))\n" "svuint16_t svmulh_z(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_m)))\n" "svint8_t svmulh_m(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_m)))\n" "svint32_t svmulh_m(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_m)))\n" "svint64_t svmulh_m(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_m)))\n" "svint16_t svmulh_m(svbool_t, svint16_t, svint16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_x)))\n" "svint8_t svmulh_x(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_x)))\n" "svint32_t svmulh_x(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_x)))\n" "svint64_t svmulh_x(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_x)))\n" "svint16_t svmulh_x(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_z)))\n" "svint8_t svmulh_z(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_z)))\n" "svint32_t svmulh_z(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_z)))\n" "svint64_t svmulh_z(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_z)))\n" "svint16_t svmulh_z(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_m)))\n" "svuint8_t svmulh_m(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_m)))\n" "svuint32_t svmulh_m(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_m)))\n" "svuint64_t svmulh_m(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_m)))\n" "svuint16_t svmulh_m(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_x)))\n" "svuint8_t svmulh_x(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_x)))\n" "svuint32_t svmulh_x(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_x)))\n" 
"svuint64_t svmulh_x(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_x)))\n" "svuint16_t svmulh_x(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_z)))\n" "svuint8_t svmulh_z(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_z)))\n" "svuint32_t svmulh_z(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_z)))\n" "svuint64_t svmulh_z(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_z)))\n" "svuint16_t svmulh_z(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_m)))\n" "svfloat64_t svmulx_m(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_m)))\n" "svfloat32_t svmulx_m(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_m)))\n" "svfloat16_t svmulx_m(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_x)))\n" "svfloat64_t svmulx_x(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_x)))\n" "svfloat32_t svmulx_x(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_x)))\n" "svfloat16_t svmulx_x(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_z)))\n" "svfloat64_t svmulx_z(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_z)))\n" "svfloat32_t svmulx_z(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_z)))\n" "svfloat16_t 
svmulx_z(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_m)))\n" "svfloat64_t svmulx_m(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_m)))\n" "svfloat32_t svmulx_m(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_m)))\n" "svfloat16_t svmulx_m(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_x)))\n" "svfloat64_t svmulx_x(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_x)))\n" "svfloat32_t svmulx_x(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_x)))\n" "svfloat16_t svmulx_x(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_z)))\n" "svfloat64_t svmulx_z(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_z)))\n" "svfloat32_t svmulx_z(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_z)))\n" "svfloat16_t svmulx_z(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnand_b_z)))\n" "svbool_t svnand_z(svbool_t, svbool_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_m)))\n" "svfloat64_t svneg_m(svfloat64_t, svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_m)))\n" "svfloat32_t svneg_m(svfloat32_t, svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_m)))\n" "svfloat16_t svneg_m(svfloat16_t, svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_x)))\n" "svfloat64_t svneg_x(svbool_t, 
svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_x)))\n" "svfloat32_t svneg_x(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_x)))\n" "svfloat16_t svneg_x(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_z)))\n" "svfloat64_t svneg_z(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_z)))\n" "svfloat32_t svneg_z(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_z)))\n" "svfloat16_t svneg_z(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_m)))\n" "svint8_t svneg_m(svint8_t, svbool_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_m)))\n" "svint32_t svneg_m(svint32_t, svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_m)))\n" "svint64_t svneg_m(svint64_t, svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_m)))\n" "svint16_t svneg_m(svint16_t, svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_x)))\n" "svint8_t svneg_x(svbool_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_x)))\n" "svint32_t svneg_x(svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_x)))\n" "svint64_t svneg_x(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_x)))\n" "svint16_t svneg_x(svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_z)))\n" "svint8_t svneg_z(svbool_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_z)))\n" "svint32_t svneg_z(svbool_t, svint32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_z)))\n" "svint64_t svneg_z(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_z)))\n" "svint16_t svneg_z(svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_m)))\n" "svfloat64_t svnmad_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_m)))\n" "svfloat32_t svnmad_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_m)))\n" "svfloat16_t svnmad_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_x)))\n" "svfloat64_t svnmad_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_x)))\n" "svfloat32_t svnmad_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_x)))\n" "svfloat16_t svnmad_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_z)))\n" "svfloat64_t svnmad_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_z)))\n" "svfloat32_t svnmad_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_z)))\n" "svfloat16_t svnmad_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_m)))\n" "svfloat64_t svnmad_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_m)))\n" "svfloat32_t svnmad_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_m)))\n" "svfloat16_t svnmad_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_x)))\n" "svfloat64_t svnmad_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_x)))\n" "svfloat32_t svnmad_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_x)))\n" "svfloat16_t svnmad_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_z)))\n" "svfloat64_t svnmad_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_z)))\n" "svfloat32_t svnmad_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_z)))\n" "svfloat16_t svnmad_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_m)))\n" "svfloat64_t svnmla_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_m)))\n" "svfloat32_t svnmla_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_m)))\n" "svfloat16_t svnmla_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_x)))\n" "svfloat64_t svnmla_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_x)))\n" "svfloat32_t svnmla_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_x)))\n" "svfloat16_t svnmla_x(svbool_t, svfloat16_t, 
svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_z)))\n" "svfloat64_t svnmla_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_z)))\n" "svfloat32_t svnmla_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_z)))\n" "svfloat16_t svnmla_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_m)))\n" "svfloat64_t svnmla_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_m)))\n" "svfloat32_t svnmla_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_m)))\n" "svfloat16_t svnmla_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_x)))\n" "svfloat64_t svnmla_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_x)))\n" "svfloat32_t svnmla_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_x)))\n" "svfloat16_t svnmla_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_z)))\n" "svfloat64_t svnmla_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_z)))\n" "svfloat32_t svnmla_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_z)))\n" "svfloat16_t svnmla_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_m)))\n" "svfloat64_t 
svnmls_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_m)))\n" "svfloat32_t svnmls_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_m)))\n" "svfloat16_t svnmls_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_x)))\n" "svfloat64_t svnmls_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_x)))\n" "svfloat32_t svnmls_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_x)))\n" "svfloat16_t svnmls_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_z)))\n" "svfloat64_t svnmls_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_z)))\n" "svfloat32_t svnmls_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_z)))\n" "svfloat16_t svnmls_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_m)))\n" "svfloat64_t svnmls_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_m)))\n" "svfloat32_t svnmls_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_m)))\n" "svfloat16_t svnmls_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_x)))\n" "svfloat64_t svnmls_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_x)))\n" "svfloat32_t svnmls_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_x)))\n" "svfloat16_t svnmls_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_z)))\n" "svfloat64_t svnmls_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_z)))\n" "svfloat32_t svnmls_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_z)))\n" "svfloat16_t svnmls_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_m)))\n" "svfloat64_t svnmsb_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_m)))\n" "svfloat32_t svnmsb_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_m)))\n" "svfloat16_t svnmsb_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_x)))\n" "svfloat64_t svnmsb_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_x)))\n" "svfloat32_t svnmsb_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_x)))\n" "svfloat16_t svnmsb_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_z)))\n" "svfloat64_t svnmsb_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_z)))\n" "svfloat32_t svnmsb_z(svbool_t, svfloat32_t, 
svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_z)))\n" "svfloat16_t svnmsb_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_m)))\n" "svfloat64_t svnmsb_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_m)))\n" "svfloat32_t svnmsb_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_m)))\n" "svfloat16_t svnmsb_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_x)))\n" "svfloat64_t svnmsb_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_x)))\n" "svfloat32_t svnmsb_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_x)))\n" "svfloat16_t svnmsb_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_z)))\n" "svfloat64_t svnmsb_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_z)))\n" "svfloat32_t svnmsb_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_z)))\n" "svfloat16_t svnmsb_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnor_b_z)))\n" "svbool_t svnor_z(svbool_t, svbool_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_b_z)))\n" "svbool_t svnot_z(svbool_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_m)))\n" "svuint8_t svnot_m(svuint8_t, svbool_t, svuint8_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_m)))\n" "svuint32_t svnot_m(svuint32_t, svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_m)))\n" "svuint64_t svnot_m(svuint64_t, svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_m)))\n" "svuint16_t svnot_m(svuint16_t, svbool_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_m)))\n" "svint8_t svnot_m(svint8_t, svbool_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_m)))\n" "svint32_t svnot_m(svint32_t, svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_m)))\n" "svint64_t svnot_m(svint64_t, svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_m)))\n" "svint16_t svnot_m(svint16_t, svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_x)))\n" "svuint8_t svnot_x(svbool_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_x)))\n" "svuint32_t svnot_x(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_x)))\n" "svuint64_t svnot_x(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_x)))\n" "svuint16_t svnot_x(svbool_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_x)))\n" "svint8_t svnot_x(svbool_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_x)))\n" "svint32_t svnot_x(svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_x)))\n" "svint64_t svnot_x(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_x)))\n" "svint16_t svnot_x(svbool_t, svint16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_z)))\n" "svuint8_t svnot_z(svbool_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_z)))\n" "svuint32_t svnot_z(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_z)))\n" "svuint64_t svnot_z(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_z)))\n" "svuint16_t svnot_z(svbool_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_z)))\n" "svint8_t svnot_z(svbool_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_z)))\n" "svint32_t svnot_z(svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_z)))\n" "svint64_t svnot_z(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_z)))\n" "svint16_t svnot_z(svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorn_b_z)))\n" "svbool_t svorn_z(svbool_t, svbool_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_b_z)))\n" "svbool_t svorr_z(svbool_t, svbool_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_m)))\n" "svuint8_t svorr_m(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_m)))\n" "svuint32_t svorr_m(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_m)))\n" "svuint64_t svorr_m(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_m)))\n" "svuint16_t svorr_m(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_m)))\n" "svint8_t svorr_m(svbool_t, svint8_t, int8_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_m)))\n" "svint32_t svorr_m(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_m)))\n" "svint64_t svorr_m(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_m)))\n" "svint16_t svorr_m(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_x)))\n" "svuint8_t svorr_x(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_x)))\n" "svuint32_t svorr_x(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_x)))\n" "svuint64_t svorr_x(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_x)))\n" "svuint16_t svorr_x(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_x)))\n" "svint8_t svorr_x(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_x)))\n" "svint32_t svorr_x(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_x)))\n" "svint64_t svorr_x(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_x)))\n" "svint16_t svorr_x(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_z)))\n" "svuint8_t svorr_z(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_z)))\n" "svuint32_t svorr_z(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_z)))\n" "svuint64_t svorr_z(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_z)))\n" "svuint16_t 
svorr_z(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_z)))\n" "svint8_t svorr_z(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_z)))\n" "svint32_t svorr_z(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_z)))\n" "svint64_t svorr_z(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_z)))\n" "svint16_t svorr_z(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_m)))\n" "svuint8_t svorr_m(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_m)))\n" "svuint32_t svorr_m(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_m)))\n" "svuint64_t svorr_m(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_m)))\n" "svuint16_t svorr_m(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_m)))\n" "svint8_t svorr_m(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_m)))\n" "svint32_t svorr_m(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_m)))\n" "svint64_t svorr_m(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_m)))\n" "svint16_t svorr_m(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_x)))\n" "svuint8_t svorr_x(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_x)))\n" "svuint32_t svorr_x(svbool_t, svuint32_t, svuint32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_x)))\n" "svuint64_t svorr_x(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_x)))\n" "svuint16_t svorr_x(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_x)))\n" "svint8_t svorr_x(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_x)))\n" "svint32_t svorr_x(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_x)))\n" "svint64_t svorr_x(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_x)))\n" "svint16_t svorr_x(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_z)))\n" "svuint8_t svorr_z(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_z)))\n" "svuint32_t svorr_z(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_z)))\n" "svuint64_t svorr_z(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_z)))\n" "svuint16_t svorr_z(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_z)))\n" "svint8_t svorr_z(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_z)))\n" "svint32_t svorr_z(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_z)))\n" "svint64_t svorr_z(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_z)))\n" "svint16_t svorr_z(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u8)))\n" "uint8_t svorv(svbool_t, 
svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u32)))\n" "uint32_t svorv(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u64)))\n" "uint64_t svorv(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u16)))\n" "uint16_t svorv(svbool_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s8)))\n" "int8_t svorv(svbool_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s32)))\n" "int32_t svorv(svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s64)))\n" "int64_t svorv(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s16)))\n" "int16_t svorv(svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpfalse_b)))\n" "svbool_t svpfalse(void);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpfirst_b)))\n" "svbool_t svpfirst(svbool_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32base)))\n" "void svprfb_gather(svbool_t, svuint32_t, enum svprfop);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64base)))\n" "void svprfb_gather(svbool_t, svuint64_t, enum svprfop);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32base_offset)))\n" "void svprfb_gather_offset(svbool_t, svuint32_t, int64_t, enum svprfop);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64base_offset)))\n" "void svprfb_gather_offset(svbool_t, svuint64_t, int64_t, enum svprfop);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_s32offset)))\n" "void svprfb_gather_offset(svbool_t, void const *, svint32_t, enum svprfop);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32offset)))\n" "void 
svprfb_gather_offset(svbool_t, void const *, svuint32_t, enum svprfop);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_s64offset)))\n" "void svprfb_gather_offset(svbool_t, void const *, svint64_t, enum svprfop);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64offset)))\n" "void svprfb_gather_offset(svbool_t, void const *, svuint64_t, enum svprfop);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32base)))\n" "void svprfd_gather(svbool_t, svuint32_t, enum svprfop);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64base)))\n" "void svprfd_gather(svbool_t, svuint64_t, enum svprfop);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32base_index)))\n" "void svprfd_gather_index(svbool_t, svuint32_t, int64_t, enum svprfop);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64base_index)))\n" "void svprfd_gather_index(svbool_t, svuint64_t, int64_t, enum svprfop);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_s32index)))\n" "void svprfd_gather_index(svbool_t, void const *, svint32_t, enum svprfop);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32index)))\n" "void svprfd_gather_index(svbool_t, void const *, svuint32_t, enum svprfop);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_s64index)))\n" "void svprfd_gather_index(svbool_t, void const *, svint64_t, enum svprfop);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64index)))\n" "void svprfd_gather_index(svbool_t, void const *, svuint64_t, enum svprfop);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32base)))\n" "void svprfh_gather(svbool_t, svuint32_t, enum svprfop);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64base)))\n" "void 
svprfh_gather(svbool_t, svuint64_t, enum svprfop);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32base_index)))\n" "void svprfh_gather_index(svbool_t, svuint32_t, int64_t, enum svprfop);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64base_index)))\n" "void svprfh_gather_index(svbool_t, svuint64_t, int64_t, enum svprfop);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_s32index)))\n" "void svprfh_gather_index(svbool_t, void const *, svint32_t, enum svprfop);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32index)))\n" "void svprfh_gather_index(svbool_t, void const *, svuint32_t, enum svprfop);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_s64index)))\n" "void svprfh_gather_index(svbool_t, void const *, svint64_t, enum svprfop);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64index)))\n" "void svprfh_gather_index(svbool_t, void const *, svuint64_t, enum svprfop);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32base)))\n" "void svprfw_gather(svbool_t, svuint32_t, enum svprfop);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64base)))\n" "void svprfw_gather(svbool_t, svuint64_t, enum svprfop);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32base_index)))\n" "void svprfw_gather_index(svbool_t, svuint32_t, int64_t, enum svprfop);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64base_index)))\n" "void svprfw_gather_index(svbool_t, svuint64_t, int64_t, enum svprfop);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_s32index)))\n" "void svprfw_gather_index(svbool_t, void const *, svint32_t, enum svprfop);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32index)))\n" "void 
svprfw_gather_index(svbool_t, void const *, svuint32_t, enum svprfop);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_s64index)))\n" "void svprfw_gather_index(svbool_t, void const *, svint64_t, enum svprfop);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64index)))\n" "void svprfw_gather_index(svbool_t, void const *, svuint64_t, enum svprfop);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8)))\n" "svint8_t svqadd(svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32)))\n" "svint32_t svqadd(svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64)))\n" "svint64_t svqadd(svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16)))\n" "svint16_t svqadd(svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8)))\n" "svuint8_t svqadd(svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32)))\n" "svuint32_t svqadd(svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64)))\n" "svuint64_t svqadd(svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16)))\n" "svuint16_t svqadd(svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8)))\n" "svint8_t svqadd(svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32)))\n" "svint32_t svqadd(svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64)))\n" "svint64_t svqadd(svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16)))\n" "svint16_t svqadd(svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8)))\n" "svuint8_t svqadd(svuint8_t, 
svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32)))\n" "svuint32_t svqadd(svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64)))\n" "svuint64_t svqadd(svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16)))\n" "svuint16_t svqadd(svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_s32)))\n" "int32_t svqdecb(int32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_s64)))\n" "int64_t svqdecb(int64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_u32)))\n" "uint32_t svqdecb(uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_u64)))\n" "uint64_t svqdecb(uint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_s32)))\n" "int32_t svqdecb_pat(int32_t, enum svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_s64)))\n" "int64_t svqdecb_pat(int64_t, enum svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_u32)))\n" "uint32_t svqdecb_pat(uint32_t, enum svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_u64)))\n" "uint64_t svqdecb_pat(uint64_t, enum svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_s32)))\n" "int32_t svqdecd(int32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_s64)))\n" "int64_t svqdecd(int64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_u32)))\n" "uint32_t svqdecd(uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_u64)))\n" "uint64_t svqdecd(uint64_t, uint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_s64)))\n" "svint64_t svqdecd(svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_u64)))\n" "svuint64_t svqdecd(svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_s32)))\n" "int32_t svqdecd_pat(int32_t, enum svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_s64)))\n" "int64_t svqdecd_pat(int64_t, enum svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_u32)))\n" "uint32_t svqdecd_pat(uint32_t, enum svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_u64)))\n" "uint64_t svqdecd_pat(uint64_t, enum svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_s64)))\n" "svint64_t svqdecd_pat(svint64_t, enum svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_u64)))\n" "svuint64_t svqdecd_pat(svuint64_t, enum svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_s32)))\n" "int32_t svqdech(int32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_s64)))\n" "int64_t svqdech(int64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_u32)))\n" "uint32_t svqdech(uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_u64)))\n" "uint64_t svqdech(uint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_s16)))\n" "svint16_t svqdech(svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_u16)))\n" "svuint16_t svqdech(svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_s32)))\n" "int32_t svqdech_pat(int32_t, enum 
svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_s64)))\n" "int64_t svqdech_pat(int64_t, enum svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_u32)))\n" "uint32_t svqdech_pat(uint32_t, enum svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_u64)))\n" "uint64_t svqdech_pat(uint64_t, enum svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_s16)))\n" "svint16_t svqdech_pat(svint16_t, enum svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_u16)))\n" "svuint16_t svqdech_pat(svuint16_t, enum svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b8)))\n" "int32_t svqdecp_b8(int32_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b32)))\n" "int32_t svqdecp_b32(int32_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b64)))\n" "int32_t svqdecp_b64(int32_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b16)))\n" "int32_t svqdecp_b16(int32_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b8)))\n" "int64_t svqdecp_b8(int64_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b32)))\n" "int64_t svqdecp_b32(int64_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b64)))\n" "int64_t svqdecp_b64(int64_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b16)))\n" "int64_t svqdecp_b16(int64_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b8)))\n" "uint32_t svqdecp_b8(uint32_t, svbool_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b32)))\n" "uint32_t svqdecp_b32(uint32_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b64)))\n" "uint32_t svqdecp_b64(uint32_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b16)))\n" "uint32_t svqdecp_b16(uint32_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b8)))\n" "uint64_t svqdecp_b8(uint64_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b32)))\n" "uint64_t svqdecp_b32(uint64_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b64)))\n" "uint64_t svqdecp_b64(uint64_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b16)))\n" "uint64_t svqdecp_b16(uint64_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s32)))\n" "svint32_t svqdecp(svint32_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s64)))\n" "svint64_t svqdecp(svint64_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s16)))\n" "svint16_t svqdecp(svint16_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u32)))\n" "svuint32_t svqdecp(svuint32_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u64)))\n" "svuint64_t svqdecp(svuint64_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u16)))\n" "svuint16_t svqdecp(svuint16_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_s32)))\n" "int32_t svqdecw(int32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_s64)))\n" "int64_t svqdecw(int64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_u32)))\n" 
"uint32_t svqdecw(uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_u64)))\n" "uint64_t svqdecw(uint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_s32)))\n" "svint32_t svqdecw(svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_u32)))\n" "svuint32_t svqdecw(svuint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_s32)))\n" "int32_t svqdecw_pat(int32_t, enum svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_s64)))\n" "int64_t svqdecw_pat(int64_t, enum svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_u32)))\n" "uint32_t svqdecw_pat(uint32_t, enum svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_u64)))\n" "uint64_t svqdecw_pat(uint64_t, enum svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_s32)))\n" "svint32_t svqdecw_pat(svint32_t, enum svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_u32)))\n" "svuint32_t svqdecw_pat(svuint32_t, enum svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_s32)))\n" "int32_t svqincb(int32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_s64)))\n" "int64_t svqincb(int64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_u32)))\n" "uint32_t svqincb(uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_u64)))\n" "uint64_t svqincb(uint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_s32)))\n" "int32_t svqincb_pat(int32_t, enum svpattern, uint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_s64)))\n" "int64_t svqincb_pat(int64_t, enum svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_u32)))\n" "uint32_t svqincb_pat(uint32_t, enum svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_u64)))\n" "uint64_t svqincb_pat(uint64_t, enum svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_s32)))\n" "int32_t svqincd(int32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_s64)))\n" "int64_t svqincd(int64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_u32)))\n" "uint32_t svqincd(uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_u64)))\n" "uint64_t svqincd(uint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_s64)))\n" "svint64_t svqincd(svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_u64)))\n" "svuint64_t svqincd(svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_s32)))\n" "int32_t svqincd_pat(int32_t, enum svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_s64)))\n" "int64_t svqincd_pat(int64_t, enum svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_u32)))\n" "uint32_t svqincd_pat(uint32_t, enum svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_u64)))\n" "uint64_t svqincd_pat(uint64_t, enum svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_s64)))\n" "svint64_t svqincd_pat(svint64_t, enum svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_u64)))\n" 
"svuint64_t svqincd_pat(svuint64_t, enum svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_s32)))\n" "int32_t svqinch(int32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_s64)))\n" "int64_t svqinch(int64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_u32)))\n" "uint32_t svqinch(uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_u64)))\n" "uint64_t svqinch(uint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_s16)))\n" "svint16_t svqinch(svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_u16)))\n" "svuint16_t svqinch(svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_s32)))\n" "int32_t svqinch_pat(int32_t, enum svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_s64)))\n" "int64_t svqinch_pat(int64_t, enum svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_u32)))\n" "uint32_t svqinch_pat(uint32_t, enum svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_u64)))\n" "uint64_t svqinch_pat(uint64_t, enum svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_s16)))\n" "svint16_t svqinch_pat(svint16_t, enum svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_u16)))\n" "svuint16_t svqinch_pat(svuint16_t, enum svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b8)))\n" "int32_t svqincp_b8(int32_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b32)))\n" "int32_t svqincp_b32(int32_t, svbool_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b64)))\n" "int32_t svqincp_b64(int32_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b16)))\n" "int32_t svqincp_b16(int32_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b8)))\n" "int64_t svqincp_b8(int64_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b32)))\n" "int64_t svqincp_b32(int64_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b64)))\n" "int64_t svqincp_b64(int64_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b16)))\n" "int64_t svqincp_b16(int64_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b8)))\n" "uint32_t svqincp_b8(uint32_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b32)))\n" "uint32_t svqincp_b32(uint32_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b64)))\n" "uint32_t svqincp_b64(uint32_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b16)))\n" "uint32_t svqincp_b16(uint32_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b8)))\n" "uint64_t svqincp_b8(uint64_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b32)))\n" "uint64_t svqincp_b32(uint64_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b64)))\n" "uint64_t svqincp_b64(uint64_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b16)))\n" "uint64_t svqincp_b16(uint64_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s32)))\n" "svint32_t svqincp(svint32_t, svbool_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s64)))\n" "svint64_t svqincp(svint64_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s16)))\n" "svint16_t svqincp(svint16_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u32)))\n" "svuint32_t svqincp(svuint32_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u64)))\n" "svuint64_t svqincp(svuint64_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u16)))\n" "svuint16_t svqincp(svuint16_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_s32)))\n" "int32_t svqincw(int32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_s64)))\n" "int64_t svqincw(int64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_u32)))\n" "uint32_t svqincw(uint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_u64)))\n" "uint64_t svqincw(uint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_s32)))\n" "svint32_t svqincw(svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_u32)))\n" "svuint32_t svqincw(svuint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_s32)))\n" "int32_t svqincw_pat(int32_t, enum svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_s64)))\n" "int64_t svqincw_pat(int64_t, enum svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_u32)))\n" "uint32_t svqincw_pat(uint32_t, enum svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_u64)))\n" "uint64_t svqincw_pat(uint64_t, enum svpattern, uint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_s32)))\n" "svint32_t svqincw_pat(svint32_t, enum svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_u32)))\n" "svuint32_t svqincw_pat(svuint32_t, enum svpattern, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8)))\n" "svint8_t svqsub(svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32)))\n" "svint32_t svqsub(svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64)))\n" "svint64_t svqsub(svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16)))\n" "svint16_t svqsub(svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8)))\n" "svuint8_t svqsub(svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32)))\n" "svuint32_t svqsub(svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64)))\n" "svuint64_t svqsub(svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16)))\n" "svuint16_t svqsub(svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8)))\n" "svint8_t svqsub(svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32)))\n" "svint32_t svqsub(svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64)))\n" "svint64_t svqsub(svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16)))\n" "svint16_t svqsub(svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8)))\n" "svuint8_t svqsub(svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32)))\n" "svuint32_t svqsub(svuint32_t, 
svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64)))\n" "svuint64_t svqsub(svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16)))\n" "svuint16_t svqsub(svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_m)))\n" "svuint8_t svrbit_m(svuint8_t, svbool_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_m)))\n" "svuint32_t svrbit_m(svuint32_t, svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_m)))\n" "svuint64_t svrbit_m(svuint64_t, svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_m)))\n" "svuint16_t svrbit_m(svuint16_t, svbool_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_m)))\n" "svint8_t svrbit_m(svint8_t, svbool_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_m)))\n" "svint32_t svrbit_m(svint32_t, svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_m)))\n" "svint64_t svrbit_m(svint64_t, svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_m)))\n" "svint16_t svrbit_m(svint16_t, svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_x)))\n" "svuint8_t svrbit_x(svbool_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_x)))\n" "svuint32_t svrbit_x(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_x)))\n" "svuint64_t svrbit_x(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_x)))\n" "svuint16_t svrbit_x(svbool_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_x)))\n" "svint8_t svrbit_x(svbool_t, svint8_t);\n" 
"__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_x)))\n" "svint32_t svrbit_x(svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_x)))\n" "svint64_t svrbit_x(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_x)))\n" "svint16_t svrbit_x(svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_z)))\n" "svuint8_t svrbit_z(svbool_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_z)))\n" "svuint32_t svrbit_z(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_z)))\n" "svuint64_t svrbit_z(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_z)))\n" "svuint16_t svrbit_z(svbool_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_z)))\n" "svint8_t svrbit_z(svbool_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_z)))\n" "svint32_t svrbit_z(svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_z)))\n" "svint64_t svrbit_z(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_z)))\n" "svint16_t svrbit_z(svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f64)))\n" "svfloat64_t svrecpe(svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f32)))\n" "svfloat32_t svrecpe(svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f16)))\n" "svfloat16_t svrecpe(svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f64)))\n" "svfloat64_t svrecps(svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f32)))\n" "svfloat32_t svrecps(svfloat32_t, 
svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f16)))\n" "svfloat16_t svrecps(svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_m)))\n" "svfloat64_t svrecpx_m(svfloat64_t, svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_m)))\n" "svfloat32_t svrecpx_m(svfloat32_t, svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_m)))\n" "svfloat16_t svrecpx_m(svfloat16_t, svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_x)))\n" "svfloat64_t svrecpx_x(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_x)))\n" "svfloat32_t svrecpx_x(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_x)))\n" "svfloat16_t svrecpx_x(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_z)))\n" "svfloat64_t svrecpx_z(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_z)))\n" "svfloat32_t svrecpx_z(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_z)))\n" "svfloat16_t svrecpx_z(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u8)))\n" "svuint8_t svrev(svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u32)))\n" "svuint32_t svrev(svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u64)))\n" "svuint64_t svrev(svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u16)))\n" "svuint16_t svrev(svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s8)))\n" "svint8_t svrev(svint8_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f64)))\n" "svfloat64_t svrev(svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f32)))\n" "svfloat32_t svrev(svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f16)))\n" "svfloat16_t svrev(svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s32)))\n" "svint32_t svrev(svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s64)))\n" "svint64_t svrev(svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s16)))\n" "svint16_t svrev(svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_m)))\n" "svuint32_t svrevb_m(svuint32_t, svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_m)))\n" "svuint64_t svrevb_m(svuint64_t, svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_m)))\n" "svuint16_t svrevb_m(svuint16_t, svbool_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_m)))\n" "svint32_t svrevb_m(svint32_t, svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_m)))\n" "svint64_t svrevb_m(svint64_t, svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_m)))\n" "svint16_t svrevb_m(svint16_t, svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_x)))\n" "svuint32_t svrevb_x(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_x)))\n" "svuint64_t svrevb_x(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_x)))\n" "svuint16_t svrevb_x(svbool_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_x)))\n" "svint32_t svrevb_x(svbool_t, 
svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_x)))\n" "svint64_t svrevb_x(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_x)))\n" "svint16_t svrevb_x(svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_z)))\n" "svuint32_t svrevb_z(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_z)))\n" "svuint64_t svrevb_z(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_z)))\n" "svuint16_t svrevb_z(svbool_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_z)))\n" "svint32_t svrevb_z(svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_z)))\n" "svint64_t svrevb_z(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_z)))\n" "svint16_t svrevb_z(svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_m)))\n" "svuint32_t svrevh_m(svuint32_t, svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_m)))\n" "svuint64_t svrevh_m(svuint64_t, svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_m)))\n" "svint32_t svrevh_m(svint32_t, svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_m)))\n" "svint64_t svrevh_m(svint64_t, svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_x)))\n" "svuint32_t svrevh_x(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_x)))\n" "svuint64_t svrevh_x(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_x)))\n" "svint32_t svrevh_x(svbool_t, svint32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_x)))\n" "svint64_t svrevh_x(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_z)))\n" "svuint32_t svrevh_z(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_z)))\n" "svuint64_t svrevh_z(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_z)))\n" "svint32_t svrevh_z(svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_z)))\n" "svint64_t svrevh_z(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_m)))\n" "svuint64_t svrevw_m(svuint64_t, svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_m)))\n" "svint64_t svrevw_m(svint64_t, svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_x)))\n" "svuint64_t svrevw_x(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_x)))\n" "svint64_t svrevw_x(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_z)))\n" "svuint64_t svrevw_z(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_z)))\n" "svint64_t svrevw_z(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_m)))\n" "svfloat64_t svrinta_m(svfloat64_t, svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_m)))\n" "svfloat32_t svrinta_m(svfloat32_t, svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_m)))\n" "svfloat16_t svrinta_m(svfloat16_t, svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_x)))\n" "svfloat64_t svrinta_x(svbool_t, svfloat64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_x)))\n" "svfloat32_t svrinta_x(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_x)))\n" "svfloat16_t svrinta_x(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_z)))\n" "svfloat64_t svrinta_z(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_z)))\n" "svfloat32_t svrinta_z(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_z)))\n" "svfloat16_t svrinta_z(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_m)))\n" "svfloat64_t svrinti_m(svfloat64_t, svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_m)))\n" "svfloat32_t svrinti_m(svfloat32_t, svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_m)))\n" "svfloat16_t svrinti_m(svfloat16_t, svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_x)))\n" "svfloat64_t svrinti_x(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_x)))\n" "svfloat32_t svrinti_x(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_x)))\n" "svfloat16_t svrinti_x(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_z)))\n" "svfloat64_t svrinti_z(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_z)))\n" "svfloat32_t svrinti_z(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_z)))\n" "svfloat16_t svrinti_z(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_m)))\n" "svfloat64_t svrintm_m(svfloat64_t, 
svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_m)))\n" "svfloat32_t svrintm_m(svfloat32_t, svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_m)))\n" "svfloat16_t svrintm_m(svfloat16_t, svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_x)))\n" "svfloat64_t svrintm_x(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_x)))\n" "svfloat32_t svrintm_x(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_x)))\n" "svfloat16_t svrintm_x(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_z)))\n" "svfloat64_t svrintm_z(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_z)))\n" "svfloat32_t svrintm_z(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_z)))\n" "svfloat16_t svrintm_z(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_m)))\n" "svfloat64_t svrintn_m(svfloat64_t, svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_m)))\n" "svfloat32_t svrintn_m(svfloat32_t, svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_m)))\n" "svfloat16_t svrintn_m(svfloat16_t, svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_x)))\n" "svfloat64_t svrintn_x(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_x)))\n" "svfloat32_t svrintn_x(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_x)))\n" "svfloat16_t svrintn_x(svbool_t, svfloat16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_z)))\n" "svfloat64_t svrintn_z(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_z)))\n" "svfloat32_t svrintn_z(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_z)))\n" "svfloat16_t svrintn_z(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_m)))\n" "svfloat64_t svrintp_m(svfloat64_t, svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_m)))\n" "svfloat32_t svrintp_m(svfloat32_t, svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_m)))\n" "svfloat16_t svrintp_m(svfloat16_t, svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_x)))\n" "svfloat64_t svrintp_x(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_x)))\n" "svfloat32_t svrintp_x(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_x)))\n" "svfloat16_t svrintp_x(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_z)))\n" "svfloat64_t svrintp_z(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_z)))\n" "svfloat32_t svrintp_z(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_z)))\n" "svfloat16_t svrintp_z(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_m)))\n" "svfloat64_t svrintx_m(svfloat64_t, svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_m)))\n" "svfloat32_t svrintx_m(svfloat32_t, svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_m)))\n" 
"svfloat16_t svrintx_m(svfloat16_t, svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_x)))\n" "svfloat64_t svrintx_x(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_x)))\n" "svfloat32_t svrintx_x(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_x)))\n" "svfloat16_t svrintx_x(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_z)))\n" "svfloat64_t svrintx_z(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_z)))\n" "svfloat32_t svrintx_z(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_z)))\n" "svfloat16_t svrintx_z(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_m)))\n" "svfloat64_t svrintz_m(svfloat64_t, svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_m)))\n" "svfloat32_t svrintz_m(svfloat32_t, svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_m)))\n" "svfloat16_t svrintz_m(svfloat16_t, svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_x)))\n" "svfloat64_t svrintz_x(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_x)))\n" "svfloat32_t svrintz_x(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_x)))\n" "svfloat16_t svrintz_x(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_z)))\n" "svfloat64_t svrintz_z(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_z)))\n" "svfloat32_t svrintz_z(svbool_t, svfloat32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_z)))\n" "svfloat16_t svrintz_z(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f64)))\n" "svfloat64_t svrsqrte(svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f32)))\n" "svfloat32_t svrsqrte(svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f16)))\n" "svfloat16_t svrsqrte(svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f64)))\n" "svfloat64_t svrsqrts(svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f32)))\n" "svfloat32_t svrsqrts(svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f16)))\n" "svfloat16_t svrsqrts(svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_m)))\n" "svfloat64_t svscale_m(svbool_t, svfloat64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_m)))\n" "svfloat32_t svscale_m(svbool_t, svfloat32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_m)))\n" "svfloat16_t svscale_m(svbool_t, svfloat16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_x)))\n" "svfloat64_t svscale_x(svbool_t, svfloat64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_x)))\n" "svfloat32_t svscale_x(svbool_t, svfloat32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_x)))\n" "svfloat16_t svscale_x(svbool_t, svfloat16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_z)))\n" "svfloat64_t svscale_z(svbool_t, svfloat64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_z)))\n" "svfloat32_t 
svscale_z(svbool_t, svfloat32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_z)))\n" "svfloat16_t svscale_z(svbool_t, svfloat16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_m)))\n" "svfloat64_t svscale_m(svbool_t, svfloat64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_m)))\n" "svfloat32_t svscale_m(svbool_t, svfloat32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_m)))\n" "svfloat16_t svscale_m(svbool_t, svfloat16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_x)))\n" "svfloat64_t svscale_x(svbool_t, svfloat64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_x)))\n" "svfloat32_t svscale_x(svbool_t, svfloat32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_x)))\n" "svfloat16_t svscale_x(svbool_t, svfloat16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_z)))\n" "svfloat64_t svscale_z(svbool_t, svfloat64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_z)))\n" "svfloat32_t svscale_z(svbool_t, svfloat32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_z)))\n" "svfloat16_t svscale_z(svbool_t, svfloat16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_b)))\n" "svbool_t svsel(svbool_t, svbool_t, svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u8)))\n" "svuint8_t svsel(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u32)))\n" "svuint32_t svsel(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u64)))\n" "svuint64_t svsel(svbool_t, svuint64_t, svuint64_t);\n" 
"__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u16)))\n" "svuint16_t svsel(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s8)))\n" "svint8_t svsel(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f64)))\n" "svfloat64_t svsel(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f32)))\n" "svfloat32_t svsel(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f16)))\n" "svfloat16_t svsel(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s32)))\n" "svint32_t svsel(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s64)))\n" "svint64_t svsel(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s16)))\n" "svint16_t svsel(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u8)))\n" "svuint8x2_t svset2(svuint8x2_t, uint64_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u32)))\n" "svuint32x2_t svset2(svuint32x2_t, uint64_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u64)))\n" "svuint64x2_t svset2(svuint64x2_t, uint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u16)))\n" "svuint16x2_t svset2(svuint16x2_t, uint64_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s8)))\n" "svint8x2_t svset2(svint8x2_t, uint64_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f64)))\n" "svfloat64x2_t svset2(svfloat64x2_t, uint64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f32)))\n" "svfloat32x2_t 
svset2(svfloat32x2_t, uint64_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f16)))\n" "svfloat16x2_t svset2(svfloat16x2_t, uint64_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s32)))\n" "svint32x2_t svset2(svint32x2_t, uint64_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s64)))\n" "svint64x2_t svset2(svint64x2_t, uint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s16)))\n" "svint16x2_t svset2(svint16x2_t, uint64_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u8)))\n" "svuint8x3_t svset3(svuint8x3_t, uint64_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u32)))\n" "svuint32x3_t svset3(svuint32x3_t, uint64_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u64)))\n" "svuint64x3_t svset3(svuint64x3_t, uint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u16)))\n" "svuint16x3_t svset3(svuint16x3_t, uint64_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s8)))\n" "svint8x3_t svset3(svint8x3_t, uint64_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f64)))\n" "svfloat64x3_t svset3(svfloat64x3_t, uint64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f32)))\n" "svfloat32x3_t svset3(svfloat32x3_t, uint64_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f16)))\n" "svfloat16x3_t svset3(svfloat16x3_t, uint64_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s32)))\n" "svint32x3_t svset3(svint32x3_t, uint64_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s64)))\n" "svint64x3_t svset3(svint64x3_t, uint64_t, svint64_t);\n" 
"__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s16)))\n" "svint16x3_t svset3(svint16x3_t, uint64_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u8)))\n" "svuint8x4_t svset4(svuint8x4_t, uint64_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u32)))\n" "svuint32x4_t svset4(svuint32x4_t, uint64_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u64)))\n" "svuint64x4_t svset4(svuint64x4_t, uint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u16)))\n" "svuint16x4_t svset4(svuint16x4_t, uint64_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s8)))\n" "svint8x4_t svset4(svint8x4_t, uint64_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f64)))\n" "svfloat64x4_t svset4(svfloat64x4_t, uint64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f32)))\n" "svfloat32x4_t svset4(svfloat32x4_t, uint64_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f16)))\n" "svfloat16x4_t svset4(svfloat16x4_t, uint64_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s32)))\n" "svint32x4_t svset4(svint32x4_t, uint64_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s64)))\n" "svint64x4_t svset4(svint64x4_t, uint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s16)))\n" "svint16x4_t svset4(svint16x4_t, uint64_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u8)))\n" "svuint8_t svsplice(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u32)))\n" "svuint32_t svsplice(svbool_t, svuint32_t, svuint32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u64)))\n" "svuint64_t svsplice(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u16)))\n" "svuint16_t svsplice(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s8)))\n" "svint8_t svsplice(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f64)))\n" "svfloat64_t svsplice(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f32)))\n" "svfloat32_t svsplice(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f16)))\n" "svfloat16_t svsplice(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s32)))\n" "svint32_t svsplice(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s64)))\n" "svint64_t svsplice(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s16)))\n" "svint16_t svsplice(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_m)))\n" "svfloat64_t svsqrt_m(svfloat64_t, svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_m)))\n" "svfloat32_t svsqrt_m(svfloat32_t, svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_m)))\n" "svfloat16_t svsqrt_m(svfloat16_t, svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_x)))\n" "svfloat64_t svsqrt_x(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_x)))\n" "svfloat32_t svsqrt_x(svbool_t, svfloat32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_x)))\n" "svfloat16_t svsqrt_x(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_z)))\n" "svfloat64_t svsqrt_z(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_z)))\n" "svfloat32_t svsqrt_z(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_z)))\n" "svfloat16_t svsqrt_z(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u8)))\n" "void svst1(svbool_t, uint8_t *, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u32)))\n" "void svst1(svbool_t, uint32_t *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u64)))\n" "void svst1(svbool_t, uint64_t *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u16)))\n" "void svst1(svbool_t, uint16_t *, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s8)))\n" "void svst1(svbool_t, int8_t *, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f64)))\n" "void svst1(svbool_t, float64_t *, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f32)))\n" "void svst1(svbool_t, float32_t *, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f16)))\n" "void svst1(svbool_t, float16_t *, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s32)))\n" "void svst1(svbool_t, int32_t *, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s64)))\n" "void svst1(svbool_t, int64_t *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s16)))\n" "void svst1(svbool_t, int16_t *, svint16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_u32)))\n" "void svst1_scatter_index(svbool_t, svuint32_t, int64_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_u64)))\n" "void svst1_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_f64)))\n" "void svst1_scatter_index(svbool_t, svuint64_t, int64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_f32)))\n" "void svst1_scatter_index(svbool_t, svuint32_t, int64_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_s32)))\n" "void svst1_scatter_index(svbool_t, svuint32_t, int64_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_s64)))\n" "void svst1_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_u32)))\n" "void svst1_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_u64)))\n" "void svst1_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_f64)))\n" "void svst1_scatter_offset(svbool_t, svuint64_t, int64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_f32)))\n" "void svst1_scatter_offset(svbool_t, svuint32_t, int64_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_s32)))\n" "void svst1_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_s64)))\n" "void svst1_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_u32)))\n" "void svst1_scatter(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_u64)))\n" "void svst1_scatter(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_f64)))\n" "void svst1_scatter(svbool_t, svuint64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_f32)))\n" "void svst1_scatter(svbool_t, svuint32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_s32)))\n" "void svst1_scatter(svbool_t, svuint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_s64)))\n" "void svst1_scatter(svbool_t, svuint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_u32)))\n" "void svst1_scatter_index(svbool_t, uint32_t *, svint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_f32)))\n" "void svst1_scatter_index(svbool_t, float32_t *, svint32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_s32)))\n" "void svst1_scatter_index(svbool_t, int32_t *, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_u32)))\n" "void svst1_scatter_index(svbool_t, uint32_t *, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_f32)))\n" "void svst1_scatter_index(svbool_t, float32_t *, svuint32_t, svfloat32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_s32)))\n" "void svst1_scatter_index(svbool_t, int32_t *, svuint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_u64)))\n" "void svst1_scatter_index(svbool_t, uint64_t *, svint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_f64)))\n" "void svst1_scatter_index(svbool_t, float64_t *, svint64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_s64)))\n" "void svst1_scatter_index(svbool_t, int64_t *, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_u64)))\n" "void svst1_scatter_index(svbool_t, uint64_t *, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_f64)))\n" "void svst1_scatter_index(svbool_t, float64_t *, svuint64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_s64)))\n" "void svst1_scatter_index(svbool_t, int64_t *, svuint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_u32)))\n" "void svst1_scatter_offset(svbool_t, uint32_t *, svint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_f32)))\n" "void svst1_scatter_offset(svbool_t, float32_t *, svint32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_s32)))\n" "void svst1_scatter_offset(svbool_t, int32_t *, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_u32)))\n" "void svst1_scatter_offset(svbool_t, uint32_t *, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_f32)))\n" "void 
svst1_scatter_offset(svbool_t, float32_t *, svuint32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_s32)))\n" "void svst1_scatter_offset(svbool_t, int32_t *, svuint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_u64)))\n" "void svst1_scatter_offset(svbool_t, uint64_t *, svint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_f64)))\n" "void svst1_scatter_offset(svbool_t, float64_t *, svint64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_s64)))\n" "void svst1_scatter_offset(svbool_t, int64_t *, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_u64)))\n" "void svst1_scatter_offset(svbool_t, uint64_t *, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_f64)))\n" "void svst1_scatter_offset(svbool_t, float64_t *, svuint64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_s64)))\n" "void svst1_scatter_offset(svbool_t, int64_t *, svuint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u8)))\n" "void svst1_vnum(svbool_t, uint8_t *, int64_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u32)))\n" "void svst1_vnum(svbool_t, uint32_t *, int64_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u64)))\n" "void svst1_vnum(svbool_t, uint64_t *, int64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u16)))\n" "void svst1_vnum(svbool_t, uint16_t *, int64_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s8)))\n" "void svst1_vnum(svbool_t, int8_t *, int64_t, 
svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f64)))\n" "void svst1_vnum(svbool_t, float64_t *, int64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f32)))\n" "void svst1_vnum(svbool_t, float32_t *, int64_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f16)))\n" "void svst1_vnum(svbool_t, float16_t *, int64_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s32)))\n" "void svst1_vnum(svbool_t, int32_t *, int64_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s64)))\n" "void svst1_vnum(svbool_t, int64_t *, int64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s16)))\n" "void svst1_vnum(svbool_t, int16_t *, int64_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s32)))\n" "void svst1b(svbool_t, int8_t *, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s64)))\n" "void svst1b(svbool_t, int8_t *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s16)))\n" "void svst1b(svbool_t, int8_t *, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u32)))\n" "void svst1b(svbool_t, uint8_t *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u64)))\n" "void svst1b(svbool_t, uint8_t *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u16)))\n" "void svst1b(svbool_t, uint8_t *, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_offset_u32)))\n" "void svst1b_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_offset_u64)))\n" "void svst1b_scatter_offset(svbool_t, svuint64_t, 
int64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_offset_s32)))\n" "void svst1b_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_offset_s64)))\n" "void svst1b_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_u32)))\n" "void svst1b_scatter(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_u64)))\n" "void svst1b_scatter(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_s32)))\n" "void svst1b_scatter(svbool_t, svuint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_s64)))\n" "void svst1b_scatter(svbool_t, svuint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s32offset_s32)))\n" "void svst1b_scatter_offset(svbool_t, int8_t *, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s32offset_u32)))\n" "void svst1b_scatter_offset(svbool_t, uint8_t *, svint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32offset_s32)))\n" "void svst1b_scatter_offset(svbool_t, int8_t *, svuint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32offset_u32)))\n" "void svst1b_scatter_offset(svbool_t, uint8_t *, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s64offset_s64)))\n" "void svst1b_scatter_offset(svbool_t, int8_t *, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s64offset_u64)))\n" "void svst1b_scatter_offset(svbool_t, uint8_t *, 
svint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64offset_s64)))\n" "void svst1b_scatter_offset(svbool_t, int8_t *, svuint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64offset_u64)))\n" "void svst1b_scatter_offset(svbool_t, uint8_t *, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s32)))\n" "void svst1b_vnum(svbool_t, int8_t *, int64_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s64)))\n" "void svst1b_vnum(svbool_t, int8_t *, int64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s16)))\n" "void svst1b_vnum(svbool_t, int8_t *, int64_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u32)))\n" "void svst1b_vnum(svbool_t, uint8_t *, int64_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u64)))\n" "void svst1b_vnum(svbool_t, uint8_t *, int64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u16)))\n" "void svst1b_vnum(svbool_t, uint8_t *, int64_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_s32)))\n" "void svst1h(svbool_t, int16_t *, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_s64)))\n" "void svst1h(svbool_t, int16_t *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_u32)))\n" "void svst1h(svbool_t, uint16_t *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_u64)))\n" "void svst1h(svbool_t, uint16_t *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_index_u32)))\n" "void svst1h_scatter_index(svbool_t, svuint32_t, int64_t, svuint32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_index_u64)))\n" "void svst1h_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_index_s32)))\n" "void svst1h_scatter_index(svbool_t, svuint32_t, int64_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_index_s64)))\n" "void svst1h_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_offset_u32)))\n" "void svst1h_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_offset_u64)))\n" "void svst1h_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_offset_s32)))\n" "void svst1h_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_offset_s64)))\n" "void svst1h_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_u32)))\n" "void svst1h_scatter(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_u64)))\n" "void svst1h_scatter(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_s32)))\n" "void svst1h_scatter(svbool_t, svuint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_s64)))\n" "void svst1h_scatter(svbool_t, svuint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32index_s32)))\n" "void svst1h_scatter_index(svbool_t, int16_t *, svint32_t, 
svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32index_u32)))\n" "void svst1h_scatter_index(svbool_t, uint16_t *, svint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32index_s32)))\n" "void svst1h_scatter_index(svbool_t, int16_t *, svuint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32index_u32)))\n" "void svst1h_scatter_index(svbool_t, uint16_t *, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64index_s64)))\n" "void svst1h_scatter_index(svbool_t, int16_t *, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64index_u64)))\n" "void svst1h_scatter_index(svbool_t, uint16_t *, svint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64index_s64)))\n" "void svst1h_scatter_index(svbool_t, int16_t *, svuint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64index_u64)))\n" "void svst1h_scatter_index(svbool_t, uint16_t *, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32offset_s32)))\n" "void svst1h_scatter_offset(svbool_t, int16_t *, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32offset_u32)))\n" "void svst1h_scatter_offset(svbool_t, uint16_t *, svint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32offset_s32)))\n" "void svst1h_scatter_offset(svbool_t, int16_t *, svuint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32offset_u32)))\n" "void svst1h_scatter_offset(svbool_t, uint16_t *, svuint32_t, svuint32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64offset_s64)))\n" "void svst1h_scatter_offset(svbool_t, int16_t *, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64offset_u64)))\n" "void svst1h_scatter_offset(svbool_t, uint16_t *, svint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64offset_s64)))\n" "void svst1h_scatter_offset(svbool_t, int16_t *, svuint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64offset_u64)))\n" "void svst1h_scatter_offset(svbool_t, uint16_t *, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_s32)))\n" "void svst1h_vnum(svbool_t, int16_t *, int64_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_s64)))\n" "void svst1h_vnum(svbool_t, int16_t *, int64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_u32)))\n" "void svst1h_vnum(svbool_t, uint16_t *, int64_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_u64)))\n" "void svst1h_vnum(svbool_t, uint16_t *, int64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_s64)))\n" "void svst1w(svbool_t, int32_t *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_u64)))\n" "void svst1w(svbool_t, uint32_t *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_index_u64)))\n" "void svst1w_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_index_s64)))\n" "void svst1w_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_offset_u64)))\n" "void 
svst1w_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_offset_s64)))\n" "void svst1w_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_u64)))\n" "void svst1w_scatter(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_s64)))\n" "void svst1w_scatter(svbool_t, svuint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64index_s64)))\n" "void svst1w_scatter_index(svbool_t, int32_t *, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64index_u64)))\n" "void svst1w_scatter_index(svbool_t, uint32_t *, svint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64index_s64)))\n" "void svst1w_scatter_index(svbool_t, int32_t *, svuint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64index_u64)))\n" "void svst1w_scatter_index(svbool_t, uint32_t *, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64offset_s64)))\n" "void svst1w_scatter_offset(svbool_t, int32_t *, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64offset_u64)))\n" "void svst1w_scatter_offset(svbool_t, uint32_t *, svint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64offset_s64)))\n" "void svst1w_scatter_offset(svbool_t, int32_t *, svuint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64offset_u64)))\n" "void svst1w_scatter_offset(svbool_t, uint32_t *, svuint64_t, svuint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_vnum_s64)))\n" "void svst1w_vnum(svbool_t, int32_t *, int64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_vnum_u64)))\n" "void svst1w_vnum(svbool_t, uint32_t *, int64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u8)))\n" "void svst2(svbool_t, uint8_t *, svuint8x2_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u32)))\n" "void svst2(svbool_t, uint32_t *, svuint32x2_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u64)))\n" "void svst2(svbool_t, uint64_t *, svuint64x2_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u16)))\n" "void svst2(svbool_t, uint16_t *, svuint16x2_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s8)))\n" "void svst2(svbool_t, int8_t *, svint8x2_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f64)))\n" "void svst2(svbool_t, float64_t *, svfloat64x2_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f32)))\n" "void svst2(svbool_t, float32_t *, svfloat32x2_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f16)))\n" "void svst2(svbool_t, float16_t *, svfloat16x2_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s32)))\n" "void svst2(svbool_t, int32_t *, svint32x2_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s64)))\n" "void svst2(svbool_t, int64_t *, svint64x2_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s16)))\n" "void svst2(svbool_t, int16_t *, svint16x2_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u8)))\n" "void svst2_vnum(svbool_t, uint8_t *, int64_t, svuint8x2_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u32)))\n" "void svst2_vnum(svbool_t, uint32_t *, int64_t, 
svuint32x2_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u64)))\n" "void svst2_vnum(svbool_t, uint64_t *, int64_t, svuint64x2_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u16)))\n" "void svst2_vnum(svbool_t, uint16_t *, int64_t, svuint16x2_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s8)))\n" "void svst2_vnum(svbool_t, int8_t *, int64_t, svint8x2_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f64)))\n" "void svst2_vnum(svbool_t, float64_t *, int64_t, svfloat64x2_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f32)))\n" "void svst2_vnum(svbool_t, float32_t *, int64_t, svfloat32x2_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f16)))\n" "void svst2_vnum(svbool_t, float16_t *, int64_t, svfloat16x2_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s32)))\n" "void svst2_vnum(svbool_t, int32_t *, int64_t, svint32x2_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s64)))\n" "void svst2_vnum(svbool_t, int64_t *, int64_t, svint64x2_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s16)))\n" "void svst2_vnum(svbool_t, int16_t *, int64_t, svint16x2_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u8)))\n" "void svst3(svbool_t, uint8_t *, svuint8x3_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u32)))\n" "void svst3(svbool_t, uint32_t *, svuint32x3_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u64)))\n" "void svst3(svbool_t, uint64_t *, svuint64x3_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u16)))\n" "void svst3(svbool_t, uint16_t *, svuint16x3_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s8)))\n" "void svst3(svbool_t, int8_t *, svint8x3_t);\n" 
"__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f64)))\n" "void svst3(svbool_t, float64_t *, svfloat64x3_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f32)))\n" "void svst3(svbool_t, float32_t *, svfloat32x3_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f16)))\n" "void svst3(svbool_t, float16_t *, svfloat16x3_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s32)))\n" "void svst3(svbool_t, int32_t *, svint32x3_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s64)))\n" "void svst3(svbool_t, int64_t *, svint64x3_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s16)))\n" "void svst3(svbool_t, int16_t *, svint16x3_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u8)))\n" "void svst3_vnum(svbool_t, uint8_t *, int64_t, svuint8x3_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u32)))\n" "void svst3_vnum(svbool_t, uint32_t *, int64_t, svuint32x3_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u64)))\n" "void svst3_vnum(svbool_t, uint64_t *, int64_t, svuint64x3_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u16)))\n" "void svst3_vnum(svbool_t, uint16_t *, int64_t, svuint16x3_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s8)))\n" "void svst3_vnum(svbool_t, int8_t *, int64_t, svint8x3_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f64)))\n" "void svst3_vnum(svbool_t, float64_t *, int64_t, svfloat64x3_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f32)))\n" "void svst3_vnum(svbool_t, float32_t *, int64_t, svfloat32x3_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f16)))\n" "void svst3_vnum(svbool_t, float16_t *, int64_t, svfloat16x3_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s32)))\n" "void svst3_vnum(svbool_t, int32_t *, int64_t, svint32x3_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s64)))\n" "void svst3_vnum(svbool_t, int64_t *, int64_t, svint64x3_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s16)))\n" "void svst3_vnum(svbool_t, int16_t *, int64_t, svint16x3_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u8)))\n" "void svst4(svbool_t, uint8_t *, svuint8x4_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u32)))\n" "void svst4(svbool_t, uint32_t *, svuint32x4_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u64)))\n" "void svst4(svbool_t, uint64_t *, svuint64x4_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u16)))\n" "void svst4(svbool_t, uint16_t *, svuint16x4_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s8)))\n" "void svst4(svbool_t, int8_t *, svint8x4_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f64)))\n" "void svst4(svbool_t, float64_t *, svfloat64x4_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f32)))\n" "void svst4(svbool_t, float32_t *, svfloat32x4_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f16)))\n" "void svst4(svbool_t, float16_t *, svfloat16x4_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s32)))\n" "void svst4(svbool_t, int32_t *, svint32x4_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s64)))\n" "void svst4(svbool_t, int64_t *, svint64x4_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s16)))\n" "void svst4(svbool_t, int16_t *, svint16x4_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u8)))\n" "void svst4_vnum(svbool_t, uint8_t *, int64_t, 
svuint8x4_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u32)))\n" "void svst4_vnum(svbool_t, uint32_t *, int64_t, svuint32x4_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u64)))\n" "void svst4_vnum(svbool_t, uint64_t *, int64_t, svuint64x4_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u16)))\n" "void svst4_vnum(svbool_t, uint16_t *, int64_t, svuint16x4_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s8)))\n" "void svst4_vnum(svbool_t, int8_t *, int64_t, svint8x4_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f64)))\n" "void svst4_vnum(svbool_t, float64_t *, int64_t, svfloat64x4_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f32)))\n" "void svst4_vnum(svbool_t, float32_t *, int64_t, svfloat32x4_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f16)))\n" "void svst4_vnum(svbool_t, float16_t *, int64_t, svfloat16x4_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s32)))\n" "void svst4_vnum(svbool_t, int32_t *, int64_t, svint32x4_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s64)))\n" "void svst4_vnum(svbool_t, int64_t *, int64_t, svint64x4_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s16)))\n" "void svst4_vnum(svbool_t, int16_t *, int64_t, svint16x4_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u8)))\n" "void svstnt1(svbool_t, uint8_t *, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u32)))\n" "void svstnt1(svbool_t, uint32_t *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u64)))\n" "void svstnt1(svbool_t, uint64_t *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u16)))\n" "void svstnt1(svbool_t, 
uint16_t *, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s8)))\n" "void svstnt1(svbool_t, int8_t *, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f64)))\n" "void svstnt1(svbool_t, float64_t *, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f32)))\n" "void svstnt1(svbool_t, float32_t *, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f16)))\n" "void svstnt1(svbool_t, float16_t *, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s32)))\n" "void svstnt1(svbool_t, int32_t *, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s64)))\n" "void svstnt1(svbool_t, int64_t *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s16)))\n" "void svstnt1(svbool_t, int16_t *, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u8)))\n" "void svstnt1_vnum(svbool_t, uint8_t *, int64_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u32)))\n" "void svstnt1_vnum(svbool_t, uint32_t *, int64_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u64)))\n" "void svstnt1_vnum(svbool_t, uint64_t *, int64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u16)))\n" "void svstnt1_vnum(svbool_t, uint16_t *, int64_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s8)))\n" "void svstnt1_vnum(svbool_t, int8_t *, int64_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f64)))\n" "void svstnt1_vnum(svbool_t, float64_t *, int64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f32)))\n" "void svstnt1_vnum(svbool_t, float32_t *, int64_t, svfloat32_t);\n" 
"__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f16)))\n" "void svstnt1_vnum(svbool_t, float16_t *, int64_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s32)))\n" "void svstnt1_vnum(svbool_t, int32_t *, int64_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s64)))\n" "void svstnt1_vnum(svbool_t, int64_t *, int64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s16)))\n" "void svstnt1_vnum(svbool_t, int16_t *, int64_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_m)))\n" "svfloat64_t svsub_m(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_m)))\n" "svfloat32_t svsub_m(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_m)))\n" "svfloat16_t svsub_m(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_x)))\n" "svfloat64_t svsub_x(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_x)))\n" "svfloat32_t svsub_x(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_x)))\n" "svfloat16_t svsub_x(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_z)))\n" "svfloat64_t svsub_z(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_z)))\n" "svfloat32_t svsub_z(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_z)))\n" "svfloat16_t svsub_z(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_m)))\n" "svuint8_t svsub_m(svbool_t, svuint8_t, 
uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_m)))\n" "svuint32_t svsub_m(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_m)))\n" "svuint64_t svsub_m(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_m)))\n" "svuint16_t svsub_m(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_m)))\n" "svint8_t svsub_m(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_m)))\n" "svint32_t svsub_m(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_m)))\n" "svint64_t svsub_m(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_m)))\n" "svint16_t svsub_m(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_x)))\n" "svuint8_t svsub_x(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_x)))\n" "svuint32_t svsub_x(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_x)))\n" "svuint64_t svsub_x(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_x)))\n" "svuint16_t svsub_x(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_x)))\n" "svint8_t svsub_x(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_x)))\n" "svint32_t svsub_x(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_x)))\n" "svint64_t svsub_x(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_x)))\n" 
"svint16_t svsub_x(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_z)))\n" "svuint8_t svsub_z(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_z)))\n" "svuint32_t svsub_z(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_z)))\n" "svuint64_t svsub_z(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_z)))\n" "svuint16_t svsub_z(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_z)))\n" "svint8_t svsub_z(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_z)))\n" "svint32_t svsub_z(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_z)))\n" "svint64_t svsub_z(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_z)))\n" "svint16_t svsub_z(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_m)))\n" "svfloat64_t svsub_m(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_m)))\n" "svfloat32_t svsub_m(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_m)))\n" "svfloat16_t svsub_m(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_x)))\n" "svfloat64_t svsub_x(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_x)))\n" "svfloat32_t svsub_x(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_x)))\n" "svfloat16_t svsub_x(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_z)))\n" "svfloat64_t svsub_z(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_z)))\n" "svfloat32_t svsub_z(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_z)))\n" "svfloat16_t svsub_z(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_m)))\n" "svuint8_t svsub_m(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_m)))\n" "svuint32_t svsub_m(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_m)))\n" "svuint64_t svsub_m(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_m)))\n" "svuint16_t svsub_m(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_m)))\n" "svint8_t svsub_m(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_m)))\n" "svint32_t svsub_m(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_m)))\n" "svint64_t svsub_m(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_m)))\n" "svint16_t svsub_m(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_x)))\n" "svuint8_t svsub_x(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_x)))\n" "svuint32_t svsub_x(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_x)))\n" "svuint64_t svsub_x(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_x)))\n" 
"svuint16_t svsub_x(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_x)))\n" "svint8_t svsub_x(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_x)))\n" "svint32_t svsub_x(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_x)))\n" "svint64_t svsub_x(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_x)))\n" "svint16_t svsub_x(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_z)))\n" "svuint8_t svsub_z(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_z)))\n" "svuint32_t svsub_z(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_z)))\n" "svuint64_t svsub_z(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_z)))\n" "svuint16_t svsub_z(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_z)))\n" "svint8_t svsub_z(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_z)))\n" "svint32_t svsub_z(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_z)))\n" "svint64_t svsub_z(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_z)))\n" "svint16_t svsub_z(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_m)))\n" "svfloat64_t svsubr_m(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_m)))\n" "svfloat32_t svsubr_m(svbool_t, svfloat32_t, float32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_m)))\n" "svfloat16_t svsubr_m(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_x)))\n" "svfloat64_t svsubr_x(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_x)))\n" "svfloat32_t svsubr_x(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_x)))\n" "svfloat16_t svsubr_x(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_z)))\n" "svfloat64_t svsubr_z(svbool_t, svfloat64_t, float64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_z)))\n" "svfloat32_t svsubr_z(svbool_t, svfloat32_t, float32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_z)))\n" "svfloat16_t svsubr_z(svbool_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_m)))\n" "svuint8_t svsubr_m(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_m)))\n" "svuint32_t svsubr_m(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_m)))\n" "svuint64_t svsubr_m(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_m)))\n" "svuint16_t svsubr_m(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_m)))\n" "svint8_t svsubr_m(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_m)))\n" "svint32_t svsubr_m(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_m)))\n" "svint64_t svsubr_m(svbool_t, svint64_t, int64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_m)))\n" "svint16_t svsubr_m(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_x)))\n" "svuint8_t svsubr_x(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_x)))\n" "svuint32_t svsubr_x(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_x)))\n" "svuint64_t svsubr_x(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_x)))\n" "svuint16_t svsubr_x(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_x)))\n" "svint8_t svsubr_x(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_x)))\n" "svint32_t svsubr_x(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_x)))\n" "svint64_t svsubr_x(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_x)))\n" "svint16_t svsubr_x(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_z)))\n" "svuint8_t svsubr_z(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_z)))\n" "svuint32_t svsubr_z(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_z)))\n" "svuint64_t svsubr_z(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_z)))\n" "svuint16_t svsubr_z(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_z)))\n" "svint8_t svsubr_z(svbool_t, svint8_t, int8_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_z)))\n" "svint32_t svsubr_z(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_z)))\n" "svint64_t svsubr_z(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_z)))\n" "svint16_t svsubr_z(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_m)))\n" "svfloat64_t svsubr_m(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_m)))\n" "svfloat32_t svsubr_m(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_m)))\n" "svfloat16_t svsubr_m(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_x)))\n" "svfloat64_t svsubr_x(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_x)))\n" "svfloat32_t svsubr_x(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_x)))\n" "svfloat16_t svsubr_x(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_z)))\n" "svfloat64_t svsubr_z(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_z)))\n" "svfloat32_t svsubr_z(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_z)))\n" "svfloat16_t svsubr_z(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_m)))\n" "svuint8_t svsubr_m(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_m)))\n" "svuint32_t svsubr_m(svbool_t, svuint32_t, svuint32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_m)))\n" "svuint64_t svsubr_m(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_m)))\n" "svuint16_t svsubr_m(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_m)))\n" "svint8_t svsubr_m(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_m)))\n" "svint32_t svsubr_m(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_m)))\n" "svint64_t svsubr_m(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_m)))\n" "svint16_t svsubr_m(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_x)))\n" "svuint8_t svsubr_x(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_x)))\n" "svuint32_t svsubr_x(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_x)))\n" "svuint64_t svsubr_x(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_x)))\n" "svuint16_t svsubr_x(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_x)))\n" "svint8_t svsubr_x(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_x)))\n" "svint32_t svsubr_x(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_x)))\n" "svint64_t svsubr_x(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_x)))\n" "svint16_t svsubr_x(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_z)))\n" 
"svuint8_t svsubr_z(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_z)))\n" "svuint32_t svsubr_z(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_z)))\n" "svuint64_t svsubr_z(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_z)))\n" "svuint16_t svsubr_z(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_z)))\n" "svint8_t svsubr_z(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_z)))\n" "svint32_t svsubr_z(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_z)))\n" "svint64_t svsubr_z(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_z)))\n" "svint16_t svsubr_z(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u8)))\n" "svuint8_t svtbl(svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u32)))\n" "svuint32_t svtbl(svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u64)))\n" "svuint64_t svtbl(svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u16)))\n" "svuint16_t svtbl(svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s8)))\n" "svint8_t svtbl(svint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f64)))\n" "svfloat64_t svtbl(svfloat64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f32)))\n" "svfloat32_t svtbl(svfloat32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f16)))\n" "svfloat16_t svtbl(svfloat16_t, 
svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s32)))\n" "svint32_t svtbl(svint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s64)))\n" "svint64_t svtbl(svint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s16)))\n" "svint16_t svtbl(svint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f64)))\n" "svfloat64_t svtmad(svfloat64_t, svfloat64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f32)))\n" "svfloat32_t svtmad(svfloat32_t, svfloat32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f16)))\n" "svfloat16_t svtmad(svfloat16_t, svfloat16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u8)))\n" "svuint8_t svtrn1(svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u32)))\n" "svuint32_t svtrn1(svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u64)))\n" "svuint64_t svtrn1(svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u16)))\n" "svuint16_t svtrn1(svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s8)))\n" "svint8_t svtrn1(svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f64)))\n" "svfloat64_t svtrn1(svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f32)))\n" "svfloat32_t svtrn1(svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f16)))\n" "svfloat16_t svtrn1(svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s32)))\n" "svint32_t svtrn1(svint32_t, svint32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s64)))\n" "svint64_t svtrn1(svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s16)))\n" "svint16_t svtrn1(svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u8)))\n" "svuint8_t svtrn2(svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u32)))\n" "svuint32_t svtrn2(svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u64)))\n" "svuint64_t svtrn2(svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u16)))\n" "svuint16_t svtrn2(svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s8)))\n" "svint8_t svtrn2(svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f64)))\n" "svfloat64_t svtrn2(svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f32)))\n" "svfloat32_t svtrn2(svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f16)))\n" "svfloat16_t svtrn2(svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s32)))\n" "svint32_t svtrn2(svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s64)))\n" "svint64_t svtrn2(svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s16)))\n" "svint16_t svtrn2(svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f64)))\n" "svfloat64_t svtsmul(svfloat64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f32)))\n" "svfloat32_t svtsmul(svfloat32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f16)))\n" "svfloat16_t svtsmul(svfloat16_t, 
svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f64)))\n" "svfloat64_t svtssel(svfloat64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f32)))\n" "svfloat32_t svtssel(svfloat32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f16)))\n" "svfloat16_t svtssel(svfloat16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_b)))\n" "svbool_t svunpkhi(svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s32)))\n" "svint32_t svunpkhi(svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s64)))\n" "svint64_t svunpkhi(svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s16)))\n" "svint16_t svunpkhi(svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u32)))\n" "svuint32_t svunpkhi(svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u64)))\n" "svuint64_t svunpkhi(svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u16)))\n" "svuint16_t svunpkhi(svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_b)))\n" "svbool_t svunpklo(svbool_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s32)))\n" "svint32_t svunpklo(svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s64)))\n" "svint64_t svunpklo(svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s16)))\n" "svint16_t svunpklo(svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u32)))\n" "svuint32_t svunpklo(svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u64)))\n" "svuint64_t svunpklo(svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u16)))\n" 
"svuint16_t svunpklo(svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u8)))\n" "svuint8_t svuzp1(svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u32)))\n" "svuint32_t svuzp1(svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u64)))\n" "svuint64_t svuzp1(svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u16)))\n" "svuint16_t svuzp1(svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s8)))\n" "svint8_t svuzp1(svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f64)))\n" "svfloat64_t svuzp1(svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f32)))\n" "svfloat32_t svuzp1(svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f16)))\n" "svfloat16_t svuzp1(svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s32)))\n" "svint32_t svuzp1(svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s64)))\n" "svint64_t svuzp1(svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s16)))\n" "svint16_t svuzp1(svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u8)))\n" "svuint8_t svuzp2(svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u32)))\n" "svuint32_t svuzp2(svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u64)))\n" "svuint64_t svuzp2(svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u16)))\n" "svuint16_t svuzp2(svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s8)))\n" "svint8_t 
svuzp2(svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f64)))\n" "svfloat64_t svuzp2(svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f32)))\n" "svfloat32_t svuzp2(svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f16)))\n" "svfloat16_t svuzp2(svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s32)))\n" "svint32_t svuzp2(svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s64)))\n" "svint64_t svuzp2(svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s16)))\n" "svint16_t svuzp2(svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_s32)))\n" "svbool_t svwhilele_b8(int32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_s32)))\n" "svbool_t svwhilele_b32(int32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_s32)))\n" "svbool_t svwhilele_b64(int32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_s32)))\n" "svbool_t svwhilele_b16(int32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_s64)))\n" "svbool_t svwhilele_b8(int64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_s64)))\n" "svbool_t svwhilele_b32(int64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_s64)))\n" "svbool_t svwhilele_b64(int64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_s64)))\n" "svbool_t svwhilele_b16(int64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_u32)))\n" "svbool_t svwhilele_b8(uint32_t, uint32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_u32)))\n" "svbool_t svwhilele_b32(uint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_u32)))\n" "svbool_t svwhilele_b64(uint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_u32)))\n" "svbool_t svwhilele_b16(uint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_u64)))\n" "svbool_t svwhilele_b8(uint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_u64)))\n" "svbool_t svwhilele_b32(uint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_u64)))\n" "svbool_t svwhilele_b64(uint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_u64)))\n" "svbool_t svwhilele_b16(uint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_u32)))\n" "svbool_t svwhilelt_b8(uint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_u32)))\n" "svbool_t svwhilelt_b32(uint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_u32)))\n" "svbool_t svwhilelt_b64(uint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_u32)))\n" "svbool_t svwhilelt_b16(uint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_u64)))\n" "svbool_t svwhilelt_b8(uint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_u64)))\n" "svbool_t svwhilelt_b32(uint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_u64)))\n" "svbool_t svwhilelt_b64(uint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_u64)))\n" "svbool_t svwhilelt_b16(uint64_t, uint64_t);\n" 
"__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_s32)))\n" "svbool_t svwhilelt_b8(int32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_s32)))\n" "svbool_t svwhilelt_b32(int32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_s32)))\n" "svbool_t svwhilelt_b64(int32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_s32)))\n" "svbool_t svwhilelt_b16(int32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_s64)))\n" "svbool_t svwhilelt_b8(int64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_s64)))\n" "svbool_t svwhilelt_b32(int64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_s64)))\n" "svbool_t svwhilelt_b64(int64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_s64)))\n" "svbool_t svwhilelt_b16(int64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u8)))\n" "svuint8_t svzip1(svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u32)))\n" "svuint32_t svzip1(svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u64)))\n" "svuint64_t svzip1(svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u16)))\n" "svuint16_t svzip1(svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s8)))\n" "svint8_t svzip1(svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f64)))\n" "svfloat64_t svzip1(svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f32)))\n" "svfloat32_t svzip1(svfloat32_t, svfloat32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f16)))\n" "svfloat16_t svzip1(svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s32)))\n" "svint32_t svzip1(svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s64)))\n" "svint64_t svzip1(svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s16)))\n" "svint16_t svzip1(svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u8)))\n" "svuint8_t svzip2(svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u32)))\n" "svuint32_t svzip2(svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u64)))\n" "svuint64_t svzip2(svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u16)))\n" "svuint16_t svzip2(svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s8)))\n" "svint8_t svzip2(svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f64)))\n" "svfloat64_t svzip2(svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f32)))\n" "svfloat32_t svzip2(svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f16)))\n" "svfloat16_t svzip2(svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s32)))\n" "svint32_t svzip2(svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s64)))\n" "svint64_t svzip2(svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s16)))\n" "svint16_t svzip2(svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_n_f32)))\n" "svfloat32_t svbfdot_n_f32(svfloat32_t, 
svbfloat16_t, bfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_f32)))\n" "svfloat32_t svbfdot_f32(svfloat32_t, svbfloat16_t, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_lane_f32)))\n" "svfloat32_t svbfdot_lane_f32(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_n_f32)))\n" "svfloat32_t svbfmlalb_n_f32(svfloat32_t, svbfloat16_t, bfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_f32)))\n" "svfloat32_t svbfmlalb_f32(svfloat32_t, svbfloat16_t, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_lane_f32)))\n" "svfloat32_t svbfmlalb_lane_f32(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_n_f32)))\n" "svfloat32_t svbfmlalt_n_f32(svfloat32_t, svbfloat16_t, bfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_f32)))\n" "svfloat32_t svbfmlalt_f32(svfloat32_t, svbfloat16_t, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_lane_f32)))\n" "svfloat32_t svbfmlalt_lane_f32(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmmla_f32)))\n" "svfloat32_t svbfmmla_f32(svfloat32_t, svbfloat16_t, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_bf16)))\n" "bfloat16_t svclasta_n_bf16(svbool_t, bfloat16_t, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_bf16)))\n" "svbfloat16_t svclasta_bf16(svbool_t, svbfloat16_t, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_bf16)))\n" "bfloat16_t svclastb_n_bf16(svbool_t, bfloat16_t, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_bf16)))\n" 
"svbfloat16_t svclastb_bf16(svbool_t, svbfloat16_t, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_m)))\n" "svuint16_t svcnt_bf16_m(svuint16_t, svbool_t, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_x)))\n" "svuint16_t svcnt_bf16_x(svbool_t, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_z)))\n" "svuint16_t svcnt_bf16_z(svbool_t, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_bf16)))\n" "svbfloat16x2_t svcreate2_bf16(svbfloat16_t, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_bf16)))\n" "svbfloat16x3_t svcreate3_bf16(svbfloat16_t, svbfloat16_t, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_bf16)))\n" "svbfloat16x4_t svcreate4_bf16(svbfloat16_t, svbfloat16_t, svbfloat16_t, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_m)))\n" "svbfloat16_t svcvt_bf16_f32_m(svbfloat16_t, svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_x)))\n" "svbfloat16_t svcvt_bf16_f32_x(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_z)))\n" "svbfloat16_t svcvt_bf16_f32_z(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_bf16_f32_m)))\n" "svbfloat16_t svcvtnt_bf16_f32_m(svbfloat16_t, svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16)))\n" "svbfloat16_t svdup_n_bf16(bfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_m)))\n" "svbfloat16_t svdup_n_bf16_m(svbfloat16_t, svbool_t, bfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_x)))\n" "svbfloat16_t svdup_n_bf16_x(svbool_t, bfloat16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_z)))\n" "svbfloat16_t svdup_n_bf16_z(svbool_t, bfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_bf16)))\n" "svbfloat16_t svdup_lane_bf16(svbfloat16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_bf16)))\n" "svbfloat16_t svdupq_n_bf16(bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_bf16)))\n" "svbfloat16_t svdupq_lane_bf16(svbfloat16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_bf16)))\n" "svbfloat16_t svext_bf16(svbfloat16_t, svbfloat16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_bf16)))\n" "svbfloat16_t svget2_bf16(svbfloat16x2_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_bf16)))\n" "svbfloat16_t svget3_bf16(svbfloat16x3_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_bf16)))\n" "svbfloat16_t svget4_bf16(svbfloat16x4_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_bf16)))\n" "svbfloat16_t svinsr_n_bf16(svbfloat16_t, bfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_bf16)))\n" "bfloat16_t svlasta_bf16(svbool_t, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_bf16)))\n" "bfloat16_t svlastb_bf16(svbool_t, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_bf16)))\n" "svbfloat16_t svld1_bf16(svbool_t, bfloat16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_bf16)))\n" "svbfloat16_t svld1_vnum_bf16(svbool_t, bfloat16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_bf16)))\n" "svbfloat16_t svld1rq_bf16(svbool_t, bfloat16_t 
const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_bf16)))\n" "svbfloat16x2_t svld2_bf16(svbool_t, bfloat16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_bf16)))\n" "svbfloat16x2_t svld2_vnum_bf16(svbool_t, bfloat16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_bf16)))\n" "svbfloat16x3_t svld3_bf16(svbool_t, bfloat16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_bf16)))\n" "svbfloat16x3_t svld3_vnum_bf16(svbool_t, bfloat16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_bf16)))\n" "svbfloat16x4_t svld4_bf16(svbool_t, bfloat16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_bf16)))\n" "svbfloat16x4_t svld4_vnum_bf16(svbool_t, bfloat16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_bf16)))\n" "svbfloat16_t svldff1_bf16(svbool_t, bfloat16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_bf16)))\n" "svbfloat16_t svldff1_vnum_bf16(svbool_t, bfloat16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_bf16)))\n" "svbfloat16_t svldnf1_bf16(svbool_t, bfloat16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_bf16)))\n" "svbfloat16_t svldnf1_vnum_bf16(svbool_t, bfloat16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_bf16)))\n" "svbfloat16_t svldnt1_bf16(svbool_t, bfloat16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_bf16)))\n" "svbfloat16_t svldnt1_vnum_bf16(svbool_t, bfloat16_t const *, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_bf16)))\n" "uint64_t svlen_bf16(svbfloat16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_bf16)))\n" "svbfloat16_t svrev_bf16(svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_bf16)))\n" "svbfloat16_t svsel_bf16(svbool_t, svbfloat16_t, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_bf16)))\n" "svbfloat16x2_t svset2_bf16(svbfloat16x2_t, uint64_t, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_bf16)))\n" "svbfloat16x3_t svset3_bf16(svbfloat16x3_t, uint64_t, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_bf16)))\n" "svbfloat16x4_t svset4_bf16(svbfloat16x4_t, uint64_t, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_bf16)))\n" "svbfloat16_t svsplice_bf16(svbool_t, svbfloat16_t, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_bf16)))\n" "void svst1_bf16(svbool_t, bfloat16_t *, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_bf16)))\n" "void svst1_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_bf16)))\n" "void svst2_bf16(svbool_t, bfloat16_t *, svbfloat16x2_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_bf16)))\n" "void svst2_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16x2_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_bf16)))\n" "void svst3_bf16(svbool_t, bfloat16_t *, svbfloat16x3_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_bf16)))\n" "void svst3_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16x3_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_bf16)))\n" "void svst4_bf16(svbool_t, bfloat16_t *, svbfloat16x4_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_bf16)))\n" "void 
svst4_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16x4_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_bf16)))\n" "void svstnt1_bf16(svbool_t, bfloat16_t *, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_bf16)))\n" "void svstnt1_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_bf16)))\n" "svbfloat16_t svtbl_bf16(svbfloat16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_bf16)))\n" "svbfloat16_t svtrn1_bf16(svbfloat16_t, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_bf16)))\n" "svbfloat16_t svtrn2_bf16(svbfloat16_t, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_bf16)))\n" "svbfloat16x2_t svundef2_bf16(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_bf16)))\n" "svbfloat16x3_t svundef3_bf16(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_bf16)))\n" "svbfloat16x4_t svundef4_bf16(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_bf16)))\n" "svbfloat16_t svundef_bf16(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_bf16)))\n" "svbfloat16_t svuzp1_bf16(svbfloat16_t, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_bf16)))\n" "svbfloat16_t svuzp2_bf16(svbfloat16_t, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_bf16)))\n" "svbfloat16_t svzip1_bf16(svbfloat16_t, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_bf16)))\n" "svbfloat16_t svzip2_bf16(svbfloat16_t, svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_n_f32)))\n" "svfloat32_t svbfdot(svfloat32_t, svbfloat16_t, bfloat16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_f32)))\n" "svfloat32_t svbfdot(svfloat32_t, svbfloat16_t, svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_lane_f32)))\n" "svfloat32_t svbfdot_lane(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_n_f32)))\n" "svfloat32_t svbfmlalb(svfloat32_t, svbfloat16_t, bfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_f32)))\n" "svfloat32_t svbfmlalb(svfloat32_t, svbfloat16_t, svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_lane_f32)))\n" "svfloat32_t svbfmlalb_lane(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_n_f32)))\n" "svfloat32_t svbfmlalt(svfloat32_t, svbfloat16_t, bfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_f32)))\n" "svfloat32_t svbfmlalt(svfloat32_t, svbfloat16_t, svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_lane_f32)))\n" "svfloat32_t svbfmlalt_lane(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmmla_f32)))\n" "svfloat32_t svbfmmla(svfloat32_t, svbfloat16_t, svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_bf16)))\n" "bfloat16_t svclasta(svbool_t, bfloat16_t, svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_bf16)))\n" "svbfloat16_t svclasta(svbool_t, svbfloat16_t, svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_bf16)))\n" "bfloat16_t svclastb(svbool_t, bfloat16_t, svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_bf16)))\n" "svbfloat16_t svclastb(svbool_t, svbfloat16_t, svbfloat16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_m)))\n" "svuint16_t svcnt_m(svuint16_t, svbool_t, svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_x)))\n" "svuint16_t svcnt_x(svbool_t, svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_z)))\n" "svuint16_t svcnt_z(svbool_t, svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_bf16)))\n" "svbfloat16x2_t svcreate2(svbfloat16_t, svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_bf16)))\n" "svbfloat16x3_t svcreate3(svbfloat16_t, svbfloat16_t, svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_bf16)))\n" "svbfloat16x4_t svcreate4(svbfloat16_t, svbfloat16_t, svbfloat16_t, svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_m)))\n" "svbfloat16_t svcvt_bf16_m(svbfloat16_t, svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_x)))\n" "svbfloat16_t svcvt_bf16_x(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_z)))\n" "svbfloat16_t svcvt_bf16_z(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_bf16_f32_m)))\n" "svbfloat16_t svcvtnt_bf16_m(svbfloat16_t, svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16)))\n" "svbfloat16_t svdup_bf16(bfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_m)))\n" "svbfloat16_t svdup_bf16_m(svbfloat16_t, svbool_t, bfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_x)))\n" "svbfloat16_t svdup_bf16_x(svbool_t, bfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_z)))\n" "svbfloat16_t svdup_bf16_z(svbool_t, bfloat16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_bf16)))\n" "svbfloat16_t svdup_lane(svbfloat16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_bf16)))\n" "svbfloat16_t svdupq_bf16(bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_bf16)))\n" "svbfloat16_t svdupq_lane(svbfloat16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_bf16)))\n" "svbfloat16_t svext(svbfloat16_t, svbfloat16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_bf16)))\n" "svbfloat16_t svget2(svbfloat16x2_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_bf16)))\n" "svbfloat16_t svget3(svbfloat16x3_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_bf16)))\n" "svbfloat16_t svget4(svbfloat16x4_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_bf16)))\n" "svbfloat16_t svinsr(svbfloat16_t, bfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_bf16)))\n" "bfloat16_t svlasta(svbool_t, svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_bf16)))\n" "bfloat16_t svlastb(svbool_t, svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_bf16)))\n" "svbfloat16_t svld1(svbool_t, bfloat16_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_bf16)))\n" "svbfloat16_t svld1_vnum(svbool_t, bfloat16_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_bf16)))\n" "svbfloat16_t svld1rq(svbool_t, bfloat16_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_bf16)))\n" "svbfloat16x2_t svld2(svbool_t, bfloat16_t const *);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_bf16)))\n" "svbfloat16x2_t svld2_vnum(svbool_t, bfloat16_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_bf16)))\n" "svbfloat16x3_t svld3(svbool_t, bfloat16_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_bf16)))\n" "svbfloat16x3_t svld3_vnum(svbool_t, bfloat16_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_bf16)))\n" "svbfloat16x4_t svld4(svbool_t, bfloat16_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_bf16)))\n" "svbfloat16x4_t svld4_vnum(svbool_t, bfloat16_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_bf16)))\n" "svbfloat16_t svldff1(svbool_t, bfloat16_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_bf16)))\n" "svbfloat16_t svldff1_vnum(svbool_t, bfloat16_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_bf16)))\n" "svbfloat16_t svldnf1(svbool_t, bfloat16_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_bf16)))\n" "svbfloat16_t svldnf1_vnum(svbool_t, bfloat16_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_bf16)))\n" "svbfloat16_t svldnt1(svbool_t, bfloat16_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_bf16)))\n" "svbfloat16_t svldnt1_vnum(svbool_t, bfloat16_t const *, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_bf16)))\n" "uint64_t svlen(svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_bf16)))\n" "svbfloat16_t svrev(svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_bf16)))\n" "svbfloat16_t svsel(svbool_t, svbfloat16_t, svbfloat16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_bf16)))\n" "svbfloat16x2_t svset2(svbfloat16x2_t, uint64_t, svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_bf16)))\n" "svbfloat16x3_t svset3(svbfloat16x3_t, uint64_t, svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_bf16)))\n" "svbfloat16x4_t svset4(svbfloat16x4_t, uint64_t, svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_bf16)))\n" "svbfloat16_t svsplice(svbool_t, svbfloat16_t, svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_bf16)))\n" "void svst1(svbool_t, bfloat16_t *, svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_bf16)))\n" "void svst1_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_bf16)))\n" "void svst2(svbool_t, bfloat16_t *, svbfloat16x2_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_bf16)))\n" "void svst2_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16x2_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_bf16)))\n" "void svst3(svbool_t, bfloat16_t *, svbfloat16x3_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_bf16)))\n" "void svst3_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16x3_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_bf16)))\n" "void svst4(svbool_t, bfloat16_t *, svbfloat16x4_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_bf16)))\n" "void svst4_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16x4_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_bf16)))\n" "void svstnt1(svbool_t, bfloat16_t *, svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_bf16)))\n" "void svstnt1_vnum(svbool_t, bfloat16_t *, 
int64_t, svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_bf16)))\n" "svbfloat16_t svtbl(svbfloat16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_bf16)))\n" "svbfloat16_t svtrn1(svbfloat16_t, svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_bf16)))\n" "svbfloat16_t svtrn2(svbfloat16_t, svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_bf16)))\n" "svbfloat16_t svuzp1(svbfloat16_t, svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_bf16)))\n" "svbfloat16_t svuzp2(svbfloat16_t, svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_bf16)))\n" "svbfloat16_t svzip1(svbfloat16_t, svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_bf16)))\n" "svbfloat16_t svzip2(svbfloat16_t, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_bf16)))\n" "svbfloat16_t svtrn1q_bf16(svbfloat16_t, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_bf16)))\n" "svbfloat16_t svtrn2q_bf16(svbfloat16_t, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_bf16)))\n" "svbfloat16_t svuzp1q_bf16(svbfloat16_t, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_bf16)))\n" "svbfloat16_t svuzp2q_bf16(svbfloat16_t, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_bf16)))\n" "svbfloat16_t svzip1q_bf16(svbfloat16_t, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_bf16)))\n" "svbfloat16_t svzip2q_bf16(svbfloat16_t, svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_bf16)))\n" "svbfloat16_t svtrn1q(svbfloat16_t, svbfloat16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_bf16)))\n" "svbfloat16_t svtrn2q(svbfloat16_t, svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_bf16)))\n" "svbfloat16_t svuzp1q(svbfloat16_t, svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_bf16)))\n" "svbfloat16_t svuzp2q(svbfloat16_t, svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_bf16)))\n" "svbfloat16_t svzip1q(svbfloat16_t, svbfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_bf16)))\n" "svbfloat16_t svzip2q(svbfloat16_t, svbfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f32)))\n" "svfloat32_t svmmla_f32(svfloat32_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f32)))\n" "svfloat32_t svmmla(svfloat32_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u8)))\n" "svuint8_t svld1ro_u8(svbool_t, uint8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u32)))\n" "svuint32_t svld1ro_u32(svbool_t, uint32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u64)))\n" "svuint64_t svld1ro_u64(svbool_t, uint64_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u16)))\n" "svuint16_t svld1ro_u16(svbool_t, uint16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s8)))\n" "svint8_t svld1ro_s8(svbool_t, int8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f64)))\n" "svfloat64_t svld1ro_f64(svbool_t, float64_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f32)))\n" "svfloat32_t svld1ro_f32(svbool_t, float32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f16)))\n" "svfloat16_t 
svld1ro_f16(svbool_t, float16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s32)))\n" "svint32_t svld1ro_s32(svbool_t, int32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s64)))\n" "svint64_t svld1ro_s64(svbool_t, int64_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s16)))\n" "svint16_t svld1ro_s16(svbool_t, int16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f64)))\n" "svfloat64_t svmmla_f64(svfloat64_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u8)))\n" "svuint8_t svtrn1q_u8(svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u32)))\n" "svuint32_t svtrn1q_u32(svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u64)))\n" "svuint64_t svtrn1q_u64(svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u16)))\n" "svuint16_t svtrn1q_u16(svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s8)))\n" "svint8_t svtrn1q_s8(svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f64)))\n" "svfloat64_t svtrn1q_f64(svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f32)))\n" "svfloat32_t svtrn1q_f32(svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f16)))\n" "svfloat16_t svtrn1q_f16(svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s32)))\n" "svint32_t svtrn1q_s32(svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s64)))\n" "svint64_t svtrn1q_s64(svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s16)))\n" "svint16_t 
svtrn1q_s16(svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u8)))\n" "svuint8_t svtrn2q_u8(svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u32)))\n" "svuint32_t svtrn2q_u32(svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u64)))\n" "svuint64_t svtrn2q_u64(svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u16)))\n" "svuint16_t svtrn2q_u16(svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s8)))\n" "svint8_t svtrn2q_s8(svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f64)))\n" "svfloat64_t svtrn2q_f64(svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f32)))\n" "svfloat32_t svtrn2q_f32(svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f16)))\n" "svfloat16_t svtrn2q_f16(svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s32)))\n" "svint32_t svtrn2q_s32(svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s64)))\n" "svint64_t svtrn2q_s64(svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s16)))\n" "svint16_t svtrn2q_s16(svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u8)))\n" "svuint8_t svuzp1q_u8(svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u32)))\n" "svuint32_t svuzp1q_u32(svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u64)))\n" "svuint64_t svuzp1q_u64(svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u16)))\n" "svuint16_t svuzp1q_u16(svuint16_t, svuint16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s8)))\n" "svint8_t svuzp1q_s8(svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f64)))\n" "svfloat64_t svuzp1q_f64(svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f32)))\n" "svfloat32_t svuzp1q_f32(svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f16)))\n" "svfloat16_t svuzp1q_f16(svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s32)))\n" "svint32_t svuzp1q_s32(svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s64)))\n" "svint64_t svuzp1q_s64(svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s16)))\n" "svint16_t svuzp1q_s16(svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u8)))\n" "svuint8_t svuzp2q_u8(svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u32)))\n" "svuint32_t svuzp2q_u32(svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u64)))\n" "svuint64_t svuzp2q_u64(svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u16)))\n" "svuint16_t svuzp2q_u16(svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s8)))\n" "svint8_t svuzp2q_s8(svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f64)))\n" "svfloat64_t svuzp2q_f64(svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f32)))\n" "svfloat32_t svuzp2q_f32(svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f16)))\n" "svfloat16_t svuzp2q_f16(svfloat16_t, svfloat16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s32)))\n" "svint32_t svuzp2q_s32(svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s64)))\n" "svint64_t svuzp2q_s64(svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s16)))\n" "svint16_t svuzp2q_s16(svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u8)))\n" "svuint8_t svzip1q_u8(svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u32)))\n" "svuint32_t svzip1q_u32(svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u64)))\n" "svuint64_t svzip1q_u64(svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u16)))\n" "svuint16_t svzip1q_u16(svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s8)))\n" "svint8_t svzip1q_s8(svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f64)))\n" "svfloat64_t svzip1q_f64(svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f32)))\n" "svfloat32_t svzip1q_f32(svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f16)))\n" "svfloat16_t svzip1q_f16(svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s32)))\n" "svint32_t svzip1q_s32(svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s64)))\n" "svint64_t svzip1q_s64(svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s16)))\n" "svint16_t svzip1q_s16(svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u8)))\n" "svuint8_t svzip2q_u8(svuint8_t, svuint8_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u32)))\n" "svuint32_t svzip2q_u32(svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u64)))\n" "svuint64_t svzip2q_u64(svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u16)))\n" "svuint16_t svzip2q_u16(svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s8)))\n" "svint8_t svzip2q_s8(svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f64)))\n" "svfloat64_t svzip2q_f64(svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f32)))\n" "svfloat32_t svzip2q_f32(svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f16)))\n" "svfloat16_t svzip2q_f16(svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s32)))\n" "svint32_t svzip2q_s32(svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s64)))\n" "svint64_t svzip2q_s64(svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s16)))\n" "svint16_t svzip2q_s16(svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u8)))\n" "svuint8_t svld1ro(svbool_t, uint8_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u32)))\n" "svuint32_t svld1ro(svbool_t, uint32_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u64)))\n" "svuint64_t svld1ro(svbool_t, uint64_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u16)))\n" "svuint16_t svld1ro(svbool_t, uint16_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s8)))\n" "svint8_t svld1ro(svbool_t, int8_t const *);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f64)))\n" "svfloat64_t svld1ro(svbool_t, float64_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f32)))\n" "svfloat32_t svld1ro(svbool_t, float32_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f16)))\n" "svfloat16_t svld1ro(svbool_t, float16_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s32)))\n" "svint32_t svld1ro(svbool_t, int32_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s64)))\n" "svint64_t svld1ro(svbool_t, int64_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s16)))\n" "svint16_t svld1ro(svbool_t, int16_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f64)))\n" "svfloat64_t svmmla(svfloat64_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u8)))\n" "svuint8_t svtrn1q(svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u32)))\n" "svuint32_t svtrn1q(svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u64)))\n" "svuint64_t svtrn1q(svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u16)))\n" "svuint16_t svtrn1q(svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s8)))\n" "svint8_t svtrn1q(svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f64)))\n" "svfloat64_t svtrn1q(svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f32)))\n" "svfloat32_t svtrn1q(svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f16)))\n" "svfloat16_t svtrn1q(svfloat16_t, svfloat16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s32)))\n" "svint32_t svtrn1q(svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s64)))\n" "svint64_t svtrn1q(svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s16)))\n" "svint16_t svtrn1q(svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u8)))\n" "svuint8_t svtrn2q(svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u32)))\n" "svuint32_t svtrn2q(svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u64)))\n" "svuint64_t svtrn2q(svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u16)))\n" "svuint16_t svtrn2q(svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s8)))\n" "svint8_t svtrn2q(svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f64)))\n" "svfloat64_t svtrn2q(svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f32)))\n" "svfloat32_t svtrn2q(svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f16)))\n" "svfloat16_t svtrn2q(svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s32)))\n" "svint32_t svtrn2q(svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s64)))\n" "svint64_t svtrn2q(svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s16)))\n" "svint16_t svtrn2q(svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u8)))\n" "svuint8_t svuzp1q(svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u32)))\n" "svuint32_t 
svuzp1q(svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u64)))\n" "svuint64_t svuzp1q(svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u16)))\n" "svuint16_t svuzp1q(svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s8)))\n" "svint8_t svuzp1q(svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f64)))\n" "svfloat64_t svuzp1q(svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f32)))\n" "svfloat32_t svuzp1q(svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f16)))\n" "svfloat16_t svuzp1q(svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s32)))\n" "svint32_t svuzp1q(svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s64)))\n" "svint64_t svuzp1q(svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s16)))\n" "svint16_t svuzp1q(svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u8)))\n" "svuint8_t svuzp2q(svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u32)))\n" "svuint32_t svuzp2q(svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u64)))\n" "svuint64_t svuzp2q(svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u16)))\n" "svuint16_t svuzp2q(svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s8)))\n" "svint8_t svuzp2q(svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f64)))\n" "svfloat64_t svuzp2q(svfloat64_t, svfloat64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f32)))\n" "svfloat32_t svuzp2q(svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f16)))\n" "svfloat16_t svuzp2q(svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s32)))\n" "svint32_t svuzp2q(svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s64)))\n" "svint64_t svuzp2q(svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s16)))\n" "svint16_t svuzp2q(svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u8)))\n" "svuint8_t svzip1q(svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u32)))\n" "svuint32_t svzip1q(svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u64)))\n" "svuint64_t svzip1q(svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u16)))\n" "svuint16_t svzip1q(svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s8)))\n" "svint8_t svzip1q(svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f64)))\n" "svfloat64_t svzip1q(svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f32)))\n" "svfloat32_t svzip1q(svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f16)))\n" "svfloat16_t svzip1q(svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s32)))\n" "svint32_t svzip1q(svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s64)))\n" "svint64_t svzip1q(svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s16)))\n" "svint16_t 
svzip1q(svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u8)))\n" "svuint8_t svzip2q(svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u32)))\n" "svuint32_t svzip2q(svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u64)))\n" "svuint64_t svzip2q(svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u16)))\n" "svuint16_t svzip2q(svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s8)))\n" "svint8_t svzip2q(svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f64)))\n" "svfloat64_t svzip2q(svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f32)))\n" "svfloat32_t svzip2q(svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f16)))\n" "svfloat16_t svzip2q(svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s32)))\n" "svint32_t svzip2q(svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s64)))\n" "svint64_t svzip2q(svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s16)))\n" "svint16_t svzip2q(svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_bf16)))\n" "svbfloat16_t svld1ro_bf16(svbool_t, bfloat16_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_bf16)))\n" "svbfloat16_t svld1ro(svbool_t, bfloat16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_s32)))\n" "svint32_t svmmla_s32(svint32_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_u32)))\n" "svuint32_t svmmla_u32(svuint32_t, svuint8_t, svuint8_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_n_s32)))\n" "svint32_t svsudot_n_s32(svint32_t, svint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_s32)))\n" "svint32_t svsudot_s32(svint32_t, svint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_lane_s32)))\n" "svint32_t svsudot_lane_s32(svint32_t, svint8_t, svuint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_n_s32)))\n" "svint32_t svusdot_n_s32(svint32_t, svuint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_s32)))\n" "svint32_t svusdot_s32(svint32_t, svuint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_lane_s32)))\n" "svint32_t svusdot_lane_s32(svint32_t, svuint8_t, svint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusmmla_s32)))\n" "svint32_t svusmmla_s32(svint32_t, svuint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_s32)))\n" "svint32_t svmmla(svint32_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_u32)))\n" "svuint32_t svmmla(svuint32_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_n_s32)))\n" "svint32_t svsudot(svint32_t, svint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_s32)))\n" "svint32_t svsudot(svint32_t, svint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_lane_s32)))\n" "svint32_t svsudot_lane(svint32_t, svint8_t, svuint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_n_s32)))\n" "svint32_t svusdot(svint32_t, svuint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_s32)))\n" "svint32_t svusdot(svint32_t, svuint8_t, svint8_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_lane_s32)))\n" "svint32_t svusdot_lane(svint32_t, svuint8_t, svint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusmmla_s32)))\n" "svint32_t svusmmla(svint32_t, svuint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s8)))\n" "svint8_t svaba_n_s8(svint8_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s32)))\n" "svint32_t svaba_n_s32(svint32_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s64)))\n" "svint64_t svaba_n_s64(svint64_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s16)))\n" "svint16_t svaba_n_s16(svint16_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u8)))\n" "svuint8_t svaba_n_u8(svuint8_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u32)))\n" "svuint32_t svaba_n_u32(svuint32_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u64)))\n" "svuint64_t svaba_n_u64(svuint64_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u16)))\n" "svuint16_t svaba_n_u16(svuint16_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s8)))\n" "svint8_t svaba_s8(svint8_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s32)))\n" "svint32_t svaba_s32(svint32_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s64)))\n" "svint64_t svaba_s64(svint64_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s16)))\n" "svint16_t svaba_s16(svint16_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u8)))\n" 
"svuint8_t svaba_u8(svuint8_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u32)))\n" "svuint32_t svaba_u32(svuint32_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u64)))\n" "svuint64_t svaba_u64(svuint64_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u16)))\n" "svuint16_t svaba_u16(svuint16_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s32)))\n" "svint32_t svabalb_n_s32(svint32_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s64)))\n" "svint64_t svabalb_n_s64(svint64_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s16)))\n" "svint16_t svabalb_n_s16(svint16_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u32)))\n" "svuint32_t svabalb_n_u32(svuint32_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u64)))\n" "svuint64_t svabalb_n_u64(svuint64_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u16)))\n" "svuint16_t svabalb_n_u16(svuint16_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s32)))\n" "svint32_t svabalb_s32(svint32_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s64)))\n" "svint64_t svabalb_s64(svint64_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s16)))\n" "svint16_t svabalb_s16(svint16_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u32)))\n" "svuint32_t svabalb_u32(svuint32_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u64)))\n" "svuint64_t 
svabalb_u64(svuint64_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u16)))\n" "svuint16_t svabalb_u16(svuint16_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s32)))\n" "svint32_t svabalt_n_s32(svint32_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s64)))\n" "svint64_t svabalt_n_s64(svint64_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s16)))\n" "svint16_t svabalt_n_s16(svint16_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u32)))\n" "svuint32_t svabalt_n_u32(svuint32_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u64)))\n" "svuint64_t svabalt_n_u64(svuint64_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u16)))\n" "svuint16_t svabalt_n_u16(svuint16_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s32)))\n" "svint32_t svabalt_s32(svint32_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s64)))\n" "svint64_t svabalt_s64(svint64_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s16)))\n" "svint16_t svabalt_s16(svint16_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u32)))\n" "svuint32_t svabalt_u32(svuint32_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u64)))\n" "svuint64_t svabalt_u64(svuint64_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u16)))\n" "svuint16_t svabalt_u16(svuint16_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s32)))\n" "svint32_t 
svabdlb_n_s32(svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s64)))\n" "svint64_t svabdlb_n_s64(svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s16)))\n" "svint16_t svabdlb_n_s16(svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u32)))\n" "svuint32_t svabdlb_n_u32(svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u64)))\n" "svuint64_t svabdlb_n_u64(svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u16)))\n" "svuint16_t svabdlb_n_u16(svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s32)))\n" "svint32_t svabdlb_s32(svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s64)))\n" "svint64_t svabdlb_s64(svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s16)))\n" "svint16_t svabdlb_s16(svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u32)))\n" "svuint32_t svabdlb_u32(svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u64)))\n" "svuint64_t svabdlb_u64(svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u16)))\n" "svuint16_t svabdlb_u16(svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s32)))\n" "svint32_t svabdlt_n_s32(svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s64)))\n" "svint64_t svabdlt_n_s64(svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s16)))\n" "svint16_t svabdlt_n_s16(svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u32)))\n" "svuint32_t svabdlt_n_u32(svuint16_t, uint16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u64)))\n" "svuint64_t svabdlt_n_u64(svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u16)))\n" "svuint16_t svabdlt_n_u16(svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s32)))\n" "svint32_t svabdlt_s32(svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s64)))\n" "svint64_t svabdlt_s64(svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s16)))\n" "svint16_t svabdlt_s16(svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u32)))\n" "svuint32_t svabdlt_u32(svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u64)))\n" "svuint64_t svabdlt_u64(svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u16)))\n" "svuint16_t svabdlt_u16(svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_m)))\n" "svint32_t svadalp_s32_m(svbool_t, svint32_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_m)))\n" "svint64_t svadalp_s64_m(svbool_t, svint64_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_m)))\n" "svint16_t svadalp_s16_m(svbool_t, svint16_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_x)))\n" "svint32_t svadalp_s32_x(svbool_t, svint32_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_x)))\n" "svint64_t svadalp_s64_x(svbool_t, svint64_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_x)))\n" "svint16_t svadalp_s16_x(svbool_t, svint16_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_z)))\n" "svint32_t svadalp_s32_z(svbool_t, 
svint32_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_z)))\n" "svint64_t svadalp_s64_z(svbool_t, svint64_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_z)))\n" "svint16_t svadalp_s16_z(svbool_t, svint16_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_m)))\n" "svuint32_t svadalp_u32_m(svbool_t, svuint32_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_m)))\n" "svuint64_t svadalp_u64_m(svbool_t, svuint64_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_m)))\n" "svuint16_t svadalp_u16_m(svbool_t, svuint16_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_x)))\n" "svuint32_t svadalp_u32_x(svbool_t, svuint32_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_x)))\n" "svuint64_t svadalp_u64_x(svbool_t, svuint64_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_x)))\n" "svuint16_t svadalp_u16_x(svbool_t, svuint16_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_z)))\n" "svuint32_t svadalp_u32_z(svbool_t, svuint32_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_z)))\n" "svuint64_t svadalp_u64_z(svbool_t, svuint64_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_z)))\n" "svuint16_t svadalp_u16_z(svbool_t, svuint16_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_n_u32)))\n" "svuint32_t svadclb_n_u32(svuint32_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_n_u64)))\n" "svuint64_t svadclb_n_u64(svuint64_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_u32)))\n" "svuint32_t 
svadclb_u32(svuint32_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_u64)))\n" "svuint64_t svadclb_u64(svuint64_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_n_u32)))\n" "svuint32_t svadclt_n_u32(svuint32_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_n_u64)))\n" "svuint64_t svadclt_n_u64(svuint64_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_u32)))\n" "svuint32_t svadclt_u32(svuint32_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_u64)))\n" "svuint64_t svadclt_u64(svuint64_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u32)))\n" "svuint16_t svaddhnb_n_u32(svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u64)))\n" "svuint32_t svaddhnb_n_u64(svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u16)))\n" "svuint8_t svaddhnb_n_u16(svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s32)))\n" "svint16_t svaddhnb_n_s32(svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s64)))\n" "svint32_t svaddhnb_n_s64(svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s16)))\n" "svint8_t svaddhnb_n_s16(svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u32)))\n" "svuint16_t svaddhnb_u32(svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u64)))\n" "svuint32_t svaddhnb_u64(svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u16)))\n" "svuint8_t svaddhnb_u16(svuint16_t, svuint16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s32)))\n" "svint16_t svaddhnb_s32(svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s64)))\n" "svint32_t svaddhnb_s64(svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s16)))\n" "svint8_t svaddhnb_s16(svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u32)))\n" "svuint16_t svaddhnt_n_u32(svuint16_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u64)))\n" "svuint32_t svaddhnt_n_u64(svuint32_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u16)))\n" "svuint8_t svaddhnt_n_u16(svuint8_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s32)))\n" "svint16_t svaddhnt_n_s32(svint16_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s64)))\n" "svint32_t svaddhnt_n_s64(svint32_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s16)))\n" "svint8_t svaddhnt_n_s16(svint8_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u32)))\n" "svuint16_t svaddhnt_u32(svuint16_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u64)))\n" "svuint32_t svaddhnt_u64(svuint32_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u16)))\n" "svuint8_t svaddhnt_u16(svuint8_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s32)))\n" "svint16_t svaddhnt_s32(svint16_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s64)))\n" "svint32_t svaddhnt_s64(svint32_t, svint64_t, svint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s16)))\n" "svint8_t svaddhnt_s16(svint8_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s32)))\n" "svint32_t svaddlb_n_s32(svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s64)))\n" "svint64_t svaddlb_n_s64(svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s16)))\n" "svint16_t svaddlb_n_s16(svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u32)))\n" "svuint32_t svaddlb_n_u32(svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u64)))\n" "svuint64_t svaddlb_n_u64(svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u16)))\n" "svuint16_t svaddlb_n_u16(svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s32)))\n" "svint32_t svaddlb_s32(svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s64)))\n" "svint64_t svaddlb_s64(svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s16)))\n" "svint16_t svaddlb_s16(svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u32)))\n" "svuint32_t svaddlb_u32(svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u64)))\n" "svuint64_t svaddlb_u64(svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u16)))\n" "svuint16_t svaddlb_u16(svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s32)))\n" "svint32_t svaddlbt_n_s32(svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s64)))\n" "svint64_t svaddlbt_n_s64(svint32_t, int32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s16)))\n" "svint16_t svaddlbt_n_s16(svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s32)))\n" "svint32_t svaddlbt_s32(svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s64)))\n" "svint64_t svaddlbt_s64(svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s16)))\n" "svint16_t svaddlbt_s16(svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s32)))\n" "svint32_t svaddlt_n_s32(svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s64)))\n" "svint64_t svaddlt_n_s64(svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s16)))\n" "svint16_t svaddlt_n_s16(svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u32)))\n" "svuint32_t svaddlt_n_u32(svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u64)))\n" "svuint64_t svaddlt_n_u64(svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u16)))\n" "svuint16_t svaddlt_n_u16(svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s32)))\n" "svint32_t svaddlt_s32(svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s64)))\n" "svint64_t svaddlt_s64(svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s16)))\n" "svint16_t svaddlt_s16(svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u32)))\n" "svuint32_t svaddlt_u32(svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u64)))\n" "svuint64_t svaddlt_u64(svuint32_t, svuint32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u16)))\n" "svuint16_t svaddlt_u16(svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f64_m)))\n" "svfloat64_t svaddp_f64_m(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f32_m)))\n" "svfloat32_t svaddp_f32_m(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f16_m)))\n" "svfloat16_t svaddp_f16_m(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f64_x)))\n" "svfloat64_t svaddp_f64_x(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f32_x)))\n" "svfloat32_t svaddp_f32_x(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f16_x)))\n" "svfloat16_t svaddp_f16_x(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u8_m)))\n" "svuint8_t svaddp_u8_m(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u32_m)))\n" "svuint32_t svaddp_u32_m(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u64_m)))\n" "svuint64_t svaddp_u64_m(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u16_m)))\n" "svuint16_t svaddp_u16_m(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s8_m)))\n" "svint8_t svaddp_s8_m(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s32_m)))\n" "svint32_t svaddp_s32_m(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s64_m)))\n" "svint64_t svaddp_s64_m(svbool_t, svint64_t, svint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s16_m)))\n" "svint16_t svaddp_s16_m(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u8_x)))\n" "svuint8_t svaddp_u8_x(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u32_x)))\n" "svuint32_t svaddp_u32_x(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u64_x)))\n" "svuint64_t svaddp_u64_x(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u16_x)))\n" "svuint16_t svaddp_u16_x(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s8_x)))\n" "svint8_t svaddp_s8_x(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s32_x)))\n" "svint32_t svaddp_s32_x(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s64_x)))\n" "svint64_t svaddp_s64_x(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s16_x)))\n" "svint16_t svaddp_s16_x(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s32)))\n" "svint32_t svaddwb_n_s32(svint32_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s64)))\n" "svint64_t svaddwb_n_s64(svint64_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s16)))\n" "svint16_t svaddwb_n_s16(svint16_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u32)))\n" "svuint32_t svaddwb_n_u32(svuint32_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u64)))\n" "svuint64_t svaddwb_n_u64(svuint64_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u16)))\n" 
"svuint16_t svaddwb_n_u16(svuint16_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s32)))\n" "svint32_t svaddwb_s32(svint32_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s64)))\n" "svint64_t svaddwb_s64(svint64_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s16)))\n" "svint16_t svaddwb_s16(svint16_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u32)))\n" "svuint32_t svaddwb_u32(svuint32_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u64)))\n" "svuint64_t svaddwb_u64(svuint64_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u16)))\n" "svuint16_t svaddwb_u16(svuint16_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s32)))\n" "svint32_t svaddwt_n_s32(svint32_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s64)))\n" "svint64_t svaddwt_n_s64(svint64_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s16)))\n" "svint16_t svaddwt_n_s16(svint16_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u32)))\n" "svuint32_t svaddwt_n_u32(svuint32_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u64)))\n" "svuint64_t svaddwt_n_u64(svuint64_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u16)))\n" "svuint16_t svaddwt_n_u16(svuint16_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s32)))\n" "svint32_t svaddwt_s32(svint32_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s64)))\n" "svint64_t svaddwt_s64(svint64_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s16)))\n" "svint16_t svaddwt_s16(svint16_t, svint8_t);\n" 
"__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u32)))\n" "svuint32_t svaddwt_u32(svuint32_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u64)))\n" "svuint64_t svaddwt_u64(svuint64_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u16)))\n" "svuint16_t svaddwt_u16(svuint16_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u8)))\n" "svuint8_t svbcax_n_u8(svuint8_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u32)))\n" "svuint32_t svbcax_n_u32(svuint32_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u64)))\n" "svuint64_t svbcax_n_u64(svuint64_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u16)))\n" "svuint16_t svbcax_n_u16(svuint16_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s8)))\n" "svint8_t svbcax_n_s8(svint8_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s32)))\n" "svint32_t svbcax_n_s32(svint32_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s64)))\n" "svint64_t svbcax_n_s64(svint64_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s16)))\n" "svint16_t svbcax_n_s16(svint16_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u8)))\n" "svuint8_t svbcax_u8(svuint8_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u32)))\n" "svuint32_t svbcax_u32(svuint32_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u64)))\n" "svuint64_t svbcax_u64(svuint64_t, svuint64_t, svuint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u16)))\n" "svuint16_t svbcax_u16(svuint16_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s8)))\n" "svint8_t svbcax_s8(svint8_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s32)))\n" "svint32_t svbcax_s32(svint32_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s64)))\n" "svint64_t svbcax_s64(svint64_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s16)))\n" "svint16_t svbcax_s16(svint16_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u8)))\n" "svuint8_t svbsl1n_n_u8(svuint8_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u32)))\n" "svuint32_t svbsl1n_n_u32(svuint32_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u64)))\n" "svuint64_t svbsl1n_n_u64(svuint64_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u16)))\n" "svuint16_t svbsl1n_n_u16(svuint16_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s8)))\n" "svint8_t svbsl1n_n_s8(svint8_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s32)))\n" "svint32_t svbsl1n_n_s32(svint32_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s64)))\n" "svint64_t svbsl1n_n_s64(svint64_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s16)))\n" "svint16_t svbsl1n_n_s16(svint16_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u8)))\n" "svuint8_t svbsl1n_u8(svuint8_t, svuint8_t, svuint8_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u32)))\n" "svuint32_t svbsl1n_u32(svuint32_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u64)))\n" "svuint64_t svbsl1n_u64(svuint64_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u16)))\n" "svuint16_t svbsl1n_u16(svuint16_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s8)))\n" "svint8_t svbsl1n_s8(svint8_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s32)))\n" "svint32_t svbsl1n_s32(svint32_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s64)))\n" "svint64_t svbsl1n_s64(svint64_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s16)))\n" "svint16_t svbsl1n_s16(svint16_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u8)))\n" "svuint8_t svbsl2n_n_u8(svuint8_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u32)))\n" "svuint32_t svbsl2n_n_u32(svuint32_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u64)))\n" "svuint64_t svbsl2n_n_u64(svuint64_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u16)))\n" "svuint16_t svbsl2n_n_u16(svuint16_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s8)))\n" "svint8_t svbsl2n_n_s8(svint8_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s32)))\n" "svint32_t svbsl2n_n_s32(svint32_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s64)))\n" "svint64_t svbsl2n_n_s64(svint64_t, svint64_t, int64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s16)))\n" "svint16_t svbsl2n_n_s16(svint16_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u8)))\n" "svuint8_t svbsl2n_u8(svuint8_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u32)))\n" "svuint32_t svbsl2n_u32(svuint32_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u64)))\n" "svuint64_t svbsl2n_u64(svuint64_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u16)))\n" "svuint16_t svbsl2n_u16(svuint16_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s8)))\n" "svint8_t svbsl2n_s8(svint8_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s32)))\n" "svint32_t svbsl2n_s32(svint32_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s64)))\n" "svint64_t svbsl2n_s64(svint64_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s16)))\n" "svint16_t svbsl2n_s16(svint16_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u8)))\n" "svuint8_t svbsl_n_u8(svuint8_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u32)))\n" "svuint32_t svbsl_n_u32(svuint32_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u64)))\n" "svuint64_t svbsl_n_u64(svuint64_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u16)))\n" "svuint16_t svbsl_n_u16(svuint16_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s8)))\n" "svint8_t svbsl_n_s8(svint8_t, svint8_t, int8_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s32)))\n" "svint32_t svbsl_n_s32(svint32_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s64)))\n" "svint64_t svbsl_n_s64(svint64_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s16)))\n" "svint16_t svbsl_n_s16(svint16_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u8)))\n" "svuint8_t svbsl_u8(svuint8_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u32)))\n" "svuint32_t svbsl_u32(svuint32_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u64)))\n" "svuint64_t svbsl_u64(svuint64_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u16)))\n" "svuint16_t svbsl_u16(svuint16_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s8)))\n" "svint8_t svbsl_s8(svint8_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s32)))\n" "svint32_t svbsl_s32(svint32_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s64)))\n" "svint64_t svbsl_s64(svint64_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s16)))\n" "svint16_t svbsl_s16(svint16_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u8)))\n" "svuint8_t svcadd_u8(svuint8_t, svuint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u32)))\n" "svuint32_t svcadd_u32(svuint32_t, svuint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u64)))\n" "svuint64_t svcadd_u64(svuint64_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u16)))\n" "svuint16_t 
svcadd_u16(svuint16_t, svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s8)))\n" "svint8_t svcadd_s8(svint8_t, svint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s32)))\n" "svint32_t svcadd_s32(svint32_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s64)))\n" "svint64_t svcadd_s64(svint64_t, svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s16)))\n" "svint16_t svcadd_s16(svint16_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_s32)))\n" "svint32_t svcdot_s32(svint32_t, svint8_t, svint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_s64)))\n" "svint64_t svcdot_s64(svint64_t, svint16_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_lane_s32)))\n" "svint32_t svcdot_lane_s32(svint32_t, svint8_t, svint8_t, uint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_lane_s64)))\n" "svint64_t svcdot_lane_s64(svint64_t, svint16_t, svint16_t, uint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u8)))\n" "svuint8_t svcmla_u8(svuint8_t, svuint8_t, svuint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u32)))\n" "svuint32_t svcmla_u32(svuint32_t, svuint32_t, svuint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u64)))\n" "svuint64_t svcmla_u64(svuint64_t, svuint64_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u16)))\n" "svuint16_t svcmla_u16(svuint16_t, svuint16_t, svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s8)))\n" "svint8_t svcmla_s8(svint8_t, svint8_t, svint8_t, uint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s32)))\n" "svint32_t svcmla_s32(svint32_t, svint32_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s64)))\n" "svint64_t svcmla_s64(svint64_t, svint64_t, svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s16)))\n" "svint16_t svcmla_s16(svint16_t, svint16_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_u32)))\n" "svuint32_t svcmla_lane_u32(svuint32_t, svuint32_t, svuint32_t, uint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_u16)))\n" "svuint16_t svcmla_lane_u16(svuint16_t, svuint16_t, svuint16_t, uint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_s32)))\n" "svint32_t svcmla_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_s16)))\n" "svint16_t svcmla_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f32_f16_m)))\n" "svfloat32_t svcvtlt_f32_f16_m(svfloat32_t, svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f32_f16_x)))\n" "svfloat32_t svcvtlt_f32_f16_x(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f64_f32_m)))\n" "svfloat64_t svcvtlt_f64_f32_m(svfloat64_t, svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f64_f32_x)))\n" "svfloat64_t svcvtlt_f64_f32_x(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_f16_f32_m)))\n" "svfloat16_t svcvtnt_f16_f32_m(svfloat16_t, svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_f32_f64_m)))\n" "svfloat32_t svcvtnt_f32_f64_m(svfloat32_t, 
svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_m)))\n" "svfloat32_t svcvtx_f32_f64_m(svfloat32_t, svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_x)))\n" "svfloat32_t svcvtx_f32_f64_x(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_z)))\n" "svfloat32_t svcvtx_f32_f64_z(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtxnt_f32_f64_m)))\n" "svfloat32_t svcvtxnt_f32_f64_m(svfloat32_t, svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u8)))\n" "svuint8_t sveor3_n_u8(svuint8_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u32)))\n" "svuint32_t sveor3_n_u32(svuint32_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u64)))\n" "svuint64_t sveor3_n_u64(svuint64_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u16)))\n" "svuint16_t sveor3_n_u16(svuint16_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s8)))\n" "svint8_t sveor3_n_s8(svint8_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s32)))\n" "svint32_t sveor3_n_s32(svint32_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s64)))\n" "svint64_t sveor3_n_s64(svint64_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s16)))\n" "svint16_t sveor3_n_s16(svint16_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u8)))\n" "svuint8_t sveor3_u8(svuint8_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u32)))\n" "svuint32_t sveor3_u32(svuint32_t, svuint32_t, 
svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u64)))\n" "svuint64_t sveor3_u64(svuint64_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u16)))\n" "svuint16_t sveor3_u16(svuint16_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s8)))\n" "svint8_t sveor3_s8(svint8_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s32)))\n" "svint32_t sveor3_s32(svint32_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s64)))\n" "svint64_t sveor3_s64(svint64_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s16)))\n" "svint16_t sveor3_s16(svint16_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u8)))\n" "svuint8_t sveorbt_n_u8(svuint8_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u32)))\n" "svuint32_t sveorbt_n_u32(svuint32_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u64)))\n" "svuint64_t sveorbt_n_u64(svuint64_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u16)))\n" "svuint16_t sveorbt_n_u16(svuint16_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s8)))\n" "svint8_t sveorbt_n_s8(svint8_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s32)))\n" "svint32_t sveorbt_n_s32(svint32_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s64)))\n" "svint64_t sveorbt_n_s64(svint64_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s16)))\n" "svint16_t sveorbt_n_s16(svint16_t, svint16_t, int16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u8)))\n" "svuint8_t sveorbt_u8(svuint8_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u32)))\n" "svuint32_t sveorbt_u32(svuint32_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u64)))\n" "svuint64_t sveorbt_u64(svuint64_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u16)))\n" "svuint16_t sveorbt_u16(svuint16_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s8)))\n" "svint8_t sveorbt_s8(svint8_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s32)))\n" "svint32_t sveorbt_s32(svint32_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s64)))\n" "svint64_t sveorbt_s64(svint64_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s16)))\n" "svint16_t sveorbt_s16(svint16_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u8)))\n" "svuint8_t sveortb_n_u8(svuint8_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u32)))\n" "svuint32_t sveortb_n_u32(svuint32_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u64)))\n" "svuint64_t sveortb_n_u64(svuint64_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u16)))\n" "svuint16_t sveortb_n_u16(svuint16_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s8)))\n" "svint8_t sveortb_n_s8(svint8_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s32)))\n" "svint32_t sveortb_n_s32(svint32_t, svint32_t, int32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s64)))\n" "svint64_t sveortb_n_s64(svint64_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s16)))\n" "svint16_t sveortb_n_s16(svint16_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u8)))\n" "svuint8_t sveortb_u8(svuint8_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u32)))\n" "svuint32_t sveortb_u32(svuint32_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u64)))\n" "svuint64_t sveortb_u64(svuint64_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u16)))\n" "svuint16_t sveortb_u16(svuint16_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s8)))\n" "svint8_t sveortb_s8(svint8_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s32)))\n" "svint32_t sveortb_s32(svint32_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s64)))\n" "svint64_t sveortb_s64(svint64_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s16)))\n" "svint16_t sveortb_s16(svint16_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_m)))\n" "svint8_t svhadd_n_s8_m(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_m)))\n" "svint32_t svhadd_n_s32_m(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_m)))\n" "svint64_t svhadd_n_s64_m(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_m)))\n" "svint16_t svhadd_n_s16_m(svbool_t, svint16_t, int16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_x)))\n" "svint8_t svhadd_n_s8_x(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_x)))\n" "svint32_t svhadd_n_s32_x(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_x)))\n" "svint64_t svhadd_n_s64_x(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_x)))\n" "svint16_t svhadd_n_s16_x(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_z)))\n" "svint8_t svhadd_n_s8_z(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_z)))\n" "svint32_t svhadd_n_s32_z(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_z)))\n" "svint64_t svhadd_n_s64_z(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_z)))\n" "svint16_t svhadd_n_s16_z(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_m)))\n" "svuint8_t svhadd_n_u8_m(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_m)))\n" "svuint32_t svhadd_n_u32_m(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_m)))\n" "svuint64_t svhadd_n_u64_m(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_m)))\n" "svuint16_t svhadd_n_u16_m(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_x)))\n" "svuint8_t svhadd_n_u8_x(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_x)))\n" "svuint32_t svhadd_n_u32_x(svbool_t, svuint32_t, uint32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_x)))\n" "svuint64_t svhadd_n_u64_x(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_x)))\n" "svuint16_t svhadd_n_u16_x(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_z)))\n" "svuint8_t svhadd_n_u8_z(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_z)))\n" "svuint32_t svhadd_n_u32_z(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_z)))\n" "svuint64_t svhadd_n_u64_z(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_z)))\n" "svuint16_t svhadd_n_u16_z(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_m)))\n" "svint8_t svhadd_s8_m(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_m)))\n" "svint32_t svhadd_s32_m(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_m)))\n" "svint64_t svhadd_s64_m(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_m)))\n" "svint16_t svhadd_s16_m(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_x)))\n" "svint8_t svhadd_s8_x(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_x)))\n" "svint32_t svhadd_s32_x(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_x)))\n" "svint64_t svhadd_s64_x(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_x)))\n" "svint16_t svhadd_s16_x(svbool_t, svint16_t, svint16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_z)))\n" "svint8_t svhadd_s8_z(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_z)))\n" "svint32_t svhadd_s32_z(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_z)))\n" "svint64_t svhadd_s64_z(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_z)))\n" "svint16_t svhadd_s16_z(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_m)))\n" "svuint8_t svhadd_u8_m(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_m)))\n" "svuint32_t svhadd_u32_m(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_m)))\n" "svuint64_t svhadd_u64_m(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_m)))\n" "svuint16_t svhadd_u16_m(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_x)))\n" "svuint8_t svhadd_u8_x(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_x)))\n" "svuint32_t svhadd_u32_x(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_x)))\n" "svuint64_t svhadd_u64_x(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_x)))\n" "svuint16_t svhadd_u16_x(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_z)))\n" "svuint8_t svhadd_u8_z(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_z)))\n" "svuint32_t svhadd_u32_z(svbool_t, svuint32_t, svuint32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_z)))\n" "svuint64_t svhadd_u64_z(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_z)))\n" "svuint16_t svhadd_u16_z(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_u32_z)))\n" "svuint32_t svhistcnt_u32_z(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_u64_z)))\n" "svuint64_t svhistcnt_u64_z(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_s32_z)))\n" "svuint32_t svhistcnt_s32_z(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_s64_z)))\n" "svuint64_t svhistcnt_s64_z(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistseg_u8)))\n" "svuint8_t svhistseg_u8(svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistseg_s8)))\n" "svuint8_t svhistseg_s8(svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_m)))\n" "svint8_t svhsub_n_s8_m(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_m)))\n" "svint32_t svhsub_n_s32_m(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_m)))\n" "svint64_t svhsub_n_s64_m(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_m)))\n" "svint16_t svhsub_n_s16_m(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_x)))\n" "svint8_t svhsub_n_s8_x(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_x)))\n" "svint32_t svhsub_n_s32_x(svbool_t, svint32_t, int32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_x)))\n" "svint64_t svhsub_n_s64_x(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_x)))\n" "svint16_t svhsub_n_s16_x(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_z)))\n" "svint8_t svhsub_n_s8_z(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_z)))\n" "svint32_t svhsub_n_s32_z(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_z)))\n" "svint64_t svhsub_n_s64_z(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_z)))\n" "svint16_t svhsub_n_s16_z(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_m)))\n" "svuint8_t svhsub_n_u8_m(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_m)))\n" "svuint32_t svhsub_n_u32_m(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_m)))\n" "svuint64_t svhsub_n_u64_m(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_m)))\n" "svuint16_t svhsub_n_u16_m(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_x)))\n" "svuint8_t svhsub_n_u8_x(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_x)))\n" "svuint32_t svhsub_n_u32_x(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_x)))\n" "svuint64_t svhsub_n_u64_x(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_x)))\n" "svuint16_t svhsub_n_u16_x(svbool_t, svuint16_t, uint16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_z)))\n" "svuint8_t svhsub_n_u8_z(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_z)))\n" "svuint32_t svhsub_n_u32_z(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_z)))\n" "svuint64_t svhsub_n_u64_z(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_z)))\n" "svuint16_t svhsub_n_u16_z(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_m)))\n" "svint8_t svhsub_s8_m(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_m)))\n" "svint32_t svhsub_s32_m(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_m)))\n" "svint64_t svhsub_s64_m(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_m)))\n" "svint16_t svhsub_s16_m(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_x)))\n" "svint8_t svhsub_s8_x(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_x)))\n" "svint32_t svhsub_s32_x(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_x)))\n" "svint64_t svhsub_s64_x(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_x)))\n" "svint16_t svhsub_s16_x(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_z)))\n" "svint8_t svhsub_s8_z(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_z)))\n" "svint32_t svhsub_s32_z(svbool_t, svint32_t, svint32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_z)))\n" "svint64_t svhsub_s64_z(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_z)))\n" "svint16_t svhsub_s16_z(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_m)))\n" "svuint8_t svhsub_u8_m(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_m)))\n" "svuint32_t svhsub_u32_m(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_m)))\n" "svuint64_t svhsub_u64_m(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_m)))\n" "svuint16_t svhsub_u16_m(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_x)))\n" "svuint8_t svhsub_u8_x(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_x)))\n" "svuint32_t svhsub_u32_x(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_x)))\n" "svuint64_t svhsub_u64_x(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_x)))\n" "svuint16_t svhsub_u16_x(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_z)))\n" "svuint8_t svhsub_u8_z(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_z)))\n" "svuint32_t svhsub_u32_z(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_z)))\n" "svuint64_t svhsub_u64_z(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_z)))\n" "svuint16_t svhsub_u16_z(svbool_t, svuint16_t, svuint16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_m)))\n" "svint8_t svhsubr_n_s8_m(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_m)))\n" "svint32_t svhsubr_n_s32_m(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_m)))\n" "svint64_t svhsubr_n_s64_m(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_m)))\n" "svint16_t svhsubr_n_s16_m(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_x)))\n" "svint8_t svhsubr_n_s8_x(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_x)))\n" "svint32_t svhsubr_n_s32_x(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_x)))\n" "svint64_t svhsubr_n_s64_x(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_x)))\n" "svint16_t svhsubr_n_s16_x(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_z)))\n" "svint8_t svhsubr_n_s8_z(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_z)))\n" "svint32_t svhsubr_n_s32_z(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_z)))\n" "svint64_t svhsubr_n_s64_z(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_z)))\n" "svint16_t svhsubr_n_s16_z(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_m)))\n" "svuint8_t svhsubr_n_u8_m(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_m)))\n" "svuint32_t svhsubr_n_u32_m(svbool_t, svuint32_t, uint32_t);\n" 
"__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_m)))\n" "svuint64_t svhsubr_n_u64_m(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_m)))\n" "svuint16_t svhsubr_n_u16_m(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_x)))\n" "svuint8_t svhsubr_n_u8_x(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_x)))\n" "svuint32_t svhsubr_n_u32_x(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_x)))\n" "svuint64_t svhsubr_n_u64_x(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_x)))\n" "svuint16_t svhsubr_n_u16_x(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_z)))\n" "svuint8_t svhsubr_n_u8_z(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_z)))\n" "svuint32_t svhsubr_n_u32_z(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_z)))\n" "svuint64_t svhsubr_n_u64_z(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_z)))\n" "svuint16_t svhsubr_n_u16_z(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_m)))\n" "svint8_t svhsubr_s8_m(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_m)))\n" "svint32_t svhsubr_s32_m(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_m)))\n" "svint64_t svhsubr_s64_m(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_m)))\n" "svint16_t svhsubr_s16_m(svbool_t, 
svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_x)))\n" "svint8_t svhsubr_s8_x(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_x)))\n" "svint32_t svhsubr_s32_x(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_x)))\n" "svint64_t svhsubr_s64_x(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_x)))\n" "svint16_t svhsubr_s16_x(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_z)))\n" "svint8_t svhsubr_s8_z(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_z)))\n" "svint32_t svhsubr_s32_z(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_z)))\n" "svint64_t svhsubr_s64_z(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_z)))\n" "svint16_t svhsubr_s16_z(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_m)))\n" "svuint8_t svhsubr_u8_m(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_m)))\n" "svuint32_t svhsubr_u32_m(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_m)))\n" "svuint64_t svhsubr_u64_m(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_m)))\n" "svuint16_t svhsubr_u16_m(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_x)))\n" "svuint8_t svhsubr_u8_x(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_x)))\n" "svuint32_t svhsubr_u32_x(svbool_t, svuint32_t, 
svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_x)))\n" "svuint64_t svhsubr_u64_x(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_x)))\n" "svuint16_t svhsubr_u16_x(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_z)))\n" "svuint8_t svhsubr_u8_z(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_z)))\n" "svuint32_t svhsubr_u32_z(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_z)))\n" "svuint64_t svhsubr_u64_z(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_z)))\n" "svuint16_t svhsubr_u16_z(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_u32)))\n" "svuint32_t svldnt1_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_u64)))\n" "svuint64_t svldnt1_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_f64)))\n" "svfloat64_t svldnt1_gather_u64base_index_f64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_f32)))\n" "svfloat32_t svldnt1_gather_u32base_index_f32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_s32)))\n" "svint32_t svldnt1_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_s64)))\n" "svint64_t svldnt1_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_u32)))\n" "svuint32_t svldnt1_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_u64)))\n" "svuint64_t svldnt1_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_f64)))\n" "svfloat64_t svldnt1_gather_u64base_offset_f64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_f32)))\n" "svfloat32_t svldnt1_gather_u32base_offset_f32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_s32)))\n" "svint32_t svldnt1_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_s64)))\n" "svint64_t svldnt1_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_u32)))\n" "svuint32_t svldnt1_gather_u32base_u32(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_u64)))\n" "svuint64_t svldnt1_gather_u64base_u64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_f64)))\n" "svfloat64_t svldnt1_gather_u64base_f64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_f32)))\n" "svfloat32_t svldnt1_gather_u32base_f32(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_s32)))\n" "svint32_t svldnt1_gather_u32base_s32(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_s64)))\n" "svint64_t 
svldnt1_gather_u64base_s64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_u64)))\n" "svuint64_t svldnt1_gather_s64index_u64(svbool_t, uint64_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_f64)))\n" "svfloat64_t svldnt1_gather_s64index_f64(svbool_t, float64_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_s64)))\n" "svint64_t svldnt1_gather_s64index_s64(svbool_t, int64_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_u64)))\n" "svuint64_t svldnt1_gather_u64index_u64(svbool_t, uint64_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_f64)))\n" "svfloat64_t svldnt1_gather_u64index_f64(svbool_t, float64_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_s64)))\n" "svint64_t svldnt1_gather_u64index_s64(svbool_t, int64_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_u32)))\n" "svuint32_t svldnt1_gather_u32offset_u32(svbool_t, uint32_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_f32)))\n" "svfloat32_t svldnt1_gather_u32offset_f32(svbool_t, float32_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_s32)))\n" "svint32_t svldnt1_gather_u32offset_s32(svbool_t, int32_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_u64)))\n" "svuint64_t svldnt1_gather_s64offset_u64(svbool_t, uint64_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_f64)))\n" "svfloat64_t svldnt1_gather_s64offset_f64(svbool_t, 
float64_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_s64)))\n" "svint64_t svldnt1_gather_s64offset_s64(svbool_t, int64_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_u64)))\n" "svuint64_t svldnt1_gather_u64offset_u64(svbool_t, uint64_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_f64)))\n" "svfloat64_t svldnt1_gather_u64offset_f64(svbool_t, float64_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_s64)))\n" "svint64_t svldnt1_gather_u64offset_s64(svbool_t, int64_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_offset_u32)))\n" "svuint32_t svldnt1sb_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_offset_u64)))\n" "svuint64_t svldnt1sb_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_offset_s32)))\n" "svint32_t svldnt1sb_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_offset_s64)))\n" "svint64_t svldnt1sb_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_u32)))\n" "svuint32_t svldnt1sb_gather_u32base_u32(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_u64)))\n" "svuint64_t svldnt1sb_gather_u64base_u64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_s32)))\n" "svint32_t svldnt1sb_gather_u32base_s32(svbool_t, svuint32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_s64)))\n" "svint64_t svldnt1sb_gather_u64base_s64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32offset_u32)))\n" "svuint32_t svldnt1sb_gather_u32offset_u32(svbool_t, int8_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32offset_s32)))\n" "svint32_t svldnt1sb_gather_u32offset_s32(svbool_t, int8_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_s64offset_u64)))\n" "svuint64_t svldnt1sb_gather_s64offset_u64(svbool_t, int8_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_s64offset_s64)))\n" "svint64_t svldnt1sb_gather_s64offset_s64(svbool_t, int8_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64offset_u64)))\n" "svuint64_t svldnt1sb_gather_u64offset_u64(svbool_t, int8_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64offset_s64)))\n" "svint64_t svldnt1sb_gather_u64offset_s64(svbool_t, int8_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_index_u32)))\n" "svuint32_t svldnt1sh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_index_u64)))\n" "svuint64_t svldnt1sh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_index_s32)))\n" "svint32_t svldnt1sh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_index_s64)))\n" "svint64_t svldnt1sh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_offset_u32)))\n" "svuint32_t svldnt1sh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_offset_u64)))\n" "svuint64_t svldnt1sh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_offset_s32)))\n" "svint32_t svldnt1sh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_offset_s64)))\n" "svint64_t svldnt1sh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_u32)))\n" "svuint32_t svldnt1sh_gather_u32base_u32(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_u64)))\n" "svuint64_t svldnt1sh_gather_u64base_u64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_s32)))\n" "svint32_t svldnt1sh_gather_u32base_s32(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_s64)))\n" "svint64_t svldnt1sh_gather_u64base_s64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64index_u64)))\n" "svuint64_t svldnt1sh_gather_s64index_u64(svbool_t, int16_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64index_s64)))\n" "svint64_t svldnt1sh_gather_s64index_s64(svbool_t, int16_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64index_u64)))\n" "svuint64_t svldnt1sh_gather_u64index_u64(svbool_t, int16_t const *, svuint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64index_s64)))\n" "svint64_t svldnt1sh_gather_u64index_s64(svbool_t, int16_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32offset_u32)))\n" "svuint32_t svldnt1sh_gather_u32offset_u32(svbool_t, int16_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32offset_s32)))\n" "svint32_t svldnt1sh_gather_u32offset_s32(svbool_t, int16_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64offset_u64)))\n" "svuint64_t svldnt1sh_gather_s64offset_u64(svbool_t, int16_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64offset_s64)))\n" "svint64_t svldnt1sh_gather_s64offset_s64(svbool_t, int16_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64offset_u64)))\n" "svuint64_t svldnt1sh_gather_u64offset_u64(svbool_t, int16_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64offset_s64)))\n" "svint64_t svldnt1sh_gather_u64offset_s64(svbool_t, int16_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_index_u64)))\n" "svuint64_t svldnt1sw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_index_s64)))\n" "svint64_t svldnt1sw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_offset_u64)))\n" "svuint64_t svldnt1sw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_offset_s64)))\n" "svint64_t svldnt1sw_gather_u64base_offset_s64(svbool_t, svuint64_t, 
int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_u64)))\n" "svuint64_t svldnt1sw_gather_u64base_u64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_s64)))\n" "svint64_t svldnt1sw_gather_u64base_s64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64index_u64)))\n" "svuint64_t svldnt1sw_gather_s64index_u64(svbool_t, int32_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64index_s64)))\n" "svint64_t svldnt1sw_gather_s64index_s64(svbool_t, int32_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64index_u64)))\n" "svuint64_t svldnt1sw_gather_u64index_u64(svbool_t, int32_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64index_s64)))\n" "svint64_t svldnt1sw_gather_u64index_s64(svbool_t, int32_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64offset_u64)))\n" "svuint64_t svldnt1sw_gather_s64offset_u64(svbool_t, int32_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64offset_s64)))\n" "svint64_t svldnt1sw_gather_s64offset_s64(svbool_t, int32_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64offset_u64)))\n" "svuint64_t svldnt1sw_gather_u64offset_u64(svbool_t, int32_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64offset_s64)))\n" "svint64_t svldnt1sw_gather_u64offset_s64(svbool_t, int32_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_offset_u32)))\n" "svuint32_t svldnt1ub_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_offset_u64)))\n" "svuint64_t svldnt1ub_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_offset_s32)))\n" "svint32_t svldnt1ub_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_offset_s64)))\n" "svint64_t svldnt1ub_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_u32)))\n" "svuint32_t svldnt1ub_gather_u32base_u32(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_u64)))\n" "svuint64_t svldnt1ub_gather_u64base_u64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_s32)))\n" "svint32_t svldnt1ub_gather_u32base_s32(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_s64)))\n" "svint64_t svldnt1ub_gather_u64base_s64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32offset_u32)))\n" "svuint32_t svldnt1ub_gather_u32offset_u32(svbool_t, uint8_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32offset_s32)))\n" "svint32_t svldnt1ub_gather_u32offset_s32(svbool_t, uint8_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_s64offset_u64)))\n" "svuint64_t svldnt1ub_gather_s64offset_u64(svbool_t, uint8_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_s64offset_s64)))\n" "svint64_t svldnt1ub_gather_s64offset_s64(svbool_t, uint8_t const *, svint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64offset_u64)))\n" "svuint64_t svldnt1ub_gather_u64offset_u64(svbool_t, uint8_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64offset_s64)))\n" "svint64_t svldnt1ub_gather_u64offset_s64(svbool_t, uint8_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_index_u32)))\n" "svuint32_t svldnt1uh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_index_u64)))\n" "svuint64_t svldnt1uh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_index_s32)))\n" "svint32_t svldnt1uh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_index_s64)))\n" "svint64_t svldnt1uh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_offset_u32)))\n" "svuint32_t svldnt1uh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_offset_u64)))\n" "svuint64_t svldnt1uh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_offset_s32)))\n" "svint32_t svldnt1uh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_offset_s64)))\n" "svint64_t svldnt1uh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_u32)))\n" "svuint32_t svldnt1uh_gather_u32base_u32(svbool_t, svuint32_t);\n" 
"__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_u64)))\n" "svuint64_t svldnt1uh_gather_u64base_u64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_s32)))\n" "svint32_t svldnt1uh_gather_u32base_s32(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_s64)))\n" "svint64_t svldnt1uh_gather_u64base_s64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64index_u64)))\n" "svuint64_t svldnt1uh_gather_s64index_u64(svbool_t, uint16_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64index_s64)))\n" "svint64_t svldnt1uh_gather_s64index_s64(svbool_t, uint16_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64index_u64)))\n" "svuint64_t svldnt1uh_gather_u64index_u64(svbool_t, uint16_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64index_s64)))\n" "svint64_t svldnt1uh_gather_u64index_s64(svbool_t, uint16_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32offset_u32)))\n" "svuint32_t svldnt1uh_gather_u32offset_u32(svbool_t, uint16_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32offset_s32)))\n" "svint32_t svldnt1uh_gather_u32offset_s32(svbool_t, uint16_t const *, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64offset_u64)))\n" "svuint64_t svldnt1uh_gather_s64offset_u64(svbool_t, uint16_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64offset_s64)))\n" "svint64_t svldnt1uh_gather_s64offset_s64(svbool_t, uint16_t const *, svint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64offset_u64)))\n" "svuint64_t svldnt1uh_gather_u64offset_u64(svbool_t, uint16_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64offset_s64)))\n" "svint64_t svldnt1uh_gather_u64offset_s64(svbool_t, uint16_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_index_u64)))\n" "svuint64_t svldnt1uw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_index_s64)))\n" "svint64_t svldnt1uw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_offset_u64)))\n" "svuint64_t svldnt1uw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_offset_s64)))\n" "svint64_t svldnt1uw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_u64)))\n" "svuint64_t svldnt1uw_gather_u64base_u64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_s64)))\n" "svint64_t svldnt1uw_gather_u64base_s64(svbool_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64index_u64)))\n" "svuint64_t svldnt1uw_gather_s64index_u64(svbool_t, uint32_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64index_s64)))\n" "svint64_t svldnt1uw_gather_s64index_s64(svbool_t, uint32_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64index_u64)))\n" "svuint64_t svldnt1uw_gather_u64index_u64(svbool_t, uint32_t const *, svuint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64index_s64)))\n" "svint64_t svldnt1uw_gather_u64index_s64(svbool_t, uint32_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64offset_u64)))\n" "svuint64_t svldnt1uw_gather_s64offset_u64(svbool_t, uint32_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64offset_s64)))\n" "svint64_t svldnt1uw_gather_s64offset_s64(svbool_t, uint32_t const *, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64offset_u64)))\n" "svuint64_t svldnt1uw_gather_u64offset_u64(svbool_t, uint32_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64offset_s64)))\n" "svint64_t svldnt1uw_gather_u64offset_s64(svbool_t, uint32_t const *, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_m)))\n" "svint64_t svlogb_f64_m(svint64_t, svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_m)))\n" "svint32_t svlogb_f32_m(svint32_t, svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_m)))\n" "svint16_t svlogb_f16_m(svint16_t, svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_x)))\n" "svint64_t svlogb_f64_x(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_x)))\n" "svint32_t svlogb_f32_x(svbool_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_x)))\n" "svint16_t svlogb_f16_x(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_z)))\n" "svint64_t svlogb_f64_z(svbool_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_z)))\n" "svint32_t svlogb_f32_z(svbool_t, svfloat32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_z)))\n" "svint16_t svlogb_f16_z(svbool_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_u8)))\n" "svbool_t svmatch_u8(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_u16)))\n" "svbool_t svmatch_u16(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_s8)))\n" "svbool_t svmatch_s8(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_s16)))\n" "svbool_t svmatch_s16(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f64_m)))\n" "svfloat64_t svmaxnmp_f64_m(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f32_m)))\n" "svfloat32_t svmaxnmp_f32_m(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f16_m)))\n" "svfloat16_t svmaxnmp_f16_m(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f64_x)))\n" "svfloat64_t svmaxnmp_f64_x(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f32_x)))\n" "svfloat32_t svmaxnmp_f32_x(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f16_x)))\n" "svfloat16_t svmaxnmp_f16_x(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f64_m)))\n" "svfloat64_t svmaxp_f64_m(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f32_m)))\n" "svfloat32_t svmaxp_f32_m(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f16_m)))\n" "svfloat16_t svmaxp_f16_m(svbool_t, svfloat16_t, 
svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f64_x)))\n" "svfloat64_t svmaxp_f64_x(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f32_x)))\n" "svfloat32_t svmaxp_f32_x(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f16_x)))\n" "svfloat16_t svmaxp_f16_x(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s8_m)))\n" "svint8_t svmaxp_s8_m(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s32_m)))\n" "svint32_t svmaxp_s32_m(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s64_m)))\n" "svint64_t svmaxp_s64_m(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s16_m)))\n" "svint16_t svmaxp_s16_m(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s8_x)))\n" "svint8_t svmaxp_s8_x(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s32_x)))\n" "svint32_t svmaxp_s32_x(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s64_x)))\n" "svint64_t svmaxp_s64_x(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s16_x)))\n" "svint16_t svmaxp_s16_x(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u8_m)))\n" "svuint8_t svmaxp_u8_m(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u32_m)))\n" "svuint32_t svmaxp_u32_m(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u64_m)))\n" "svuint64_t svmaxp_u64_m(svbool_t, svuint64_t, svuint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u16_m)))\n" "svuint16_t svmaxp_u16_m(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u8_x)))\n" "svuint8_t svmaxp_u8_x(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u32_x)))\n" "svuint32_t svmaxp_u32_x(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u64_x)))\n" "svuint64_t svmaxp_u64_x(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u16_x)))\n" "svuint16_t svmaxp_u16_x(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f64_m)))\n" "svfloat64_t svminnmp_f64_m(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f32_m)))\n" "svfloat32_t svminnmp_f32_m(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f16_m)))\n" "svfloat16_t svminnmp_f16_m(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f64_x)))\n" "svfloat64_t svminnmp_f64_x(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f32_x)))\n" "svfloat32_t svminnmp_f32_x(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f16_x)))\n" "svfloat16_t svminnmp_f16_x(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f64_m)))\n" "svfloat64_t svminp_f64_m(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f32_m)))\n" "svfloat32_t svminp_f32_m(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f16_m)))\n" "svfloat16_t 
svminp_f16_m(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f64_x)))\n" "svfloat64_t svminp_f64_x(svbool_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f32_x)))\n" "svfloat32_t svminp_f32_x(svbool_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f16_x)))\n" "svfloat16_t svminp_f16_x(svbool_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s8_m)))\n" "svint8_t svminp_s8_m(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s32_m)))\n" "svint32_t svminp_s32_m(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s64_m)))\n" "svint64_t svminp_s64_m(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s16_m)))\n" "svint16_t svminp_s16_m(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s8_x)))\n" "svint8_t svminp_s8_x(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s32_x)))\n" "svint32_t svminp_s32_x(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s64_x)))\n" "svint64_t svminp_s64_x(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s16_x)))\n" "svint16_t svminp_s16_x(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u8_m)))\n" "svuint8_t svminp_u8_m(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u32_m)))\n" "svuint32_t svminp_u32_m(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u64_m)))\n" "svuint64_t svminp_u64_m(svbool_t, 
svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u16_m)))\n" "svuint16_t svminp_u16_m(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u8_x)))\n" "svuint8_t svminp_u8_x(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u32_x)))\n" "svuint32_t svminp_u32_x(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u64_x)))\n" "svuint64_t svminp_u64_x(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u16_x)))\n" "svuint16_t svminp_u16_x(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u32)))\n" "svuint32_t svmla_lane_u32(svuint32_t, svuint32_t, svuint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u64)))\n" "svuint64_t svmla_lane_u64(svuint64_t, svuint64_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u16)))\n" "svuint16_t svmla_lane_u16(svuint16_t, svuint16_t, svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s32)))\n" "svint32_t svmla_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s64)))\n" "svint64_t svmla_lane_s64(svint64_t, svint64_t, svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s16)))\n" "svint16_t svmla_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_f32)))\n" "svfloat32_t svmlalb_n_f32(svfloat32_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s32)))\n" "svint32_t svmlalb_n_s32(svint32_t, svint16_t, int16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s64)))\n" "svint64_t svmlalb_n_s64(svint64_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s16)))\n" "svint16_t svmlalb_n_s16(svint16_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u32)))\n" "svuint32_t svmlalb_n_u32(svuint32_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u64)))\n" "svuint64_t svmlalb_n_u64(svuint64_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u16)))\n" "svuint16_t svmlalb_n_u16(svuint16_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_f32)))\n" "svfloat32_t svmlalb_f32(svfloat32_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s32)))\n" "svint32_t svmlalb_s32(svint32_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s64)))\n" "svint64_t svmlalb_s64(svint64_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s16)))\n" "svint16_t svmlalb_s16(svint16_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u32)))\n" "svuint32_t svmlalb_u32(svuint32_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u64)))\n" "svuint64_t svmlalb_u64(svuint64_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u16)))\n" "svuint16_t svmlalb_u16(svuint16_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_f32)))\n" "svfloat32_t svmlalb_lane_f32(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_s32)))\n" "svint32_t svmlalb_lane_s32(svint32_t, svint16_t, 
svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_s64)))\n" "svint64_t svmlalb_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_u32)))\n" "svuint32_t svmlalb_lane_u32(svuint32_t, svuint16_t, svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_u64)))\n" "svuint64_t svmlalb_lane_u64(svuint64_t, svuint32_t, svuint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_f32)))\n" "svfloat32_t svmlalt_n_f32(svfloat32_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s32)))\n" "svint32_t svmlalt_n_s32(svint32_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s64)))\n" "svint64_t svmlalt_n_s64(svint64_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s16)))\n" "svint16_t svmlalt_n_s16(svint16_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u32)))\n" "svuint32_t svmlalt_n_u32(svuint32_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u64)))\n" "svuint64_t svmlalt_n_u64(svuint64_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u16)))\n" "svuint16_t svmlalt_n_u16(svuint16_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_f32)))\n" "svfloat32_t svmlalt_f32(svfloat32_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s32)))\n" "svint32_t svmlalt_s32(svint32_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s64)))\n" "svint64_t svmlalt_s64(svint64_t, svint32_t, svint32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s16)))\n" "svint16_t svmlalt_s16(svint16_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u32)))\n" "svuint32_t svmlalt_u32(svuint32_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u64)))\n" "svuint64_t svmlalt_u64(svuint64_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u16)))\n" "svuint16_t svmlalt_u16(svuint16_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_f32)))\n" "svfloat32_t svmlalt_lane_f32(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_s32)))\n" "svint32_t svmlalt_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_s64)))\n" "svint64_t svmlalt_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_u32)))\n" "svuint32_t svmlalt_lane_u32(svuint32_t, svuint16_t, svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_u64)))\n" "svuint64_t svmlalt_lane_u64(svuint64_t, svuint32_t, svuint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u32)))\n" "svuint32_t svmls_lane_u32(svuint32_t, svuint32_t, svuint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u64)))\n" "svuint64_t svmls_lane_u64(svuint64_t, svuint64_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u16)))\n" "svuint16_t svmls_lane_u16(svuint16_t, svuint16_t, svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s32)))\n" "svint32_t svmls_lane_s32(svint32_t, svint32_t, svint32_t, 
uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s64)))\n" "svint64_t svmls_lane_s64(svint64_t, svint64_t, svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s16)))\n" "svint16_t svmls_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_f32)))\n" "svfloat32_t svmlslb_n_f32(svfloat32_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s32)))\n" "svint32_t svmlslb_n_s32(svint32_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s64)))\n" "svint64_t svmlslb_n_s64(svint64_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s16)))\n" "svint16_t svmlslb_n_s16(svint16_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u32)))\n" "svuint32_t svmlslb_n_u32(svuint32_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u64)))\n" "svuint64_t svmlslb_n_u64(svuint64_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u16)))\n" "svuint16_t svmlslb_n_u16(svuint16_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_f32)))\n" "svfloat32_t svmlslb_f32(svfloat32_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s32)))\n" "svint32_t svmlslb_s32(svint32_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s64)))\n" "svint64_t svmlslb_s64(svint64_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s16)))\n" "svint16_t svmlslb_s16(svint16_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u32)))\n" "svuint32_t 
svmlslb_u32(svuint32_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u64)))\n" "svuint64_t svmlslb_u64(svuint64_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u16)))\n" "svuint16_t svmlslb_u16(svuint16_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_f32)))\n" "svfloat32_t svmlslb_lane_f32(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_s32)))\n" "svint32_t svmlslb_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_s64)))\n" "svint64_t svmlslb_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_u32)))\n" "svuint32_t svmlslb_lane_u32(svuint32_t, svuint16_t, svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_u64)))\n" "svuint64_t svmlslb_lane_u64(svuint64_t, svuint32_t, svuint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_f32)))\n" "svfloat32_t svmlslt_n_f32(svfloat32_t, svfloat16_t, float16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s32)))\n" "svint32_t svmlslt_n_s32(svint32_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s64)))\n" "svint64_t svmlslt_n_s64(svint64_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s16)))\n" "svint16_t svmlslt_n_s16(svint16_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u32)))\n" "svuint32_t svmlslt_n_u32(svuint32_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u64)))\n" "svuint64_t svmlslt_n_u64(svuint64_t, svuint32_t, 
uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u16)))\n" "svuint16_t svmlslt_n_u16(svuint16_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_f32)))\n" "svfloat32_t svmlslt_f32(svfloat32_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s32)))\n" "svint32_t svmlslt_s32(svint32_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s64)))\n" "svint64_t svmlslt_s64(svint64_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s16)))\n" "svint16_t svmlslt_s16(svint16_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u32)))\n" "svuint32_t svmlslt_u32(svuint32_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u64)))\n" "svuint64_t svmlslt_u64(svuint64_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u16)))\n" "svuint16_t svmlslt_u16(svuint16_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_f32)))\n" "svfloat32_t svmlslt_lane_f32(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_s32)))\n" "svint32_t svmlslt_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_s64)))\n" "svint64_t svmlslt_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_u32)))\n" "svuint32_t svmlslt_lane_u32(svuint32_t, svuint16_t, svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_u64)))\n" "svuint64_t svmlslt_lane_u64(svuint64_t, svuint32_t, svuint32_t, uint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s32)))\n" "svint32_t svmovlb_s32(svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s64)))\n" "svint64_t svmovlb_s64(svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s16)))\n" "svint16_t svmovlb_s16(svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u32)))\n" "svuint32_t svmovlb_u32(svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u64)))\n" "svuint64_t svmovlb_u64(svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u16)))\n" "svuint16_t svmovlb_u16(svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s32)))\n" "svint32_t svmovlt_s32(svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s64)))\n" "svint64_t svmovlt_s64(svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s16)))\n" "svint16_t svmovlt_s16(svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u32)))\n" "svuint32_t svmovlt_u32(svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u64)))\n" "svuint64_t svmovlt_u64(svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u16)))\n" "svuint16_t svmovlt_u16(svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u32)))\n" "svuint32_t svmul_lane_u32(svuint32_t, svuint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u64)))\n" "svuint64_t svmul_lane_u64(svuint64_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u16)))\n" "svuint16_t svmul_lane_u16(svuint16_t, svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s32)))\n" "svint32_t svmul_lane_s32(svint32_t, svint32_t, uint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s64)))\n" "svint64_t svmul_lane_s64(svint64_t, svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s16)))\n" "svint16_t svmul_lane_s16(svint16_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s32)))\n" "svint32_t svmullb_n_s32(svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s64)))\n" "svint64_t svmullb_n_s64(svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s16)))\n" "svint16_t svmullb_n_s16(svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u32)))\n" "svuint32_t svmullb_n_u32(svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u64)))\n" "svuint64_t svmullb_n_u64(svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u16)))\n" "svuint16_t svmullb_n_u16(svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s32)))\n" "svint32_t svmullb_s32(svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s64)))\n" "svint64_t svmullb_s64(svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s16)))\n" "svint16_t svmullb_s16(svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u32)))\n" "svuint32_t svmullb_u32(svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u64)))\n" "svuint64_t svmullb_u64(svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u16)))\n" "svuint16_t svmullb_u16(svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_s32)))\n" "svint32_t svmullb_lane_s32(svint16_t, svint16_t, uint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_s64)))\n" "svint64_t svmullb_lane_s64(svint32_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_u32)))\n" "svuint32_t svmullb_lane_u32(svuint16_t, svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_u64)))\n" "svuint64_t svmullb_lane_u64(svuint32_t, svuint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s32)))\n" "svint32_t svmullt_n_s32(svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s64)))\n" "svint64_t svmullt_n_s64(svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s16)))\n" "svint16_t svmullt_n_s16(svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u32)))\n" "svuint32_t svmullt_n_u32(svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u64)))\n" "svuint64_t svmullt_n_u64(svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u16)))\n" "svuint16_t svmullt_n_u16(svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s32)))\n" "svint32_t svmullt_s32(svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s64)))\n" "svint64_t svmullt_s64(svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s16)))\n" "svint16_t svmullt_s16(svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u32)))\n" "svuint32_t svmullt_u32(svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u64)))\n" "svuint64_t svmullt_u64(svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u16)))\n" "svuint16_t svmullt_u16(svuint8_t, 
svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_s32)))\n" "svint32_t svmullt_lane_s32(svint16_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_s64)))\n" "svint64_t svmullt_lane_s64(svint32_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_u32)))\n" "svuint32_t svmullt_lane_u32(svuint16_t, svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_u64)))\n" "svuint64_t svmullt_lane_u64(svuint32_t, svuint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u8)))\n" "svuint8_t svnbsl_n_u8(svuint8_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u32)))\n" "svuint32_t svnbsl_n_u32(svuint32_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u64)))\n" "svuint64_t svnbsl_n_u64(svuint64_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u16)))\n" "svuint16_t svnbsl_n_u16(svuint16_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s8)))\n" "svint8_t svnbsl_n_s8(svint8_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s32)))\n" "svint32_t svnbsl_n_s32(svint32_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s64)))\n" "svint64_t svnbsl_n_s64(svint64_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s16)))\n" "svint16_t svnbsl_n_s16(svint16_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u8)))\n" "svuint8_t svnbsl_u8(svuint8_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u32)))\n" "svuint32_t svnbsl_u32(svuint32_t, svuint32_t, 
svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u64)))\n" "svuint64_t svnbsl_u64(svuint64_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u16)))\n" "svuint16_t svnbsl_u16(svuint16_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s8)))\n" "svint8_t svnbsl_s8(svint8_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s32)))\n" "svint32_t svnbsl_s32(svint32_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s64)))\n" "svint64_t svnbsl_s64(svint64_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s16)))\n" "svint16_t svnbsl_s16(svint16_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_u8)))\n" "svbool_t svnmatch_u8(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_u16)))\n" "svbool_t svnmatch_u16(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_s8)))\n" "svbool_t svnmatch_s8(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_s16)))\n" "svbool_t svnmatch_s16(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmul_n_u8)))\n" "svuint8_t svpmul_n_u8(svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmul_u8)))\n" "svuint8_t svpmul_u8(svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_n_u64)))\n" "svuint64_t svpmullb_n_u64(svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_n_u16)))\n" "svuint16_t svpmullb_n_u16(svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_u64)))\n" 
"svuint64_t svpmullb_u64(svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_u16)))\n" "svuint16_t svpmullb_u16(svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u8)))\n" "svuint8_t svpmullb_pair_n_u8(svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u32)))\n" "svuint32_t svpmullb_pair_n_u32(svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u8)))\n" "svuint8_t svpmullb_pair_u8(svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u32)))\n" "svuint32_t svpmullb_pair_u32(svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_n_u64)))\n" "svuint64_t svpmullt_n_u64(svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_n_u16)))\n" "svuint16_t svpmullt_n_u16(svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_u64)))\n" "svuint64_t svpmullt_u64(svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_u16)))\n" "svuint16_t svpmullt_u16(svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u8)))\n" "svuint8_t svpmullt_pair_n_u8(svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u32)))\n" "svuint32_t svpmullt_pair_n_u32(svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u8)))\n" "svuint8_t svpmullt_pair_u8(svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u32)))\n" "svuint32_t svpmullt_pair_u32(svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_m)))\n" "svint8_t svqabs_s8_m(svint8_t, svbool_t, svint8_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_m)))\n" "svint32_t svqabs_s32_m(svint32_t, svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_m)))\n" "svint64_t svqabs_s64_m(svint64_t, svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_m)))\n" "svint16_t svqabs_s16_m(svint16_t, svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_x)))\n" "svint8_t svqabs_s8_x(svbool_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_x)))\n" "svint32_t svqabs_s32_x(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_x)))\n" "svint64_t svqabs_s64_x(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_x)))\n" "svint16_t svqabs_s16_x(svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_z)))\n" "svint8_t svqabs_s8_z(svbool_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_z)))\n" "svint32_t svqabs_s32_z(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_z)))\n" "svint64_t svqabs_s64_z(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_z)))\n" "svint16_t svqabs_s16_z(svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_m)))\n" "svint8_t svqadd_n_s8_m(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_m)))\n" "svint32_t svqadd_n_s32_m(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_m)))\n" "svint64_t svqadd_n_s64_m(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_m)))\n" "svint16_t svqadd_n_s16_m(svbool_t, svint16_t, 
int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_x)))\n" "svint8_t svqadd_n_s8_x(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_x)))\n" "svint32_t svqadd_n_s32_x(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_x)))\n" "svint64_t svqadd_n_s64_x(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_x)))\n" "svint16_t svqadd_n_s16_x(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_z)))\n" "svint8_t svqadd_n_s8_z(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_z)))\n" "svint32_t svqadd_n_s32_z(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_z)))\n" "svint64_t svqadd_n_s64_z(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_z)))\n" "svint16_t svqadd_n_s16_z(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_m)))\n" "svuint8_t svqadd_n_u8_m(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_m)))\n" "svuint32_t svqadd_n_u32_m(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_m)))\n" "svuint64_t svqadd_n_u64_m(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_m)))\n" "svuint16_t svqadd_n_u16_m(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_x)))\n" "svuint8_t svqadd_n_u8_x(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_x)))\n" "svuint32_t svqadd_n_u32_x(svbool_t, svuint32_t, uint32_t);\n" 
"__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_x)))\n" "svuint64_t svqadd_n_u64_x(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_x)))\n" "svuint16_t svqadd_n_u16_x(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_z)))\n" "svuint8_t svqadd_n_u8_z(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_z)))\n" "svuint32_t svqadd_n_u32_z(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_z)))\n" "svuint64_t svqadd_n_u64_z(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_z)))\n" "svuint16_t svqadd_n_u16_z(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_m)))\n" "svint8_t svqadd_s8_m(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_m)))\n" "svint32_t svqadd_s32_m(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_m)))\n" "svint64_t svqadd_s64_m(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_m)))\n" "svint16_t svqadd_s16_m(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_x)))\n" "svint8_t svqadd_s8_x(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_x)))\n" "svint32_t svqadd_s32_x(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_x)))\n" "svint64_t svqadd_s64_x(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_x)))\n" "svint16_t svqadd_s16_x(svbool_t, svint16_t, svint16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_z)))\n" "svint8_t svqadd_s8_z(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_z)))\n" "svint32_t svqadd_s32_z(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_z)))\n" "svint64_t svqadd_s64_z(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_z)))\n" "svint16_t svqadd_s16_z(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_m)))\n" "svuint8_t svqadd_u8_m(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_m)))\n" "svuint32_t svqadd_u32_m(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_m)))\n" "svuint64_t svqadd_u64_m(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_m)))\n" "svuint16_t svqadd_u16_m(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_x)))\n" "svuint8_t svqadd_u8_x(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_x)))\n" "svuint32_t svqadd_u32_x(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_x)))\n" "svuint64_t svqadd_u64_x(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_x)))\n" "svuint16_t svqadd_u16_x(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_z)))\n" "svuint8_t svqadd_u8_z(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_z)))\n" "svuint32_t svqadd_u32_z(svbool_t, svuint32_t, svuint32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_z)))\n" "svuint64_t svqadd_u64_z(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_z)))\n" "svuint16_t svqadd_u16_z(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s8)))\n" "svint8_t svqcadd_s8(svint8_t, svint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s32)))\n" "svint32_t svqcadd_s32(svint32_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s64)))\n" "svint64_t svqcadd_s64(svint64_t, svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s16)))\n" "svint16_t svqcadd_s16(svint16_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s32)))\n" "svint32_t svqdmlalb_n_s32(svint32_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s64)))\n" "svint64_t svqdmlalb_n_s64(svint64_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s16)))\n" "svint16_t svqdmlalb_n_s16(svint16_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s32)))\n" "svint32_t svqdmlalb_s32(svint32_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s64)))\n" "svint64_t svqdmlalb_s64(svint64_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s16)))\n" "svint16_t svqdmlalb_s16(svint16_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_lane_s32)))\n" "svint32_t svqdmlalb_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_lane_s64)))\n" "svint64_t svqdmlalb_lane_s64(svint64_t, svint32_t, svint32_t, 
uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s32)))\n" "svint32_t svqdmlalbt_n_s32(svint32_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s64)))\n" "svint64_t svqdmlalbt_n_s64(svint64_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s16)))\n" "svint16_t svqdmlalbt_n_s16(svint16_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s32)))\n" "svint32_t svqdmlalbt_s32(svint32_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s64)))\n" "svint64_t svqdmlalbt_s64(svint64_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s16)))\n" "svint16_t svqdmlalbt_s16(svint16_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s32)))\n" "svint32_t svqdmlalt_n_s32(svint32_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s64)))\n" "svint64_t svqdmlalt_n_s64(svint64_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s16)))\n" "svint16_t svqdmlalt_n_s16(svint16_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s32)))\n" "svint32_t svqdmlalt_s32(svint32_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s64)))\n" "svint64_t svqdmlalt_s64(svint64_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s16)))\n" "svint16_t svqdmlalt_s16(svint16_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_lane_s32)))\n" "svint32_t svqdmlalt_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_lane_s64)))\n" 
"svint64_t svqdmlalt_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s32)))\n" "svint32_t svqdmlslb_n_s32(svint32_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s64)))\n" "svint64_t svqdmlslb_n_s64(svint64_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s16)))\n" "svint16_t svqdmlslb_n_s16(svint16_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s32)))\n" "svint32_t svqdmlslb_s32(svint32_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s64)))\n" "svint64_t svqdmlslb_s64(svint64_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s16)))\n" "svint16_t svqdmlslb_s16(svint16_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_lane_s32)))\n" "svint32_t svqdmlslb_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_lane_s64)))\n" "svint64_t svqdmlslb_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s32)))\n" "svint32_t svqdmlslbt_n_s32(svint32_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s64)))\n" "svint64_t svqdmlslbt_n_s64(svint64_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s16)))\n" "svint16_t svqdmlslbt_n_s16(svint16_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s32)))\n" "svint32_t svqdmlslbt_s32(svint32_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s64)))\n" "svint64_t svqdmlslbt_s64(svint64_t, svint32_t, svint32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s16)))\n" "svint16_t svqdmlslbt_s16(svint16_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s32)))\n" "svint32_t svqdmlslt_n_s32(svint32_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s64)))\n" "svint64_t svqdmlslt_n_s64(svint64_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s16)))\n" "svint16_t svqdmlslt_n_s16(svint16_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s32)))\n" "svint32_t svqdmlslt_s32(svint32_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s64)))\n" "svint64_t svqdmlslt_s64(svint64_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s16)))\n" "svint16_t svqdmlslt_s16(svint16_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_lane_s32)))\n" "svint32_t svqdmlslt_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_lane_s64)))\n" "svint64_t svqdmlslt_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s8)))\n" "svint8_t svqdmulh_n_s8(svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s32)))\n" "svint32_t svqdmulh_n_s32(svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s64)))\n" "svint64_t svqdmulh_n_s64(svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s16)))\n" "svint16_t svqdmulh_n_s16(svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s8)))\n" "svint8_t svqdmulh_s8(svint8_t, svint8_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s32)))\n" "svint32_t svqdmulh_s32(svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s64)))\n" "svint64_t svqdmulh_s64(svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s16)))\n" "svint16_t svqdmulh_s16(svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s32)))\n" "svint32_t svqdmulh_lane_s32(svint32_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s64)))\n" "svint64_t svqdmulh_lane_s64(svint64_t, svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s16)))\n" "svint16_t svqdmulh_lane_s16(svint16_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s32)))\n" "svint32_t svqdmullb_n_s32(svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s64)))\n" "svint64_t svqdmullb_n_s64(svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s16)))\n" "svint16_t svqdmullb_n_s16(svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s32)))\n" "svint32_t svqdmullb_s32(svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s64)))\n" "svint64_t svqdmullb_s64(svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s16)))\n" "svint16_t svqdmullb_s16(svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_lane_s32)))\n" "svint32_t svqdmullb_lane_s32(svint16_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_lane_s64)))\n" "svint64_t svqdmullb_lane_s64(svint32_t, svint32_t, uint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s32)))\n" "svint32_t svqdmullt_n_s32(svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s64)))\n" "svint64_t svqdmullt_n_s64(svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s16)))\n" "svint16_t svqdmullt_n_s16(svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s32)))\n" "svint32_t svqdmullt_s32(svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s64)))\n" "svint64_t svqdmullt_s64(svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s16)))\n" "svint16_t svqdmullt_s16(svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_lane_s32)))\n" "svint32_t svqdmullt_lane_s32(svint16_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_lane_s64)))\n" "svint64_t svqdmullt_lane_s64(svint32_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_m)))\n" "svint8_t svqneg_s8_m(svint8_t, svbool_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_m)))\n" "svint32_t svqneg_s32_m(svint32_t, svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_m)))\n" "svint64_t svqneg_s64_m(svint64_t, svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_m)))\n" "svint16_t svqneg_s16_m(svint16_t, svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_x)))\n" "svint8_t svqneg_s8_x(svbool_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_x)))\n" "svint32_t svqneg_s32_x(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_x)))\n" "svint64_t 
svqneg_s64_x(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_x)))\n" "svint16_t svqneg_s16_x(svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_z)))\n" "svint8_t svqneg_s8_z(svbool_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_z)))\n" "svint32_t svqneg_s32_z(svbool_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_z)))\n" "svint64_t svqneg_s64_z(svbool_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_z)))\n" "svint16_t svqneg_s16_z(svbool_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s8)))\n" "svint8_t svqrdcmlah_s8(svint8_t, svint8_t, svint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s32)))\n" "svint32_t svqrdcmlah_s32(svint32_t, svint32_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s64)))\n" "svint64_t svqrdcmlah_s64(svint64_t, svint64_t, svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s16)))\n" "svint16_t svqrdcmlah_s16(svint16_t, svint16_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_lane_s32)))\n" "svint32_t svqrdcmlah_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_lane_s16)))\n" "svint16_t svqrdcmlah_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s8)))\n" "svint8_t svqrdmlah_n_s8(svint8_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s32)))\n" "svint32_t svqrdmlah_n_s32(svint32_t, svint32_t, int32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s64)))\n" "svint64_t svqrdmlah_n_s64(svint64_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s16)))\n" "svint16_t svqrdmlah_n_s16(svint16_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s8)))\n" "svint8_t svqrdmlah_s8(svint8_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s32)))\n" "svint32_t svqrdmlah_s32(svint32_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s64)))\n" "svint64_t svqrdmlah_s64(svint64_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s16)))\n" "svint16_t svqrdmlah_s16(svint16_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s32)))\n" "svint32_t svqrdmlah_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s64)))\n" "svint64_t svqrdmlah_lane_s64(svint64_t, svint64_t, svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s16)))\n" "svint16_t svqrdmlah_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s8)))\n" "svint8_t svqrdmlsh_n_s8(svint8_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s32)))\n" "svint32_t svqrdmlsh_n_s32(svint32_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s64)))\n" "svint64_t svqrdmlsh_n_s64(svint64_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s16)))\n" "svint16_t svqrdmlsh_n_s16(svint16_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s8)))\n" 
"svint8_t svqrdmlsh_s8(svint8_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s32)))\n" "svint32_t svqrdmlsh_s32(svint32_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s64)))\n" "svint64_t svqrdmlsh_s64(svint64_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s16)))\n" "svint16_t svqrdmlsh_s16(svint16_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s32)))\n" "svint32_t svqrdmlsh_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s64)))\n" "svint64_t svqrdmlsh_lane_s64(svint64_t, svint64_t, svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s16)))\n" "svint16_t svqrdmlsh_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s8)))\n" "svint8_t svqrdmulh_n_s8(svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s32)))\n" "svint32_t svqrdmulh_n_s32(svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s64)))\n" "svint64_t svqrdmulh_n_s64(svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s16)))\n" "svint16_t svqrdmulh_n_s16(svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s8)))\n" "svint8_t svqrdmulh_s8(svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s32)))\n" "svint32_t svqrdmulh_s32(svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s64)))\n" "svint64_t svqrdmulh_s64(svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s16)))\n" "svint16_t 
svqrdmulh_s16(svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s32)))\n" "svint32_t svqrdmulh_lane_s32(svint32_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s64)))\n" "svint64_t svqrdmulh_lane_s64(svint64_t, svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s16)))\n" "svint16_t svqrdmulh_lane_s16(svint16_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_m)))\n" "svint8_t svqrshl_n_s8_m(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_m)))\n" "svint32_t svqrshl_n_s32_m(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_m)))\n" "svint64_t svqrshl_n_s64_m(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_m)))\n" "svint16_t svqrshl_n_s16_m(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_x)))\n" "svint8_t svqrshl_n_s8_x(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_x)))\n" "svint32_t svqrshl_n_s32_x(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_x)))\n" "svint64_t svqrshl_n_s64_x(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_x)))\n" "svint16_t svqrshl_n_s16_x(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_z)))\n" "svint8_t svqrshl_n_s8_z(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_z)))\n" "svint32_t svqrshl_n_s32_z(svbool_t, svint32_t, int32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_z)))\n" "svint64_t svqrshl_n_s64_z(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_z)))\n" "svint16_t svqrshl_n_s16_z(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_m)))\n" "svuint8_t svqrshl_n_u8_m(svbool_t, svuint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_m)))\n" "svuint32_t svqrshl_n_u32_m(svbool_t, svuint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_m)))\n" "svuint64_t svqrshl_n_u64_m(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_m)))\n" "svuint16_t svqrshl_n_u16_m(svbool_t, svuint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_x)))\n" "svuint8_t svqrshl_n_u8_x(svbool_t, svuint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_x)))\n" "svuint32_t svqrshl_n_u32_x(svbool_t, svuint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_x)))\n" "svuint64_t svqrshl_n_u64_x(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_x)))\n" "svuint16_t svqrshl_n_u16_x(svbool_t, svuint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_z)))\n" "svuint8_t svqrshl_n_u8_z(svbool_t, svuint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_z)))\n" "svuint32_t svqrshl_n_u32_z(svbool_t, svuint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_z)))\n" "svuint64_t svqrshl_n_u64_z(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_z)))\n" "svuint16_t svqrshl_n_u16_z(svbool_t, 
svuint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_m)))\n" "svint8_t svqrshl_s8_m(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_m)))\n" "svint32_t svqrshl_s32_m(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_m)))\n" "svint64_t svqrshl_s64_m(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_m)))\n" "svint16_t svqrshl_s16_m(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_x)))\n" "svint8_t svqrshl_s8_x(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_x)))\n" "svint32_t svqrshl_s32_x(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_x)))\n" "svint64_t svqrshl_s64_x(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_x)))\n" "svint16_t svqrshl_s16_x(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_z)))\n" "svint8_t svqrshl_s8_z(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_z)))\n" "svint32_t svqrshl_s32_z(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_z)))\n" "svint64_t svqrshl_s64_z(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_z)))\n" "svint16_t svqrshl_s16_z(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_m)))\n" "svuint8_t svqrshl_u8_m(svbool_t, svuint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_m)))\n" "svuint32_t svqrshl_u32_m(svbool_t, svuint32_t, svint32_t);\n" 
"__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_m)))\n" "svuint64_t svqrshl_u64_m(svbool_t, svuint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_m)))\n" "svuint16_t svqrshl_u16_m(svbool_t, svuint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_x)))\n" "svuint8_t svqrshl_u8_x(svbool_t, svuint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_x)))\n" "svuint32_t svqrshl_u32_x(svbool_t, svuint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_x)))\n" "svuint64_t svqrshl_u64_x(svbool_t, svuint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_x)))\n" "svuint16_t svqrshl_u16_x(svbool_t, svuint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_z)))\n" "svuint8_t svqrshl_u8_z(svbool_t, svuint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_z)))\n" "svuint32_t svqrshl_u32_z(svbool_t, svuint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_z)))\n" "svuint64_t svqrshl_u64_z(svbool_t, svuint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_z)))\n" "svuint16_t svqrshl_u16_z(svbool_t, svuint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s32)))\n" "svint16_t svqrshrnb_n_s32(svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s64)))\n" "svint32_t svqrshrnb_n_s64(svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s16)))\n" "svint8_t svqrshrnb_n_s16(svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u32)))\n" "svuint16_t svqrshrnb_n_u32(svuint32_t, uint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u64)))\n" "svuint32_t svqrshrnb_n_u64(svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u16)))\n" "svuint8_t svqrshrnb_n_u16(svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s32)))\n" "svint16_t svqrshrnt_n_s32(svint16_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s64)))\n" "svint32_t svqrshrnt_n_s64(svint32_t, svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s16)))\n" "svint8_t svqrshrnt_n_s16(svint8_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u32)))\n" "svuint16_t svqrshrnt_n_u32(svuint16_t, svuint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u64)))\n" "svuint32_t svqrshrnt_n_u64(svuint32_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u16)))\n" "svuint8_t svqrshrnt_n_u16(svuint8_t, svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s32)))\n" "svuint16_t svqrshrunb_n_s32(svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s64)))\n" "svuint32_t svqrshrunb_n_s64(svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s16)))\n" "svuint8_t svqrshrunb_n_s16(svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s32)))\n" "svuint16_t svqrshrunt_n_s32(svuint16_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s64)))\n" "svuint32_t svqrshrunt_n_s64(svuint32_t, svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s16)))\n" "svuint8_t svqrshrunt_n_s16(svuint8_t, svint16_t, 
uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_m)))\n" "svint8_t svqshl_n_s8_m(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_m)))\n" "svint32_t svqshl_n_s32_m(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_m)))\n" "svint64_t svqshl_n_s64_m(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_m)))\n" "svint16_t svqshl_n_s16_m(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_x)))\n" "svint8_t svqshl_n_s8_x(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_x)))\n" "svint32_t svqshl_n_s32_x(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_x)))\n" "svint64_t svqshl_n_s64_x(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_x)))\n" "svint16_t svqshl_n_s16_x(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_z)))\n" "svint8_t svqshl_n_s8_z(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_z)))\n" "svint32_t svqshl_n_s32_z(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_z)))\n" "svint64_t svqshl_n_s64_z(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_z)))\n" "svint16_t svqshl_n_s16_z(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_m)))\n" "svuint8_t svqshl_n_u8_m(svbool_t, svuint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_m)))\n" "svuint32_t svqshl_n_u32_m(svbool_t, svuint32_t, int32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_m)))\n" "svuint64_t svqshl_n_u64_m(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_m)))\n" "svuint16_t svqshl_n_u16_m(svbool_t, svuint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_x)))\n" "svuint8_t svqshl_n_u8_x(svbool_t, svuint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_x)))\n" "svuint32_t svqshl_n_u32_x(svbool_t, svuint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_x)))\n" "svuint64_t svqshl_n_u64_x(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_x)))\n" "svuint16_t svqshl_n_u16_x(svbool_t, svuint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_z)))\n" "svuint8_t svqshl_n_u8_z(svbool_t, svuint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_z)))\n" "svuint32_t svqshl_n_u32_z(svbool_t, svuint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_z)))\n" "svuint64_t svqshl_n_u64_z(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_z)))\n" "svuint16_t svqshl_n_u16_z(svbool_t, svuint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_m)))\n" "svint8_t svqshl_s8_m(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_m)))\n" "svint32_t svqshl_s32_m(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_m)))\n" "svint64_t svqshl_s64_m(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_m)))\n" "svint16_t svqshl_s16_m(svbool_t, svint16_t, svint16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_x)))\n" "svint8_t svqshl_s8_x(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_x)))\n" "svint32_t svqshl_s32_x(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_x)))\n" "svint64_t svqshl_s64_x(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_x)))\n" "svint16_t svqshl_s16_x(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_z)))\n" "svint8_t svqshl_s8_z(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_z)))\n" "svint32_t svqshl_s32_z(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_z)))\n" "svint64_t svqshl_s64_z(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_z)))\n" "svint16_t svqshl_s16_z(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_m)))\n" "svuint8_t svqshl_u8_m(svbool_t, svuint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_m)))\n" "svuint32_t svqshl_u32_m(svbool_t, svuint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_m)))\n" "svuint64_t svqshl_u64_m(svbool_t, svuint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_m)))\n" "svuint16_t svqshl_u16_m(svbool_t, svuint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_x)))\n" "svuint8_t svqshl_u8_x(svbool_t, svuint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_x)))\n" "svuint32_t svqshl_u32_x(svbool_t, svuint32_t, svint32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_x)))\n" "svuint64_t svqshl_u64_x(svbool_t, svuint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_x)))\n" "svuint16_t svqshl_u16_x(svbool_t, svuint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_z)))\n" "svuint8_t svqshl_u8_z(svbool_t, svuint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_z)))\n" "svuint32_t svqshl_u32_z(svbool_t, svuint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_z)))\n" "svuint64_t svqshl_u64_z(svbool_t, svuint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_z)))\n" "svuint16_t svqshl_u16_z(svbool_t, svuint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_m)))\n" "svuint8_t svqshlu_n_s8_m(svbool_t, svint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_m)))\n" "svuint32_t svqshlu_n_s32_m(svbool_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_m)))\n" "svuint64_t svqshlu_n_s64_m(svbool_t, svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_m)))\n" "svuint16_t svqshlu_n_s16_m(svbool_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_x)))\n" "svuint8_t svqshlu_n_s8_x(svbool_t, svint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_x)))\n" "svuint32_t svqshlu_n_s32_x(svbool_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_x)))\n" "svuint64_t svqshlu_n_s64_x(svbool_t, svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_x)))\n" "svuint16_t svqshlu_n_s16_x(svbool_t, svint16_t, 
uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_z)))\n" "svuint8_t svqshlu_n_s8_z(svbool_t, svint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_z)))\n" "svuint32_t svqshlu_n_s32_z(svbool_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_z)))\n" "svuint64_t svqshlu_n_s64_z(svbool_t, svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_z)))\n" "svuint16_t svqshlu_n_s16_z(svbool_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s32)))\n" "svint16_t svqshrnb_n_s32(svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s64)))\n" "svint32_t svqshrnb_n_s64(svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s16)))\n" "svint8_t svqshrnb_n_s16(svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u32)))\n" "svuint16_t svqshrnb_n_u32(svuint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u64)))\n" "svuint32_t svqshrnb_n_u64(svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u16)))\n" "svuint8_t svqshrnb_n_u16(svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s32)))\n" "svint16_t svqshrnt_n_s32(svint16_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s64)))\n" "svint32_t svqshrnt_n_s64(svint32_t, svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s16)))\n" "svint8_t svqshrnt_n_s16(svint8_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u32)))\n" "svuint16_t svqshrnt_n_u32(svuint16_t, svuint32_t, uint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u64)))\n" "svuint32_t svqshrnt_n_u64(svuint32_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u16)))\n" "svuint8_t svqshrnt_n_u16(svuint8_t, svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s32)))\n" "svuint16_t svqshrunb_n_s32(svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s64)))\n" "svuint32_t svqshrunb_n_s64(svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s16)))\n" "svuint8_t svqshrunb_n_s16(svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s32)))\n" "svuint16_t svqshrunt_n_s32(svuint16_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s64)))\n" "svuint32_t svqshrunt_n_s64(svuint32_t, svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s16)))\n" "svuint8_t svqshrunt_n_s16(svuint8_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_m)))\n" "svint8_t svqsub_n_s8_m(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_m)))\n" "svint32_t svqsub_n_s32_m(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_m)))\n" "svint64_t svqsub_n_s64_m(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_m)))\n" "svint16_t svqsub_n_s16_m(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_x)))\n" "svint8_t svqsub_n_s8_x(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_x)))\n" "svint32_t svqsub_n_s32_x(svbool_t, svint32_t, int32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_x)))\n" "svint64_t svqsub_n_s64_x(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_x)))\n" "svint16_t svqsub_n_s16_x(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_z)))\n" "svint8_t svqsub_n_s8_z(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_z)))\n" "svint32_t svqsub_n_s32_z(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_z)))\n" "svint64_t svqsub_n_s64_z(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_z)))\n" "svint16_t svqsub_n_s16_z(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_m)))\n" "svuint8_t svqsub_n_u8_m(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_m)))\n" "svuint32_t svqsub_n_u32_m(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_m)))\n" "svuint64_t svqsub_n_u64_m(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_m)))\n" "svuint16_t svqsub_n_u16_m(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_x)))\n" "svuint8_t svqsub_n_u8_x(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_x)))\n" "svuint32_t svqsub_n_u32_x(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_x)))\n" "svuint64_t svqsub_n_u64_x(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_x)))\n" "svuint16_t svqsub_n_u16_x(svbool_t, svuint16_t, uint16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_z)))\n" "svuint8_t svqsub_n_u8_z(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_z)))\n" "svuint32_t svqsub_n_u32_z(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_z)))\n" "svuint64_t svqsub_n_u64_z(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_z)))\n" "svuint16_t svqsub_n_u16_z(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_m)))\n" "svint8_t svqsub_s8_m(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_m)))\n" "svint32_t svqsub_s32_m(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_m)))\n" "svint64_t svqsub_s64_m(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_m)))\n" "svint16_t svqsub_s16_m(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_x)))\n" "svint8_t svqsub_s8_x(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_x)))\n" "svint32_t svqsub_s32_x(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_x)))\n" "svint64_t svqsub_s64_x(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_x)))\n" "svint16_t svqsub_s16_x(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_z)))\n" "svint8_t svqsub_s8_z(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_z)))\n" "svint32_t svqsub_s32_z(svbool_t, svint32_t, svint32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_z)))\n" "svint64_t svqsub_s64_z(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_z)))\n" "svint16_t svqsub_s16_z(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_m)))\n" "svuint8_t svqsub_u8_m(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_m)))\n" "svuint32_t svqsub_u32_m(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_m)))\n" "svuint64_t svqsub_u64_m(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_m)))\n" "svuint16_t svqsub_u16_m(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_x)))\n" "svuint8_t svqsub_u8_x(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_x)))\n" "svuint32_t svqsub_u32_x(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_x)))\n" "svuint64_t svqsub_u64_x(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_x)))\n" "svuint16_t svqsub_u16_x(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_z)))\n" "svuint8_t svqsub_u8_z(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_z)))\n" "svuint32_t svqsub_u32_z(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_z)))\n" "svuint64_t svqsub_u64_z(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_z)))\n" "svuint16_t svqsub_u16_z(svbool_t, svuint16_t, svuint16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_m)))\n" "svint8_t svqsubr_n_s8_m(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_m)))\n" "svint32_t svqsubr_n_s32_m(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_m)))\n" "svint64_t svqsubr_n_s64_m(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_m)))\n" "svint16_t svqsubr_n_s16_m(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_x)))\n" "svint8_t svqsubr_n_s8_x(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_x)))\n" "svint32_t svqsubr_n_s32_x(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_x)))\n" "svint64_t svqsubr_n_s64_x(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_x)))\n" "svint16_t svqsubr_n_s16_x(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_z)))\n" "svint8_t svqsubr_n_s8_z(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_z)))\n" "svint32_t svqsubr_n_s32_z(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_z)))\n" "svint64_t svqsubr_n_s64_z(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_z)))\n" "svint16_t svqsubr_n_s16_z(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_m)))\n" "svuint8_t svqsubr_n_u8_m(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_m)))\n" "svuint32_t svqsubr_n_u32_m(svbool_t, svuint32_t, uint32_t);\n" 
"__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_m)))\n" "svuint64_t svqsubr_n_u64_m(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_m)))\n" "svuint16_t svqsubr_n_u16_m(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_x)))\n" "svuint8_t svqsubr_n_u8_x(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_x)))\n" "svuint32_t svqsubr_n_u32_x(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_x)))\n" "svuint64_t svqsubr_n_u64_x(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_x)))\n" "svuint16_t svqsubr_n_u16_x(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_z)))\n" "svuint8_t svqsubr_n_u8_z(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_z)))\n" "svuint32_t svqsubr_n_u32_z(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_z)))\n" "svuint64_t svqsubr_n_u64_z(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_z)))\n" "svuint16_t svqsubr_n_u16_z(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_m)))\n" "svint8_t svqsubr_s8_m(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_m)))\n" "svint32_t svqsubr_s32_m(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_m)))\n" "svint64_t svqsubr_s64_m(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_m)))\n" "svint16_t svqsubr_s16_m(svbool_t, 
svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_x)))\n" "svint8_t svqsubr_s8_x(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_x)))\n" "svint32_t svqsubr_s32_x(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_x)))\n" "svint64_t svqsubr_s64_x(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_x)))\n" "svint16_t svqsubr_s16_x(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_z)))\n" "svint8_t svqsubr_s8_z(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_z)))\n" "svint32_t svqsubr_s32_z(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_z)))\n" "svint64_t svqsubr_s64_z(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_z)))\n" "svint16_t svqsubr_s16_z(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_m)))\n" "svuint8_t svqsubr_u8_m(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_m)))\n" "svuint32_t svqsubr_u32_m(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_m)))\n" "svuint64_t svqsubr_u64_m(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_m)))\n" "svuint16_t svqsubr_u16_m(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_x)))\n" "svuint8_t svqsubr_u8_x(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_x)))\n" "svuint32_t svqsubr_u32_x(svbool_t, svuint32_t, 
svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_x)))\n" "svuint64_t svqsubr_u64_x(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_x)))\n" "svuint16_t svqsubr_u16_x(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_z)))\n" "svuint8_t svqsubr_u8_z(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_z)))\n" "svuint32_t svqsubr_u32_z(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_z)))\n" "svuint64_t svqsubr_u64_z(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_z)))\n" "svuint16_t svqsubr_u16_z(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s32)))\n" "svint16_t svqxtnb_s32(svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s64)))\n" "svint32_t svqxtnb_s64(svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s16)))\n" "svint8_t svqxtnb_s16(svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u32)))\n" "svuint16_t svqxtnb_u32(svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u64)))\n" "svuint32_t svqxtnb_u64(svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u16)))\n" "svuint8_t svqxtnb_u16(svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s32)))\n" "svint16_t svqxtnt_s32(svint16_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s64)))\n" "svint32_t svqxtnt_s64(svint32_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s16)))\n" "svint8_t svqxtnt_s16(svint8_t, svint16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u32)))\n" "svuint16_t svqxtnt_u32(svuint16_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u64)))\n" "svuint32_t svqxtnt_u64(svuint32_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u16)))\n" "svuint8_t svqxtnt_u16(svuint8_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s32)))\n" "svuint16_t svqxtunb_s32(svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s64)))\n" "svuint32_t svqxtunb_s64(svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s16)))\n" "svuint8_t svqxtunb_s16(svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s32)))\n" "svuint16_t svqxtunt_s32(svuint16_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s64)))\n" "svuint32_t svqxtunt_s64(svuint32_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s16)))\n" "svuint8_t svqxtunt_s16(svuint8_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u32)))\n" "svuint16_t svraddhnb_n_u32(svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u64)))\n" "svuint32_t svraddhnb_n_u64(svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u16)))\n" "svuint8_t svraddhnb_n_u16(svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s32)))\n" "svint16_t svraddhnb_n_s32(svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s64)))\n" "svint32_t svraddhnb_n_s64(svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s16)))\n" "svint8_t svraddhnb_n_s16(svint16_t, int16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u32)))\n" "svuint16_t svraddhnb_u32(svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u64)))\n" "svuint32_t svraddhnb_u64(svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u16)))\n" "svuint8_t svraddhnb_u16(svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s32)))\n" "svint16_t svraddhnb_s32(svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s64)))\n" "svint32_t svraddhnb_s64(svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s16)))\n" "svint8_t svraddhnb_s16(svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u32)))\n" "svuint16_t svraddhnt_n_u32(svuint16_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u64)))\n" "svuint32_t svraddhnt_n_u64(svuint32_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u16)))\n" "svuint8_t svraddhnt_n_u16(svuint8_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s32)))\n" "svint16_t svraddhnt_n_s32(svint16_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s64)))\n" "svint32_t svraddhnt_n_s64(svint32_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s16)))\n" "svint8_t svraddhnt_n_s16(svint8_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u32)))\n" "svuint16_t svraddhnt_u32(svuint16_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u64)))\n" "svuint32_t svraddhnt_u64(svuint32_t, svuint64_t, svuint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u16)))\n" "svuint8_t svraddhnt_u16(svuint8_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s32)))\n" "svint16_t svraddhnt_s32(svint16_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s64)))\n" "svint32_t svraddhnt_s64(svint32_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s16)))\n" "svint8_t svraddhnt_s16(svint8_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_m)))\n" "svuint32_t svrecpe_u32_m(svuint32_t, svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_x)))\n" "svuint32_t svrecpe_u32_x(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_z)))\n" "svuint32_t svrecpe_u32_z(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_m)))\n" "svint8_t svrhadd_n_s8_m(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_m)))\n" "svint32_t svrhadd_n_s32_m(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_m)))\n" "svint64_t svrhadd_n_s64_m(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_m)))\n" "svint16_t svrhadd_n_s16_m(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_x)))\n" "svint8_t svrhadd_n_s8_x(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_x)))\n" "svint32_t svrhadd_n_s32_x(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_x)))\n" "svint64_t svrhadd_n_s64_x(svbool_t, svint64_t, int64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_x)))\n" "svint16_t svrhadd_n_s16_x(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_z)))\n" "svint8_t svrhadd_n_s8_z(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_z)))\n" "svint32_t svrhadd_n_s32_z(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_z)))\n" "svint64_t svrhadd_n_s64_z(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_z)))\n" "svint16_t svrhadd_n_s16_z(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_m)))\n" "svuint8_t svrhadd_n_u8_m(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_m)))\n" "svuint32_t svrhadd_n_u32_m(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_m)))\n" "svuint64_t svrhadd_n_u64_m(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_m)))\n" "svuint16_t svrhadd_n_u16_m(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_x)))\n" "svuint8_t svrhadd_n_u8_x(svbool_t, svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_x)))\n" "svuint32_t svrhadd_n_u32_x(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_x)))\n" "svuint64_t svrhadd_n_u64_x(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_x)))\n" "svuint16_t svrhadd_n_u16_x(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_z)))\n" "svuint8_t svrhadd_n_u8_z(svbool_t, 
svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_z)))\n" "svuint32_t svrhadd_n_u32_z(svbool_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_z)))\n" "svuint64_t svrhadd_n_u64_z(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_z)))\n" "svuint16_t svrhadd_n_u16_z(svbool_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_m)))\n" "svint8_t svrhadd_s8_m(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_m)))\n" "svint32_t svrhadd_s32_m(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_m)))\n" "svint64_t svrhadd_s64_m(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_m)))\n" "svint16_t svrhadd_s16_m(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_x)))\n" "svint8_t svrhadd_s8_x(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_x)))\n" "svint32_t svrhadd_s32_x(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_x)))\n" "svint64_t svrhadd_s64_x(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_x)))\n" "svint16_t svrhadd_s16_x(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_z)))\n" "svint8_t svrhadd_s8_z(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_z)))\n" "svint32_t svrhadd_s32_z(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_z)))\n" "svint64_t svrhadd_s64_z(svbool_t, svint64_t, 
svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_z)))\n" "svint16_t svrhadd_s16_z(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_m)))\n" "svuint8_t svrhadd_u8_m(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_m)))\n" "svuint32_t svrhadd_u32_m(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_m)))\n" "svuint64_t svrhadd_u64_m(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_m)))\n" "svuint16_t svrhadd_u16_m(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_x)))\n" "svuint8_t svrhadd_u8_x(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_x)))\n" "svuint32_t svrhadd_u32_x(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_x)))\n" "svuint64_t svrhadd_u64_x(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_x)))\n" "svuint16_t svrhadd_u16_x(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_z)))\n" "svuint8_t svrhadd_u8_z(svbool_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_z)))\n" "svuint32_t svrhadd_u32_z(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_z)))\n" "svuint64_t svrhadd_u64_z(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_z)))\n" "svuint16_t svrhadd_u16_z(svbool_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_m)))\n" "svint8_t svrshl_n_s8_m(svbool_t, 
svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_m)))\n" "svint32_t svrshl_n_s32_m(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_m)))\n" "svint64_t svrshl_n_s64_m(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_m)))\n" "svint16_t svrshl_n_s16_m(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_x)))\n" "svint8_t svrshl_n_s8_x(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_x)))\n" "svint32_t svrshl_n_s32_x(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_x)))\n" "svint64_t svrshl_n_s64_x(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_x)))\n" "svint16_t svrshl_n_s16_x(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_z)))\n" "svint8_t svrshl_n_s8_z(svbool_t, svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_z)))\n" "svint32_t svrshl_n_s32_z(svbool_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_z)))\n" "svint64_t svrshl_n_s64_z(svbool_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_z)))\n" "svint16_t svrshl_n_s16_z(svbool_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_m)))\n" "svuint8_t svrshl_n_u8_m(svbool_t, svuint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_m)))\n" "svuint32_t svrshl_n_u32_m(svbool_t, svuint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_m)))\n" "svuint64_t svrshl_n_u64_m(svbool_t, svuint64_t, 
int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_m)))\n" "svuint16_t svrshl_n_u16_m(svbool_t, svuint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_x)))\n" "svuint8_t svrshl_n_u8_x(svbool_t, svuint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_x)))\n" "svuint32_t svrshl_n_u32_x(svbool_t, svuint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_x)))\n" "svuint64_t svrshl_n_u64_x(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_x)))\n" "svuint16_t svrshl_n_u16_x(svbool_t, svuint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_z)))\n" "svuint8_t svrshl_n_u8_z(svbool_t, svuint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_z)))\n" "svuint32_t svrshl_n_u32_z(svbool_t, svuint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_z)))\n" "svuint64_t svrshl_n_u64_z(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_z)))\n" "svuint16_t svrshl_n_u16_z(svbool_t, svuint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_m)))\n" "svint8_t svrshl_s8_m(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_m)))\n" "svint32_t svrshl_s32_m(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_m)))\n" "svint64_t svrshl_s64_m(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_m)))\n" "svint16_t svrshl_s16_m(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_x)))\n" "svint8_t svrshl_s8_x(svbool_t, svint8_t, svint8_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_x)))\n" "svint32_t svrshl_s32_x(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_x)))\n" "svint64_t svrshl_s64_x(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_x)))\n" "svint16_t svrshl_s16_x(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_z)))\n" "svint8_t svrshl_s8_z(svbool_t, svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_z)))\n" "svint32_t svrshl_s32_z(svbool_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_z)))\n" "svint64_t svrshl_s64_z(svbool_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_z)))\n" "svint16_t svrshl_s16_z(svbool_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_m)))\n" "svuint8_t svrshl_u8_m(svbool_t, svuint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_m)))\n" "svuint32_t svrshl_u32_m(svbool_t, svuint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_m)))\n" "svuint64_t svrshl_u64_m(svbool_t, svuint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_m)))\n" "svuint16_t svrshl_u16_m(svbool_t, svuint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_x)))\n" "svuint8_t svrshl_u8_x(svbool_t, svuint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_x)))\n" "svuint32_t svrshl_u32_x(svbool_t, svuint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_x)))\n" "svuint64_t svrshl_u64_x(svbool_t, svuint64_t, svint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_x)))\n" "svuint16_t svrshl_u16_x(svbool_t, svuint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_z)))\n" "svuint8_t svrshl_u8_z(svbool_t, svuint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_z)))\n" "svuint32_t svrshl_u32_z(svbool_t, svuint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_z)))\n" "svuint64_t svrshl_u64_z(svbool_t, svuint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_z)))\n" "svuint16_t svrshl_u16_z(svbool_t, svuint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_m)))\n" "svint8_t svrshr_n_s8_m(svbool_t, svint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_m)))\n" "svint32_t svrshr_n_s32_m(svbool_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_m)))\n" "svint64_t svrshr_n_s64_m(svbool_t, svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_m)))\n" "svint16_t svrshr_n_s16_m(svbool_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_m)))\n" "svuint8_t svrshr_n_u8_m(svbool_t, svuint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_m)))\n" "svuint32_t svrshr_n_u32_m(svbool_t, svuint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_m)))\n" "svuint64_t svrshr_n_u64_m(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_m)))\n" "svuint16_t svrshr_n_u16_m(svbool_t, svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_x)))\n" "svint8_t svrshr_n_s8_x(svbool_t, svint8_t, uint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_x)))\n" "svint32_t svrshr_n_s32_x(svbool_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_x)))\n" "svint64_t svrshr_n_s64_x(svbool_t, svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_x)))\n" "svint16_t svrshr_n_s16_x(svbool_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_x)))\n" "svuint8_t svrshr_n_u8_x(svbool_t, svuint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_x)))\n" "svuint32_t svrshr_n_u32_x(svbool_t, svuint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_x)))\n" "svuint64_t svrshr_n_u64_x(svbool_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_x)))\n" "svuint16_t svrshr_n_u16_x(svbool_t, svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_z)))\n" "svint8_t svrshr_n_s8_z(svbool_t, svint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_z)))\n" "svint32_t svrshr_n_s32_z(svbool_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_z)))\n" "svint64_t svrshr_n_s64_z(svbool_t, svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_z)))\n" "svint16_t svrshr_n_s16_z(svbool_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_z)))\n" "svuint8_t svrshr_n_u8_z(svbool_t, svuint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_z)))\n" "svuint32_t svrshr_n_u32_z(svbool_t, svuint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_z)))\n" "svuint64_t svrshr_n_u64_z(svbool_t, svuint64_t, uint64_t);\n" 
"__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_z)))\n" "svuint16_t svrshr_n_u16_z(svbool_t, svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u32)))\n" "svuint16_t svrshrnb_n_u32(svuint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u64)))\n" "svuint32_t svrshrnb_n_u64(svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u16)))\n" "svuint8_t svrshrnb_n_u16(svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s32)))\n" "svint16_t svrshrnb_n_s32(svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s64)))\n" "svint32_t svrshrnb_n_s64(svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s16)))\n" "svint8_t svrshrnb_n_s16(svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u32)))\n" "svuint16_t svrshrnt_n_u32(svuint16_t, svuint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u64)))\n" "svuint32_t svrshrnt_n_u64(svuint32_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u16)))\n" "svuint8_t svrshrnt_n_u16(svuint8_t, svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s32)))\n" "svint16_t svrshrnt_n_s32(svint16_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s64)))\n" "svint32_t svrshrnt_n_s64(svint32_t, svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s16)))\n" "svint8_t svrshrnt_n_s16(svint8_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_m)))\n" "svuint32_t svrsqrte_u32_m(svuint32_t, svbool_t, svuint32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_x)))\n" "svuint32_t svrsqrte_u32_x(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_z)))\n" "svuint32_t svrsqrte_u32_z(svbool_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s8)))\n" "svint8_t svrsra_n_s8(svint8_t, svint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s32)))\n" "svint32_t svrsra_n_s32(svint32_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s64)))\n" "svint64_t svrsra_n_s64(svint64_t, svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s16)))\n" "svint16_t svrsra_n_s16(svint16_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u8)))\n" "svuint8_t svrsra_n_u8(svuint8_t, svuint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u32)))\n" "svuint32_t svrsra_n_u32(svuint32_t, svuint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u64)))\n" "svuint64_t svrsra_n_u64(svuint64_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u16)))\n" "svuint16_t svrsra_n_u16(svuint16_t, svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u32)))\n" "svuint16_t svrsubhnb_n_u32(svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u64)))\n" "svuint32_t svrsubhnb_n_u64(svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u16)))\n" "svuint8_t svrsubhnb_n_u16(svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s32)))\n" "svint16_t svrsubhnb_n_s32(svint32_t, int32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s64)))\n" "svint32_t svrsubhnb_n_s64(svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s16)))\n" "svint8_t svrsubhnb_n_s16(svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u32)))\n" "svuint16_t svrsubhnb_u32(svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u64)))\n" "svuint32_t svrsubhnb_u64(svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u16)))\n" "svuint8_t svrsubhnb_u16(svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s32)))\n" "svint16_t svrsubhnb_s32(svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s64)))\n" "svint32_t svrsubhnb_s64(svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s16)))\n" "svint8_t svrsubhnb_s16(svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u32)))\n" "svuint16_t svrsubhnt_n_u32(svuint16_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u64)))\n" "svuint32_t svrsubhnt_n_u64(svuint32_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u16)))\n" "svuint8_t svrsubhnt_n_u16(svuint8_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s32)))\n" "svint16_t svrsubhnt_n_s32(svint16_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s64)))\n" "svint32_t svrsubhnt_n_s64(svint32_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s16)))\n" "svint8_t svrsubhnt_n_s16(svint8_t, svint16_t, int16_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u32)))\n" "svuint16_t svrsubhnt_u32(svuint16_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u64)))\n" "svuint32_t svrsubhnt_u64(svuint32_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u16)))\n" "svuint8_t svrsubhnt_u16(svuint8_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s32)))\n" "svint16_t svrsubhnt_s32(svint16_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s64)))\n" "svint32_t svrsubhnt_s64(svint32_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s16)))\n" "svint8_t svrsubhnt_s16(svint8_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_n_u32)))\n" "svuint32_t svsbclb_n_u32(svuint32_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_n_u64)))\n" "svuint64_t svsbclb_n_u64(svuint64_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_u32)))\n" "svuint32_t svsbclb_u32(svuint32_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_u64)))\n" "svuint64_t svsbclb_u64(svuint64_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_n_u32)))\n" "svuint32_t svsbclt_n_u32(svuint32_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_n_u64)))\n" "svuint64_t svsbclt_n_u64(svuint64_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_u32)))\n" "svuint32_t svsbclt_u32(svuint32_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_u64)))\n" "svuint64_t svsbclt_u64(svuint64_t, svuint64_t, 
svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s32)))\n" "svint32_t svshllb_n_s32(svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s64)))\n" "svint64_t svshllb_n_s64(svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s16)))\n" "svint16_t svshllb_n_s16(svint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u32)))\n" "svuint32_t svshllb_n_u32(svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u64)))\n" "svuint64_t svshllb_n_u64(svuint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u16)))\n" "svuint16_t svshllb_n_u16(svuint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s32)))\n" "svint32_t svshllt_n_s32(svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s64)))\n" "svint64_t svshllt_n_s64(svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s16)))\n" "svint16_t svshllt_n_s16(svint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u32)))\n" "svuint32_t svshllt_n_u32(svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u64)))\n" "svuint64_t svshllt_n_u64(svuint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u16)))\n" "svuint16_t svshllt_n_u16(svuint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u32)))\n" "svuint16_t svshrnb_n_u32(svuint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u64)))\n" "svuint32_t svshrnb_n_u64(svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u16)))\n" "svuint8_t svshrnb_n_u16(svuint16_t, uint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s32)))\n" "svint16_t svshrnb_n_s32(svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s64)))\n" "svint32_t svshrnb_n_s64(svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s16)))\n" "svint8_t svshrnb_n_s16(svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u32)))\n" "svuint16_t svshrnt_n_u32(svuint16_t, svuint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u64)))\n" "svuint32_t svshrnt_n_u64(svuint32_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u16)))\n" "svuint8_t svshrnt_n_u16(svuint8_t, svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s32)))\n" "svint16_t svshrnt_n_s32(svint16_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s64)))\n" "svint32_t svshrnt_n_s64(svint32_t, svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s16)))\n" "svint8_t svshrnt_n_s16(svint8_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u8)))\n" "svuint8_t svsli_n_u8(svuint8_t, svuint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u32)))\n" "svuint32_t svsli_n_u32(svuint32_t, svuint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u64)))\n" "svuint64_t svsli_n_u64(svuint64_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u16)))\n" "svuint16_t svsli_n_u16(svuint16_t, svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s8)))\n" "svint8_t svsli_n_s8(svint8_t, svint8_t, uint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s32)))\n" "svint32_t svsli_n_s32(svint32_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s64)))\n" "svint64_t svsli_n_s64(svint64_t, svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s16)))\n" "svint16_t svsli_n_s16(svint16_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_m)))\n" "svuint8_t svsqadd_n_u8_m(svbool_t, svuint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_m)))\n" "svuint32_t svsqadd_n_u32_m(svbool_t, svuint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_m)))\n" "svuint64_t svsqadd_n_u64_m(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_m)))\n" "svuint16_t svsqadd_n_u16_m(svbool_t, svuint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_x)))\n" "svuint8_t svsqadd_n_u8_x(svbool_t, svuint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_x)))\n" "svuint32_t svsqadd_n_u32_x(svbool_t, svuint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_x)))\n" "svuint64_t svsqadd_n_u64_x(svbool_t, svuint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_x)))\n" "svuint16_t svsqadd_n_u16_x(svbool_t, svuint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_z)))\n" "svuint8_t svsqadd_n_u8_z(svbool_t, svuint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_z)))\n" "svuint32_t svsqadd_n_u32_z(svbool_t, svuint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_z)))\n" "svuint64_t svsqadd_n_u64_z(svbool_t, svuint64_t, int64_t);\n" 
"__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_z)))\n" "svuint16_t svsqadd_n_u16_z(svbool_t, svuint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_m)))\n" "svuint8_t svsqadd_u8_m(svbool_t, svuint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_m)))\n" "svuint32_t svsqadd_u32_m(svbool_t, svuint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_m)))\n" "svuint64_t svsqadd_u64_m(svbool_t, svuint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_m)))\n" "svuint16_t svsqadd_u16_m(svbool_t, svuint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_x)))\n" "svuint8_t svsqadd_u8_x(svbool_t, svuint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_x)))\n" "svuint32_t svsqadd_u32_x(svbool_t, svuint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_x)))\n" "svuint64_t svsqadd_u64_x(svbool_t, svuint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_x)))\n" "svuint16_t svsqadd_u16_x(svbool_t, svuint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_z)))\n" "svuint8_t svsqadd_u8_z(svbool_t, svuint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_z)))\n" "svuint32_t svsqadd_u32_z(svbool_t, svuint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_z)))\n" "svuint64_t svsqadd_u64_z(svbool_t, svuint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_z)))\n" "svuint16_t svsqadd_u16_z(svbool_t, svuint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s8)))\n" "svint8_t svsra_n_s8(svint8_t, svint8_t, uint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s32)))\n" "svint32_t svsra_n_s32(svint32_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s64)))\n" "svint64_t svsra_n_s64(svint64_t, svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s16)))\n" "svint16_t svsra_n_s16(svint16_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u8)))\n" "svuint8_t svsra_n_u8(svuint8_t, svuint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u32)))\n" "svuint32_t svsra_n_u32(svuint32_t, svuint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u64)))\n" "svuint64_t svsra_n_u64(svuint64_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u16)))\n" "svuint16_t svsra_n_u16(svuint16_t, svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u8)))\n" "svuint8_t svsri_n_u8(svuint8_t, svuint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u32)))\n" "svuint32_t svsri_n_u32(svuint32_t, svuint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u64)))\n" "svuint64_t svsri_n_u64(svuint64_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u16)))\n" "svuint16_t svsri_n_u16(svuint16_t, svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s8)))\n" "svint8_t svsri_n_s8(svint8_t, svint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s32)))\n" "svint32_t svsri_n_s32(svint32_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s64)))\n" "svint64_t svsri_n_s64(svint64_t, svint64_t, uint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s16)))\n" "svint16_t svsri_n_s16(svint16_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_u32)))\n" "void svstnt1_scatter_u32base_index_u32(svbool_t, svuint32_t, int64_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_u64)))\n" "void svstnt1_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_f64)))\n" "void svstnt1_scatter_u64base_index_f64(svbool_t, svuint64_t, int64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_f32)))\n" "void svstnt1_scatter_u32base_index_f32(svbool_t, svuint32_t, int64_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_s32)))\n" "void svstnt1_scatter_u32base_index_s32(svbool_t, svuint32_t, int64_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_s64)))\n" "void svstnt1_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_u32)))\n" "void svstnt1_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_u64)))\n" "void svstnt1_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_f64)))\n" "void svstnt1_scatter_u64base_offset_f64(svbool_t, svuint64_t, int64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_f32)))\n" "void svstnt1_scatter_u32base_offset_f32(svbool_t, svuint32_t, 
int64_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_s32)))\n" "void svstnt1_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_s64)))\n" "void svstnt1_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_u32)))\n" "void svstnt1_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_u64)))\n" "void svstnt1_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_f64)))\n" "void svstnt1_scatter_u64base_f64(svbool_t, svuint64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_f32)))\n" "void svstnt1_scatter_u32base_f32(svbool_t, svuint32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_s32)))\n" "void svstnt1_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_s64)))\n" "void svstnt1_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_u64)))\n" "void svstnt1_scatter_s64index_u64(svbool_t, uint64_t *, svint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_f64)))\n" "void svstnt1_scatter_s64index_f64(svbool_t, float64_t *, svint64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_s64)))\n" "void svstnt1_scatter_s64index_s64(svbool_t, int64_t *, svint64_t, svint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_u64)))\n" "void svstnt1_scatter_u64index_u64(svbool_t, uint64_t *, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_f64)))\n" "void svstnt1_scatter_u64index_f64(svbool_t, float64_t *, svuint64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_s64)))\n" "void svstnt1_scatter_u64index_s64(svbool_t, int64_t *, svuint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_u32)))\n" "void svstnt1_scatter_u32offset_u32(svbool_t, uint32_t *, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_f32)))\n" "void svstnt1_scatter_u32offset_f32(svbool_t, float32_t *, svuint32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_s32)))\n" "void svstnt1_scatter_u32offset_s32(svbool_t, int32_t *, svuint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_u64)))\n" "void svstnt1_scatter_s64offset_u64(svbool_t, uint64_t *, svint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_f64)))\n" "void svstnt1_scatter_s64offset_f64(svbool_t, float64_t *, svint64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_s64)))\n" "void svstnt1_scatter_s64offset_s64(svbool_t, int64_t *, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_u64)))\n" "void svstnt1_scatter_u64offset_u64(svbool_t, uint64_t *, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_f64)))\n" "void svstnt1_scatter_u64offset_f64(svbool_t, float64_t *, svuint64_t, svfloat64_t);\n" 
"__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_s64)))\n" "void svstnt1_scatter_u64offset_s64(svbool_t, int64_t *, svuint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_offset_u32)))\n" "void svstnt1b_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_offset_u64)))\n" "void svstnt1b_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_offset_s32)))\n" "void svstnt1b_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_offset_s64)))\n" "void svstnt1b_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_u32)))\n" "void svstnt1b_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_u64)))\n" "void svstnt1b_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_s32)))\n" "void svstnt1b_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_s64)))\n" "void svstnt1b_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32offset_s32)))\n" "void svstnt1b_scatter_u32offset_s32(svbool_t, int8_t *, svuint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32offset_u32)))\n" "void svstnt1b_scatter_u32offset_u32(svbool_t, uint8_t *, svuint32_t, svuint32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_s64offset_s64)))\n" "void svstnt1b_scatter_s64offset_s64(svbool_t, int8_t *, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_s64offset_u64)))\n" "void svstnt1b_scatter_s64offset_u64(svbool_t, uint8_t *, svint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64offset_s64)))\n" "void svstnt1b_scatter_u64offset_s64(svbool_t, int8_t *, svuint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64offset_u64)))\n" "void svstnt1b_scatter_u64offset_u64(svbool_t, uint8_t *, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_index_u32)))\n" "void svstnt1h_scatter_u32base_index_u32(svbool_t, svuint32_t, int64_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_index_u64)))\n" "void svstnt1h_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_index_s32)))\n" "void svstnt1h_scatter_u32base_index_s32(svbool_t, svuint32_t, int64_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_index_s64)))\n" "void svstnt1h_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_offset_u32)))\n" "void svstnt1h_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_offset_u64)))\n" "void svstnt1h_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_offset_s32)))\n" "void 
svstnt1h_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_offset_s64)))\n" "void svstnt1h_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_u32)))\n" "void svstnt1h_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_u64)))\n" "void svstnt1h_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_s32)))\n" "void svstnt1h_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_s64)))\n" "void svstnt1h_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64index_s64)))\n" "void svstnt1h_scatter_s64index_s64(svbool_t, int16_t *, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64index_u64)))\n" "void svstnt1h_scatter_s64index_u64(svbool_t, uint16_t *, svint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64index_s64)))\n" "void svstnt1h_scatter_u64index_s64(svbool_t, int16_t *, svuint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64index_u64)))\n" "void svstnt1h_scatter_u64index_u64(svbool_t, uint16_t *, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32offset_s32)))\n" "void svstnt1h_scatter_u32offset_s32(svbool_t, int16_t *, svuint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32offset_u32)))\n" "void 
svstnt1h_scatter_u32offset_u32(svbool_t, uint16_t *, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64offset_s64)))\n" "void svstnt1h_scatter_s64offset_s64(svbool_t, int16_t *, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64offset_u64)))\n" "void svstnt1h_scatter_s64offset_u64(svbool_t, uint16_t *, svint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64offset_s64)))\n" "void svstnt1h_scatter_u64offset_s64(svbool_t, int16_t *, svuint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64offset_u64)))\n" "void svstnt1h_scatter_u64offset_u64(svbool_t, uint16_t *, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_index_u64)))\n" "void svstnt1w_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_index_s64)))\n" "void svstnt1w_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_offset_u64)))\n" "void svstnt1w_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_offset_s64)))\n" "void svstnt1w_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_u64)))\n" "void svstnt1w_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_s64)))\n" "void svstnt1w_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64index_s64)))\n" "void svstnt1w_scatter_s64index_s64(svbool_t, int32_t *, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64index_u64)))\n" "void svstnt1w_scatter_s64index_u64(svbool_t, uint32_t *, svint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64index_s64)))\n" "void svstnt1w_scatter_u64index_s64(svbool_t, int32_t *, svuint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64index_u64)))\n" "void svstnt1w_scatter_u64index_u64(svbool_t, uint32_t *, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64offset_s64)))\n" "void svstnt1w_scatter_s64offset_s64(svbool_t, int32_t *, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64offset_u64)))\n" "void svstnt1w_scatter_s64offset_u64(svbool_t, uint32_t *, svint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64offset_s64)))\n" "void svstnt1w_scatter_u64offset_s64(svbool_t, int32_t *, svuint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64offset_u64)))\n" "void svstnt1w_scatter_u64offset_u64(svbool_t, uint32_t *, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u32)))\n" "svuint16_t svsubhnb_n_u32(svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u64)))\n" "svuint32_t svsubhnb_n_u64(svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u16)))\n" "svuint8_t svsubhnb_n_u16(svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s32)))\n" "svint16_t svsubhnb_n_s32(svint32_t, int32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s64)))\n" "svint32_t svsubhnb_n_s64(svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s16)))\n" "svint8_t svsubhnb_n_s16(svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u32)))\n" "svuint16_t svsubhnb_u32(svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u64)))\n" "svuint32_t svsubhnb_u64(svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u16)))\n" "svuint8_t svsubhnb_u16(svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s32)))\n" "svint16_t svsubhnb_s32(svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s64)))\n" "svint32_t svsubhnb_s64(svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s16)))\n" "svint8_t svsubhnb_s16(svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u32)))\n" "svuint16_t svsubhnt_n_u32(svuint16_t, svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u64)))\n" "svuint32_t svsubhnt_n_u64(svuint32_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u16)))\n" "svuint8_t svsubhnt_n_u16(svuint8_t, svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s32)))\n" "svint16_t svsubhnt_n_s32(svint16_t, svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s64)))\n" "svint32_t svsubhnt_n_s64(svint32_t, svint64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s16)))\n" "svint8_t svsubhnt_n_s16(svint8_t, svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u32)))\n" 
"svuint16_t svsubhnt_u32(svuint16_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u64)))\n" "svuint32_t svsubhnt_u64(svuint32_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u16)))\n" "svuint8_t svsubhnt_u16(svuint8_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s32)))\n" "svint16_t svsubhnt_s32(svint16_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s64)))\n" "svint32_t svsubhnt_s64(svint32_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s16)))\n" "svint8_t svsubhnt_s16(svint8_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s32)))\n" "svint32_t svsublb_n_s32(svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s64)))\n" "svint64_t svsublb_n_s64(svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s16)))\n" "svint16_t svsublb_n_s16(svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u32)))\n" "svuint32_t svsublb_n_u32(svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u64)))\n" "svuint64_t svsublb_n_u64(svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u16)))\n" "svuint16_t svsublb_n_u16(svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s32)))\n" "svint32_t svsublb_s32(svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s64)))\n" "svint64_t svsublb_s64(svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s16)))\n" "svint16_t svsublb_s16(svint8_t, svint8_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u32)))\n" "svuint32_t svsublb_u32(svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u64)))\n" "svuint64_t svsublb_u64(svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u16)))\n" "svuint16_t svsublb_u16(svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s32)))\n" "svint32_t svsublbt_n_s32(svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s64)))\n" "svint64_t svsublbt_n_s64(svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s16)))\n" "svint16_t svsublbt_n_s16(svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s32)))\n" "svint32_t svsublbt_s32(svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s64)))\n" "svint64_t svsublbt_s64(svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s16)))\n" "svint16_t svsublbt_s16(svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s32)))\n" "svint32_t svsublt_n_s32(svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s64)))\n" "svint64_t svsublt_n_s64(svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s16)))\n" "svint16_t svsublt_n_s16(svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u32)))\n" "svuint32_t svsublt_n_u32(svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u64)))\n" "svuint64_t svsublt_n_u64(svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u16)))\n" "svuint16_t svsublt_n_u16(svuint8_t, uint8_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s32)))\n" "svint32_t svsublt_s32(svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s64)))\n" "svint64_t svsublt_s64(svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s16)))\n" "svint16_t svsublt_s16(svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u32)))\n" "svuint32_t svsublt_u32(svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u64)))\n" "svuint64_t svsublt_u64(svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u16)))\n" "svuint16_t svsublt_u16(svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s32)))\n" "svint32_t svsubltb_n_s32(svint16_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s64)))\n" "svint64_t svsubltb_n_s64(svint32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s16)))\n" "svint16_t svsubltb_n_s16(svint8_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s32)))\n" "svint32_t svsubltb_s32(svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s64)))\n" "svint64_t svsubltb_s64(svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s16)))\n" "svint16_t svsubltb_s16(svint8_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s32)))\n" "svint32_t svsubwb_n_s32(svint32_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s64)))\n" "svint64_t svsubwb_n_s64(svint64_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s16)))\n" "svint16_t svsubwb_n_s16(svint16_t, int8_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u32)))\n" "svuint32_t svsubwb_n_u32(svuint32_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u64)))\n" "svuint64_t svsubwb_n_u64(svuint64_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u16)))\n" "svuint16_t svsubwb_n_u16(svuint16_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s32)))\n" "svint32_t svsubwb_s32(svint32_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s64)))\n" "svint64_t svsubwb_s64(svint64_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s16)))\n" "svint16_t svsubwb_s16(svint16_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u32)))\n" "svuint32_t svsubwb_u32(svuint32_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u64)))\n" "svuint64_t svsubwb_u64(svuint64_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u16)))\n" "svuint16_t svsubwb_u16(svuint16_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s32)))\n" "svint32_t svsubwt_n_s32(svint32_t, int16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s64)))\n" "svint64_t svsubwt_n_s64(svint64_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s16)))\n" "svint16_t svsubwt_n_s16(svint16_t, int8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u32)))\n" "svuint32_t svsubwt_n_u32(svuint32_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u64)))\n" "svuint64_t svsubwt_n_u64(svuint64_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u16)))\n" "svuint16_t svsubwt_n_u16(svuint16_t, uint8_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s32)))\n" "svint32_t svsubwt_s32(svint32_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s64)))\n" "svint64_t svsubwt_s64(svint64_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s16)))\n" "svint16_t svsubwt_s16(svint16_t, svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u32)))\n" "svuint32_t svsubwt_u32(svuint32_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u64)))\n" "svuint64_t svsubwt_u64(svuint64_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u16)))\n" "svuint16_t svsubwt_u16(svuint16_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u8)))\n" "svuint8_t svtbl2_u8(svuint8x2_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u32)))\n" "svuint32_t svtbl2_u32(svuint32x2_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u64)))\n" "svuint64_t svtbl2_u64(svuint64x2_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u16)))\n" "svuint16_t svtbl2_u16(svuint16x2_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s8)))\n" "svint8_t svtbl2_s8(svint8x2_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f64)))\n" "svfloat64_t svtbl2_f64(svfloat64x2_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f32)))\n" "svfloat32_t svtbl2_f32(svfloat32x2_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f16)))\n" "svfloat16_t svtbl2_f16(svfloat16x2_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s32)))\n" "svint32_t svtbl2_s32(svint32x2_t, svuint32_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s64)))\n" "svint64_t svtbl2_s64(svint64x2_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s16)))\n" "svint16_t svtbl2_s16(svint16x2_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u8)))\n" "svuint8_t svtbx_u8(svuint8_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u32)))\n" "svuint32_t svtbx_u32(svuint32_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u64)))\n" "svuint64_t svtbx_u64(svuint64_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u16)))\n" "svuint16_t svtbx_u16(svuint16_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s8)))\n" "svint8_t svtbx_s8(svint8_t, svint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f64)))\n" "svfloat64_t svtbx_f64(svfloat64_t, svfloat64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f32)))\n" "svfloat32_t svtbx_f32(svfloat32_t, svfloat32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f16)))\n" "svfloat16_t svtbx_f16(svfloat16_t, svfloat16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s32)))\n" "svint32_t svtbx_s32(svint32_t, svint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s64)))\n" "svint64_t svtbx_s64(svint64_t, svint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s16)))\n" "svint16_t svtbx_s16(svint16_t, svint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_m)))\n" "svint8_t svuqadd_n_s8_m(svbool_t, svint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_m)))\n" "svint32_t 
svuqadd_n_s32_m(svbool_t, svint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_m)))\n" "svint64_t svuqadd_n_s64_m(svbool_t, svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_m)))\n" "svint16_t svuqadd_n_s16_m(svbool_t, svint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_x)))\n" "svint8_t svuqadd_n_s8_x(svbool_t, svint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_x)))\n" "svint32_t svuqadd_n_s32_x(svbool_t, svint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_x)))\n" "svint64_t svuqadd_n_s64_x(svbool_t, svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_x)))\n" "svint16_t svuqadd_n_s16_x(svbool_t, svint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_z)))\n" "svint8_t svuqadd_n_s8_z(svbool_t, svint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_z)))\n" "svint32_t svuqadd_n_s32_z(svbool_t, svint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_z)))\n" "svint64_t svuqadd_n_s64_z(svbool_t, svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_z)))\n" "svint16_t svuqadd_n_s16_z(svbool_t, svint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_m)))\n" "svint8_t svuqadd_s8_m(svbool_t, svint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_m)))\n" "svint32_t svuqadd_s32_m(svbool_t, svint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_m)))\n" "svint64_t svuqadd_s64_m(svbool_t, svint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_m)))\n" 
"svint16_t svuqadd_s16_m(svbool_t, svint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_x)))\n" "svint8_t svuqadd_s8_x(svbool_t, svint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_x)))\n" "svint32_t svuqadd_s32_x(svbool_t, svint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_x)))\n" "svint64_t svuqadd_s64_x(svbool_t, svint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_x)))\n" "svint16_t svuqadd_s16_x(svbool_t, svint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_z)))\n" "svint8_t svuqadd_s8_z(svbool_t, svint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_z)))\n" "svint32_t svuqadd_s32_z(svbool_t, svint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_z)))\n" "svint64_t svuqadd_s64_z(svbool_t, svint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_z)))\n" "svint16_t svuqadd_s16_z(svbool_t, svint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s32)))\n" "svbool_t svwhilege_b8_s32(int32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s32)))\n" "svbool_t svwhilege_b32_s32(int32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s32)))\n" "svbool_t svwhilege_b64_s32(int32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s32)))\n" "svbool_t svwhilege_b16_s32(int32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s64)))\n" "svbool_t svwhilege_b8_s64(int64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s64)))\n" "svbool_t svwhilege_b32_s64(int64_t, 
int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s64)))\n" "svbool_t svwhilege_b64_s64(int64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s64)))\n" "svbool_t svwhilege_b16_s64(int64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u32)))\n" "svbool_t svwhilege_b8_u32(uint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u32)))\n" "svbool_t svwhilege_b32_u32(uint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u32)))\n" "svbool_t svwhilege_b64_u32(uint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u32)))\n" "svbool_t svwhilege_b16_u32(uint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u64)))\n" "svbool_t svwhilege_b8_u64(uint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u64)))\n" "svbool_t svwhilege_b32_u64(uint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u64)))\n" "svbool_t svwhilege_b64_u64(uint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u64)))\n" "svbool_t svwhilege_b16_u64(uint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s32)))\n" "svbool_t svwhilegt_b8_s32(int32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s32)))\n" "svbool_t svwhilegt_b32_s32(int32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s32)))\n" "svbool_t svwhilegt_b64_s32(int32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s32)))\n" "svbool_t svwhilegt_b16_s32(int32_t, int32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s64)))\n" 
"svbool_t svwhilegt_b8_s64(int64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s64)))\n" "svbool_t svwhilegt_b32_s64(int64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s64)))\n" "svbool_t svwhilegt_b64_s64(int64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s64)))\n" "svbool_t svwhilegt_b16_s64(int64_t, int64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u32)))\n" "svbool_t svwhilegt_b8_u32(uint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u32)))\n" "svbool_t svwhilegt_b32_u32(uint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u32)))\n" "svbool_t svwhilegt_b64_u32(uint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u32)))\n" "svbool_t svwhilegt_b16_u32(uint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u64)))\n" "svbool_t svwhilegt_b8_u64(uint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u64)))\n" "svbool_t svwhilegt_b32_u64(uint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u64)))\n" "svbool_t svwhilegt_b64_u64(uint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u64)))\n" "svbool_t svwhilegt_b16_u64(uint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u8)))\n" "svbool_t svwhilerw_u8(uint8_t const *, uint8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s8)))\n" "svbool_t svwhilerw_s8(int8_t const *, int8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u64)))\n" "svbool_t svwhilerw_u64(uint64_t const *, uint64_t const *);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f64)))\n" "svbool_t svwhilerw_f64(float64_t const *, float64_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s64)))\n" "svbool_t svwhilerw_s64(int64_t const *, int64_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u16)))\n" "svbool_t svwhilerw_u16(uint16_t const *, uint16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f16)))\n" "svbool_t svwhilerw_f16(float16_t const *, float16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s16)))\n" "svbool_t svwhilerw_s16(int16_t const *, int16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u32)))\n" "svbool_t svwhilerw_u32(uint32_t const *, uint32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f32)))\n" "svbool_t svwhilerw_f32(float32_t const *, float32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s32)))\n" "svbool_t svwhilerw_s32(int32_t const *, int32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u8)))\n" "svbool_t svwhilewr_u8(uint8_t const *, uint8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s8)))\n" "svbool_t svwhilewr_s8(int8_t const *, int8_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u64)))\n" "svbool_t svwhilewr_u64(uint64_t const *, uint64_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f64)))\n" "svbool_t svwhilewr_f64(float64_t const *, float64_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s64)))\n" "svbool_t svwhilewr_s64(int64_t const *, int64_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u16)))\n" "svbool_t svwhilewr_u16(uint16_t const *, 
uint16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f16)))\n" "svbool_t svwhilewr_f16(float16_t const *, float16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s16)))\n" "svbool_t svwhilewr_s16(int16_t const *, int16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u32)))\n" "svbool_t svwhilewr_u32(uint32_t const *, uint32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f32)))\n" "svbool_t svwhilewr_f32(float32_t const *, float32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s32)))\n" "svbool_t svwhilewr_s32(int32_t const *, int32_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u8)))\n" "svuint8_t svxar_n_u8(svuint8_t, svuint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u32)))\n" "svuint32_t svxar_n_u32(svuint32_t, svuint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u64)))\n" "svuint64_t svxar_n_u64(svuint64_t, svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u16)))\n" "svuint16_t svxar_n_u16(svuint16_t, svuint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s8)))\n" "svint8_t svxar_n_s8(svint8_t, svint8_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s32)))\n" "svint32_t svxar_n_s32(svint32_t, svint32_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s64)))\n" "svint64_t svxar_n_s64(svint64_t, svint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s16)))\n" "svint16_t svxar_n_s16(svint16_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s8)))\n" "svint8_t svaba(svint8_t, svint8_t, int8_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s32)))\n" "svint32_t svaba(svint32_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s64)))\n" "svint64_t svaba(svint64_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s16)))\n" "svint16_t svaba(svint16_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u8)))\n" "svuint8_t svaba(svuint8_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u32)))\n" "svuint32_t svaba(svuint32_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u64)))\n" "svuint64_t svaba(svuint64_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u16)))\n" "svuint16_t svaba(svuint16_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s8)))\n" "svint8_t svaba(svint8_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s32)))\n" "svint32_t svaba(svint32_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s64)))\n" "svint64_t svaba(svint64_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s16)))\n" "svint16_t svaba(svint16_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u8)))\n" "svuint8_t svaba(svuint8_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u32)))\n" "svuint32_t svaba(svuint32_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u64)))\n" "svuint64_t svaba(svuint64_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u16)))\n" "svuint16_t svaba(svuint16_t, svuint16_t, svuint16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s32)))\n" "svint32_t svabalb(svint32_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s64)))\n" "svint64_t svabalb(svint64_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s16)))\n" "svint16_t svabalb(svint16_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u32)))\n" "svuint32_t svabalb(svuint32_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u64)))\n" "svuint64_t svabalb(svuint64_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u16)))\n" "svuint16_t svabalb(svuint16_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s32)))\n" "svint32_t svabalb(svint32_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s64)))\n" "svint64_t svabalb(svint64_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s16)))\n" "svint16_t svabalb(svint16_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u32)))\n" "svuint32_t svabalb(svuint32_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u64)))\n" "svuint64_t svabalb(svuint64_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u16)))\n" "svuint16_t svabalb(svuint16_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s32)))\n" "svint32_t svabalt(svint32_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s64)))\n" "svint64_t svabalt(svint64_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s16)))\n" 
"svint16_t svabalt(svint16_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u32)))\n" "svuint32_t svabalt(svuint32_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u64)))\n" "svuint64_t svabalt(svuint64_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u16)))\n" "svuint16_t svabalt(svuint16_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s32)))\n" "svint32_t svabalt(svint32_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s64)))\n" "svint64_t svabalt(svint64_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s16)))\n" "svint16_t svabalt(svint16_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u32)))\n" "svuint32_t svabalt(svuint32_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u64)))\n" "svuint64_t svabalt(svuint64_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u16)))\n" "svuint16_t svabalt(svuint16_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s32)))\n" "svint32_t svabdlb(svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s64)))\n" "svint64_t svabdlb(svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s16)))\n" "svint16_t svabdlb(svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u32)))\n" "svuint32_t svabdlb(svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u64)))\n" "svuint64_t svabdlb(svuint32_t, uint32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u16)))\n" "svuint16_t svabdlb(svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s32)))\n" "svint32_t svabdlb(svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s64)))\n" "svint64_t svabdlb(svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s16)))\n" "svint16_t svabdlb(svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u32)))\n" "svuint32_t svabdlb(svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u64)))\n" "svuint64_t svabdlb(svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u16)))\n" "svuint16_t svabdlb(svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s32)))\n" "svint32_t svabdlt(svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s64)))\n" "svint64_t svabdlt(svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s16)))\n" "svint16_t svabdlt(svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u32)))\n" "svuint32_t svabdlt(svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u64)))\n" "svuint64_t svabdlt(svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u16)))\n" "svuint16_t svabdlt(svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s32)))\n" "svint32_t svabdlt(svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s64)))\n" "svint64_t svabdlt(svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s16)))\n" "svint16_t svabdlt(svint8_t, 
svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u32)))\n" "svuint32_t svabdlt(svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u64)))\n" "svuint64_t svabdlt(svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u16)))\n" "svuint16_t svabdlt(svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_m)))\n" "svint32_t svadalp_m(svbool_t, svint32_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_m)))\n" "svint64_t svadalp_m(svbool_t, svint64_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_m)))\n" "svint16_t svadalp_m(svbool_t, svint16_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_x)))\n" "svint32_t svadalp_x(svbool_t, svint32_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_x)))\n" "svint64_t svadalp_x(svbool_t, svint64_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_x)))\n" "svint16_t svadalp_x(svbool_t, svint16_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_z)))\n" "svint32_t svadalp_z(svbool_t, svint32_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_z)))\n" "svint64_t svadalp_z(svbool_t, svint64_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_z)))\n" "svint16_t svadalp_z(svbool_t, svint16_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_m)))\n" "svuint32_t svadalp_m(svbool_t, svuint32_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_m)))\n" "svuint64_t svadalp_m(svbool_t, svuint64_t, svuint32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_m)))\n" "svuint16_t svadalp_m(svbool_t, svuint16_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_x)))\n" "svuint32_t svadalp_x(svbool_t, svuint32_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_x)))\n" "svuint64_t svadalp_x(svbool_t, svuint64_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_x)))\n" "svuint16_t svadalp_x(svbool_t, svuint16_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_z)))\n" "svuint32_t svadalp_z(svbool_t, svuint32_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_z)))\n" "svuint64_t svadalp_z(svbool_t, svuint64_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_z)))\n" "svuint16_t svadalp_z(svbool_t, svuint16_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_n_u32)))\n" "svuint32_t svadclb(svuint32_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_n_u64)))\n" "svuint64_t svadclb(svuint64_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_u32)))\n" "svuint32_t svadclb(svuint32_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_u64)))\n" "svuint64_t svadclb(svuint64_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_n_u32)))\n" "svuint32_t svadclt(svuint32_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_n_u64)))\n" "svuint64_t svadclt(svuint64_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_u32)))\n" "svuint32_t svadclt(svuint32_t, svuint32_t, svuint32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_u64)))\n" "svuint64_t svadclt(svuint64_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u32)))\n" "svuint16_t svaddhnb(svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u64)))\n" "svuint32_t svaddhnb(svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u16)))\n" "svuint8_t svaddhnb(svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s32)))\n" "svint16_t svaddhnb(svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s64)))\n" "svint32_t svaddhnb(svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s16)))\n" "svint8_t svaddhnb(svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u32)))\n" "svuint16_t svaddhnb(svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u64)))\n" "svuint32_t svaddhnb(svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u16)))\n" "svuint8_t svaddhnb(svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s32)))\n" "svint16_t svaddhnb(svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s64)))\n" "svint32_t svaddhnb(svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s16)))\n" "svint8_t svaddhnb(svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u32)))\n" "svuint16_t svaddhnt(svuint16_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u64)))\n" "svuint32_t svaddhnt(svuint32_t, svuint64_t, uint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u16)))\n" "svuint8_t svaddhnt(svuint8_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s32)))\n" "svint16_t svaddhnt(svint16_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s64)))\n" "svint32_t svaddhnt(svint32_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s16)))\n" "svint8_t svaddhnt(svint8_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u32)))\n" "svuint16_t svaddhnt(svuint16_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u64)))\n" "svuint32_t svaddhnt(svuint32_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u16)))\n" "svuint8_t svaddhnt(svuint8_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s32)))\n" "svint16_t svaddhnt(svint16_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s64)))\n" "svint32_t svaddhnt(svint32_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s16)))\n" "svint8_t svaddhnt(svint8_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s32)))\n" "svint32_t svaddlb(svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s64)))\n" "svint64_t svaddlb(svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s16)))\n" "svint16_t svaddlb(svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u32)))\n" "svuint32_t svaddlb(svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u64)))\n" "svuint64_t svaddlb(svuint32_t, 
uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u16)))\n" "svuint16_t svaddlb(svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s32)))\n" "svint32_t svaddlb(svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s64)))\n" "svint64_t svaddlb(svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s16)))\n" "svint16_t svaddlb(svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u32)))\n" "svuint32_t svaddlb(svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u64)))\n" "svuint64_t svaddlb(svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u16)))\n" "svuint16_t svaddlb(svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s32)))\n" "svint32_t svaddlbt(svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s64)))\n" "svint64_t svaddlbt(svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s16)))\n" "svint16_t svaddlbt(svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s32)))\n" "svint32_t svaddlbt(svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s64)))\n" "svint64_t svaddlbt(svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s16)))\n" "svint16_t svaddlbt(svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s32)))\n" "svint32_t svaddlt(svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s64)))\n" "svint64_t svaddlt(svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s16)))\n" "svint16_t 
svaddlt(svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u32)))\n" "svuint32_t svaddlt(svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u64)))\n" "svuint64_t svaddlt(svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u16)))\n" "svuint16_t svaddlt(svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s32)))\n" "svint32_t svaddlt(svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s64)))\n" "svint64_t svaddlt(svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s16)))\n" "svint16_t svaddlt(svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u32)))\n" "svuint32_t svaddlt(svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u64)))\n" "svuint64_t svaddlt(svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u16)))\n" "svuint16_t svaddlt(svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f64_m)))\n" "svfloat64_t svaddp_m(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f32_m)))\n" "svfloat32_t svaddp_m(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f16_m)))\n" "svfloat16_t svaddp_m(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f64_x)))\n" "svfloat64_t svaddp_x(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f32_x)))\n" "svfloat32_t svaddp_x(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f16_x)))\n" "svfloat16_t svaddp_x(svbool_t, 
svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u8_m)))\n" "svuint8_t svaddp_m(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u32_m)))\n" "svuint32_t svaddp_m(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u64_m)))\n" "svuint64_t svaddp_m(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u16_m)))\n" "svuint16_t svaddp_m(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s8_m)))\n" "svint8_t svaddp_m(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s32_m)))\n" "svint32_t svaddp_m(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s64_m)))\n" "svint64_t svaddp_m(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s16_m)))\n" "svint16_t svaddp_m(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u8_x)))\n" "svuint8_t svaddp_x(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u32_x)))\n" "svuint32_t svaddp_x(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u64_x)))\n" "svuint64_t svaddp_x(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u16_x)))\n" "svuint16_t svaddp_x(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s8_x)))\n" "svint8_t svaddp_x(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s32_x)))\n" "svint32_t svaddp_x(svbool_t, svint32_t, svint32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s64_x)))\n" "svint64_t svaddp_x(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s16_x)))\n" "svint16_t svaddp_x(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s32)))\n" "svint32_t svaddwb(svint32_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s64)))\n" "svint64_t svaddwb(svint64_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s16)))\n" "svint16_t svaddwb(svint16_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u32)))\n" "svuint32_t svaddwb(svuint32_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u64)))\n" "svuint64_t svaddwb(svuint64_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u16)))\n" "svuint16_t svaddwb(svuint16_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s32)))\n" "svint32_t svaddwb(svint32_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s64)))\n" "svint64_t svaddwb(svint64_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s16)))\n" "svint16_t svaddwb(svint16_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u32)))\n" "svuint32_t svaddwb(svuint32_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u64)))\n" "svuint64_t svaddwb(svuint64_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u16)))\n" "svuint16_t svaddwb(svuint16_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s32)))\n" "svint32_t svaddwt(svint32_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s64)))\n" "svint64_t 
svaddwt(svint64_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s16)))\n" "svint16_t svaddwt(svint16_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u32)))\n" "svuint32_t svaddwt(svuint32_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u64)))\n" "svuint64_t svaddwt(svuint64_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u16)))\n" "svuint16_t svaddwt(svuint16_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s32)))\n" "svint32_t svaddwt(svint32_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s64)))\n" "svint64_t svaddwt(svint64_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s16)))\n" "svint16_t svaddwt(svint16_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u32)))\n" "svuint32_t svaddwt(svuint32_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u64)))\n" "svuint64_t svaddwt(svuint64_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u16)))\n" "svuint16_t svaddwt(svuint16_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u8)))\n" "svuint8_t svbcax(svuint8_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u32)))\n" "svuint32_t svbcax(svuint32_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u64)))\n" "svuint64_t svbcax(svuint64_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u16)))\n" "svuint16_t svbcax(svuint16_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s8)))\n" "svint8_t svbcax(svint8_t, svint8_t, int8_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s32)))\n" "svint32_t svbcax(svint32_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s64)))\n" "svint64_t svbcax(svint64_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s16)))\n" "svint16_t svbcax(svint16_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u8)))\n" "svuint8_t svbcax(svuint8_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u32)))\n" "svuint32_t svbcax(svuint32_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u64)))\n" "svuint64_t svbcax(svuint64_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u16)))\n" "svuint16_t svbcax(svuint16_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s8)))\n" "svint8_t svbcax(svint8_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s32)))\n" "svint32_t svbcax(svint32_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s64)))\n" "svint64_t svbcax(svint64_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s16)))\n" "svint16_t svbcax(svint16_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u8)))\n" "svuint8_t svbsl1n(svuint8_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u32)))\n" "svuint32_t svbsl1n(svuint32_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u64)))\n" "svuint64_t svbsl1n(svuint64_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u16)))\n" "svuint16_t 
svbsl1n(svuint16_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s8)))\n" "svint8_t svbsl1n(svint8_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s32)))\n" "svint32_t svbsl1n(svint32_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s64)))\n" "svint64_t svbsl1n(svint64_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s16)))\n" "svint16_t svbsl1n(svint16_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u8)))\n" "svuint8_t svbsl1n(svuint8_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u32)))\n" "svuint32_t svbsl1n(svuint32_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u64)))\n" "svuint64_t svbsl1n(svuint64_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u16)))\n" "svuint16_t svbsl1n(svuint16_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s8)))\n" "svint8_t svbsl1n(svint8_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s32)))\n" "svint32_t svbsl1n(svint32_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s64)))\n" "svint64_t svbsl1n(svint64_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s16)))\n" "svint16_t svbsl1n(svint16_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u8)))\n" "svuint8_t svbsl2n(svuint8_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u32)))\n" "svuint32_t svbsl2n(svuint32_t, svuint32_t, uint32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u64)))\n" "svuint64_t svbsl2n(svuint64_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u16)))\n" "svuint16_t svbsl2n(svuint16_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s8)))\n" "svint8_t svbsl2n(svint8_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s32)))\n" "svint32_t svbsl2n(svint32_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s64)))\n" "svint64_t svbsl2n(svint64_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s16)))\n" "svint16_t svbsl2n(svint16_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u8)))\n" "svuint8_t svbsl2n(svuint8_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u32)))\n" "svuint32_t svbsl2n(svuint32_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u64)))\n" "svuint64_t svbsl2n(svuint64_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u16)))\n" "svuint16_t svbsl2n(svuint16_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s8)))\n" "svint8_t svbsl2n(svint8_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s32)))\n" "svint32_t svbsl2n(svint32_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s64)))\n" "svint64_t svbsl2n(svint64_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s16)))\n" "svint16_t svbsl2n(svint16_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u8)))\n" "svuint8_t 
svbsl(svuint8_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u32)))\n" "svuint32_t svbsl(svuint32_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u64)))\n" "svuint64_t svbsl(svuint64_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u16)))\n" "svuint16_t svbsl(svuint16_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s8)))\n" "svint8_t svbsl(svint8_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s32)))\n" "svint32_t svbsl(svint32_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s64)))\n" "svint64_t svbsl(svint64_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s16)))\n" "svint16_t svbsl(svint16_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u8)))\n" "svuint8_t svbsl(svuint8_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u32)))\n" "svuint32_t svbsl(svuint32_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u64)))\n" "svuint64_t svbsl(svuint64_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u16)))\n" "svuint16_t svbsl(svuint16_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s8)))\n" "svint8_t svbsl(svint8_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s32)))\n" "svint32_t svbsl(svint32_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s64)))\n" "svint64_t svbsl(svint64_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s16)))\n" "svint16_t 
svbsl(svint16_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u8)))\n" "svuint8_t svcadd(svuint8_t, svuint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u32)))\n" "svuint32_t svcadd(svuint32_t, svuint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u64)))\n" "svuint64_t svcadd(svuint64_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u16)))\n" "svuint16_t svcadd(svuint16_t, svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s8)))\n" "svint8_t svcadd(svint8_t, svint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s32)))\n" "svint32_t svcadd(svint32_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s64)))\n" "svint64_t svcadd(svint64_t, svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s16)))\n" "svint16_t svcadd(svint16_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_s32)))\n" "svint32_t svcdot(svint32_t, svint8_t, svint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_s64)))\n" "svint64_t svcdot(svint64_t, svint16_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_lane_s32)))\n" "svint32_t svcdot_lane(svint32_t, svint8_t, svint8_t, uint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_lane_s64)))\n" "svint64_t svcdot_lane(svint64_t, svint16_t, svint16_t, uint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u8)))\n" "svuint8_t svcmla(svuint8_t, svuint8_t, svuint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u32)))\n" "svuint32_t svcmla(svuint32_t, svuint32_t, svuint32_t, 
uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u64)))\n" "svuint64_t svcmla(svuint64_t, svuint64_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u16)))\n" "svuint16_t svcmla(svuint16_t, svuint16_t, svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s8)))\n" "svint8_t svcmla(svint8_t, svint8_t, svint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s32)))\n" "svint32_t svcmla(svint32_t, svint32_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s64)))\n" "svint64_t svcmla(svint64_t, svint64_t, svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s16)))\n" "svint16_t svcmla(svint16_t, svint16_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_u32)))\n" "svuint32_t svcmla_lane(svuint32_t, svuint32_t, svuint32_t, uint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_u16)))\n" "svuint16_t svcmla_lane(svuint16_t, svuint16_t, svuint16_t, uint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_s32)))\n" "svint32_t svcmla_lane(svint32_t, svint32_t, svint32_t, uint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_s16)))\n" "svint16_t svcmla_lane(svint16_t, svint16_t, svint16_t, uint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f32_f16_m)))\n" "svfloat32_t svcvtlt_f32_m(svfloat32_t, svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f32_f16_x)))\n" "svfloat32_t svcvtlt_f32_x(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f64_f32_m)))\n" "svfloat64_t svcvtlt_f64_m(svfloat64_t, svbool_t, svfloat32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f64_f32_x)))\n" "svfloat64_t svcvtlt_f64_x(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_f16_f32_m)))\n" "svfloat16_t svcvtnt_f16_m(svfloat16_t, svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_f32_f64_m)))\n" "svfloat32_t svcvtnt_f32_m(svfloat32_t, svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_m)))\n" "svfloat32_t svcvtx_f32_m(svfloat32_t, svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_x)))\n" "svfloat32_t svcvtx_f32_x(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_z)))\n" "svfloat32_t svcvtx_f32_z(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtxnt_f32_f64_m)))\n" "svfloat32_t svcvtxnt_f32_m(svfloat32_t, svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u8)))\n" "svuint8_t sveor3(svuint8_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u32)))\n" "svuint32_t sveor3(svuint32_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u64)))\n" "svuint64_t sveor3(svuint64_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u16)))\n" "svuint16_t sveor3(svuint16_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s8)))\n" "svint8_t sveor3(svint8_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s32)))\n" "svint32_t sveor3(svint32_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s64)))\n" "svint64_t sveor3(svint64_t, svint64_t, int64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s16)))\n" "svint16_t sveor3(svint16_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u8)))\n" "svuint8_t sveor3(svuint8_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u32)))\n" "svuint32_t sveor3(svuint32_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u64)))\n" "svuint64_t sveor3(svuint64_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u16)))\n" "svuint16_t sveor3(svuint16_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s8)))\n" "svint8_t sveor3(svint8_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s32)))\n" "svint32_t sveor3(svint32_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s64)))\n" "svint64_t sveor3(svint64_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s16)))\n" "svint16_t sveor3(svint16_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u8)))\n" "svuint8_t sveorbt(svuint8_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u32)))\n" "svuint32_t sveorbt(svuint32_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u64)))\n" "svuint64_t sveorbt(svuint64_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u16)))\n" "svuint16_t sveorbt(svuint16_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s8)))\n" "svint8_t sveorbt(svint8_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s32)))\n" "svint32_t 
sveorbt(svint32_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s64)))\n" "svint64_t sveorbt(svint64_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s16)))\n" "svint16_t sveorbt(svint16_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u8)))\n" "svuint8_t sveorbt(svuint8_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u32)))\n" "svuint32_t sveorbt(svuint32_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u64)))\n" "svuint64_t sveorbt(svuint64_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u16)))\n" "svuint16_t sveorbt(svuint16_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s8)))\n" "svint8_t sveorbt(svint8_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s32)))\n" "svint32_t sveorbt(svint32_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s64)))\n" "svint64_t sveorbt(svint64_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s16)))\n" "svint16_t sveorbt(svint16_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u8)))\n" "svuint8_t sveortb(svuint8_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u32)))\n" "svuint32_t sveortb(svuint32_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u64)))\n" "svuint64_t sveortb(svuint64_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u16)))\n" "svuint16_t sveortb(svuint16_t, svuint16_t, uint16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s8)))\n" "svint8_t sveortb(svint8_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s32)))\n" "svint32_t sveortb(svint32_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s64)))\n" "svint64_t sveortb(svint64_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s16)))\n" "svint16_t sveortb(svint16_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u8)))\n" "svuint8_t sveortb(svuint8_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u32)))\n" "svuint32_t sveortb(svuint32_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u64)))\n" "svuint64_t sveortb(svuint64_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u16)))\n" "svuint16_t sveortb(svuint16_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s8)))\n" "svint8_t sveortb(svint8_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s32)))\n" "svint32_t sveortb(svint32_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s64)))\n" "svint64_t sveortb(svint64_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s16)))\n" "svint16_t sveortb(svint16_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_m)))\n" "svint8_t svhadd_m(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_m)))\n" "svint32_t svhadd_m(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_m)))\n" "svint64_t 
svhadd_m(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_m)))\n" "svint16_t svhadd_m(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_x)))\n" "svint8_t svhadd_x(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_x)))\n" "svint32_t svhadd_x(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_x)))\n" "svint64_t svhadd_x(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_x)))\n" "svint16_t svhadd_x(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_z)))\n" "svint8_t svhadd_z(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_z)))\n" "svint32_t svhadd_z(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_z)))\n" "svint64_t svhadd_z(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_z)))\n" "svint16_t svhadd_z(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_m)))\n" "svuint8_t svhadd_m(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_m)))\n" "svuint32_t svhadd_m(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_m)))\n" "svuint64_t svhadd_m(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_m)))\n" "svuint16_t svhadd_m(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_x)))\n" "svuint8_t svhadd_x(svbool_t, svuint8_t, uint8_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_x)))\n" "svuint32_t svhadd_x(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_x)))\n" "svuint64_t svhadd_x(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_x)))\n" "svuint16_t svhadd_x(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_z)))\n" "svuint8_t svhadd_z(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_z)))\n" "svuint32_t svhadd_z(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_z)))\n" "svuint64_t svhadd_z(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_z)))\n" "svuint16_t svhadd_z(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_m)))\n" "svint8_t svhadd_m(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_m)))\n" "svint32_t svhadd_m(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_m)))\n" "svint64_t svhadd_m(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_m)))\n" "svint16_t svhadd_m(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_x)))\n" "svint8_t svhadd_x(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_x)))\n" "svint32_t svhadd_x(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_x)))\n" "svint64_t svhadd_x(svbool_t, svint64_t, svint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_x)))\n" "svint16_t svhadd_x(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_z)))\n" "svint8_t svhadd_z(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_z)))\n" "svint32_t svhadd_z(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_z)))\n" "svint64_t svhadd_z(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_z)))\n" "svint16_t svhadd_z(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_m)))\n" "svuint8_t svhadd_m(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_m)))\n" "svuint32_t svhadd_m(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_m)))\n" "svuint64_t svhadd_m(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_m)))\n" "svuint16_t svhadd_m(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_x)))\n" "svuint8_t svhadd_x(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_x)))\n" "svuint32_t svhadd_x(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_x)))\n" "svuint64_t svhadd_x(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_x)))\n" "svuint16_t svhadd_x(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_z)))\n" "svuint8_t svhadd_z(svbool_t, svuint8_t, svuint8_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_z)))\n" "svuint32_t svhadd_z(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_z)))\n" "svuint64_t svhadd_z(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_z)))\n" "svuint16_t svhadd_z(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_u32_z)))\n" "svuint32_t svhistcnt_z(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_u64_z)))\n" "svuint64_t svhistcnt_z(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_s32_z)))\n" "svuint32_t svhistcnt_z(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_s64_z)))\n" "svuint64_t svhistcnt_z(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistseg_u8)))\n" "svuint8_t svhistseg(svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistseg_s8)))\n" "svuint8_t svhistseg(svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_m)))\n" "svint8_t svhsub_m(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_m)))\n" "svint32_t svhsub_m(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_m)))\n" "svint64_t svhsub_m(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_m)))\n" "svint16_t svhsub_m(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_x)))\n" "svint8_t svhsub_x(svbool_t, svint8_t, int8_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_x)))\n" "svint32_t svhsub_x(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_x)))\n" "svint64_t svhsub_x(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_x)))\n" "svint16_t svhsub_x(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_z)))\n" "svint8_t svhsub_z(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_z)))\n" "svint32_t svhsub_z(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_z)))\n" "svint64_t svhsub_z(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_z)))\n" "svint16_t svhsub_z(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_m)))\n" "svuint8_t svhsub_m(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_m)))\n" "svuint32_t svhsub_m(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_m)))\n" "svuint64_t svhsub_m(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_m)))\n" "svuint16_t svhsub_m(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_x)))\n" "svuint8_t svhsub_x(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_x)))\n" "svuint32_t svhsub_x(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_x)))\n" "svuint64_t svhsub_x(svbool_t, svuint64_t, uint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_x)))\n" "svuint16_t svhsub_x(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_z)))\n" "svuint8_t svhsub_z(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_z)))\n" "svuint32_t svhsub_z(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_z)))\n" "svuint64_t svhsub_z(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_z)))\n" "svuint16_t svhsub_z(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_m)))\n" "svint8_t svhsub_m(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_m)))\n" "svint32_t svhsub_m(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_m)))\n" "svint64_t svhsub_m(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_m)))\n" "svint16_t svhsub_m(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_x)))\n" "svint8_t svhsub_x(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_x)))\n" "svint32_t svhsub_x(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_x)))\n" "svint64_t svhsub_x(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_x)))\n" "svint16_t svhsub_x(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_z)))\n" "svint8_t svhsub_z(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_z)))\n" 
"svint32_t svhsub_z(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_z)))\n" "svint64_t svhsub_z(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_z)))\n" "svint16_t svhsub_z(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_m)))\n" "svuint8_t svhsub_m(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_m)))\n" "svuint32_t svhsub_m(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_m)))\n" "svuint64_t svhsub_m(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_m)))\n" "svuint16_t svhsub_m(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_x)))\n" "svuint8_t svhsub_x(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_x)))\n" "svuint32_t svhsub_x(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_x)))\n" "svuint64_t svhsub_x(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_x)))\n" "svuint16_t svhsub_x(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_z)))\n" "svuint8_t svhsub_z(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_z)))\n" "svuint32_t svhsub_z(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_z)))\n" "svuint64_t svhsub_z(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_z)))\n" "svuint16_t svhsub_z(svbool_t, svuint16_t, svuint16_t);\n" 
"__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_m)))\n" "svint8_t svhsubr_m(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_m)))\n" "svint32_t svhsubr_m(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_m)))\n" "svint64_t svhsubr_m(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_m)))\n" "svint16_t svhsubr_m(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_x)))\n" "svint8_t svhsubr_x(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_x)))\n" "svint32_t svhsubr_x(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_x)))\n" "svint64_t svhsubr_x(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_x)))\n" "svint16_t svhsubr_x(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_z)))\n" "svint8_t svhsubr_z(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_z)))\n" "svint32_t svhsubr_z(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_z)))\n" "svint64_t svhsubr_z(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_z)))\n" "svint16_t svhsubr_z(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_m)))\n" "svuint8_t svhsubr_m(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_m)))\n" "svuint32_t svhsubr_m(svbool_t, svuint32_t, uint32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_m)))\n" "svuint64_t svhsubr_m(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_m)))\n" "svuint16_t svhsubr_m(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_x)))\n" "svuint8_t svhsubr_x(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_x)))\n" "svuint32_t svhsubr_x(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_x)))\n" "svuint64_t svhsubr_x(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_x)))\n" "svuint16_t svhsubr_x(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_z)))\n" "svuint8_t svhsubr_z(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_z)))\n" "svuint32_t svhsubr_z(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_z)))\n" "svuint64_t svhsubr_z(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_z)))\n" "svuint16_t svhsubr_z(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_m)))\n" "svint8_t svhsubr_m(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_m)))\n" "svint32_t svhsubr_m(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_m)))\n" "svint64_t svhsubr_m(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_m)))\n" "svint16_t svhsubr_m(svbool_t, svint16_t, svint16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_x)))\n" "svint8_t svhsubr_x(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_x)))\n" "svint32_t svhsubr_x(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_x)))\n" "svint64_t svhsubr_x(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_x)))\n" "svint16_t svhsubr_x(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_z)))\n" "svint8_t svhsubr_z(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_z)))\n" "svint32_t svhsubr_z(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_z)))\n" "svint64_t svhsubr_z(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_z)))\n" "svint16_t svhsubr_z(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_m)))\n" "svuint8_t svhsubr_m(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_m)))\n" "svuint32_t svhsubr_m(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_m)))\n" "svuint64_t svhsubr_m(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_m)))\n" "svuint16_t svhsubr_m(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_x)))\n" "svuint8_t svhsubr_x(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_x)))\n" "svuint32_t svhsubr_x(svbool_t, svuint32_t, svuint32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_x)))\n" "svuint64_t svhsubr_x(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_x)))\n" "svuint16_t svhsubr_x(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_z)))\n" "svuint8_t svhsubr_z(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_z)))\n" "svuint32_t svhsubr_z(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_z)))\n" "svuint64_t svhsubr_z(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_z)))\n" "svuint16_t svhsubr_z(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_u32)))\n" "svuint32_t svldnt1_gather_index_u32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_u64)))\n" "svuint64_t svldnt1_gather_index_u64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_f64)))\n" "svfloat64_t svldnt1_gather_index_f64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_f32)))\n" "svfloat32_t svldnt1_gather_index_f32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_s32)))\n" "svint32_t svldnt1_gather_index_s32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_s64)))\n" "svint64_t svldnt1_gather_index_s64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_u32)))\n" "svuint32_t 
svldnt1_gather_offset_u32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_u64)))\n" "svuint64_t svldnt1_gather_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_f64)))\n" "svfloat64_t svldnt1_gather_offset_f64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_f32)))\n" "svfloat32_t svldnt1_gather_offset_f32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_s32)))\n" "svint32_t svldnt1_gather_offset_s32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_s64)))\n" "svint64_t svldnt1_gather_offset_s64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_u32)))\n" "svuint32_t svldnt1_gather_u32(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_u64)))\n" "svuint64_t svldnt1_gather_u64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_f64)))\n" "svfloat64_t svldnt1_gather_f64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_f32)))\n" "svfloat32_t svldnt1_gather_f32(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_s32)))\n" "svint32_t svldnt1_gather_s32(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_s64)))\n" "svint64_t svldnt1_gather_s64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_u64)))\n" "svuint64_t svldnt1_gather_index(svbool_t, uint64_t 
const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_f64)))\n" "svfloat64_t svldnt1_gather_index(svbool_t, float64_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_s64)))\n" "svint64_t svldnt1_gather_index(svbool_t, int64_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_u64)))\n" "svuint64_t svldnt1_gather_index(svbool_t, uint64_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_f64)))\n" "svfloat64_t svldnt1_gather_index(svbool_t, float64_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_s64)))\n" "svint64_t svldnt1_gather_index(svbool_t, int64_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_u32)))\n" "svuint32_t svldnt1_gather_offset(svbool_t, uint32_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_f32)))\n" "svfloat32_t svldnt1_gather_offset(svbool_t, float32_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_s32)))\n" "svint32_t svldnt1_gather_offset(svbool_t, int32_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_u64)))\n" "svuint64_t svldnt1_gather_offset(svbool_t, uint64_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_f64)))\n" "svfloat64_t svldnt1_gather_offset(svbool_t, float64_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_s64)))\n" "svint64_t svldnt1_gather_offset(svbool_t, int64_t const *, svint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_u64)))\n" "svuint64_t svldnt1_gather_offset(svbool_t, uint64_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_f64)))\n" "svfloat64_t svldnt1_gather_offset(svbool_t, float64_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_s64)))\n" "svint64_t svldnt1_gather_offset(svbool_t, int64_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_offset_u32)))\n" "svuint32_t svldnt1sb_gather_offset_u32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_offset_u64)))\n" "svuint64_t svldnt1sb_gather_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_offset_s32)))\n" "svint32_t svldnt1sb_gather_offset_s32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_offset_s64)))\n" "svint64_t svldnt1sb_gather_offset_s64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_u32)))\n" "svuint32_t svldnt1sb_gather_u32(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_u64)))\n" "svuint64_t svldnt1sb_gather_u64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_s32)))\n" "svint32_t svldnt1sb_gather_s32(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_s64)))\n" "svint64_t svldnt1sb_gather_s64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32offset_u32)))\n" "svuint32_t 
svldnt1sb_gather_offset_u32(svbool_t, int8_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32offset_s32)))\n" "svint32_t svldnt1sb_gather_offset_s32(svbool_t, int8_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_s64offset_u64)))\n" "svuint64_t svldnt1sb_gather_offset_u64(svbool_t, int8_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_s64offset_s64)))\n" "svint64_t svldnt1sb_gather_offset_s64(svbool_t, int8_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64offset_u64)))\n" "svuint64_t svldnt1sb_gather_offset_u64(svbool_t, int8_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64offset_s64)))\n" "svint64_t svldnt1sb_gather_offset_s64(svbool_t, int8_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_index_u32)))\n" "svuint32_t svldnt1sh_gather_index_u32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_index_u64)))\n" "svuint64_t svldnt1sh_gather_index_u64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_index_s32)))\n" "svint32_t svldnt1sh_gather_index_s32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_index_s64)))\n" "svint64_t svldnt1sh_gather_index_s64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_offset_u32)))\n" "svuint32_t svldnt1sh_gather_offset_u32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_offset_u64)))\n" "svuint64_t 
svldnt1sh_gather_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_offset_s32)))\n" "svint32_t svldnt1sh_gather_offset_s32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_offset_s64)))\n" "svint64_t svldnt1sh_gather_offset_s64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_u32)))\n" "svuint32_t svldnt1sh_gather_u32(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_u64)))\n" "svuint64_t svldnt1sh_gather_u64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_s32)))\n" "svint32_t svldnt1sh_gather_s32(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_s64)))\n" "svint64_t svldnt1sh_gather_s64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64index_u64)))\n" "svuint64_t svldnt1sh_gather_index_u64(svbool_t, int16_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64index_s64)))\n" "svint64_t svldnt1sh_gather_index_s64(svbool_t, int16_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64index_u64)))\n" "svuint64_t svldnt1sh_gather_index_u64(svbool_t, int16_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64index_s64)))\n" "svint64_t svldnt1sh_gather_index_s64(svbool_t, int16_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32offset_u32)))\n" "svuint32_t svldnt1sh_gather_offset_u32(svbool_t, int16_t const *, svuint32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32offset_s32)))\n" "svint32_t svldnt1sh_gather_offset_s32(svbool_t, int16_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64offset_u64)))\n" "svuint64_t svldnt1sh_gather_offset_u64(svbool_t, int16_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64offset_s64)))\n" "svint64_t svldnt1sh_gather_offset_s64(svbool_t, int16_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64offset_u64)))\n" "svuint64_t svldnt1sh_gather_offset_u64(svbool_t, int16_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64offset_s64)))\n" "svint64_t svldnt1sh_gather_offset_s64(svbool_t, int16_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_index_u64)))\n" "svuint64_t svldnt1sw_gather_index_u64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_index_s64)))\n" "svint64_t svldnt1sw_gather_index_s64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_offset_u64)))\n" "svuint64_t svldnt1sw_gather_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_offset_s64)))\n" "svint64_t svldnt1sw_gather_offset_s64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_u64)))\n" "svuint64_t svldnt1sw_gather_u64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_s64)))\n" "svint64_t svldnt1sw_gather_s64(svbool_t, svuint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64index_u64)))\n" "svuint64_t svldnt1sw_gather_index_u64(svbool_t, int32_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64index_s64)))\n" "svint64_t svldnt1sw_gather_index_s64(svbool_t, int32_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64index_u64)))\n" "svuint64_t svldnt1sw_gather_index_u64(svbool_t, int32_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64index_s64)))\n" "svint64_t svldnt1sw_gather_index_s64(svbool_t, int32_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64offset_u64)))\n" "svuint64_t svldnt1sw_gather_offset_u64(svbool_t, int32_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64offset_s64)))\n" "svint64_t svldnt1sw_gather_offset_s64(svbool_t, int32_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64offset_u64)))\n" "svuint64_t svldnt1sw_gather_offset_u64(svbool_t, int32_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64offset_s64)))\n" "svint64_t svldnt1sw_gather_offset_s64(svbool_t, int32_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_offset_u32)))\n" "svuint32_t svldnt1ub_gather_offset_u32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_offset_u64)))\n" "svuint64_t svldnt1ub_gather_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_offset_s32)))\n" "svint32_t svldnt1ub_gather_offset_s32(svbool_t, svuint32_t, int64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_offset_s64)))\n" "svint64_t svldnt1ub_gather_offset_s64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_u32)))\n" "svuint32_t svldnt1ub_gather_u32(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_u64)))\n" "svuint64_t svldnt1ub_gather_u64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_s32)))\n" "svint32_t svldnt1ub_gather_s32(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_s64)))\n" "svint64_t svldnt1ub_gather_s64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32offset_u32)))\n" "svuint32_t svldnt1ub_gather_offset_u32(svbool_t, uint8_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32offset_s32)))\n" "svint32_t svldnt1ub_gather_offset_s32(svbool_t, uint8_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_s64offset_u64)))\n" "svuint64_t svldnt1ub_gather_offset_u64(svbool_t, uint8_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_s64offset_s64)))\n" "svint64_t svldnt1ub_gather_offset_s64(svbool_t, uint8_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64offset_u64)))\n" "svuint64_t svldnt1ub_gather_offset_u64(svbool_t, uint8_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64offset_s64)))\n" "svint64_t svldnt1ub_gather_offset_s64(svbool_t, uint8_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_index_u32)))\n" "svuint32_t 
svldnt1uh_gather_index_u32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_index_u64)))\n" "svuint64_t svldnt1uh_gather_index_u64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_index_s32)))\n" "svint32_t svldnt1uh_gather_index_s32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_index_s64)))\n" "svint64_t svldnt1uh_gather_index_s64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_offset_u32)))\n" "svuint32_t svldnt1uh_gather_offset_u32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_offset_u64)))\n" "svuint64_t svldnt1uh_gather_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_offset_s32)))\n" "svint32_t svldnt1uh_gather_offset_s32(svbool_t, svuint32_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_offset_s64)))\n" "svint64_t svldnt1uh_gather_offset_s64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_u32)))\n" "svuint32_t svldnt1uh_gather_u32(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_u64)))\n" "svuint64_t svldnt1uh_gather_u64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_s32)))\n" "svint32_t svldnt1uh_gather_s32(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_s64)))\n" "svint64_t svldnt1uh_gather_s64(svbool_t, svuint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64index_u64)))\n" "svuint64_t svldnt1uh_gather_index_u64(svbool_t, uint16_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64index_s64)))\n" "svint64_t svldnt1uh_gather_index_s64(svbool_t, uint16_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64index_u64)))\n" "svuint64_t svldnt1uh_gather_index_u64(svbool_t, uint16_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64index_s64)))\n" "svint64_t svldnt1uh_gather_index_s64(svbool_t, uint16_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32offset_u32)))\n" "svuint32_t svldnt1uh_gather_offset_u32(svbool_t, uint16_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32offset_s32)))\n" "svint32_t svldnt1uh_gather_offset_s32(svbool_t, uint16_t const *, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64offset_u64)))\n" "svuint64_t svldnt1uh_gather_offset_u64(svbool_t, uint16_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64offset_s64)))\n" "svint64_t svldnt1uh_gather_offset_s64(svbool_t, uint16_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64offset_u64)))\n" "svuint64_t svldnt1uh_gather_offset_u64(svbool_t, uint16_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64offset_s64)))\n" "svint64_t svldnt1uh_gather_offset_s64(svbool_t, uint16_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_index_u64)))\n" "svuint64_t svldnt1uw_gather_index_u64(svbool_t, svuint64_t, int64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_index_s64)))\n" "svint64_t svldnt1uw_gather_index_s64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_offset_u64)))\n" "svuint64_t svldnt1uw_gather_offset_u64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_offset_s64)))\n" "svint64_t svldnt1uw_gather_offset_s64(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_u64)))\n" "svuint64_t svldnt1uw_gather_u64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_s64)))\n" "svint64_t svldnt1uw_gather_s64(svbool_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64index_u64)))\n" "svuint64_t svldnt1uw_gather_index_u64(svbool_t, uint32_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64index_s64)))\n" "svint64_t svldnt1uw_gather_index_s64(svbool_t, uint32_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64index_u64)))\n" "svuint64_t svldnt1uw_gather_index_u64(svbool_t, uint32_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64index_s64)))\n" "svint64_t svldnt1uw_gather_index_s64(svbool_t, uint32_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64offset_u64)))\n" "svuint64_t svldnt1uw_gather_offset_u64(svbool_t, uint32_t const *, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64offset_s64)))\n" "svint64_t svldnt1uw_gather_offset_s64(svbool_t, uint32_t const *, svint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64offset_u64)))\n" "svuint64_t svldnt1uw_gather_offset_u64(svbool_t, uint32_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64offset_s64)))\n" "svint64_t svldnt1uw_gather_offset_s64(svbool_t, uint32_t const *, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_m)))\n" "svint64_t svlogb_m(svint64_t, svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_m)))\n" "svint32_t svlogb_m(svint32_t, svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_m)))\n" "svint16_t svlogb_m(svint16_t, svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_x)))\n" "svint64_t svlogb_x(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_x)))\n" "svint32_t svlogb_x(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_x)))\n" "svint16_t svlogb_x(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_z)))\n" "svint64_t svlogb_z(svbool_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_z)))\n" "svint32_t svlogb_z(svbool_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_z)))\n" "svint16_t svlogb_z(svbool_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_u8)))\n" "svbool_t svmatch(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_u16)))\n" "svbool_t svmatch(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_s8)))\n" "svbool_t svmatch(svbool_t, svint8_t, svint8_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_s16)))\n" "svbool_t svmatch(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f64_m)))\n" "svfloat64_t svmaxnmp_m(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f32_m)))\n" "svfloat32_t svmaxnmp_m(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f16_m)))\n" "svfloat16_t svmaxnmp_m(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f64_x)))\n" "svfloat64_t svmaxnmp_x(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f32_x)))\n" "svfloat32_t svmaxnmp_x(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f16_x)))\n" "svfloat16_t svmaxnmp_x(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f64_m)))\n" "svfloat64_t svmaxp_m(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f32_m)))\n" "svfloat32_t svmaxp_m(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f16_m)))\n" "svfloat16_t svmaxp_m(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f64_x)))\n" "svfloat64_t svmaxp_x(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f32_x)))\n" "svfloat32_t svmaxp_x(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f16_x)))\n" "svfloat16_t svmaxp_x(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s8_m)))\n" "svint8_t svmaxp_m(svbool_t, svint8_t, svint8_t);\n" 
"__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s32_m)))\n" "svint32_t svmaxp_m(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s64_m)))\n" "svint64_t svmaxp_m(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s16_m)))\n" "svint16_t svmaxp_m(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s8_x)))\n" "svint8_t svmaxp_x(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s32_x)))\n" "svint32_t svmaxp_x(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s64_x)))\n" "svint64_t svmaxp_x(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s16_x)))\n" "svint16_t svmaxp_x(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u8_m)))\n" "svuint8_t svmaxp_m(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u32_m)))\n" "svuint32_t svmaxp_m(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u64_m)))\n" "svuint64_t svmaxp_m(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u16_m)))\n" "svuint16_t svmaxp_m(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u8_x)))\n" "svuint8_t svmaxp_x(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u32_x)))\n" "svuint32_t svmaxp_x(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u64_x)))\n" "svuint64_t svmaxp_x(svbool_t, svuint64_t, svuint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u16_x)))\n" "svuint16_t svmaxp_x(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f64_m)))\n" "svfloat64_t svminnmp_m(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f32_m)))\n" "svfloat32_t svminnmp_m(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f16_m)))\n" "svfloat16_t svminnmp_m(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f64_x)))\n" "svfloat64_t svminnmp_x(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f32_x)))\n" "svfloat32_t svminnmp_x(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f16_x)))\n" "svfloat16_t svminnmp_x(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f64_m)))\n" "svfloat64_t svminp_m(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f32_m)))\n" "svfloat32_t svminp_m(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f16_m)))\n" "svfloat16_t svminp_m(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f64_x)))\n" "svfloat64_t svminp_x(svbool_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f32_x)))\n" "svfloat32_t svminp_x(svbool_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f16_x)))\n" "svfloat16_t svminp_x(svbool_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s8_m)))\n" "svint8_t svminp_m(svbool_t, svint8_t, 
svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s32_m)))\n" "svint32_t svminp_m(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s64_m)))\n" "svint64_t svminp_m(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s16_m)))\n" "svint16_t svminp_m(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s8_x)))\n" "svint8_t svminp_x(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s32_x)))\n" "svint32_t svminp_x(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s64_x)))\n" "svint64_t svminp_x(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s16_x)))\n" "svint16_t svminp_x(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u8_m)))\n" "svuint8_t svminp_m(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u32_m)))\n" "svuint32_t svminp_m(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u64_m)))\n" "svuint64_t svminp_m(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u16_m)))\n" "svuint16_t svminp_m(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u8_x)))\n" "svuint8_t svminp_x(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u32_x)))\n" "svuint32_t svminp_x(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u64_x)))\n" "svuint64_t svminp_x(svbool_t, svuint64_t, svuint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u16_x)))\n" "svuint16_t svminp_x(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u32)))\n" "svuint32_t svmla_lane(svuint32_t, svuint32_t, svuint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u64)))\n" "svuint64_t svmla_lane(svuint64_t, svuint64_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u16)))\n" "svuint16_t svmla_lane(svuint16_t, svuint16_t, svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s32)))\n" "svint32_t svmla_lane(svint32_t, svint32_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s64)))\n" "svint64_t svmla_lane(svint64_t, svint64_t, svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s16)))\n" "svint16_t svmla_lane(svint16_t, svint16_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_f32)))\n" "svfloat32_t svmlalb(svfloat32_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s32)))\n" "svint32_t svmlalb(svint32_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s64)))\n" "svint64_t svmlalb(svint64_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s16)))\n" "svint16_t svmlalb(svint16_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u32)))\n" "svuint32_t svmlalb(svuint32_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u64)))\n" "svuint64_t svmlalb(svuint64_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u16)))\n" "svuint16_t 
svmlalb(svuint16_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_f32)))\n" "svfloat32_t svmlalb(svfloat32_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s32)))\n" "svint32_t svmlalb(svint32_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s64)))\n" "svint64_t svmlalb(svint64_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s16)))\n" "svint16_t svmlalb(svint16_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u32)))\n" "svuint32_t svmlalb(svuint32_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u64)))\n" "svuint64_t svmlalb(svuint64_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u16)))\n" "svuint16_t svmlalb(svuint16_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_f32)))\n" "svfloat32_t svmlalb_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_s32)))\n" "svint32_t svmlalb_lane(svint32_t, svint16_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_s64)))\n" "svint64_t svmlalb_lane(svint64_t, svint32_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_u32)))\n" "svuint32_t svmlalb_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_u64)))\n" "svuint64_t svmlalb_lane(svuint64_t, svuint32_t, svuint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_f32)))\n" "svfloat32_t svmlalt(svfloat32_t, svfloat16_t, float16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s32)))\n" "svint32_t svmlalt(svint32_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s64)))\n" "svint64_t svmlalt(svint64_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s16)))\n" "svint16_t svmlalt(svint16_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u32)))\n" "svuint32_t svmlalt(svuint32_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u64)))\n" "svuint64_t svmlalt(svuint64_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u16)))\n" "svuint16_t svmlalt(svuint16_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_f32)))\n" "svfloat32_t svmlalt(svfloat32_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s32)))\n" "svint32_t svmlalt(svint32_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s64)))\n" "svint64_t svmlalt(svint64_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s16)))\n" "svint16_t svmlalt(svint16_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u32)))\n" "svuint32_t svmlalt(svuint32_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u64)))\n" "svuint64_t svmlalt(svuint64_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u16)))\n" "svuint16_t svmlalt(svuint16_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_f32)))\n" "svfloat32_t svmlalt_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_s32)))\n" "svint32_t svmlalt_lane(svint32_t, svint16_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_s64)))\n" "svint64_t svmlalt_lane(svint64_t, svint32_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_u32)))\n" "svuint32_t svmlalt_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_u64)))\n" "svuint64_t svmlalt_lane(svuint64_t, svuint32_t, svuint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u32)))\n" "svuint32_t svmls_lane(svuint32_t, svuint32_t, svuint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u64)))\n" "svuint64_t svmls_lane(svuint64_t, svuint64_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u16)))\n" "svuint16_t svmls_lane(svuint16_t, svuint16_t, svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s32)))\n" "svint32_t svmls_lane(svint32_t, svint32_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s64)))\n" "svint64_t svmls_lane(svint64_t, svint64_t, svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s16)))\n" "svint16_t svmls_lane(svint16_t, svint16_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_f32)))\n" "svfloat32_t svmlslb(svfloat32_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s32)))\n" "svint32_t svmlslb(svint32_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s64)))\n" "svint64_t svmlslb(svint64_t, svint32_t, int32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s16)))\n" "svint16_t svmlslb(svint16_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u32)))\n" "svuint32_t svmlslb(svuint32_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u64)))\n" "svuint64_t svmlslb(svuint64_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u16)))\n" "svuint16_t svmlslb(svuint16_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_f32)))\n" "svfloat32_t svmlslb(svfloat32_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s32)))\n" "svint32_t svmlslb(svint32_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s64)))\n" "svint64_t svmlslb(svint64_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s16)))\n" "svint16_t svmlslb(svint16_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u32)))\n" "svuint32_t svmlslb(svuint32_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u64)))\n" "svuint64_t svmlslb(svuint64_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u16)))\n" "svuint16_t svmlslb(svuint16_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_f32)))\n" "svfloat32_t svmlslb_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_s32)))\n" "svint32_t svmlslb_lane(svint32_t, svint16_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_s64)))\n" "svint64_t svmlslb_lane(svint64_t, svint32_t, svint32_t, uint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_u32)))\n" "svuint32_t svmlslb_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_u64)))\n" "svuint64_t svmlslb_lane(svuint64_t, svuint32_t, svuint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_f32)))\n" "svfloat32_t svmlslt(svfloat32_t, svfloat16_t, float16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s32)))\n" "svint32_t svmlslt(svint32_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s64)))\n" "svint64_t svmlslt(svint64_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s16)))\n" "svint16_t svmlslt(svint16_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u32)))\n" "svuint32_t svmlslt(svuint32_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u64)))\n" "svuint64_t svmlslt(svuint64_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u16)))\n" "svuint16_t svmlslt(svuint16_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_f32)))\n" "svfloat32_t svmlslt(svfloat32_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s32)))\n" "svint32_t svmlslt(svint32_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s64)))\n" "svint64_t svmlslt(svint64_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s16)))\n" "svint16_t svmlslt(svint16_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u32)))\n" "svuint32_t svmlslt(svuint32_t, svuint16_t, svuint16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u64)))\n" "svuint64_t svmlslt(svuint64_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u16)))\n" "svuint16_t svmlslt(svuint16_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_f32)))\n" "svfloat32_t svmlslt_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_s32)))\n" "svint32_t svmlslt_lane(svint32_t, svint16_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_s64)))\n" "svint64_t svmlslt_lane(svint64_t, svint32_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_u32)))\n" "svuint32_t svmlslt_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_u64)))\n" "svuint64_t svmlslt_lane(svuint64_t, svuint32_t, svuint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s32)))\n" "svint32_t svmovlb(svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s64)))\n" "svint64_t svmovlb(svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s16)))\n" "svint16_t svmovlb(svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u32)))\n" "svuint32_t svmovlb(svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u64)))\n" "svuint64_t svmovlb(svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u16)))\n" "svuint16_t svmovlb(svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s32)))\n" "svint32_t svmovlt(svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s64)))\n" "svint64_t svmovlt(svint32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s16)))\n" "svint16_t svmovlt(svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u32)))\n" "svuint32_t svmovlt(svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u64)))\n" "svuint64_t svmovlt(svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u16)))\n" "svuint16_t svmovlt(svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u32)))\n" "svuint32_t svmul_lane(svuint32_t, svuint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u64)))\n" "svuint64_t svmul_lane(svuint64_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u16)))\n" "svuint16_t svmul_lane(svuint16_t, svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s32)))\n" "svint32_t svmul_lane(svint32_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s64)))\n" "svint64_t svmul_lane(svint64_t, svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s16)))\n" "svint16_t svmul_lane(svint16_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s32)))\n" "svint32_t svmullb(svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s64)))\n" "svint64_t svmullb(svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s16)))\n" "svint16_t svmullb(svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u32)))\n" "svuint32_t svmullb(svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u64)))\n" "svuint64_t svmullb(svuint32_t, uint32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u16)))\n" "svuint16_t svmullb(svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s32)))\n" "svint32_t svmullb(svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s64)))\n" "svint64_t svmullb(svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s16)))\n" "svint16_t svmullb(svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u32)))\n" "svuint32_t svmullb(svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u64)))\n" "svuint64_t svmullb(svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u16)))\n" "svuint16_t svmullb(svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_s32)))\n" "svint32_t svmullb_lane(svint16_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_s64)))\n" "svint64_t svmullb_lane(svint32_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_u32)))\n" "svuint32_t svmullb_lane(svuint16_t, svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_u64)))\n" "svuint64_t svmullb_lane(svuint32_t, svuint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s32)))\n" "svint32_t svmullt(svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s64)))\n" "svint64_t svmullt(svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s16)))\n" "svint16_t svmullt(svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u32)))\n" "svuint32_t svmullt(svuint16_t, uint16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u64)))\n" "svuint64_t svmullt(svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u16)))\n" "svuint16_t svmullt(svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s32)))\n" "svint32_t svmullt(svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s64)))\n" "svint64_t svmullt(svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s16)))\n" "svint16_t svmullt(svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u32)))\n" "svuint32_t svmullt(svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u64)))\n" "svuint64_t svmullt(svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u16)))\n" "svuint16_t svmullt(svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_s32)))\n" "svint32_t svmullt_lane(svint16_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_s64)))\n" "svint64_t svmullt_lane(svint32_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_u32)))\n" "svuint32_t svmullt_lane(svuint16_t, svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_u64)))\n" "svuint64_t svmullt_lane(svuint32_t, svuint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u8)))\n" "svuint8_t svnbsl(svuint8_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u32)))\n" "svuint32_t svnbsl(svuint32_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u64)))\n" "svuint64_t svnbsl(svuint64_t, svuint64_t, uint64_t);\n" 
"__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u16)))\n" "svuint16_t svnbsl(svuint16_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s8)))\n" "svint8_t svnbsl(svint8_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s32)))\n" "svint32_t svnbsl(svint32_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s64)))\n" "svint64_t svnbsl(svint64_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s16)))\n" "svint16_t svnbsl(svint16_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u8)))\n" "svuint8_t svnbsl(svuint8_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u32)))\n" "svuint32_t svnbsl(svuint32_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u64)))\n" "svuint64_t svnbsl(svuint64_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u16)))\n" "svuint16_t svnbsl(svuint16_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s8)))\n" "svint8_t svnbsl(svint8_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s32)))\n" "svint32_t svnbsl(svint32_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s64)))\n" "svint64_t svnbsl(svint64_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s16)))\n" "svint16_t svnbsl(svint16_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_u8)))\n" "svbool_t svnmatch(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_u16)))\n" "svbool_t svnmatch(svbool_t, 
svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_s8)))\n" "svbool_t svnmatch(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_s16)))\n" "svbool_t svnmatch(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmul_n_u8)))\n" "svuint8_t svpmul(svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmul_u8)))\n" "svuint8_t svpmul(svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_n_u64)))\n" "svuint64_t svpmullb(svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_n_u16)))\n" "svuint16_t svpmullb(svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_u64)))\n" "svuint64_t svpmullb(svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_u16)))\n" "svuint16_t svpmullb(svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u8)))\n" "svuint8_t svpmullb_pair(svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u32)))\n" "svuint32_t svpmullb_pair(svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u8)))\n" "svuint8_t svpmullb_pair(svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u32)))\n" "svuint32_t svpmullb_pair(svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_n_u64)))\n" "svuint64_t svpmullt(svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_n_u16)))\n" "svuint16_t svpmullt(svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_u64)))\n" "svuint64_t svpmullt(svuint32_t, svuint32_t);\n" 
"__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_u16)))\n" "svuint16_t svpmullt(svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u8)))\n" "svuint8_t svpmullt_pair(svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u32)))\n" "svuint32_t svpmullt_pair(svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u8)))\n" "svuint8_t svpmullt_pair(svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u32)))\n" "svuint32_t svpmullt_pair(svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_m)))\n" "svint8_t svqabs_m(svint8_t, svbool_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_m)))\n" "svint32_t svqabs_m(svint32_t, svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_m)))\n" "svint64_t svqabs_m(svint64_t, svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_m)))\n" "svint16_t svqabs_m(svint16_t, svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_x)))\n" "svint8_t svqabs_x(svbool_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_x)))\n" "svint32_t svqabs_x(svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_x)))\n" "svint64_t svqabs_x(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_x)))\n" "svint16_t svqabs_x(svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_z)))\n" "svint8_t svqabs_z(svbool_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_z)))\n" "svint32_t svqabs_z(svbool_t, svint32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_z)))\n" "svint64_t svqabs_z(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_z)))\n" "svint16_t svqabs_z(svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_m)))\n" "svint8_t svqadd_m(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_m)))\n" "svint32_t svqadd_m(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_m)))\n" "svint64_t svqadd_m(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_m)))\n" "svint16_t svqadd_m(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_x)))\n" "svint8_t svqadd_x(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_x)))\n" "svint32_t svqadd_x(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_x)))\n" "svint64_t svqadd_x(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_x)))\n" "svint16_t svqadd_x(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_z)))\n" "svint8_t svqadd_z(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_z)))\n" "svint32_t svqadd_z(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_z)))\n" "svint64_t svqadd_z(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_z)))\n" "svint16_t svqadd_z(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_m)))\n" "svuint8_t svqadd_m(svbool_t, svuint8_t, 
uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_m)))\n" "svuint32_t svqadd_m(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_m)))\n" "svuint64_t svqadd_m(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_m)))\n" "svuint16_t svqadd_m(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_x)))\n" "svuint8_t svqadd_x(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_x)))\n" "svuint32_t svqadd_x(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_x)))\n" "svuint64_t svqadd_x(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_x)))\n" "svuint16_t svqadd_x(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_z)))\n" "svuint8_t svqadd_z(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_z)))\n" "svuint32_t svqadd_z(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_z)))\n" "svuint64_t svqadd_z(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_z)))\n" "svuint16_t svqadd_z(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_m)))\n" "svint8_t svqadd_m(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_m)))\n" "svint32_t svqadd_m(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_m)))\n" "svint64_t svqadd_m(svbool_t, svint64_t, svint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_m)))\n" "svint16_t svqadd_m(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_x)))\n" "svint8_t svqadd_x(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_x)))\n" "svint32_t svqadd_x(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_x)))\n" "svint64_t svqadd_x(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_x)))\n" "svint16_t svqadd_x(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_z)))\n" "svint8_t svqadd_z(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_z)))\n" "svint32_t svqadd_z(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_z)))\n" "svint64_t svqadd_z(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_z)))\n" "svint16_t svqadd_z(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_m)))\n" "svuint8_t svqadd_m(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_m)))\n" "svuint32_t svqadd_m(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_m)))\n" "svuint64_t svqadd_m(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_m)))\n" "svuint16_t svqadd_m(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_x)))\n" "svuint8_t svqadd_x(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_x)))\n" 
"svuint32_t svqadd_x(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_x)))\n" "svuint64_t svqadd_x(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_x)))\n" "svuint16_t svqadd_x(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_z)))\n" "svuint8_t svqadd_z(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_z)))\n" "svuint32_t svqadd_z(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_z)))\n" "svuint64_t svqadd_z(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_z)))\n" "svuint16_t svqadd_z(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s8)))\n" "svint8_t svqcadd(svint8_t, svint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s32)))\n" "svint32_t svqcadd(svint32_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s64)))\n" "svint64_t svqcadd(svint64_t, svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s16)))\n" "svint16_t svqcadd(svint16_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s32)))\n" "svint32_t svqdmlalb(svint32_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s64)))\n" "svint64_t svqdmlalb(svint64_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s16)))\n" "svint16_t svqdmlalb(svint16_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s32)))\n" "svint32_t svqdmlalb(svint32_t, svint16_t, svint16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s64)))\n" "svint64_t svqdmlalb(svint64_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s16)))\n" "svint16_t svqdmlalb(svint16_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_lane_s32)))\n" "svint32_t svqdmlalb_lane(svint32_t, svint16_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_lane_s64)))\n" "svint64_t svqdmlalb_lane(svint64_t, svint32_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s32)))\n" "svint32_t svqdmlalbt(svint32_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s64)))\n" "svint64_t svqdmlalbt(svint64_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s16)))\n" "svint16_t svqdmlalbt(svint16_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s32)))\n" "svint32_t svqdmlalbt(svint32_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s64)))\n" "svint64_t svqdmlalbt(svint64_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s16)))\n" "svint16_t svqdmlalbt(svint16_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s32)))\n" "svint32_t svqdmlalt(svint32_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s64)))\n" "svint64_t svqdmlalt(svint64_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s16)))\n" "svint16_t svqdmlalt(svint16_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s32)))\n" "svint32_t svqdmlalt(svint32_t, svint16_t, svint16_t);\n" 
"__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s64)))\n" "svint64_t svqdmlalt(svint64_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s16)))\n" "svint16_t svqdmlalt(svint16_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_lane_s32)))\n" "svint32_t svqdmlalt_lane(svint32_t, svint16_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_lane_s64)))\n" "svint64_t svqdmlalt_lane(svint64_t, svint32_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s32)))\n" "svint32_t svqdmlslb(svint32_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s64)))\n" "svint64_t svqdmlslb(svint64_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s16)))\n" "svint16_t svqdmlslb(svint16_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s32)))\n" "svint32_t svqdmlslb(svint32_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s64)))\n" "svint64_t svqdmlslb(svint64_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s16)))\n" "svint16_t svqdmlslb(svint16_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_lane_s32)))\n" "svint32_t svqdmlslb_lane(svint32_t, svint16_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_lane_s64)))\n" "svint64_t svqdmlslb_lane(svint64_t, svint32_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s32)))\n" "svint32_t svqdmlslbt(svint32_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s64)))\n" "svint64_t 
svqdmlslbt(svint64_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s16)))\n" "svint16_t svqdmlslbt(svint16_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s32)))\n" "svint32_t svqdmlslbt(svint32_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s64)))\n" "svint64_t svqdmlslbt(svint64_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s16)))\n" "svint16_t svqdmlslbt(svint16_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s32)))\n" "svint32_t svqdmlslt(svint32_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s64)))\n" "svint64_t svqdmlslt(svint64_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s16)))\n" "svint16_t svqdmlslt(svint16_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s32)))\n" "svint32_t svqdmlslt(svint32_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s64)))\n" "svint64_t svqdmlslt(svint64_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s16)))\n" "svint16_t svqdmlslt(svint16_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_lane_s32)))\n" "svint32_t svqdmlslt_lane(svint32_t, svint16_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_lane_s64)))\n" "svint64_t svqdmlslt_lane(svint64_t, svint32_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s8)))\n" "svint8_t svqdmulh(svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s32)))\n" "svint32_t 
svqdmulh(svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s64)))\n" "svint64_t svqdmulh(svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s16)))\n" "svint16_t svqdmulh(svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s8)))\n" "svint8_t svqdmulh(svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s32)))\n" "svint32_t svqdmulh(svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s64)))\n" "svint64_t svqdmulh(svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s16)))\n" "svint16_t svqdmulh(svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s32)))\n" "svint32_t svqdmulh_lane(svint32_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s64)))\n" "svint64_t svqdmulh_lane(svint64_t, svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s16)))\n" "svint16_t svqdmulh_lane(svint16_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s32)))\n" "svint32_t svqdmullb(svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s64)))\n" "svint64_t svqdmullb(svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s16)))\n" "svint16_t svqdmullb(svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s32)))\n" "svint32_t svqdmullb(svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s64)))\n" "svint64_t svqdmullb(svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s16)))\n" "svint16_t svqdmullb(svint8_t, 
svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_lane_s32)))\n" "svint32_t svqdmullb_lane(svint16_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_lane_s64)))\n" "svint64_t svqdmullb_lane(svint32_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s32)))\n" "svint32_t svqdmullt(svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s64)))\n" "svint64_t svqdmullt(svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s16)))\n" "svint16_t svqdmullt(svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s32)))\n" "svint32_t svqdmullt(svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s64)))\n" "svint64_t svqdmullt(svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s16)))\n" "svint16_t svqdmullt(svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_lane_s32)))\n" "svint32_t svqdmullt_lane(svint16_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_lane_s64)))\n" "svint64_t svqdmullt_lane(svint32_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_m)))\n" "svint8_t svqneg_m(svint8_t, svbool_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_m)))\n" "svint32_t svqneg_m(svint32_t, svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_m)))\n" "svint64_t svqneg_m(svint64_t, svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_m)))\n" "svint16_t svqneg_m(svint16_t, svbool_t, svint16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_x)))\n" "svint8_t svqneg_x(svbool_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_x)))\n" "svint32_t svqneg_x(svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_x)))\n" "svint64_t svqneg_x(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_x)))\n" "svint16_t svqneg_x(svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_z)))\n" "svint8_t svqneg_z(svbool_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_z)))\n" "svint32_t svqneg_z(svbool_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_z)))\n" "svint64_t svqneg_z(svbool_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_z)))\n" "svint16_t svqneg_z(svbool_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s8)))\n" "svint8_t svqrdcmlah(svint8_t, svint8_t, svint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s32)))\n" "svint32_t svqrdcmlah(svint32_t, svint32_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s64)))\n" "svint64_t svqrdcmlah(svint64_t, svint64_t, svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s16)))\n" "svint16_t svqrdcmlah(svint16_t, svint16_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_lane_s32)))\n" "svint32_t svqrdcmlah_lane(svint32_t, svint32_t, svint32_t, uint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_lane_s16)))\n" "svint16_t svqrdcmlah_lane(svint16_t, svint16_t, svint16_t, uint64_t, uint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s8)))\n" "svint8_t svqrdmlah(svint8_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s32)))\n" "svint32_t svqrdmlah(svint32_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s64)))\n" "svint64_t svqrdmlah(svint64_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s16)))\n" "svint16_t svqrdmlah(svint16_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s8)))\n" "svint8_t svqrdmlah(svint8_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s32)))\n" "svint32_t svqrdmlah(svint32_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s64)))\n" "svint64_t svqrdmlah(svint64_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s16)))\n" "svint16_t svqrdmlah(svint16_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s32)))\n" "svint32_t svqrdmlah_lane(svint32_t, svint32_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s64)))\n" "svint64_t svqrdmlah_lane(svint64_t, svint64_t, svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s16)))\n" "svint16_t svqrdmlah_lane(svint16_t, svint16_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s8)))\n" "svint8_t svqrdmlsh(svint8_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s32)))\n" "svint32_t svqrdmlsh(svint32_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s64)))\n" "svint64_t svqrdmlsh(svint64_t, svint64_t, int64_t);\n" 
"__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s16)))\n" "svint16_t svqrdmlsh(svint16_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s8)))\n" "svint8_t svqrdmlsh(svint8_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s32)))\n" "svint32_t svqrdmlsh(svint32_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s64)))\n" "svint64_t svqrdmlsh(svint64_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s16)))\n" "svint16_t svqrdmlsh(svint16_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s32)))\n" "svint32_t svqrdmlsh_lane(svint32_t, svint32_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s64)))\n" "svint64_t svqrdmlsh_lane(svint64_t, svint64_t, svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s16)))\n" "svint16_t svqrdmlsh_lane(svint16_t, svint16_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s8)))\n" "svint8_t svqrdmulh(svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s32)))\n" "svint32_t svqrdmulh(svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s64)))\n" "svint64_t svqrdmulh(svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s16)))\n" "svint16_t svqrdmulh(svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s8)))\n" "svint8_t svqrdmulh(svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s32)))\n" "svint32_t svqrdmulh(svint32_t, svint32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s64)))\n" "svint64_t svqrdmulh(svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s16)))\n" "svint16_t svqrdmulh(svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s32)))\n" "svint32_t svqrdmulh_lane(svint32_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s64)))\n" "svint64_t svqrdmulh_lane(svint64_t, svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s16)))\n" "svint16_t svqrdmulh_lane(svint16_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_m)))\n" "svint8_t svqrshl_m(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_m)))\n" "svint32_t svqrshl_m(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_m)))\n" "svint64_t svqrshl_m(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_m)))\n" "svint16_t svqrshl_m(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_x)))\n" "svint8_t svqrshl_x(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_x)))\n" "svint32_t svqrshl_x(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_x)))\n" "svint64_t svqrshl_x(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_x)))\n" "svint16_t svqrshl_x(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_z)))\n" "svint8_t svqrshl_z(svbool_t, svint8_t, int8_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_z)))\n" "svint32_t svqrshl_z(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_z)))\n" "svint64_t svqrshl_z(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_z)))\n" "svint16_t svqrshl_z(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_m)))\n" "svuint8_t svqrshl_m(svbool_t, svuint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_m)))\n" "svuint32_t svqrshl_m(svbool_t, svuint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_m)))\n" "svuint64_t svqrshl_m(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_m)))\n" "svuint16_t svqrshl_m(svbool_t, svuint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_x)))\n" "svuint8_t svqrshl_x(svbool_t, svuint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_x)))\n" "svuint32_t svqrshl_x(svbool_t, svuint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_x)))\n" "svuint64_t svqrshl_x(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_x)))\n" "svuint16_t svqrshl_x(svbool_t, svuint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_z)))\n" "svuint8_t svqrshl_z(svbool_t, svuint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_z)))\n" "svuint32_t svqrshl_z(svbool_t, svuint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_z)))\n" "svuint64_t svqrshl_z(svbool_t, svuint64_t, int64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_z)))\n" "svuint16_t svqrshl_z(svbool_t, svuint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_m)))\n" "svint8_t svqrshl_m(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_m)))\n" "svint32_t svqrshl_m(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_m)))\n" "svint64_t svqrshl_m(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_m)))\n" "svint16_t svqrshl_m(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_x)))\n" "svint8_t svqrshl_x(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_x)))\n" "svint32_t svqrshl_x(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_x)))\n" "svint64_t svqrshl_x(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_x)))\n" "svint16_t svqrshl_x(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_z)))\n" "svint8_t svqrshl_z(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_z)))\n" "svint32_t svqrshl_z(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_z)))\n" "svint64_t svqrshl_z(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_z)))\n" "svint16_t svqrshl_z(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_m)))\n" "svuint8_t svqrshl_m(svbool_t, svuint8_t, svint8_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_m)))\n" "svuint32_t svqrshl_m(svbool_t, svuint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_m)))\n" "svuint64_t svqrshl_m(svbool_t, svuint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_m)))\n" "svuint16_t svqrshl_m(svbool_t, svuint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_x)))\n" "svuint8_t svqrshl_x(svbool_t, svuint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_x)))\n" "svuint32_t svqrshl_x(svbool_t, svuint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_x)))\n" "svuint64_t svqrshl_x(svbool_t, svuint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_x)))\n" "svuint16_t svqrshl_x(svbool_t, svuint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_z)))\n" "svuint8_t svqrshl_z(svbool_t, svuint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_z)))\n" "svuint32_t svqrshl_z(svbool_t, svuint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_z)))\n" "svuint64_t svqrshl_z(svbool_t, svuint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_z)))\n" "svuint16_t svqrshl_z(svbool_t, svuint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s32)))\n" "svint16_t svqrshrnb(svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s64)))\n" "svint32_t svqrshrnb(svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s16)))\n" "svint8_t svqrshrnb(svint16_t, uint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u32)))\n" "svuint16_t svqrshrnb(svuint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u64)))\n" "svuint32_t svqrshrnb(svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u16)))\n" "svuint8_t svqrshrnb(svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s32)))\n" "svint16_t svqrshrnt(svint16_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s64)))\n" "svint32_t svqrshrnt(svint32_t, svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s16)))\n" "svint8_t svqrshrnt(svint8_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u32)))\n" "svuint16_t svqrshrnt(svuint16_t, svuint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u64)))\n" "svuint32_t svqrshrnt(svuint32_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u16)))\n" "svuint8_t svqrshrnt(svuint8_t, svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s32)))\n" "svuint16_t svqrshrunb(svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s64)))\n" "svuint32_t svqrshrunb(svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s16)))\n" "svuint8_t svqrshrunb(svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s32)))\n" "svuint16_t svqrshrunt(svuint16_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s64)))\n" "svuint32_t svqrshrunt(svuint32_t, svint64_t, uint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s16)))\n" "svuint8_t svqrshrunt(svuint8_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_m)))\n" "svint8_t svqshl_m(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_m)))\n" "svint32_t svqshl_m(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_m)))\n" "svint64_t svqshl_m(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_m)))\n" "svint16_t svqshl_m(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_x)))\n" "svint8_t svqshl_x(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_x)))\n" "svint32_t svqshl_x(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_x)))\n" "svint64_t svqshl_x(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_x)))\n" "svint16_t svqshl_x(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_z)))\n" "svint8_t svqshl_z(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_z)))\n" "svint32_t svqshl_z(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_z)))\n" "svint64_t svqshl_z(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_z)))\n" "svint16_t svqshl_z(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_m)))\n" "svuint8_t svqshl_m(svbool_t, svuint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_m)))\n" "svuint32_t 
svqshl_m(svbool_t, svuint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_m)))\n" "svuint64_t svqshl_m(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_m)))\n" "svuint16_t svqshl_m(svbool_t, svuint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_x)))\n" "svuint8_t svqshl_x(svbool_t, svuint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_x)))\n" "svuint32_t svqshl_x(svbool_t, svuint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_x)))\n" "svuint64_t svqshl_x(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_x)))\n" "svuint16_t svqshl_x(svbool_t, svuint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_z)))\n" "svuint8_t svqshl_z(svbool_t, svuint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_z)))\n" "svuint32_t svqshl_z(svbool_t, svuint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_z)))\n" "svuint64_t svqshl_z(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_z)))\n" "svuint16_t svqshl_z(svbool_t, svuint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_m)))\n" "svint8_t svqshl_m(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_m)))\n" "svint32_t svqshl_m(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_m)))\n" "svint64_t svqshl_m(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_m)))\n" "svint16_t svqshl_m(svbool_t, svint16_t, svint16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_x)))\n" "svint8_t svqshl_x(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_x)))\n" "svint32_t svqshl_x(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_x)))\n" "svint64_t svqshl_x(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_x)))\n" "svint16_t svqshl_x(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_z)))\n" "svint8_t svqshl_z(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_z)))\n" "svint32_t svqshl_z(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_z)))\n" "svint64_t svqshl_z(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_z)))\n" "svint16_t svqshl_z(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_m)))\n" "svuint8_t svqshl_m(svbool_t, svuint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_m)))\n" "svuint32_t svqshl_m(svbool_t, svuint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_m)))\n" "svuint64_t svqshl_m(svbool_t, svuint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_m)))\n" "svuint16_t svqshl_m(svbool_t, svuint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_x)))\n" "svuint8_t svqshl_x(svbool_t, svuint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_x)))\n" "svuint32_t svqshl_x(svbool_t, svuint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_x)))\n" "svuint64_t 
svqshl_x(svbool_t, svuint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_x)))\n" "svuint16_t svqshl_x(svbool_t, svuint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_z)))\n" "svuint8_t svqshl_z(svbool_t, svuint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_z)))\n" "svuint32_t svqshl_z(svbool_t, svuint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_z)))\n" "svuint64_t svqshl_z(svbool_t, svuint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_z)))\n" "svuint16_t svqshl_z(svbool_t, svuint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_m)))\n" "svuint8_t svqshlu_m(svbool_t, svint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_m)))\n" "svuint32_t svqshlu_m(svbool_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_m)))\n" "svuint64_t svqshlu_m(svbool_t, svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_m)))\n" "svuint16_t svqshlu_m(svbool_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_x)))\n" "svuint8_t svqshlu_x(svbool_t, svint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_x)))\n" "svuint32_t svqshlu_x(svbool_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_x)))\n" "svuint64_t svqshlu_x(svbool_t, svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_x)))\n" "svuint16_t svqshlu_x(svbool_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_z)))\n" "svuint8_t svqshlu_z(svbool_t, svint8_t, uint64_t);\n" 
"__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_z)))\n" "svuint32_t svqshlu_z(svbool_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_z)))\n" "svuint64_t svqshlu_z(svbool_t, svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_z)))\n" "svuint16_t svqshlu_z(svbool_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s32)))\n" "svint16_t svqshrnb(svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s64)))\n" "svint32_t svqshrnb(svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s16)))\n" "svint8_t svqshrnb(svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u32)))\n" "svuint16_t svqshrnb(svuint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u64)))\n" "svuint32_t svqshrnb(svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u16)))\n" "svuint8_t svqshrnb(svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s32)))\n" "svint16_t svqshrnt(svint16_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s64)))\n" "svint32_t svqshrnt(svint32_t, svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s16)))\n" "svint8_t svqshrnt(svint8_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u32)))\n" "svuint16_t svqshrnt(svuint16_t, svuint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u64)))\n" "svuint32_t svqshrnt(svuint32_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u16)))\n" "svuint8_t 
svqshrnt(svuint8_t, svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s32)))\n" "svuint16_t svqshrunb(svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s64)))\n" "svuint32_t svqshrunb(svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s16)))\n" "svuint8_t svqshrunb(svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s32)))\n" "svuint16_t svqshrunt(svuint16_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s64)))\n" "svuint32_t svqshrunt(svuint32_t, svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s16)))\n" "svuint8_t svqshrunt(svuint8_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_m)))\n" "svint8_t svqsub_m(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_m)))\n" "svint32_t svqsub_m(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_m)))\n" "svint64_t svqsub_m(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_m)))\n" "svint16_t svqsub_m(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_x)))\n" "svint8_t svqsub_x(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_x)))\n" "svint32_t svqsub_x(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_x)))\n" "svint64_t svqsub_x(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_x)))\n" "svint16_t svqsub_x(svbool_t, svint16_t, int16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_z)))\n" "svint8_t svqsub_z(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_z)))\n" "svint32_t svqsub_z(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_z)))\n" "svint64_t svqsub_z(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_z)))\n" "svint16_t svqsub_z(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_m)))\n" "svuint8_t svqsub_m(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_m)))\n" "svuint32_t svqsub_m(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_m)))\n" "svuint64_t svqsub_m(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_m)))\n" "svuint16_t svqsub_m(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_x)))\n" "svuint8_t svqsub_x(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_x)))\n" "svuint32_t svqsub_x(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_x)))\n" "svuint64_t svqsub_x(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_x)))\n" "svuint16_t svqsub_x(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_z)))\n" "svuint8_t svqsub_z(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_z)))\n" "svuint32_t svqsub_z(svbool_t, svuint32_t, uint32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_z)))\n" "svuint64_t svqsub_z(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_z)))\n" "svuint16_t svqsub_z(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_m)))\n" "svint8_t svqsub_m(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_m)))\n" "svint32_t svqsub_m(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_m)))\n" "svint64_t svqsub_m(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_m)))\n" "svint16_t svqsub_m(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_x)))\n" "svint8_t svqsub_x(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_x)))\n" "svint32_t svqsub_x(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_x)))\n" "svint64_t svqsub_x(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_x)))\n" "svint16_t svqsub_x(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_z)))\n" "svint8_t svqsub_z(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_z)))\n" "svint32_t svqsub_z(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_z)))\n" "svint64_t svqsub_z(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_z)))\n" "svint16_t svqsub_z(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_m)))\n" "svuint8_t 
svqsub_m(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_m)))\n" "svuint32_t svqsub_m(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_m)))\n" "svuint64_t svqsub_m(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_m)))\n" "svuint16_t svqsub_m(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_x)))\n" "svuint8_t svqsub_x(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_x)))\n" "svuint32_t svqsub_x(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_x)))\n" "svuint64_t svqsub_x(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_x)))\n" "svuint16_t svqsub_x(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_z)))\n" "svuint8_t svqsub_z(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_z)))\n" "svuint32_t svqsub_z(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_z)))\n" "svuint64_t svqsub_z(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_z)))\n" "svuint16_t svqsub_z(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_m)))\n" "svint8_t svqsubr_m(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_m)))\n" "svint32_t svqsubr_m(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_m)))\n" "svint64_t svqsubr_m(svbool_t, svint64_t, int64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_m)))\n" "svint16_t svqsubr_m(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_x)))\n" "svint8_t svqsubr_x(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_x)))\n" "svint32_t svqsubr_x(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_x)))\n" "svint64_t svqsubr_x(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_x)))\n" "svint16_t svqsubr_x(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_z)))\n" "svint8_t svqsubr_z(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_z)))\n" "svint32_t svqsubr_z(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_z)))\n" "svint64_t svqsubr_z(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_z)))\n" "svint16_t svqsubr_z(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_m)))\n" "svuint8_t svqsubr_m(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_m)))\n" "svuint32_t svqsubr_m(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_m)))\n" "svuint64_t svqsubr_m(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_m)))\n" "svuint16_t svqsubr_m(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_x)))\n" "svuint8_t svqsubr_x(svbool_t, svuint8_t, uint8_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_x)))\n" "svuint32_t svqsubr_x(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_x)))\n" "svuint64_t svqsubr_x(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_x)))\n" "svuint16_t svqsubr_x(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_z)))\n" "svuint8_t svqsubr_z(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_z)))\n" "svuint32_t svqsubr_z(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_z)))\n" "svuint64_t svqsubr_z(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_z)))\n" "svuint16_t svqsubr_z(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_m)))\n" "svint8_t svqsubr_m(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_m)))\n" "svint32_t svqsubr_m(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_m)))\n" "svint64_t svqsubr_m(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_m)))\n" "svint16_t svqsubr_m(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_x)))\n" "svint8_t svqsubr_x(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_x)))\n" "svint32_t svqsubr_x(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_x)))\n" "svint64_t svqsubr_x(svbool_t, svint64_t, svint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_x)))\n" "svint16_t svqsubr_x(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_z)))\n" "svint8_t svqsubr_z(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_z)))\n" "svint32_t svqsubr_z(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_z)))\n" "svint64_t svqsubr_z(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_z)))\n" "svint16_t svqsubr_z(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_m)))\n" "svuint8_t svqsubr_m(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_m)))\n" "svuint32_t svqsubr_m(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_m)))\n" "svuint64_t svqsubr_m(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_m)))\n" "svuint16_t svqsubr_m(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_x)))\n" "svuint8_t svqsubr_x(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_x)))\n" "svuint32_t svqsubr_x(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_x)))\n" "svuint64_t svqsubr_x(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_x)))\n" "svuint16_t svqsubr_x(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_z)))\n" "svuint8_t svqsubr_z(svbool_t, svuint8_t, svuint8_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_z)))\n" "svuint32_t svqsubr_z(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_z)))\n" "svuint64_t svqsubr_z(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_z)))\n" "svuint16_t svqsubr_z(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s32)))\n" "svint16_t svqxtnb(svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s64)))\n" "svint32_t svqxtnb(svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s16)))\n" "svint8_t svqxtnb(svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u32)))\n" "svuint16_t svqxtnb(svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u64)))\n" "svuint32_t svqxtnb(svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u16)))\n" "svuint8_t svqxtnb(svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s32)))\n" "svint16_t svqxtnt(svint16_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s64)))\n" "svint32_t svqxtnt(svint32_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s16)))\n" "svint8_t svqxtnt(svint8_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u32)))\n" "svuint16_t svqxtnt(svuint16_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u64)))\n" "svuint32_t svqxtnt(svuint32_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u16)))\n" "svuint8_t svqxtnt(svuint8_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s32)))\n" "svuint16_t svqxtunb(svint32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s64)))\n" "svuint32_t svqxtunb(svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s16)))\n" "svuint8_t svqxtunb(svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s32)))\n" "svuint16_t svqxtunt(svuint16_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s64)))\n" "svuint32_t svqxtunt(svuint32_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s16)))\n" "svuint8_t svqxtunt(svuint8_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u32)))\n" "svuint16_t svraddhnb(svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u64)))\n" "svuint32_t svraddhnb(svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u16)))\n" "svuint8_t svraddhnb(svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s32)))\n" "svint16_t svraddhnb(svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s64)))\n" "svint32_t svraddhnb(svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s16)))\n" "svint8_t svraddhnb(svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u32)))\n" "svuint16_t svraddhnb(svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u64)))\n" "svuint32_t svraddhnb(svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u16)))\n" "svuint8_t svraddhnb(svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s32)))\n" "svint16_t svraddhnb(svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s64)))\n" 
"svint32_t svraddhnb(svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s16)))\n" "svint8_t svraddhnb(svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u32)))\n" "svuint16_t svraddhnt(svuint16_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u64)))\n" "svuint32_t svraddhnt(svuint32_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u16)))\n" "svuint8_t svraddhnt(svuint8_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s32)))\n" "svint16_t svraddhnt(svint16_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s64)))\n" "svint32_t svraddhnt(svint32_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s16)))\n" "svint8_t svraddhnt(svint8_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u32)))\n" "svuint16_t svraddhnt(svuint16_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u64)))\n" "svuint32_t svraddhnt(svuint32_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u16)))\n" "svuint8_t svraddhnt(svuint8_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s32)))\n" "svint16_t svraddhnt(svint16_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s64)))\n" "svint32_t svraddhnt(svint32_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s16)))\n" "svint8_t svraddhnt(svint8_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_m)))\n" "svuint32_t svrecpe_m(svuint32_t, svbool_t, 
svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_x)))\n" "svuint32_t svrecpe_x(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_z)))\n" "svuint32_t svrecpe_z(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_m)))\n" "svint8_t svrhadd_m(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_m)))\n" "svint32_t svrhadd_m(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_m)))\n" "svint64_t svrhadd_m(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_m)))\n" "svint16_t svrhadd_m(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_x)))\n" "svint8_t svrhadd_x(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_x)))\n" "svint32_t svrhadd_x(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_x)))\n" "svint64_t svrhadd_x(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_x)))\n" "svint16_t svrhadd_x(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_z)))\n" "svint8_t svrhadd_z(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_z)))\n" "svint32_t svrhadd_z(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_z)))\n" "svint64_t svrhadd_z(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_z)))\n" "svint16_t svrhadd_z(svbool_t, svint16_t, int16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_m)))\n" "svuint8_t svrhadd_m(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_m)))\n" "svuint32_t svrhadd_m(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_m)))\n" "svuint64_t svrhadd_m(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_m)))\n" "svuint16_t svrhadd_m(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_x)))\n" "svuint8_t svrhadd_x(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_x)))\n" "svuint32_t svrhadd_x(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_x)))\n" "svuint64_t svrhadd_x(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_x)))\n" "svuint16_t svrhadd_x(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_z)))\n" "svuint8_t svrhadd_z(svbool_t, svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_z)))\n" "svuint32_t svrhadd_z(svbool_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_z)))\n" "svuint64_t svrhadd_z(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_z)))\n" "svuint16_t svrhadd_z(svbool_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_m)))\n" "svint8_t svrhadd_m(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_m)))\n" "svint32_t svrhadd_m(svbool_t, svint32_t, svint32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_m)))\n" "svint64_t svrhadd_m(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_m)))\n" "svint16_t svrhadd_m(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_x)))\n" "svint8_t svrhadd_x(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_x)))\n" "svint32_t svrhadd_x(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_x)))\n" "svint64_t svrhadd_x(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_x)))\n" "svint16_t svrhadd_x(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_z)))\n" "svint8_t svrhadd_z(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_z)))\n" "svint32_t svrhadd_z(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_z)))\n" "svint64_t svrhadd_z(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_z)))\n" "svint16_t svrhadd_z(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_m)))\n" "svuint8_t svrhadd_m(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_m)))\n" "svuint32_t svrhadd_m(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_m)))\n" "svuint64_t svrhadd_m(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_m)))\n" "svuint16_t svrhadd_m(svbool_t, svuint16_t, svuint16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_x)))\n" "svuint8_t svrhadd_x(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_x)))\n" "svuint32_t svrhadd_x(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_x)))\n" "svuint64_t svrhadd_x(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_x)))\n" "svuint16_t svrhadd_x(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_z)))\n" "svuint8_t svrhadd_z(svbool_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_z)))\n" "svuint32_t svrhadd_z(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_z)))\n" "svuint64_t svrhadd_z(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_z)))\n" "svuint16_t svrhadd_z(svbool_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_m)))\n" "svint8_t svrshl_m(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_m)))\n" "svint32_t svrshl_m(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_m)))\n" "svint64_t svrshl_m(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_m)))\n" "svint16_t svrshl_m(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_x)))\n" "svint8_t svrshl_x(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_x)))\n" "svint32_t svrshl_x(svbool_t, svint32_t, int32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_x)))\n" "svint64_t svrshl_x(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_x)))\n" "svint16_t svrshl_x(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_z)))\n" "svint8_t svrshl_z(svbool_t, svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_z)))\n" "svint32_t svrshl_z(svbool_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_z)))\n" "svint64_t svrshl_z(svbool_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_z)))\n" "svint16_t svrshl_z(svbool_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_m)))\n" "svuint8_t svrshl_m(svbool_t, svuint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_m)))\n" "svuint32_t svrshl_m(svbool_t, svuint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_m)))\n" "svuint64_t svrshl_m(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_m)))\n" "svuint16_t svrshl_m(svbool_t, svuint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_x)))\n" "svuint8_t svrshl_x(svbool_t, svuint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_x)))\n" "svuint32_t svrshl_x(svbool_t, svuint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_x)))\n" "svuint64_t svrshl_x(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_x)))\n" "svuint16_t svrshl_x(svbool_t, svuint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_z)))\n" 
"svuint8_t svrshl_z(svbool_t, svuint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_z)))\n" "svuint32_t svrshl_z(svbool_t, svuint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_z)))\n" "svuint64_t svrshl_z(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_z)))\n" "svuint16_t svrshl_z(svbool_t, svuint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_m)))\n" "svint8_t svrshl_m(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_m)))\n" "svint32_t svrshl_m(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_m)))\n" "svint64_t svrshl_m(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_m)))\n" "svint16_t svrshl_m(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_x)))\n" "svint8_t svrshl_x(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_x)))\n" "svint32_t svrshl_x(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_x)))\n" "svint64_t svrshl_x(svbool_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_x)))\n" "svint16_t svrshl_x(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_z)))\n" "svint8_t svrshl_z(svbool_t, svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_z)))\n" "svint32_t svrshl_z(svbool_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_z)))\n" "svint64_t svrshl_z(svbool_t, svint64_t, svint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_z)))\n" "svint16_t svrshl_z(svbool_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_m)))\n" "svuint8_t svrshl_m(svbool_t, svuint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_m)))\n" "svuint32_t svrshl_m(svbool_t, svuint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_m)))\n" "svuint64_t svrshl_m(svbool_t, svuint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_m)))\n" "svuint16_t svrshl_m(svbool_t, svuint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_x)))\n" "svuint8_t svrshl_x(svbool_t, svuint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_x)))\n" "svuint32_t svrshl_x(svbool_t, svuint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_x)))\n" "svuint64_t svrshl_x(svbool_t, svuint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_x)))\n" "svuint16_t svrshl_x(svbool_t, svuint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_z)))\n" "svuint8_t svrshl_z(svbool_t, svuint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_z)))\n" "svuint32_t svrshl_z(svbool_t, svuint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_z)))\n" "svuint64_t svrshl_z(svbool_t, svuint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_z)))\n" "svuint16_t svrshl_z(svbool_t, svuint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_m)))\n" "svint8_t svrshr_m(svbool_t, svint8_t, uint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_m)))\n" "svint32_t svrshr_m(svbool_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_m)))\n" "svint64_t svrshr_m(svbool_t, svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_m)))\n" "svint16_t svrshr_m(svbool_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_m)))\n" "svuint8_t svrshr_m(svbool_t, svuint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_m)))\n" "svuint32_t svrshr_m(svbool_t, svuint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_m)))\n" "svuint64_t svrshr_m(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_m)))\n" "svuint16_t svrshr_m(svbool_t, svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_x)))\n" "svint8_t svrshr_x(svbool_t, svint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_x)))\n" "svint32_t svrshr_x(svbool_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_x)))\n" "svint64_t svrshr_x(svbool_t, svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_x)))\n" "svint16_t svrshr_x(svbool_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_x)))\n" "svuint8_t svrshr_x(svbool_t, svuint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_x)))\n" "svuint32_t svrshr_x(svbool_t, svuint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_x)))\n" "svuint64_t svrshr_x(svbool_t, svuint64_t, uint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_x)))\n" "svuint16_t svrshr_x(svbool_t, svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_z)))\n" "svint8_t svrshr_z(svbool_t, svint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_z)))\n" "svint32_t svrshr_z(svbool_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_z)))\n" "svint64_t svrshr_z(svbool_t, svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_z)))\n" "svint16_t svrshr_z(svbool_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_z)))\n" "svuint8_t svrshr_z(svbool_t, svuint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_z)))\n" "svuint32_t svrshr_z(svbool_t, svuint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_z)))\n" "svuint64_t svrshr_z(svbool_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_z)))\n" "svuint16_t svrshr_z(svbool_t, svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u32)))\n" "svuint16_t svrshrnb(svuint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u64)))\n" "svuint32_t svrshrnb(svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u16)))\n" "svuint8_t svrshrnb(svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s32)))\n" "svint16_t svrshrnb(svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s64)))\n" "svint32_t svrshrnb(svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s16)))\n" "svint8_t svrshrnb(svint16_t, 
uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u32)))\n" "svuint16_t svrshrnt(svuint16_t, svuint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u64)))\n" "svuint32_t svrshrnt(svuint32_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u16)))\n" "svuint8_t svrshrnt(svuint8_t, svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s32)))\n" "svint16_t svrshrnt(svint16_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s64)))\n" "svint32_t svrshrnt(svint32_t, svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s16)))\n" "svint8_t svrshrnt(svint8_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_m)))\n" "svuint32_t svrsqrte_m(svuint32_t, svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_x)))\n" "svuint32_t svrsqrte_x(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_z)))\n" "svuint32_t svrsqrte_z(svbool_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s8)))\n" "svint8_t svrsra(svint8_t, svint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s32)))\n" "svint32_t svrsra(svint32_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s64)))\n" "svint64_t svrsra(svint64_t, svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s16)))\n" "svint16_t svrsra(svint16_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u8)))\n" "svuint8_t svrsra(svuint8_t, svuint8_t, uint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u32)))\n" "svuint32_t svrsra(svuint32_t, svuint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u64)))\n" "svuint64_t svrsra(svuint64_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u16)))\n" "svuint16_t svrsra(svuint16_t, svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u32)))\n" "svuint16_t svrsubhnb(svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u64)))\n" "svuint32_t svrsubhnb(svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u16)))\n" "svuint8_t svrsubhnb(svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s32)))\n" "svint16_t svrsubhnb(svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s64)))\n" "svint32_t svrsubhnb(svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s16)))\n" "svint8_t svrsubhnb(svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u32)))\n" "svuint16_t svrsubhnb(svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u64)))\n" "svuint32_t svrsubhnb(svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u16)))\n" "svuint8_t svrsubhnb(svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s32)))\n" "svint16_t svrsubhnb(svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s64)))\n" "svint32_t svrsubhnb(svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s16)))\n" "svint8_t svrsubhnb(svint16_t, svint16_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u32)))\n" "svuint16_t svrsubhnt(svuint16_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u64)))\n" "svuint32_t svrsubhnt(svuint32_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u16)))\n" "svuint8_t svrsubhnt(svuint8_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s32)))\n" "svint16_t svrsubhnt(svint16_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s64)))\n" "svint32_t svrsubhnt(svint32_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s16)))\n" "svint8_t svrsubhnt(svint8_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u32)))\n" "svuint16_t svrsubhnt(svuint16_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u64)))\n" "svuint32_t svrsubhnt(svuint32_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u16)))\n" "svuint8_t svrsubhnt(svuint8_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s32)))\n" "svint16_t svrsubhnt(svint16_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s64)))\n" "svint32_t svrsubhnt(svint32_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s16)))\n" "svint8_t svrsubhnt(svint8_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_n_u32)))\n" "svuint32_t svsbclb(svuint32_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_n_u64)))\n" "svuint64_t svsbclb(svuint64_t, svuint64_t, uint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_u32)))\n" "svuint32_t svsbclb(svuint32_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_u64)))\n" "svuint64_t svsbclb(svuint64_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_n_u32)))\n" "svuint32_t svsbclt(svuint32_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_n_u64)))\n" "svuint64_t svsbclt(svuint64_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_u32)))\n" "svuint32_t svsbclt(svuint32_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_u64)))\n" "svuint64_t svsbclt(svuint64_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s32)))\n" "svint32_t svshllb(svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s64)))\n" "svint64_t svshllb(svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s16)))\n" "svint16_t svshllb(svint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u32)))\n" "svuint32_t svshllb(svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u64)))\n" "svuint64_t svshllb(svuint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u16)))\n" "svuint16_t svshllb(svuint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s32)))\n" "svint32_t svshllt(svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s64)))\n" "svint64_t svshllt(svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s16)))\n" "svint16_t svshllt(svint8_t, uint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u32)))\n" "svuint32_t svshllt(svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u64)))\n" "svuint64_t svshllt(svuint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u16)))\n" "svuint16_t svshllt(svuint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u32)))\n" "svuint16_t svshrnb(svuint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u64)))\n" "svuint32_t svshrnb(svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u16)))\n" "svuint8_t svshrnb(svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s32)))\n" "svint16_t svshrnb(svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s64)))\n" "svint32_t svshrnb(svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s16)))\n" "svint8_t svshrnb(svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u32)))\n" "svuint16_t svshrnt(svuint16_t, svuint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u64)))\n" "svuint32_t svshrnt(svuint32_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u16)))\n" "svuint8_t svshrnt(svuint8_t, svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s32)))\n" "svint16_t svshrnt(svint16_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s64)))\n" "svint32_t svshrnt(svint32_t, svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s16)))\n" "svint8_t svshrnt(svint8_t, svint16_t, uint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u8)))\n" "svuint8_t svsli(svuint8_t, svuint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u32)))\n" "svuint32_t svsli(svuint32_t, svuint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u64)))\n" "svuint64_t svsli(svuint64_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u16)))\n" "svuint16_t svsli(svuint16_t, svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s8)))\n" "svint8_t svsli(svint8_t, svint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s32)))\n" "svint32_t svsli(svint32_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s64)))\n" "svint64_t svsli(svint64_t, svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s16)))\n" "svint16_t svsli(svint16_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_m)))\n" "svuint8_t svsqadd_m(svbool_t, svuint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_m)))\n" "svuint32_t svsqadd_m(svbool_t, svuint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_m)))\n" "svuint64_t svsqadd_m(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_m)))\n" "svuint16_t svsqadd_m(svbool_t, svuint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_x)))\n" "svuint8_t svsqadd_x(svbool_t, svuint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_x)))\n" "svuint32_t svsqadd_x(svbool_t, svuint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_x)))\n" "svuint64_t 
svsqadd_x(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_x)))\n" "svuint16_t svsqadd_x(svbool_t, svuint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_z)))\n" "svuint8_t svsqadd_z(svbool_t, svuint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_z)))\n" "svuint32_t svsqadd_z(svbool_t, svuint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_z)))\n" "svuint64_t svsqadd_z(svbool_t, svuint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_z)))\n" "svuint16_t svsqadd_z(svbool_t, svuint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_m)))\n" "svuint8_t svsqadd_m(svbool_t, svuint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_m)))\n" "svuint32_t svsqadd_m(svbool_t, svuint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_m)))\n" "svuint64_t svsqadd_m(svbool_t, svuint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_m)))\n" "svuint16_t svsqadd_m(svbool_t, svuint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_x)))\n" "svuint8_t svsqadd_x(svbool_t, svuint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_x)))\n" "svuint32_t svsqadd_x(svbool_t, svuint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_x)))\n" "svuint64_t svsqadd_x(svbool_t, svuint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_x)))\n" "svuint16_t svsqadd_x(svbool_t, svuint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_z)))\n" "svuint8_t svsqadd_z(svbool_t, svuint8_t, 
svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_z)))\n" "svuint32_t svsqadd_z(svbool_t, svuint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_z)))\n" "svuint64_t svsqadd_z(svbool_t, svuint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_z)))\n" "svuint16_t svsqadd_z(svbool_t, svuint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s8)))\n" "svint8_t svsra(svint8_t, svint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s32)))\n" "svint32_t svsra(svint32_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s64)))\n" "svint64_t svsra(svint64_t, svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s16)))\n" "svint16_t svsra(svint16_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u8)))\n" "svuint8_t svsra(svuint8_t, svuint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u32)))\n" "svuint32_t svsra(svuint32_t, svuint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u64)))\n" "svuint64_t svsra(svuint64_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u16)))\n" "svuint16_t svsra(svuint16_t, svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u8)))\n" "svuint8_t svsri(svuint8_t, svuint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u32)))\n" "svuint32_t svsri(svuint32_t, svuint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u64)))\n" "svuint64_t svsri(svuint64_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u16)))\n" 
"svuint16_t svsri(svuint16_t, svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s8)))\n" "svint8_t svsri(svint8_t, svint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s32)))\n" "svint32_t svsri(svint32_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s64)))\n" "svint64_t svsri(svint64_t, svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s16)))\n" "svint16_t svsri(svint16_t, svint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_u32)))\n" "void svstnt1_scatter_index(svbool_t, svuint32_t, int64_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_u64)))\n" "void svstnt1_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_f64)))\n" "void svstnt1_scatter_index(svbool_t, svuint64_t, int64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_f32)))\n" "void svstnt1_scatter_index(svbool_t, svuint32_t, int64_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_s32)))\n" "void svstnt1_scatter_index(svbool_t, svuint32_t, int64_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_s64)))\n" "void svstnt1_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_u32)))\n" "void svstnt1_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_u64)))\n" "void svstnt1_scatter_offset(svbool_t, svuint64_t, int64_t, 
svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_f64)))\n" "void svstnt1_scatter_offset(svbool_t, svuint64_t, int64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_f32)))\n" "void svstnt1_scatter_offset(svbool_t, svuint32_t, int64_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_s32)))\n" "void svstnt1_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_s64)))\n" "void svstnt1_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_u32)))\n" "void svstnt1_scatter(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_u64)))\n" "void svstnt1_scatter(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_f64)))\n" "void svstnt1_scatter(svbool_t, svuint64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_f32)))\n" "void svstnt1_scatter(svbool_t, svuint32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_s32)))\n" "void svstnt1_scatter(svbool_t, svuint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_s64)))\n" "void svstnt1_scatter(svbool_t, svuint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_u64)))\n" "void svstnt1_scatter_index(svbool_t, uint64_t *, svint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_f64)))\n" "void svstnt1_scatter_index(svbool_t, float64_t *, 
svint64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_s64)))\n" "void svstnt1_scatter_index(svbool_t, int64_t *, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_u64)))\n" "void svstnt1_scatter_index(svbool_t, uint64_t *, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_f64)))\n" "void svstnt1_scatter_index(svbool_t, float64_t *, svuint64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_s64)))\n" "void svstnt1_scatter_index(svbool_t, int64_t *, svuint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_u32)))\n" "void svstnt1_scatter_offset(svbool_t, uint32_t *, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_f32)))\n" "void svstnt1_scatter_offset(svbool_t, float32_t *, svuint32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_s32)))\n" "void svstnt1_scatter_offset(svbool_t, int32_t *, svuint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_u64)))\n" "void svstnt1_scatter_offset(svbool_t, uint64_t *, svint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_f64)))\n" "void svstnt1_scatter_offset(svbool_t, float64_t *, svint64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_s64)))\n" "void svstnt1_scatter_offset(svbool_t, int64_t *, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_u64)))\n" "void svstnt1_scatter_offset(svbool_t, uint64_t *, svuint64_t, svuint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_f64)))\n" "void svstnt1_scatter_offset(svbool_t, float64_t *, svuint64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_s64)))\n" "void svstnt1_scatter_offset(svbool_t, int64_t *, svuint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_offset_u32)))\n" "void svstnt1b_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_offset_u64)))\n" "void svstnt1b_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_offset_s32)))\n" "void svstnt1b_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_offset_s64)))\n" "void svstnt1b_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_u32)))\n" "void svstnt1b_scatter(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_u64)))\n" "void svstnt1b_scatter(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_s32)))\n" "void svstnt1b_scatter(svbool_t, svuint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_s64)))\n" "void svstnt1b_scatter(svbool_t, svuint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32offset_s32)))\n" "void svstnt1b_scatter_offset(svbool_t, int8_t *, svuint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32offset_u32)))\n" "void 
svstnt1b_scatter_offset(svbool_t, uint8_t *, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_s64offset_s64)))\n" "void svstnt1b_scatter_offset(svbool_t, int8_t *, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_s64offset_u64)))\n" "void svstnt1b_scatter_offset(svbool_t, uint8_t *, svint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64offset_s64)))\n" "void svstnt1b_scatter_offset(svbool_t, int8_t *, svuint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64offset_u64)))\n" "void svstnt1b_scatter_offset(svbool_t, uint8_t *, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_index_u32)))\n" "void svstnt1h_scatter_index(svbool_t, svuint32_t, int64_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_index_u64)))\n" "void svstnt1h_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_index_s32)))\n" "void svstnt1h_scatter_index(svbool_t, svuint32_t, int64_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_index_s64)))\n" "void svstnt1h_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_offset_u32)))\n" "void svstnt1h_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_offset_u64)))\n" "void svstnt1h_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_offset_s32)))\n" "void svstnt1h_scatter_offset(svbool_t, 
svuint32_t, int64_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_offset_s64)))\n" "void svstnt1h_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_u32)))\n" "void svstnt1h_scatter(svbool_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_u64)))\n" "void svstnt1h_scatter(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_s32)))\n" "void svstnt1h_scatter(svbool_t, svuint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_s64)))\n" "void svstnt1h_scatter(svbool_t, svuint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64index_s64)))\n" "void svstnt1h_scatter_index(svbool_t, int16_t *, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64index_u64)))\n" "void svstnt1h_scatter_index(svbool_t, uint16_t *, svint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64index_s64)))\n" "void svstnt1h_scatter_index(svbool_t, int16_t *, svuint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64index_u64)))\n" "void svstnt1h_scatter_index(svbool_t, uint16_t *, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32offset_s32)))\n" "void svstnt1h_scatter_offset(svbool_t, int16_t *, svuint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32offset_u32)))\n" "void svstnt1h_scatter_offset(svbool_t, uint16_t *, svuint32_t, svuint32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64offset_s64)))\n" "void svstnt1h_scatter_offset(svbool_t, int16_t *, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64offset_u64)))\n" "void svstnt1h_scatter_offset(svbool_t, uint16_t *, svint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64offset_s64)))\n" "void svstnt1h_scatter_offset(svbool_t, int16_t *, svuint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64offset_u64)))\n" "void svstnt1h_scatter_offset(svbool_t, uint16_t *, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_index_u64)))\n" "void svstnt1w_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_index_s64)))\n" "void svstnt1w_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_offset_u64)))\n" "void svstnt1w_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_offset_s64)))\n" "void svstnt1w_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_u64)))\n" "void svstnt1w_scatter(svbool_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_s64)))\n" "void svstnt1w_scatter(svbool_t, svuint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64index_s64)))\n" "void svstnt1w_scatter_index(svbool_t, int32_t *, svint64_t, svint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64index_u64)))\n" "void svstnt1w_scatter_index(svbool_t, uint32_t *, svint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64index_s64)))\n" "void svstnt1w_scatter_index(svbool_t, int32_t *, svuint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64index_u64)))\n" "void svstnt1w_scatter_index(svbool_t, uint32_t *, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64offset_s64)))\n" "void svstnt1w_scatter_offset(svbool_t, int32_t *, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64offset_u64)))\n" "void svstnt1w_scatter_offset(svbool_t, uint32_t *, svint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64offset_s64)))\n" "void svstnt1w_scatter_offset(svbool_t, int32_t *, svuint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64offset_u64)))\n" "void svstnt1w_scatter_offset(svbool_t, uint32_t *, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u32)))\n" "svuint16_t svsubhnb(svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u64)))\n" "svuint32_t svsubhnb(svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u16)))\n" "svuint8_t svsubhnb(svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s32)))\n" "svint16_t svsubhnb(svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s64)))\n" "svint32_t svsubhnb(svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s16)))\n" "svint8_t svsubhnb(svint16_t, int16_t);\n" 
"__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u32)))\n" "svuint16_t svsubhnb(svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u64)))\n" "svuint32_t svsubhnb(svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u16)))\n" "svuint8_t svsubhnb(svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s32)))\n" "svint16_t svsubhnb(svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s64)))\n" "svint32_t svsubhnb(svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s16)))\n" "svint8_t svsubhnb(svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u32)))\n" "svuint16_t svsubhnt(svuint16_t, svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u64)))\n" "svuint32_t svsubhnt(svuint32_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u16)))\n" "svuint8_t svsubhnt(svuint8_t, svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s32)))\n" "svint16_t svsubhnt(svint16_t, svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s64)))\n" "svint32_t svsubhnt(svint32_t, svint64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s16)))\n" "svint8_t svsubhnt(svint8_t, svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u32)))\n" "svuint16_t svsubhnt(svuint16_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u64)))\n" "svuint32_t svsubhnt(svuint32_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u16)))\n" "svuint8_t 
svsubhnt(svuint8_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s32)))\n" "svint16_t svsubhnt(svint16_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s64)))\n" "svint32_t svsubhnt(svint32_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s16)))\n" "svint8_t svsubhnt(svint8_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s32)))\n" "svint32_t svsublb(svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s64)))\n" "svint64_t svsublb(svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s16)))\n" "svint16_t svsublb(svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u32)))\n" "svuint32_t svsublb(svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u64)))\n" "svuint64_t svsublb(svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u16)))\n" "svuint16_t svsublb(svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s32)))\n" "svint32_t svsublb(svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s64)))\n" "svint64_t svsublb(svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s16)))\n" "svint16_t svsublb(svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u32)))\n" "svuint32_t svsublb(svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u64)))\n" "svuint64_t svsublb(svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u16)))\n" "svuint16_t svsublb(svuint8_t, svuint8_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s32)))\n" "svint32_t svsublbt(svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s64)))\n" "svint64_t svsublbt(svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s16)))\n" "svint16_t svsublbt(svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s32)))\n" "svint32_t svsublbt(svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s64)))\n" "svint64_t svsublbt(svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s16)))\n" "svint16_t svsublbt(svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s32)))\n" "svint32_t svsublt(svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s64)))\n" "svint64_t svsublt(svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s16)))\n" "svint16_t svsublt(svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u32)))\n" "svuint32_t svsublt(svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u64)))\n" "svuint64_t svsublt(svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u16)))\n" "svuint16_t svsublt(svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s32)))\n" "svint32_t svsublt(svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s64)))\n" "svint64_t svsublt(svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s16)))\n" "svint16_t svsublt(svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u32)))\n" "svuint32_t svsublt(svuint16_t, 
svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u64)))\n" "svuint64_t svsublt(svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u16)))\n" "svuint16_t svsublt(svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s32)))\n" "svint32_t svsubltb(svint16_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s64)))\n" "svint64_t svsubltb(svint32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s16)))\n" "svint16_t svsubltb(svint8_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s32)))\n" "svint32_t svsubltb(svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s64)))\n" "svint64_t svsubltb(svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s16)))\n" "svint16_t svsubltb(svint8_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s32)))\n" "svint32_t svsubwb(svint32_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s64)))\n" "svint64_t svsubwb(svint64_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s16)))\n" "svint16_t svsubwb(svint16_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u32)))\n" "svuint32_t svsubwb(svuint32_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u64)))\n" "svuint64_t svsubwb(svuint64_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u16)))\n" "svuint16_t svsubwb(svuint16_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s32)))\n" "svint32_t svsubwb(svint32_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s64)))\n" 
"svint64_t svsubwb(svint64_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s16)))\n" "svint16_t svsubwb(svint16_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u32)))\n" "svuint32_t svsubwb(svuint32_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u64)))\n" "svuint64_t svsubwb(svuint64_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u16)))\n" "svuint16_t svsubwb(svuint16_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s32)))\n" "svint32_t svsubwt(svint32_t, int16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s64)))\n" "svint64_t svsubwt(svint64_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s16)))\n" "svint16_t svsubwt(svint16_t, int8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u32)))\n" "svuint32_t svsubwt(svuint32_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u64)))\n" "svuint64_t svsubwt(svuint64_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u16)))\n" "svuint16_t svsubwt(svuint16_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s32)))\n" "svint32_t svsubwt(svint32_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s64)))\n" "svint64_t svsubwt(svint64_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s16)))\n" "svint16_t svsubwt(svint16_t, svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u32)))\n" "svuint32_t svsubwt(svuint32_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u64)))\n" "svuint64_t svsubwt(svuint64_t, svuint32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u16)))\n" "svuint16_t svsubwt(svuint16_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u8)))\n" "svuint8_t svtbl2(svuint8x2_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u32)))\n" "svuint32_t svtbl2(svuint32x2_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u64)))\n" "svuint64_t svtbl2(svuint64x2_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u16)))\n" "svuint16_t svtbl2(svuint16x2_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s8)))\n" "svint8_t svtbl2(svint8x2_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f64)))\n" "svfloat64_t svtbl2(svfloat64x2_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f32)))\n" "svfloat32_t svtbl2(svfloat32x2_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f16)))\n" "svfloat16_t svtbl2(svfloat16x2_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s32)))\n" "svint32_t svtbl2(svint32x2_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s64)))\n" "svint64_t svtbl2(svint64x2_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s16)))\n" "svint16_t svtbl2(svint16x2_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u8)))\n" "svuint8_t svtbx(svuint8_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u32)))\n" "svuint32_t svtbx(svuint32_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u64)))\n" "svuint64_t svtbx(svuint64_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u16)))\n" 
"svuint16_t svtbx(svuint16_t, svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s8)))\n" "svint8_t svtbx(svint8_t, svint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f64)))\n" "svfloat64_t svtbx(svfloat64_t, svfloat64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f32)))\n" "svfloat32_t svtbx(svfloat32_t, svfloat32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f16)))\n" "svfloat16_t svtbx(svfloat16_t, svfloat16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s32)))\n" "svint32_t svtbx(svint32_t, svint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s64)))\n" "svint64_t svtbx(svint64_t, svint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s16)))\n" "svint16_t svtbx(svint16_t, svint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_m)))\n" "svint8_t svuqadd_m(svbool_t, svint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_m)))\n" "svint32_t svuqadd_m(svbool_t, svint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_m)))\n" "svint64_t svuqadd_m(svbool_t, svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_m)))\n" "svint16_t svuqadd_m(svbool_t, svint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_x)))\n" "svint8_t svuqadd_x(svbool_t, svint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_x)))\n" "svint32_t svuqadd_x(svbool_t, svint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_x)))\n" "svint64_t svuqadd_x(svbool_t, svint64_t, uint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_x)))\n" "svint16_t svuqadd_x(svbool_t, svint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_z)))\n" "svint8_t svuqadd_z(svbool_t, svint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_z)))\n" "svint32_t svuqadd_z(svbool_t, svint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_z)))\n" "svint64_t svuqadd_z(svbool_t, svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_z)))\n" "svint16_t svuqadd_z(svbool_t, svint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_m)))\n" "svint8_t svuqadd_m(svbool_t, svint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_m)))\n" "svint32_t svuqadd_m(svbool_t, svint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_m)))\n" "svint64_t svuqadd_m(svbool_t, svint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_m)))\n" "svint16_t svuqadd_m(svbool_t, svint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_x)))\n" "svint8_t svuqadd_x(svbool_t, svint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_x)))\n" "svint32_t svuqadd_x(svbool_t, svint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_x)))\n" "svint64_t svuqadd_x(svbool_t, svint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_x)))\n" "svint16_t svuqadd_x(svbool_t, svint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_z)))\n" "svint8_t svuqadd_z(svbool_t, svint8_t, svuint8_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_z)))\n" "svint32_t svuqadd_z(svbool_t, svint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_z)))\n" "svint64_t svuqadd_z(svbool_t, svint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_z)))\n" "svint16_t svuqadd_z(svbool_t, svint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s32)))\n" "svbool_t svwhilege_b8(int32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s32)))\n" "svbool_t svwhilege_b32(int32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s32)))\n" "svbool_t svwhilege_b64(int32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s32)))\n" "svbool_t svwhilege_b16(int32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s64)))\n" "svbool_t svwhilege_b8(int64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s64)))\n" "svbool_t svwhilege_b32(int64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s64)))\n" "svbool_t svwhilege_b64(int64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s64)))\n" "svbool_t svwhilege_b16(int64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u32)))\n" "svbool_t svwhilege_b8(uint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u32)))\n" "svbool_t svwhilege_b32(uint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u32)))\n" "svbool_t svwhilege_b64(uint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u32)))\n" "svbool_t svwhilege_b16(uint32_t, uint32_t);\n" 
"__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u64)))\n" "svbool_t svwhilege_b8(uint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u64)))\n" "svbool_t svwhilege_b32(uint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u64)))\n" "svbool_t svwhilege_b64(uint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u64)))\n" "svbool_t svwhilege_b16(uint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s32)))\n" "svbool_t svwhilegt_b8(int32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s32)))\n" "svbool_t svwhilegt_b32(int32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s32)))\n" "svbool_t svwhilegt_b64(int32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s32)))\n" "svbool_t svwhilegt_b16(int32_t, int32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s64)))\n" "svbool_t svwhilegt_b8(int64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s64)))\n" "svbool_t svwhilegt_b32(int64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s64)))\n" "svbool_t svwhilegt_b64(int64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s64)))\n" "svbool_t svwhilegt_b16(int64_t, int64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u32)))\n" "svbool_t svwhilegt_b8(uint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u32)))\n" "svbool_t svwhilegt_b32(uint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u32)))\n" "svbool_t svwhilegt_b64(uint32_t, uint32_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u32)))\n" "svbool_t svwhilegt_b16(uint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u64)))\n" "svbool_t svwhilegt_b8(uint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u64)))\n" "svbool_t svwhilegt_b32(uint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u64)))\n" "svbool_t svwhilegt_b64(uint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u64)))\n" "svbool_t svwhilegt_b16(uint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u8)))\n" "svbool_t svwhilerw(uint8_t const *, uint8_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s8)))\n" "svbool_t svwhilerw(int8_t const *, int8_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u64)))\n" "svbool_t svwhilerw(uint64_t const *, uint64_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f64)))\n" "svbool_t svwhilerw(float64_t const *, float64_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s64)))\n" "svbool_t svwhilerw(int64_t const *, int64_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u16)))\n" "svbool_t svwhilerw(uint16_t const *, uint16_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f16)))\n" "svbool_t svwhilerw(float16_t const *, float16_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s16)))\n" "svbool_t svwhilerw(int16_t const *, int16_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u32)))\n" "svbool_t svwhilerw(uint32_t const *, uint32_t const *);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f32)))\n" "svbool_t svwhilerw(float32_t const *, float32_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s32)))\n" "svbool_t svwhilerw(int32_t const *, int32_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u8)))\n" "svbool_t svwhilewr(uint8_t const *, uint8_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s8)))\n" "svbool_t svwhilewr(int8_t const *, int8_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u64)))\n" "svbool_t svwhilewr(uint64_t const *, uint64_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f64)))\n" "svbool_t svwhilewr(float64_t const *, float64_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s64)))\n" "svbool_t svwhilewr(int64_t const *, int64_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u16)))\n" "svbool_t svwhilewr(uint16_t const *, uint16_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f16)))\n" "svbool_t svwhilewr(float16_t const *, float16_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s16)))\n" "svbool_t svwhilewr(int16_t const *, int16_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u32)))\n" "svbool_t svwhilewr(uint32_t const *, uint32_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f32)))\n" "svbool_t svwhilewr(float32_t const *, float32_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s32)))\n" "svbool_t svwhilewr(int32_t const *, int32_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u8)))\n" "svuint8_t svxar(svuint8_t, svuint8_t, uint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u32)))\n" "svuint32_t svxar(svuint32_t, svuint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u64)))\n" "svuint64_t svxar(svuint64_t, svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u16)))\n" "svuint16_t svxar(svuint16_t, svuint16_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s8)))\n" "svint8_t svxar(svint8_t, svint8_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s32)))\n" "svint32_t svxar(svint32_t, svint32_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s64)))\n" "svint64_t svxar(svint64_t, svint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s16)))\n" "svint16_t svxar(svint16_t, svint16_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_bf16)))\n" "svbfloat16_t svtbl2_bf16(svbfloat16x2_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_bf16)))\n" "svbfloat16_t svtbx_bf16(svbfloat16_t, svbfloat16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_bf16)))\n" "svbool_t svwhilerw_bf16(bfloat16_t const *, bfloat16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_bf16)))\n" "svbool_t svwhilewr_bf16(bfloat16_t const *, bfloat16_t const *);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_bf16)))\n" "svbfloat16_t svtbl2(svbfloat16x2_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_bf16)))\n" "svbfloat16_t svtbx(svbfloat16_t, svbfloat16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_bf16)))\n" "svbool_t svwhilerw(bfloat16_t const *, bfloat16_t const *);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_bf16)))\n" "svbool_t svwhilewr(bfloat16_t const *, bfloat16_t const *);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesd_u8)))\n" "svuint8_t svaesd_u8(svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaese_u8)))\n" "svuint8_t svaese_u8(svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesimc_u8)))\n" "svuint8_t svaesimc_u8(svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesmc_u8)))\n" "svuint8_t svaesmc_u8(svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u64)))\n" "svuint64_t svpmullb_pair_n_u64(svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u64)))\n" "svuint64_t svpmullb_pair_u64(svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u64)))\n" "svuint64_t svpmullt_pair_n_u64(svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u64)))\n" "svuint64_t svpmullt_pair_u64(svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesd_u8)))\n" "svuint8_t svaesd(svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaese_u8)))\n" "svuint8_t svaese(svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesimc_u8)))\n" "svuint8_t svaesimc(svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesmc_u8)))\n" "svuint8_t svaesmc(svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u64)))\n" "svuint64_t svpmullb_pair(svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u64)))\n" "svuint64_t svpmullb_pair(svuint64_t, svuint64_t);\n" "__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u64)))\n" "svuint64_t svpmullt_pair(svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u64)))\n" "svuint64_t svpmullt_pair(svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u8)))\n" "svuint8_t svbdep_n_u8(svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u32)))\n" "svuint32_t svbdep_n_u32(svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u64)))\n" "svuint64_t svbdep_n_u64(svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u16)))\n" "svuint16_t svbdep_n_u16(svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u8)))\n" "svuint8_t svbdep_u8(svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u32)))\n" "svuint32_t svbdep_u32(svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u64)))\n" "svuint64_t svbdep_u64(svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u16)))\n" "svuint16_t svbdep_u16(svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u8)))\n" "svuint8_t svbext_n_u8(svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u32)))\n" "svuint32_t svbext_n_u32(svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u64)))\n" "svuint64_t svbext_n_u64(svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u16)))\n" "svuint16_t svbext_n_u16(svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u8)))\n" "svuint8_t svbext_u8(svuint8_t, svuint8_t);\n" "__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u32)))\n" "svuint32_t svbext_u32(svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u64)))\n" "svuint64_t svbext_u64(svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u16)))\n" "svuint16_t svbext_u16(svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u8)))\n" "svuint8_t svbgrp_n_u8(svuint8_t, uint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u32)))\n" "svuint32_t svbgrp_n_u32(svuint32_t, uint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u64)))\n" "svuint64_t svbgrp_n_u64(svuint64_t, uint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u16)))\n" "svuint16_t svbgrp_n_u16(svuint16_t, uint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u8)))\n" "svuint8_t svbgrp_u8(svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u32)))\n" "svuint32_t svbgrp_u32(svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u64)))\n" "svuint64_t svbgrp_u64(svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u16)))\n" "svuint16_t svbgrp_u16(svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u8)))\n" "svuint8_t svbdep(svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u32)))\n" "svuint32_t svbdep(svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u64)))\n" "svuint64_t svbdep(svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u16)))\n" "svuint16_t svbdep(svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u8)))\n" "svuint8_t 
svbdep(svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u32)))\n" "svuint32_t svbdep(svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u64)))\n" "svuint64_t svbdep(svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u16)))\n" "svuint16_t svbdep(svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u8)))\n" "svuint8_t svbext(svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u32)))\n" "svuint32_t svbext(svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u64)))\n" "svuint64_t svbext(svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u16)))\n" "svuint16_t svbext(svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u8)))\n" "svuint8_t svbext(svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u32)))\n" "svuint32_t svbext(svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u64)))\n" "svuint64_t svbext(svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u16)))\n" "svuint16_t svbext(svuint16_t, svuint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u8)))\n" "svuint8_t svbgrp(svuint8_t, uint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u32)))\n" "svuint32_t svbgrp(svuint32_t, uint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u64)))\n" "svuint64_t svbgrp(svuint64_t, uint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u16)))\n" "svuint16_t svbgrp(svuint16_t, uint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u8)))\n" "svuint8_t 
svbgrp(svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u32)))\n" "svuint32_t svbgrp(svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u64)))\n" "svuint64_t svbgrp(svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u16)))\n" "svuint16_t svbgrp(svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrax1_u64)))\n" "svuint64_t svrax1_u64(svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrax1_s64)))\n" "svint64_t svrax1_s64(svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrax1_u64)))\n" "svuint64_t svrax1(svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrax1_s64)))\n" "svint64_t svrax1(svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsm4e_u32)))\n" "svuint32_t svsm4e_u32(svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsm4ekey_u32)))\n" "svuint32_t svsm4ekey_u32(svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsm4e_u32)))\n" "svuint32_t svsm4e(svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsm4ekey_u32)))\n" "svuint32_t svsm4ekey(svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_f64)))\n" "svfloat64_t svclamp_f64(svfloat64_t, svfloat64_t, svfloat64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_f32)))\n" "svfloat32_t svclamp_f32(svfloat32_t, svfloat32_t, svfloat32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_f16)))\n" "svfloat16_t svclamp_f16(svfloat16_t, svfloat16_t, svfloat16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s8)))\n" "svint8_t svclamp_s8(svint8_t, svint8_t, 
svint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s32)))\n" "svint32_t svclamp_s32(svint32_t, svint32_t, svint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s64)))\n" "svint64_t svclamp_s64(svint64_t, svint64_t, svint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s16)))\n" "svint16_t svclamp_s16(svint16_t, svint16_t, svint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u8)))\n" "svuint8_t svclamp_u8(svuint8_t, svuint8_t, svuint8_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u32)))\n" "svuint32_t svclamp_u32(svuint32_t, svuint32_t, svuint32_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u64)))\n" "svuint64_t svclamp_u64(svuint64_t, svuint64_t, svuint64_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u16)))\n" "svuint16_t svclamp_u16(svuint16_t, svuint16_t, svuint16_t);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_c8)))\n" "svcount_t svptrue_c8(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_c32)))\n" "svcount_t svptrue_c32(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_c64)))\n" "svcount_t svptrue_c64(void);\n" "__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_c16)))\n" "svcount_t svptrue_c16(void);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_f64)))\n" "svfloat64_t svclamp(svfloat64_t, svfloat64_t, svfloat64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_f32)))\n" "svfloat32_t svclamp(svfloat32_t, svfloat32_t, svfloat32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_f16)))\n" "svfloat16_t svclamp(svfloat16_t, svfloat16_t, svfloat16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s8)))\n" "svint8_t svclamp(svint8_t, svint8_t, 
svint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s32)))\n" "svint32_t svclamp(svint32_t, svint32_t, svint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s64)))\n" "svint64_t svclamp(svint64_t, svint64_t, svint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s16)))\n" "svint16_t svclamp(svint16_t, svint16_t, svint16_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u8)))\n" "svuint8_t svclamp(svuint8_t, svuint8_t, svuint8_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u32)))\n" "svuint32_t svclamp(svuint32_t, svuint32_t, svuint32_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u64)))\n" "svuint64_t svclamp(svuint64_t, svuint64_t, svuint64_t);\n" "__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u16)))\n" "svuint16_t svclamp(svuint16_t, svuint16_t, svuint16_t);\n" "#define svcvtnt_bf16_x svcvtnt_bf16_m\n" "#define svcvtnt_bf16_f32_x svcvtnt_bf16_f32_m\n" "#define svcvtnt_f16_x svcvtnt_f16_m\n" "#define svcvtnt_f16_f32_x svcvtnt_f16_f32_m\n" "#define svcvtnt_f32_x svcvtnt_f32_m\n" "#define svcvtnt_f32_f64_x svcvtnt_f32_f64_m\n" "\n" "#define svcvtxnt_f32_x svcvtxnt_f32_m\n" "#define svcvtxnt_f32_f64_x svcvtxnt_f32_f64_m\n" "\n" "#ifdef __cplusplus\n" "} // extern \"C\"\n" "#endif\n" "\n" "#undef __ai\n" "\n" "#undef __aio\n" "\n" "#endif /* __ARM_SVE_H */\n" "" } , { "/builtins/armintr.h" , "/*===---- armintr.h - ARM Windows intrinsics -------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "/* Only include this if we're compiling for the windows platform. 
*/\n" "#ifndef _MSC_VER\n" "#include_next \n" "#else\n" "\n" "#ifndef __ARMINTR_H\n" "#define __ARMINTR_H\n" "\n" "typedef enum\n" "{\n" " _ARM_BARRIER_SY = 0xF,\n" " _ARM_BARRIER_ST = 0xE,\n" " _ARM_BARRIER_ISH = 0xB,\n" " _ARM_BARRIER_ISHST = 0xA,\n" " _ARM_BARRIER_NSH = 0x7,\n" " _ARM_BARRIER_NSHST = 0x6,\n" " _ARM_BARRIER_OSH = 0x3,\n" " _ARM_BARRIER_OSHST = 0x2\n" "} _ARMINTR_BARRIER_TYPE;\n" "\n" "#endif /* __ARMINTR_H */\n" "#endif /* _MSC_VER */\n" "" } , { "/builtins/avx2intrin.h" , "/*===---- avx2intrin.h - AVX2 intrinsics -----------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __AVX2INTRIN_H\n" "#define __AVX2INTRIN_H\n" "\n" "/* Define the default attributes for the functions in this file. */\n" "#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__(\"avx2\"), __min_vector_width__(256)))\n" "#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__(\"avx2\"), __min_vector_width__(128)))\n" "\n" "/* SSE4 Multiple Packed Sums of Absolute Difference. */\n" "/// Computes sixteen sum of absolute difference (SAD) operations on sets of\n" "/// four unsigned 8-bit integers from the 256-bit integer vectors \\a X and\n" "/// \\a Y.\n" "///\n" "/// Eight SAD results are computed using the lower half of the input\n" "/// vectors, and another eight using the upper half. These 16-bit values\n" "/// are returned in the lower and upper halves of the 256-bit result,\n" "/// respectively.\n" "///\n" "/// A single SAD operation selects four bytes from \\a X and four bytes from\n" "/// \\a Y as input. 
It computes the differences between each \\a X byte and\n" "/// the corresponding \\a Y byte, takes the absolute value of each\n" "/// difference, and sums these four values to form one 16-bit result. The\n" "/// intrinsic computes 16 of these results with different sets of input\n" "/// bytes.\n" "///\n" "/// For each set of eight results, the SAD operations use the same four\n" "/// bytes from \\a Y; the starting bit position for these four bytes is\n" "/// specified by \\a M[1:0] times 32. The eight operations use successive\n" "/// sets of four bytes from \\a X; the starting bit position for the first\n" "/// set of four bytes is specified by \\a M[2] times 32. These bit positions\n" "/// are all relative to the 128-bit lane for each set of eight operations.\n" "///\n" "/// \\code{.operation}\n" "/// r := 0\n" "/// FOR i := 0 TO 1\n" "/// j := i*3\n" "/// Ybase := M[j+1:j]*32 + i*128\n" "/// Xbase := M[j+2]*32 + i*128\n" "/// FOR k := 0 TO 3\n" "/// temp0 := ABS(X[Xbase+7:Xbase] - Y[Ybase+7:Ybase])\n" "/// temp1 := ABS(X[Xbase+15:Xbase+8] - Y[Ybase+15:Ybase+8])\n" "/// temp2 := ABS(X[Xbase+23:Xbase+16] - Y[Ybase+23:Ybase+16])\n" "/// temp3 := ABS(X[Xbase+31:Xbase+24] - Y[Ybase+31:Ybase+24])\n" "/// result[r+15:r] := temp0 + temp1 + temp2 + temp3\n" "/// Xbase := Xbase + 8\n" "/// r := r + 16\n" "/// ENDFOR\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256i _mm256_mpsadbw_epu8(__m256i X, __m256i Y, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VMPSADBW instruction.\n" "///\n" "/// \\param X\n" "/// A 256-bit integer vector containing one of the inputs.\n" "/// \\param Y\n" "/// A 256-bit integer vector containing one of the inputs.\n" "/// \\param M\n" "/// An unsigned immediate value specifying the starting positions of the\n" "/// bytes to operate on.\n" "/// \\returns A 256-bit vector of [16 x i16] containing the result.\n" "#define _mm256_mpsadbw_epu8(X, Y, M) \\\n" " 
((__m256i)__builtin_ia32_mpsadbw256((__v32qi)(__m256i)(X), \\\n" " (__v32qi)(__m256i)(Y), (int)(M)))\n" "\n" "/// Computes the absolute value of each signed byte in the 256-bit integer\n" "/// vector \\a __a and returns each value in the corresponding byte of\n" "/// the result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPABSB instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit integer vector.\n" "/// \\returns A 256-bit integer vector containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_abs_epi8(__m256i __a)\n" "{\n" " return (__m256i)__builtin_elementwise_abs((__v32qs)__a);\n" "}\n" "\n" "/// Computes the absolute value of each signed 16-bit element in the 256-bit\n" "/// vector of [16 x i16] in \\a __a and returns each value in the\n" "/// corresponding element of the result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPABSW instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [16 x i16].\n" "/// \\returns A 256-bit vector of [16 x i16] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_abs_epi16(__m256i __a)\n" "{\n" " return (__m256i)__builtin_elementwise_abs((__v16hi)__a);\n" "}\n" "\n" "/// Computes the absolute value of each signed 32-bit element in the 256-bit\n" "/// vector of [8 x i32] in \\a __a and returns each value in the\n" "/// corresponding element of the result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPABSD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x i32].\n" "/// \\returns A 256-bit vector of [8 x i32] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_abs_epi32(__m256i __a)\n" "{\n" " return (__m256i)__builtin_elementwise_abs((__v8si)__a);\n" "}\n" "\n" "/// Converts the elements of two 256-bit vectors of [16 x i16] to 8-bit\n" "/// integers using signed saturation, 
and returns the 256-bit result.\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 7\n" "/// j := i*16\n" "/// k := i*8\n" "/// result[7+k:k] := SATURATE8(__a[15+j:j])\n" "/// result[71+k:64+k] := SATURATE8(__b[15+j:j])\n" "/// result[135+k:128+k] := SATURATE8(__a[143+j:128+j])\n" "/// result[199+k:192+k] := SATURATE8(__b[143+j:128+j])\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPACKSSWB instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [16 x i16] used to generate result[63:0] and\n" "/// result[191:128].\n" "/// \\param __b\n" "/// A 256-bit vector of [16 x i16] used to generate result[127:64] and\n" "/// result[255:192].\n" "/// \\returns A 256-bit integer vector containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_packs_epi16(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_ia32_packsswb256((__v16hi)__a, (__v16hi)__b);\n" "}\n" "\n" "/// Converts the elements of two 256-bit vectors of [8 x i32] to 16-bit\n" "/// integers using signed saturation, and returns the resulting 256-bit\n" "/// vector of [16 x i16].\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 3\n" "/// j := i*32\n" "/// k := i*16\n" "/// result[15+k:k] := SATURATE16(__a[31+j:j])\n" "/// result[79+k:64+k] := SATURATE16(__b[31+j:j])\n" "/// result[143+k:128+k] := SATURATE16(__a[159+j:128+j])\n" "/// result[207+k:192+k] := SATURATE16(__b[159+j:128+j])\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPACKSSDW instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x i32] used to generate result[63:0] and\n" "/// result[191:128].\n" "/// \\param __b\n" "/// A 256-bit vector of [8 x i32] used to generate result[127:64] and\n" "/// result[255:192].\n" "/// \\returns A 256-bit vector of [16 x i16] containing the result.\n" "static __inline__ __m256i 
__DEFAULT_FN_ATTRS256\n" "_mm256_packs_epi32(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_ia32_packssdw256((__v8si)__a, (__v8si)__b);\n" "}\n" "\n" "/// Converts elements from two 256-bit vectors of [16 x i16] to 8-bit integers\n" "/// using unsigned saturation, and returns the 256-bit result.\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 7\n" "/// j := i*16\n" "/// k := i*8\n" "/// result[7+k:k] := SATURATE8U(__a[15+j:j])\n" "/// result[71+k:64+k] := SATURATE8U(__b[15+j:j])\n" "/// result[135+k:128+k] := SATURATE8U(__a[143+j:128+j])\n" "/// result[199+k:192+k] := SATURATE8U(__b[143+j:128+j])\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPACKUSWB instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [16 x i16] used to generate result[63:0] and\n" "/// result[191:128].\n" "/// \\param __b\n" "/// A 256-bit vector of [16 x i16] used to generate result[127:64] and\n" "/// result[255:192].\n" "/// \\returns A 256-bit integer vector containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_packus_epi16(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_ia32_packuswb256((__v16hi)__a, (__v16hi)__b);\n" "}\n" "\n" "/// Converts elements from two 256-bit vectors of [8 x i32] to 16-bit integers\n" "/// using unsigned saturation, and returns the resulting 256-bit vector of\n" "/// [16 x i16].\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 3\n" "/// j := i*32\n" "/// k := i*16\n" "/// result[15+k:k] := SATURATE16U(__V1[31+j:j])\n" "/// result[79+k:64+k] := SATURATE16U(__V2[31+j:j])\n" "/// result[143+k:128+k] := SATURATE16U(__V1[159+j:128+j])\n" "/// result[207+k:192+k] := SATURATE16U(__V2[159+j:128+j])\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPACKUSDW instruction.\n" "///\n" "/// \\param __V1\n" "/// A 256-bit vector of [8 x i32] used to 
generate result[63:0] and\n" "/// result[191:128].\n" "/// \\param __V2\n" "/// A 256-bit vector of [8 x i32] used to generate result[127:64] and\n" "/// result[255:192].\n" "/// \\returns A 256-bit vector of [16 x i16] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_packus_epi32(__m256i __V1, __m256i __V2)\n" "{\n" " return (__m256i) __builtin_ia32_packusdw256((__v8si)__V1, (__v8si)__V2);\n" "}\n" "\n" "/// Adds 8-bit integers from corresponding bytes of two 256-bit integer\n" "/// vectors and returns the lower 8 bits of each sum in the corresponding\n" "/// byte of the 256-bit integer vector result (overflow is ignored).\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPADDB instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit integer vector containing one of the source operands.\n" "/// \\param __b\n" "/// A 256-bit integer vector containing one of the source operands.\n" "/// \\returns A 256-bit integer vector containing the sums.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_add_epi8(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)((__v32qu)__a + (__v32qu)__b);\n" "}\n" "\n" "/// Adds 16-bit integers from corresponding elements of two 256-bit vectors of\n" "/// [16 x i16] and returns the lower 16 bits of each sum in the\n" "/// corresponding element of the [16 x i16] result (overflow is ignored).\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPADDW instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [16 x i16] containing one of the source operands.\n" "/// \\param __b\n" "/// A 256-bit vector of [16 x i16] containing one of the source operands.\n" "/// \\returns A 256-bit vector of [16 x i16] containing the sums.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_add_epi16(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)((__v16hu)__a + (__v16hu)__b);\n" "}\n" "\n" "/// Adds 32-bit integers 
from corresponding elements of two 256-bit vectors of\n" "/// [8 x i32] and returns the lower 32 bits of each sum in the corresponding\n" "/// element of the [8 x i32] result (overflow is ignored).\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPADDD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x i32] containing one of the source operands.\n" "/// \\param __b\n" "/// A 256-bit vector of [8 x i32] containing one of the source operands.\n" "/// \\returns A 256-bit vector of [8 x i32] containing the sums.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_add_epi32(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)((__v8su)__a + (__v8su)__b);\n" "}\n" "\n" "/// Adds 64-bit integers from corresponding elements of two 256-bit vectors of\n" "/// [4 x i64] and returns the lower 64 bits of each sum in the corresponding\n" "/// element of the [4 x i64] result (overflow is ignored).\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPADDQ instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [4 x i64] containing one of the source operands.\n" "/// \\param __b\n" "/// A 256-bit vector of [4 x i64] containing one of the source operands.\n" "/// \\returns A 256-bit vector of [4 x i64] containing the sums.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_add_epi64(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)((__v4du)__a + (__v4du)__b);\n" "}\n" "\n" "/// Adds 8-bit integers from corresponding bytes of two 256-bit integer\n" "/// vectors using signed saturation, and returns each sum in the\n" "/// corresponding byte of the 256-bit integer vector result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPADDSB instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit integer vector containing one of the source operands.\n" "/// \\param __b\n" "/// A 256-bit integer vector containing one of the 
source operands.\n" "/// \\returns A 256-bit integer vector containing the sums.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_adds_epi8(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_elementwise_add_sat((__v32qs)__a, (__v32qs)__b);\n" "}\n" "\n" "/// Adds 16-bit integers from corresponding elements of two 256-bit vectors of\n" "/// [16 x i16] using signed saturation, and returns the [16 x i16] result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPADDSW instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [16 x i16] containing one of the source operands.\n" "/// \\param __b\n" "/// A 256-bit vector of [16 x i16] containing one of the source operands.\n" "/// \\returns A 256-bit vector of [16 x i16] containing the sums.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_adds_epi16(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_elementwise_add_sat((__v16hi)__a, (__v16hi)__b);\n" "}\n" "\n" "/// Adds 8-bit integers from corresponding bytes of two 256-bit integer\n" "/// vectors using unsigned saturation, and returns each sum in the\n" "/// corresponding byte of the 256-bit integer vector result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPADDUSB instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit integer vector containing one of the source operands.\n" "/// \\param __b\n" "/// A 256-bit integer vector containing one of the source operands.\n" "/// \\returns A 256-bit integer vector containing the sums.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_adds_epu8(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_elementwise_add_sat((__v32qu)__a, (__v32qu)__b);\n" "}\n" "\n" "/// Adds 16-bit integers from corresponding elements of two 256-bit vectors of\n" "/// [16 x i16] using unsigned saturation, and returns the [16 x i16] result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This 
intrinsic corresponds to the \\c VPADDUSW instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [16 x i16] containing one of the source operands.\n" "/// \\param __b\n" "/// A 256-bit vector of [16 x i16] containing one of the source operands.\n" "/// \\returns A 256-bit vector of [16 x i16] containing the sums.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_adds_epu16(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_elementwise_add_sat((__v16hu)__a, (__v16hu)__b);\n" "}\n" "\n" "/// Uses the lower half of the 256-bit vector \\a a as the upper half of a\n" "/// temporary 256-bit value, and the lower half of the 256-bit vector \\a b\n" "/// as the lower half of the temporary value. Right-shifts the temporary\n" "/// value by \\a n bytes, and uses the lower 16 bytes of the shifted value\n" "/// as the lower 16 bytes of the result. Uses the upper halves of \\a a and\n" "/// \\a b to make another temporary value, right shifts by \\a n, and uses\n" "/// the lower 16 bytes of the shifted value as the upper 16 bytes of the\n" "/// result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256i _mm256_alignr_epi8(__m256i a, __m256i b, const int n);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPALIGNR instruction.\n" "///\n" "/// \\param a\n" "/// A 256-bit integer vector containing source values.\n" "/// \\param b\n" "/// A 256-bit integer vector containing source values.\n" "/// \\param n\n" "/// An immediate value specifying the number of bytes to shift.\n" "/// \\returns A 256-bit integer vector containing the result.\n" "#define _mm256_alignr_epi8(a, b, n) \\\n" " ((__m256i)__builtin_ia32_palignr256((__v32qi)(__m256i)(a), \\\n" " (__v32qi)(__m256i)(b), (n)))\n" "\n" "/// Computes the bitwise AND of the 256-bit integer vectors in \\a __a and\n" "/// \\a __b.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPAND instruction.\n" "///\n" "/// \\param 
__a\n" "/// A 256-bit integer vector.\n" "/// \\param __b\n" "/// A 256-bit integer vector.\n" "/// \\returns A 256-bit integer vector containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_and_si256(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)((__v4du)__a & (__v4du)__b);\n" "}\n" "\n" "/// Computes the bitwise AND of the 256-bit integer vector in \\a __b with\n" "/// the bitwise NOT of the 256-bit integer vector in \\a __a.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPANDN instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit integer vector.\n" "/// \\param __b\n" "/// A 256-bit integer vector.\n" "/// \\returns A 256-bit integer vector containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_andnot_si256(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)(~(__v4du)__a & (__v4du)__b);\n" "}\n" "\n" "/// Computes the averages of the corresponding unsigned bytes in the two\n" "/// 256-bit integer vectors in \\a __a and \\a __b and returns each\n" "/// average in the corresponding byte of the 256-bit result.\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 31\n" "/// j := i*8\n" "/// result[j+7:j] := (__a[j+7:j] + __b[j+7:j] + 1) >> 1\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPAVGB instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit integer vector.\n" "/// \\param __b\n" "/// A 256-bit integer vector.\n" "/// \\returns A 256-bit integer vector containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_avg_epu8(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_ia32_pavgb256((__v32qi)__a, (__v32qi)__b);\n" "}\n" "\n" "/// Computes the averages of the corresponding unsigned 16-bit integers in\n" "/// the two 256-bit vectors of [16 x i16] in \\a __a and \\a __b and returns\n" "/// each average in the corresponding element of 
the 256-bit result.\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 15\n" "/// j := i*16\n" "/// result[j+15:j] := (__a[j+15:j] + __b[j+15:j] + 1) >> 1\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPAVGW instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [16 x i16].\n" "/// \\param __b\n" "/// A 256-bit vector of [16 x i16].\n" "/// \\returns A 256-bit vector of [16 x i16] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_avg_epu16(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_ia32_pavgw256((__v16hi)__a, (__v16hi)__b);\n" "}\n" "\n" "/// Merges 8-bit integer values from either of the two 256-bit vectors\n" "/// \\a __V1 or \\a __V2, as specified by the 256-bit mask \\a __M and returns\n" "/// the resulting 256-bit integer vector.\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 31\n" "/// j := i*8\n" "/// IF __M[7+i] == 0\n" "/// result[7+j:j] := __V1[7+j:j]\n" "/// ELSE\n" "/// result[7+j:j] := __V2[7+j:j]\n" "/// FI\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPBLENDVB instruction.\n" "///\n" "/// \\param __V1\n" "/// A 256-bit integer vector containing source values.\n" "/// \\param __V2\n" "/// A 256-bit integer vector containing source values.\n" "/// \\param __M\n" "/// A 256-bit integer vector, with bit [7] of each byte specifying the\n" "/// source for each corresponding byte of the result. 
When the mask bit\n" "/// is 0, the byte is copied from \\a __V1; otherwise, it is copied from\n" "/// \\a __V2.\n" "/// \\returns A 256-bit integer vector containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_blendv_epi8(__m256i __V1, __m256i __V2, __m256i __M)\n" "{\n" " return (__m256i)__builtin_ia32_pblendvb256((__v32qi)__V1, (__v32qi)__V2,\n" " (__v32qi)__M);\n" "}\n" "\n" "/// Merges 16-bit integer values from either of the two 256-bit vectors\n" "/// \\a V1 or \\a V2, as specified by the immediate integer operand \\a M,\n" "/// and returns the resulting 256-bit vector of [16 x i16].\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 7\n" "/// j := i*16\n" "/// IF M[i] == 0\n" "/// result[7+j:j] := V1[7+j:j]\n" "/// result[135+j:128+j] := V1[135+j:128+j]\n" "/// ELSE\n" "/// result[7+j:j] := V2[7+j:j]\n" "/// result[135+j:128+j] := V2[135+j:128+j]\n" "/// FI\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256i _mm256_blend_epi16(__m256i V1, __m256i V2, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPBLENDW instruction.\n" "///\n" "/// \\param V1\n" "/// A 256-bit vector of [16 x i16] containing source values.\n" "/// \\param V2\n" "/// A 256-bit vector of [16 x i16] containing source values.\n" "/// \\param M\n" "/// An immediate 8-bit integer operand, with bits [7:0] specifying the\n" "/// source for each element of the result. The position of the mask bit\n" "/// corresponds to the index of a copied value. 
When a mask bit is 0, the\n" "/// element is copied from \\a V1; otherwise, it is copied from \\a V2.\n" "/// \\a M[0] determines the source for elements 0 and 8, \\a M[1] for\n" "/// elements 1 and 9, and so forth.\n" "/// \\returns A 256-bit vector of [16 x i16] containing the result.\n" "#define _mm256_blend_epi16(V1, V2, M) \\\n" " ((__m256i)__builtin_ia32_pblendw256((__v16hi)(__m256i)(V1), \\\n" " (__v16hi)(__m256i)(V2), (int)(M)))\n" "\n" "/// Compares corresponding bytes in the 256-bit integer vectors in \\a __a and\n" "/// \\a __b for equality and returns the outcomes in the corresponding\n" "/// bytes of the 256-bit result.\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 31\n" "/// j := i*8\n" "/// result[j+7:j] := (__a[j+7:j] == __b[j+7:j]) ? 0xFF : 0\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPCMPEQB instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit integer vector containing one of the inputs.\n" "/// \\param __b\n" "/// A 256-bit integer vector containing one of the inputs.\n" "/// \\returns A 256-bit integer vector containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_cmpeq_epi8(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)((__v32qi)__a == (__v32qi)__b);\n" "}\n" "\n" "/// Compares corresponding elements in the 256-bit vectors of [16 x i16] in\n" "/// \\a __a and \\a __b for equality and returns the outcomes in the\n" "/// corresponding elements of the 256-bit result.\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 15\n" "/// j := i*16\n" "/// result[j+15:j] := (__a[j+15:j] == __b[j+15:j]) ? 
0xFFFF : 0\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPCMPEQW instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [16 x i16] containing one of the inputs.\n" "/// \\param __b\n" "/// A 256-bit vector of [16 x i16] containing one of the inputs.\n" "/// \\returns A 256-bit vector of [16 x i16] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_cmpeq_epi16(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)((__v16hi)__a == (__v16hi)__b);\n" "}\n" "\n" "/// Compares corresponding elements in the 256-bit vectors of [8 x i32] in\n" "/// \\a __a and \\a __b for equality and returns the outcomes in the\n" "/// corresponding elements of the 256-bit result.\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 7\n" "/// j := i*32\n" "/// result[j+31:j] := (__a[j+31:j] == __b[j+31:j]) ? 0xFFFFFFFF : 0\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPCMPEQD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x i32] containing one of the inputs.\n" "/// \\param __b\n" "/// A 256-bit vector of [8 x i32] containing one of the inputs.\n" "/// \\returns A 256-bit vector of [8 x i32] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_cmpeq_epi32(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)((__v8si)__a == (__v8si)__b);\n" "}\n" "\n" "/// Compares corresponding elements in the 256-bit vectors of [4 x i64] in\n" "/// \\a __a and \\a __b for equality and returns the outcomes in the\n" "/// corresponding elements of the 256-bit result.\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 3\n" "/// j := i*64\n" "/// result[j+63:j] := (__a[j+63:j] == __b[j+63:j]) ? 
0xFFFFFFFFFFFFFFFF : 0\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPCMPEQQ instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [4 x i64] containing one of the inputs.\n" "/// \\param __b\n" "/// A 256-bit vector of [4 x i64] containing one of the inputs.\n" "/// \\returns A 256-bit vector of [4 x i64] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_cmpeq_epi64(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)((__v4di)__a == (__v4di)__b);\n" "}\n" "\n" "/// Compares corresponding signed bytes in the 256-bit integer vectors in\n" "/// \\a __a and \\a __b for greater-than and returns the outcomes in the\n" "/// corresponding bytes of the 256-bit result.\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 31\n" "/// j := i*8\n" "/// result[j+7:j] := (__a[j+7:j] > __b[j+7:j]) ? 0xFF : 0\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPCMPGTB instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit integer vector containing one of the inputs.\n" "/// \\param __b\n" "/// A 256-bit integer vector containing one of the inputs.\n" "/// \\returns A 256-bit integer vector containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_cmpgt_epi8(__m256i __a, __m256i __b)\n" "{\n" " /* This function always performs a signed comparison, but __v32qi is a char\n" " which may be signed or unsigned, so use __v32qs. */\n" " return (__m256i)((__v32qs)__a > (__v32qs)__b);\n" "}\n" "\n" "/// Compares corresponding signed elements in the 256-bit vectors of\n" "/// [16 x i16] in \\a __a and \\a __b for greater-than and returns the\n" "/// outcomes in the corresponding elements of the 256-bit result.\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 15\n" "/// j := i*16\n" "/// result[j+15:j] := (__a[j+15:j] > __b[j+15:j]) ? 
0xFFFF : 0\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPCMPGTW instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [16 x i16] containing one of the inputs.\n" "/// \\param __b\n" "/// A 256-bit vector of [16 x i16] containing one of the inputs.\n" "/// \\returns A 256-bit vector of [16 x i16] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_cmpgt_epi16(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)((__v16hi)__a > (__v16hi)__b);\n" "}\n" "\n" "/// Compares corresponding signed elements in the 256-bit vectors of\n" "/// [8 x i32] in \\a __a and \\a __b for greater-than and returns the\n" "/// outcomes in the corresponding elements of the 256-bit result.\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 7\n" "/// j := i*32\n" "/// result[j+31:j] := (__a[j+31:j] > __b[j+31:j]) ? 0xFFFFFFFF : 0\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPCMPGTD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x i32] containing one of the inputs.\n" "/// \\param __b\n" "/// A 256-bit vector of [8 x i32] containing one of the inputs.\n" "/// \\returns A 256-bit vector of [8 x i32] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_cmpgt_epi32(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)((__v8si)__a > (__v8si)__b);\n" "}\n" "\n" "/// Compares corresponding signed elements in the 256-bit vectors of\n" "/// [4 x i64] in \\a __a and \\a __b for greater-than and returns the\n" "/// outcomes in the corresponding elements of the 256-bit result.\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 3\n" "/// j := i*64\n" "/// result[j+63:j] := (__a[j+63:j] > __b[j+63:j]) ? 
0xFFFFFFFFFFFFFFFF : 0\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPCMPGTQ instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [4 x i64] containing one of the inputs.\n" "/// \\param __b\n" "/// A 256-bit vector of [4 x i64] containing one of the inputs.\n" "/// \\returns A 256-bit vector of [4 x i64] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_cmpgt_epi64(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)((__v4di)__a > (__v4di)__b);\n" "}\n" "\n" "/// Horizontally adds the adjacent pairs of 16-bit integers from two 256-bit\n" "/// vectors of [16 x i16] and returns the lower 16 bits of each sum in an\n" "/// element of the [16 x i16] result (overflow is ignored). Sums from\n" "/// \\a __a are returned in the lower 64 bits of each 128-bit half of the\n" "/// result; sums from \\a __b are returned in the upper 64 bits of each\n" "/// 128-bit half of the result.\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 1\n" "/// j := i*128\n" "/// result[j+15:j] := __a[j+15:j] + __a[j+31:j+16]\n" "/// result[j+31:j+16] := __a[j+47:j+32] + __a[j+63:j+48]\n" "/// result[j+47:j+32] := __a[j+79:j+64] + __a[j+95:j+80]\n" "/// result[j+63:j+48] := __a[j+111:j+96] + __a[j+127:j+112]\n" "/// result[j+79:j+64] := __b[j+15:j] + __b[j+31:j+16]\n" "/// result[j+95:j+80] := __b[j+47:j+32] + __b[j+63:j+48]\n" "/// result[j+111:j+96] := __b[j+79:j+64] + __b[j+95:j+80]\n" "/// result[j+127:j+112] := __b[j+111:j+96] + __b[j+127:j+112]\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPHADDW instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [16 x i16] containing one of the source operands.\n" "/// \\param __b\n" "/// A 256-bit vector of [16 x i16] containing one of the source operands.\n" "/// \\returns A 256-bit vector of [16 x i16] containing the sums.\n" "static 
__inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_hadd_epi16(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_ia32_phaddw256((__v16hi)__a, (__v16hi)__b);\n" "}\n" "\n" "/// Horizontally adds the adjacent pairs of 32-bit integers from two 256-bit\n" "/// vectors of [8 x i32] and returns the lower 32 bits of each sum in an\n" "/// element of the [8 x i32] result (overflow is ignored). Sums from \\a __a\n" "/// are returned in the lower 64 bits of each 128-bit half of the result;\n" "/// sums from \\a __b are returned in the upper 64 bits of each 128-bit half\n" "/// of the result.\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 1\n" "/// j := i*128\n" "/// result[j+31:j] := __a[j+31:j] + __a[j+63:j+32]\n" "/// result[j+63:j+32] := __a[j+95:j+64] + __a[j+127:j+96]\n" "/// result[j+95:j+64] := __b[j+31:j] + __b[j+63:j+32]\n" "/// result[j+127:j+96] := __b[j+95:j+64] + __b[j+127:j+96]\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPHADDD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x i32] containing one of the source operands.\n" "/// \\param __b\n" "/// A 256-bit vector of [8 x i32] containing one of the source operands.\n" "/// \\returns A 256-bit vector of [8 x i32] containing the sums.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_hadd_epi32(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_ia32_phaddd256((__v8si)__a, (__v8si)__b);\n" "}\n" "\n" "/// Horizontally adds the adjacent pairs of 16-bit integers from two 256-bit\n" "/// vectors of [16 x i16] using signed saturation and returns each sum in\n" "/// an element of the [16 x i16] result. 
Sums from \\a __a are returned in\n" "/// the lower 64 bits of each 128-bit half of the result; sums from \\a __b\n" "/// are returned in the upper 64 bits of each 128-bit half of the result.\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 1\n" "/// j := i*128\n" "/// result[j+15:j] := SATURATE16(__a[j+15:j] + __a[j+31:j+16])\n" "/// result[j+31:j+16] := SATURATE16(__a[j+47:j+32] + __a[j+63:j+48])\n" "/// result[j+47:j+32] := SATURATE16(__a[j+79:j+64] + __a[j+95:j+80])\n" "/// result[j+63:j+48] := SATURATE16(__a[j+111:j+96] + __a[j+127:j+112])\n" "/// result[j+79:j+64] := SATURATE16(__b[j+15:j] + __b[j+31:j+16])\n" "/// result[j+95:j+80] := SATURATE16(__b[j+47:j+32] + __b[j+63:j+48])\n" "/// result[j+111:j+96] := SATURATE16(__b[j+79:j+64] + __b[j+95:j+80])\n" "/// result[j+127:j+112] := SATURATE16(__b[j+111:j+96] + __b[j+127:j+112])\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPHADDSW instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [16 x i16] containing one of the source operands.\n" "/// \\param __b\n" "/// A 256-bit vector of [16 x i16] containing one of the source operands.\n" "/// \\returns A 256-bit vector of [16 x i16] containing the sums.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_hadds_epi16(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_ia32_phaddsw256((__v16hi)__a, (__v16hi)__b);\n" "}\n" "\n" "/// Horizontally subtracts adjacent pairs of 16-bit integers from two 256-bit\n" "/// vectors of [16 x i16] and returns the lower 16 bits of each difference\n" "/// in an element of the [16 x i16] result (overflow is ignored).\n" "/// Differences from \\a __a are returned in the lower 64 bits of each\n" "/// 128-bit half of the result; differences from \\a __b are returned in the\n" "/// upper 64 bits of each 128-bit half of the result.\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 1\n" "/// j := i*128\n" "/// 
result[j+15:j] := __a[j+15:j] - __a[j+31:j+16]\n" "/// result[j+31:j+16] := __a[j+47:j+32] - __a[j+63:j+48]\n" "/// result[j+47:j+32] := __a[j+79:j+64] - __a[j+95:j+80]\n" "/// result[j+63:j+48] := __a[j+111:j+96] - __a[j+127:j+112]\n" "/// result[j+79:j+64] := __b[j+15:j] - __b[j+31:j+16]\n" "/// result[j+95:j+80] := __b[j+47:j+32] - __b[j+63:j+48]\n" "/// result[j+111:j+96] := __b[j+79:j+64] - __b[j+95:j+80]\n" "/// result[j+127:j+112] := __b[j+111:j+96] - __b[j+127:j+112]\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPHSUBW instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [16 x i16] containing one of the source operands.\n" "/// \\param __b\n" "/// A 256-bit vector of [16 x i16] containing one of the source operands.\n" "/// \\returns A 256-bit vector of [16 x i16] containing the differences.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_hsub_epi16(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_ia32_phsubw256((__v16hi)__a, (__v16hi)__b);\n" "}\n" "\n" "/// Horizontally subtracts adjacent pairs of 32-bit integers from two 256-bit\n" "/// vectors of [8 x i32] and returns the lower 32 bits of each difference in\n" "/// an element of the [8 x i32] result (overflow is ignored). 
Differences\n" "/// from \\a __a are returned in the lower 64 bits of each 128-bit half of\n" "/// the result; differences from \\a __b are returned in the upper 64 bits\n" "/// of each 128-bit half of the result.\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 1\n" "/// j := i*128\n" "/// result[j+31:j] := __a[j+31:j] - __a[j+63:j+32]\n" "/// result[j+63:j+32] := __a[j+95:j+64] - __a[j+127:j+96]\n" "/// result[j+95:j+64] := __b[j+31:j] - __b[j+63:j+32]\n" "/// result[j+127:j+96] := __b[j+95:j+64] - __b[j+127:j+96]\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPHSUBD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x i32] containing one of the source operands.\n" "/// \\param __b\n" "/// A 256-bit vector of [8 x i32] containing one of the source operands.\n" "/// \\returns A 256-bit vector of [8 x i32] containing the differences.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_hsub_epi32(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_ia32_phsubd256((__v8si)__a, (__v8si)__b);\n" "}\n" "\n" "/// Horizontally subtracts adjacent pairs of 16-bit integers from two 256-bit\n" "/// vectors of [16 x i16] using signed saturation and returns each sum in\n" "/// an element of the [16 x i16] result. 
Differences from \\a __a are\n" "/// returned in the lower 64 bits of each 128-bit half of the result;\n" "/// differences from \\a __b are returned in the upper 64 bits of each\n" "/// 128-bit half of the result.\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 1\n" "/// j := i*128\n" "/// result[j+15:j] := SATURATE16(__a[j+15:j] - __a[j+31:j+16])\n" "/// result[j+31:j+16] := SATURATE16(__a[j+47:j+32] - __a[j+63:j+48])\n" "/// result[j+47:j+32] := SATURATE16(__a[j+79:j+64] - __a[j+95:j+80])\n" "/// result[j+63:j+48] := SATURATE16(__a[j+111:j+96] - __a[j+127:j+112])\n" "/// result[j+79:j+64] := SATURATE16(__b[j+15:j] - __b[j+31:j+16])\n" "/// result[j+95:j+80] := SATURATE16(__b[j+47:j+32] - __b[j+63:j+48])\n" "/// result[j+111:j+96] := SATURATE16(__b[j+79:j+64] - __b[j+95:j+80])\n" "/// result[j+127:j+112] := SATURATE16(__b[j+111:j+96] - __b[j+127:j+112])\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPHSUBSW instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [16 x i16] containing one of the source operands.\n" "/// \\param __b\n" "/// A 256-bit vector of [16 x i16] containing one of the source operands.\n" "/// \\returns A 256-bit vector of [16 x i16] containing the differences.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_hsubs_epi16(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_ia32_phsubsw256((__v16hi)__a, (__v16hi)__b);\n" "}\n" "\n" "/// Multiplies each unsigned byte from the 256-bit integer vector in \\a __a\n" "/// with the corresponding signed byte from the 256-bit integer vector in\n" "/// \\a __b, forming signed 16-bit intermediate products. 
Adds adjacent\n" "/// pairs of those products using signed saturation to form 16-bit sums\n" "/// returned as elements of the [16 x i16] result.\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 15\n" "/// j := i*16\n" "/// temp1 := __a[j+7:j] * __b[j+7:j]\n" "/// temp2 := __a[j+15:j+8] * __b[j+15:j+8]\n" "/// result[j+15:j] := SATURATE16(temp1 + temp2)\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPMADDUBSW instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector containing one of the source operands.\n" "/// \\param __b\n" "/// A 256-bit vector containing one of the source operands.\n" "/// \\returns A 256-bit vector of [16 x i16] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_maddubs_epi16(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_ia32_pmaddubsw256((__v32qi)__a, (__v32qi)__b);\n" "}\n" "\n" "/// Multiplies corresponding 16-bit elements of two 256-bit vectors of\n" "/// [16 x i16], forming 32-bit intermediate products, and adds pairs of\n" "/// those products to form 32-bit sums returned as elements of the\n" "/// [8 x i32] result.\n" "///\n" "/// There is only one wraparound case: when all four of the 16-bit sources\n" "/// are \\c 0x8000, the result will be \\c 0x80000000.\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 7\n" "/// j := i*32\n" "/// temp1 := __a[j+15:j] * __b[j+15:j]\n" "/// temp2 := __a[j+31:j+16] * __b[j+31:j+16]\n" "/// result[j+31:j] := temp1 + temp2\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPMADDWD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [16 x i16] containing one of the source operands.\n" "/// \\param __b\n" "/// A 256-bit vector of [16 x i16] containing one of the source operands.\n" "/// \\returns A 256-bit vector of [8 x i32] containing the result.\n" "static __inline__ __m256i 
__DEFAULT_FN_ATTRS256\n" "_mm256_madd_epi16(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_ia32_pmaddwd256((__v16hi)__a, (__v16hi)__b);\n" "}\n" "\n" "/// Compares the corresponding signed bytes in the two 256-bit integer vectors\n" "/// in \\a __a and \\a __b and returns the larger of each pair in the\n" "/// corresponding byte of the 256-bit result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPMAXSB instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit integer vector.\n" "/// \\param __b\n" "/// A 256-bit integer vector.\n" "/// \\returns A 256-bit integer vector containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_max_epi8(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_elementwise_max((__v32qs)__a, (__v32qs)__b);\n" "}\n" "\n" "/// Compares the corresponding signed 16-bit integers in the two 256-bit\n" "/// vectors of [16 x i16] in \\a __a and \\a __b and returns the larger of\n" "/// each pair in the corresponding element of the 256-bit result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPMAXSW instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [16 x i16].\n" "/// \\param __b\n" "/// A 256-bit vector of [16 x i16].\n" "/// \\returns A 256-bit vector of [16 x i16] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_max_epi16(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_elementwise_max((__v16hi)__a, (__v16hi)__b);\n" "}\n" "\n" "/// Compares the corresponding signed 32-bit integers in the two 256-bit\n" "/// vectors of [8 x i32] in \\a __a and \\a __b and returns the larger of\n" "/// each pair in the corresponding element of the 256-bit result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPMAXSD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x i32].\n" "/// \\param __b\n" "/// A 
256-bit vector of [8 x i32].\n" "/// \\returns A 256-bit vector of [8 x i32] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_max_epi32(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_elementwise_max((__v8si)__a, (__v8si)__b);\n" "}\n" "\n" "/// Compares the corresponding unsigned bytes in the two 256-bit integer\n" "/// vectors in \\a __a and \\a __b and returns the larger of each pair in\n" "/// the corresponding byte of the 256-bit result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPMAXUB instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit integer vector.\n" "/// \\param __b\n" "/// A 256-bit integer vector.\n" "/// \\returns A 256-bit integer vector containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_max_epu8(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_elementwise_max((__v32qu)__a, (__v32qu)__b);\n" "}\n" "\n" "/// Compares the corresponding unsigned 16-bit integers in the two 256-bit\n" "/// vectors of [16 x i16] in \\a __a and \\a __b and returns the larger of\n" "/// each pair in the corresponding element of the 256-bit result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPMAXUW instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [16 x i16].\n" "/// \\param __b\n" "/// A 256-bit vector of [16 x i16].\n" "/// \\returns A 256-bit vector of [16 x i16] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_max_epu16(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_elementwise_max((__v16hu)__a, (__v16hu)__b);\n" "}\n" "\n" "/// Compares the corresponding unsigned 32-bit integers in the two 256-bit\n" "/// vectors of [8 x i32] in \\a __a and \\a __b and returns the larger of\n" "/// each pair in the corresponding element of the 256-bit result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic 
corresponds to the \\c VPMAXUD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x i32].\n" "/// \\param __b\n" "/// A 256-bit vector of [8 x i32].\n" "/// \\returns A 256-bit vector of [8 x i32] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_max_epu32(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_elementwise_max((__v8su)__a, (__v8su)__b);\n" "}\n" "\n" "/// Compares the corresponding signed bytes in the two 256-bit integer vectors\n" "/// in \\a __a and \\a __b and returns the smaller of each pair in the\n" "/// corresponding byte of the 256-bit result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPMINSB instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit integer vector.\n" "/// \\param __b\n" "/// A 256-bit integer vector.\n" "/// \\returns A 256-bit integer vector containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_min_epi8(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_elementwise_min((__v32qs)__a, (__v32qs)__b);\n" "}\n" "\n" "/// Compares the corresponding signed 16-bit integers in the two 256-bit\n" "/// vectors of [16 x i16] in \\a __a and \\a __b and returns the smaller of\n" "/// each pair in the corresponding element of the 256-bit result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPMINSW instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [16 x i16].\n" "/// \\param __b\n" "/// A 256-bit vector of [16 x i16].\n" "/// \\returns A 256-bit vector of [16 x i16] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_min_epi16(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_elementwise_min((__v16hi)__a, (__v16hi)__b);\n" "}\n" "\n" "/// Compares the corresponding signed 32-bit integers in the two 256-bit\n" "/// vectors of [8 x i32] in \\a __a and \\a __b and returns the smaller of\n" 
"/// each pair in the corresponding element of the 256-bit result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPMINSD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x i32].\n" "/// \\param __b\n" "/// A 256-bit vector of [8 x i32].\n" "/// \\returns A 256-bit vector of [8 x i32] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_min_epi32(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_elementwise_min((__v8si)__a, (__v8si)__b);\n" "}\n" "\n" "/// Compares the corresponding unsigned bytes in the two 256-bit integer\n" "/// vectors in \\a __a and \\a __b and returns the smaller of each pair in\n" "/// the corresponding byte of the 256-bit result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPMINUB instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit integer vector.\n" "/// \\param __b\n" "/// A 256-bit integer vector.\n" "/// \\returns A 256-bit integer vector containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_min_epu8(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_elementwise_min((__v32qu)__a, (__v32qu)__b);\n" "}\n" "\n" "/// Compares the corresponding unsigned 16-bit integers in the two 256-bit\n" "/// vectors of [16 x i16] in \\a __a and \\a __b and returns the smaller of\n" "/// each pair in the corresponding element of the 256-bit result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPMINUW instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [16 x i16].\n" "/// \\param __b\n" "/// A 256-bit vector of [16 x i16].\n" "/// \\returns A 256-bit vector of [16 x i16] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_min_epu16(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_elementwise_min((__v16hu)__a, (__v16hu)__b);\n" "}\n" "\n" "/// Compares the 
corresponding unsigned 32-bit integers in the two 256-bit\n" "/// vectors of [8 x i32] in \\a __a and \\a __b and returns the smaller of\n" "/// each pair in the corresponding element of the 256-bit result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPMINUD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x i32].\n" "/// \\param __b\n" "/// A 256-bit vector of [8 x i32].\n" "/// \\returns A 256-bit vector of [8 x i32] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_min_epu32(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_elementwise_min((__v8su)__a, (__v8su)__b);\n" "}\n" "\n" "static __inline__ int __DEFAULT_FN_ATTRS256\n" "_mm256_movemask_epi8(__m256i __a)\n" "{\n" " return __builtin_ia32_pmovmskb256((__v32qi)__a);\n" "}\n" "\n" "/// Sign-extends bytes from the 128-bit integer vector in \\a __V and returns\n" "/// the 16-bit values in the corresponding elements of a 256-bit vector\n" "/// of [16 x i16].\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 15\n" "/// j := i*8\n" "/// k := i*16\n" "/// result[k+15:k] := SignExtend(__V[j+7:j])\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPMOVSXBW instruction.\n" "///\n" "/// \\param __V\n" "/// A 128-bit integer vector containing the source bytes.\n" "/// \\returns A 256-bit vector of [16 x i16] containing the sign-extended\n" "/// values.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_cvtepi8_epi16(__m128i __V)\n" "{\n" " /* This function always performs a signed extension, but __v16qi is a char\n" " which may be signed or unsigned, so use __v16qs. 
*/\n" " return (__m256i)__builtin_convertvector((__v16qs)__V, __v16hi);\n" "}\n" "\n" "/// Sign-extends bytes from the lower half of the 128-bit integer vector in\n" "/// \\a __V and returns the 32-bit values in the corresponding elements of a\n" "/// 256-bit vector of [8 x i32].\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 7\n" "/// j := i*8\n" "/// k := i*32\n" "/// result[k+31:k] := SignExtend(__V[j+7:j])\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPMOVSXBD instruction.\n" "///\n" "/// \\param __V\n" "/// A 128-bit integer vector containing the source bytes.\n" "/// \\returns A 256-bit vector of [8 x i32] containing the sign-extended\n" "/// values.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_cvtepi8_epi32(__m128i __V)\n" "{\n" " /* This function always performs a signed extension, but __v16qi is a char\n" " which may be signed or unsigned, so use __v16qs. */\n" " return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8si);\n" "}\n" "\n" "/// Sign-extends the first four bytes from the 128-bit integer vector in\n" "/// \\a __V and returns the 64-bit values in the corresponding elements of a\n" "/// 256-bit vector of [4 x i64].\n" "///\n" "/// \\code{.operation}\n" "/// result[63:0] := SignExtend(__V[7:0])\n" "/// result[127:64] := SignExtend(__V[15:8])\n" "/// result[191:128] := SignExtend(__V[23:16])\n" "/// result[255:192] := SignExtend(__V[31:24])\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPMOVSXBQ instruction.\n" "///\n" "/// \\param __V\n" "/// A 128-bit integer vector containing the source bytes.\n" "/// \\returns A 256-bit vector of [4 x i64] containing the sign-extended\n" "/// values.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_cvtepi8_epi64(__m128i __V)\n" "{\n" " /* This function always performs a signed 
extension, but __v16qi is a char\n" " which may be signed or unsigned, so use __v16qs. */\n" " return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3), __v4di);\n" "}\n" "\n" "/// Sign-extends 16-bit elements from the 128-bit vector of [8 x i16] in\n" "/// \\a __V and returns the 32-bit values in the corresponding elements of a\n" "/// 256-bit vector of [8 x i32].\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 7\n" "/// j := i*16\n" "/// k := i*32\n" "/// result[k+31:k] := SignExtend(__V[j+15:j])\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPMOVSXWD instruction.\n" "///\n" "/// \\param __V\n" "/// A 128-bit vector of [8 x i16] containing the source values.\n" "/// \\returns A 256-bit vector of [8 x i32] containing the sign-extended\n" "/// values.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_cvtepi16_epi32(__m128i __V)\n" "{\n" " return (__m256i)__builtin_convertvector((__v8hi)__V, __v8si);\n" "}\n" "\n" "/// Sign-extends 16-bit elements from the lower half of the 128-bit vector of\n" "/// [8 x i16] in \\a __V and returns the 64-bit values in the corresponding\n" "/// elements of a 256-bit vector of [4 x i64].\n" "///\n" "/// \\code{.operation}\n" "/// result[63:0] := SignExtend(__V[15:0])\n" "/// result[127:64] := SignExtend(__V[31:16])\n" "/// result[191:128] := SignExtend(__V[47:32])\n" "/// result[255:192] := SignExtend(__V[64:48])\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPMOVSXWQ instruction.\n" "///\n" "/// \\param __V\n" "/// A 128-bit vector of [8 x i16] containing the source values.\n" "/// \\returns A 256-bit vector of [4 x i64] containing the sign-extended\n" "/// values.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_cvtepi16_epi64(__m128i __V)\n" "{\n" " return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, 
(__v8hi)__V, 0, 1, 2, 3), __v4di);\n" "}\n" "\n" "/// Sign-extends 32-bit elements from the 128-bit vector of [4 x i32] in\n" "/// \\a __V and returns the 64-bit values in the corresponding elements of a\n" "/// 256-bit vector of [4 x i64].\n" "///\n" "/// \\code{.operation}\n" "/// result[63:0] := SignExtend(__V[31:0])\n" "/// result[127:64] := SignExtend(__V[63:32])\n" "/// result[191:128] := SignExtend(__V[95:64])\n" "/// result[255:192] := SignExtend(__V[127:96])\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPMOVSXDQ instruction.\n" "///\n" "/// \\param __V\n" "/// A 128-bit vector of [4 x i32] containing the source values.\n" "/// \\returns A 256-bit vector of [4 x i64] containing the sign-extended\n" "/// values.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_cvtepi32_epi64(__m128i __V)\n" "{\n" " return (__m256i)__builtin_convertvector((__v4si)__V, __v4di);\n" "}\n" "\n" "/// Zero-extends bytes from the 128-bit integer vector in \\a __V and returns\n" "/// the 16-bit values in the corresponding elements of a 256-bit vector\n" "/// of [16 x i16].\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 15\n" "/// j := i*8\n" "/// k := i*16\n" "/// result[k+15:k] := ZeroExtend(__V[j+7:j])\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPMOVZXBW instruction.\n" "///\n" "/// \\param __V\n" "/// A 128-bit integer vector containing the source bytes.\n" "/// \\returns A 256-bit vector of [16 x i16] containing the zero-extended\n" "/// values.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_cvtepu8_epi16(__m128i __V)\n" "{\n" " return (__m256i)__builtin_convertvector((__v16qu)__V, __v16hi);\n" "}\n" "\n" "/// Zero-extends bytes from the lower half of the 128-bit integer vector in\n" "/// \\a __V and returns the 32-bit values in the corresponding elements of a\n" "/// 256-bit vector of [8 x i32].\n" "///\n" "/// 
\\code{.operation}\n" "/// FOR i := 0 TO 7\n" "/// j := i*8\n" "/// k := i*32\n" "/// result[k+31:k] := ZeroExtend(__V[j+7:j])\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPMOVZXBD instruction.\n" "///\n" "/// \\param __V\n" "/// A 128-bit integer vector containing the source bytes.\n" "/// \\returns A 256-bit vector of [8 x i32] containing the zero-extended\n" "/// values.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_cvtepu8_epi32(__m128i __V)\n" "{\n" " return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8si);\n" "}\n" "\n" "/// Zero-extends the first four bytes from the 128-bit integer vector in\n" "/// \\a __V and returns the 64-bit values in the corresponding elements of a\n" "/// 256-bit vector of [4 x i64].\n" "///\n" "/// \\code{.operation}\n" "/// result[63:0] := ZeroExtend(__V[7:0])\n" "/// result[127:64] := ZeroExtend(__V[15:8])\n" "/// result[191:128] := ZeroExtend(__V[23:16])\n" "/// result[255:192] := ZeroExtend(__V[31:24])\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPMOVZXBQ instruction.\n" "///\n" "/// \\param __V\n" "/// A 128-bit integer vector containing the source bytes.\n" "/// \\returns A 256-bit vector of [4 x i64] containing the zero-extended\n" "/// values.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_cvtepu8_epi64(__m128i __V)\n" "{\n" " return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3), __v4di);\n" "}\n" "\n" "/// Zero-extends 16-bit elements from the 128-bit vector of [8 x i16] in\n" "/// \\a __V and returns the 32-bit values in the corresponding elements of a\n" "/// 256-bit vector of [8 x i32].\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 7\n" "/// j := i*16\n" "/// k := i*32\n" "/// result[k+31:k] := ZeroExtend(__V[j+15:j])\n" "/// 
ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPMOVZXWD instruction.\n" "///\n" "/// \\param __V\n" "/// A 128-bit vector of [8 x i16] containing the source values.\n" "/// \\returns A 256-bit vector of [8 x i32] containing the zero-extended\n" "/// values.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_cvtepu16_epi32(__m128i __V)\n" "{\n" " return (__m256i)__builtin_convertvector((__v8hu)__V, __v8si);\n" "}\n" "\n" "/// Zero-extends 16-bit elements from the lower half of the 128-bit vector of\n" "/// [8 x i16] in \\a __V and returns the 64-bit values in the corresponding\n" "/// elements of a 256-bit vector of [4 x i64].\n" "///\n" "/// \\code{.operation}\n" "/// result[63:0] := ZeroExtend(__V[15:0])\n" "/// result[127:64] := ZeroExtend(__V[31:16])\n" "/// result[191:128] := ZeroExtend(__V[47:32])\n" "/// result[255:192] := ZeroExtend(__V[64:48])\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPMOVSXWQ instruction.\n" "///\n" "/// \\param __V\n" "/// A 128-bit vector of [8 x i16] containing the source values.\n" "/// \\returns A 256-bit vector of [4 x i64] containing the zero-extended\n" "/// values.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_cvtepu16_epi64(__m128i __V)\n" "{\n" " return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1, 2, 3), __v4di);\n" "}\n" "\n" "/// Zero-extends 32-bit elements from the 128-bit vector of [4 x i32] in\n" "/// \\a __V and returns the 64-bit values in the corresponding elements of a\n" "/// 256-bit vector of [4 x i64].\n" "///\n" "/// \\code{.operation}\n" "/// result[63:0] := ZeroExtend(__V[31:0])\n" "/// result[127:64] := ZeroExtend(__V[63:32])\n" "/// result[191:128] := ZeroExtend(__V[95:64])\n" "/// result[255:192] := ZeroExtend(__V[127:96])\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to 
the \\c VPMOVZXDQ instruction.\n" "///\n" "/// \\param __V\n" "/// A 128-bit vector of [4 x i32] containing the source values.\n" "/// \\returns A 256-bit vector of [4 x i64] containing the zero-extended\n" "/// values.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_cvtepu32_epi64(__m128i __V)\n" "{\n" " return (__m256i)__builtin_convertvector((__v4su)__V, __v4di);\n" "}\n" "\n" "/// Multiplies signed 32-bit integers from even-numbered elements of two\n" "/// 256-bit vectors of [8 x i32] and returns the 64-bit products in the\n" "/// [4 x i64] result.\n" "///\n" "/// \\code{.operation}\n" "/// result[63:0] := __a[31:0] * __b[31:0]\n" "/// result[127:64] := __a[95:64] * __b[95:64]\n" "/// result[191:128] := __a[159:128] * __b[159:128]\n" "/// result[255:192] := __a[223:192] * __b[223:192]\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPMULDQ instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x i32] containing one of the source operands.\n" "/// \\param __b\n" "/// A 256-bit vector of [8 x i32] containing one of the source operands.\n" "/// \\returns A 256-bit vector of [4 x i64] containing the products.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_mul_epi32(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_ia32_pmuldq256((__v8si)__a, (__v8si)__b);\n" "}\n" "\n" "/// Multiplies signed 16-bit integer elements of two 256-bit vectors of\n" "/// [16 x i16], truncates the 32-bit results to the most significant 18\n" "/// bits, rounds by adding 1, and returns bits [16:1] of each rounded\n" "/// product in the [16 x i16] result.\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 15\n" "/// j := i*16\n" "/// temp := ((__a[j+15:j] * __b[j+15:j]) >> 14) + 1\n" "/// result[j+15:j] := temp[16:1]\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPMULHRSW instruction.\n" "///\n" "/// \\param __a\n" 
"/// A 256-bit vector of [16 x i16] containing one of the source operands.\n" "/// \\param __b\n" "/// A 256-bit vector of [16 x i16] containing one of the source operands.\n" "/// \\returns A 256-bit vector of [16 x i16] containing the rounded products.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_mulhrs_epi16(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_ia32_pmulhrsw256((__v16hi)__a, (__v16hi)__b);\n" "}\n" "\n" "/// Multiplies unsigned 16-bit integer elements of two 256-bit vectors of\n" "/// [16 x i16], and returns the upper 16 bits of each 32-bit product in the\n" "/// [16 x i16] result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPMULHUW instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [16 x i16] containing one of the source operands.\n" "/// \\param __b\n" "/// A 256-bit vector of [16 x i16] containing one of the source operands.\n" "/// \\returns A 256-bit vector of [16 x i16] containing the products.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_mulhi_epu16(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_ia32_pmulhuw256((__v16hi)__a, (__v16hi)__b);\n" "}\n" "\n" "/// Multiplies signed 16-bit integer elements of two 256-bit vectors of\n" "/// [16 x i16], and returns the upper 16 bits of each 32-bit product in the\n" "/// [16 x i16] result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPMULHW instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [16 x i16] containing one of the source operands.\n" "/// \\param __b\n" "/// A 256-bit vector of [16 x i16] containing one of the source operands.\n" "/// \\returns A 256-bit vector of [16 x i16] containing the products.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_mulhi_epi16(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_ia32_pmulhw256((__v16hi)__a, (__v16hi)__b);\n" "}\n" "\n" "/// Multiplies signed 
16-bit integer elements of two 256-bit vectors of\n" "/// [16 x i16], and returns the lower 16 bits of each 32-bit product in the\n" "/// [16 x i16] result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPMULLW instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [16 x i16] containing one of the source operands.\n" "/// \\param __b\n" "/// A 256-bit vector of [16 x i16] containing one of the source operands.\n" "/// \\returns A 256-bit vector of [16 x i16] containing the products.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_mullo_epi16(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)((__v16hu)__a * (__v16hu)__b);\n" "}\n" "\n" "/// Multiplies signed 32-bit integer elements of two 256-bit vectors of\n" "/// [8 x i32], and returns the lower 32 bits of each 64-bit product in the\n" "/// [8 x i32] result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPMULLD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x i32] containing one of the source operands.\n" "/// \\param __b\n" "/// A 256-bit vector of [8 x i32] containing one of the source operands.\n" "/// \\returns A 256-bit vector of [8 x i32] containing the products.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_mullo_epi32 (__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)((__v8su)__a * (__v8su)__b);\n" "}\n" "\n" "/// Multiplies unsigned 32-bit integers from even-numered elements of two\n" "/// 256-bit vectors of [8 x i32] and returns the 64-bit products in the\n" "/// [4 x i64] result.\n" "///\n" "/// \\code{.operation}\n" "/// result[63:0] := __a[31:0] * __b[31:0]\n" "/// result[127:64] := __a[95:64] * __b[95:64]\n" "/// result[191:128] := __a[159:128] * __b[159:128]\n" "/// result[255:192] := __a[223:192] * __b[223:192]\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPMULUDQ instruction.\n" "///\n" 
"/// \\param __a\n" "/// A 256-bit vector of [8 x i32] containing one of the source operands.\n" "/// \\param __b\n" "/// A 256-bit vector of [8 x i32] containing one of the source operands.\n" "/// \\returns A 256-bit vector of [4 x i64] containing the products.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_mul_epu32(__m256i __a, __m256i __b)\n" "{\n" " return __builtin_ia32_pmuludq256((__v8si)__a, (__v8si)__b);\n" "}\n" "\n" "/// Computes the bitwise OR of the 256-bit integer vectors in \\a __a and\n" "/// \\a __b.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPOR instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit integer vector.\n" "/// \\param __b\n" "/// A 256-bit integer vector.\n" "/// \\returns A 256-bit integer vector containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_or_si256(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)((__v4du)__a | (__v4du)__b);\n" "}\n" "\n" "/// Computes four sum of absolute difference (SAD) operations on sets of eight\n" "/// unsigned 8-bit integers from the 256-bit integer vectors \\a __a and\n" "/// \\a __b.\n" "///\n" "/// One SAD result is computed for each set of eight bytes from \\a __a and\n" "/// eight bytes from \\a __b. The zero-extended SAD value is returned in the\n" "/// corresponding 64-bit element of the result.\n" "///\n" "/// A single SAD operation takes the differences between the corresponding\n" "/// bytes of \\a __a and \\a __b, takes the absolute value of each difference,\n" "/// and sums these eight values to form one 16-bit result. 
This operation\n" "/// is repeated four times with successive sets of eight bytes.\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 3\n" "/// j := i*64\n" "/// temp0 := ABS(__a[j+7:j] - __b[j+7:j])\n" "/// temp1 := ABS(__a[j+15:j+8] - __b[j+15:j+8])\n" "/// temp2 := ABS(__a[j+23:j+16] - __b[j+23:j+16])\n" "/// temp3 := ABS(__a[j+31:j+24] - __b[j+31:j+24])\n" "/// temp4 := ABS(__a[j+39:j+32] - __b[j+39:j+32])\n" "/// temp5 := ABS(__a[j+47:j+40] - __b[j+47:j+40])\n" "/// temp6 := ABS(__a[j+55:j+48] - __b[j+55:j+48])\n" "/// temp7 := ABS(__a[j+63:j+56] - __b[j+63:j+56])\n" "/// result[j+15:j] := temp0 + temp1 + temp2 + temp3 +\n" "/// temp4 + temp5 + temp6 + temp7\n" "/// result[j+63:j+16] := 0\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSADBW instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit integer vector.\n" "/// \\param __b\n" "/// A 256-bit integer vector.\n" "/// \\returns A 256-bit integer vector containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_sad_epu8(__m256i __a, __m256i __b)\n" "{\n" " return __builtin_ia32_psadbw256((__v32qi)__a, (__v32qi)__b);\n" "}\n" "\n" "/// Shuffles 8-bit integers in the 256-bit integer vector \\a __a according\n" "/// to control information in the 256-bit integer vector \\a __b, and\n" "/// returns the 256-bit result. 
In effect there are two separate 128-bit\n" "/// shuffles in the lower and upper halves.\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 31\n" "/// j := i*8\n" "/// IF __b[j+7] == 1\n" "/// result[j+7:j] := 0\n" "/// ELSE\n" "/// k := __b[j+3:j] * 8\n" "/// IF i > 15\n" "/// k := k + 128\n" "/// FI\n" "/// result[j+7:j] := __a[k+7:k]\n" "/// FI\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSHUFB instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit integer vector containing source values.\n" "/// \\param __b\n" "/// A 256-bit integer vector containing control information to determine\n" "/// what goes into the corresponding byte of the result. If bit 7 of the\n" "/// control byte is 1, the result byte is 0; otherwise, bits 3:0 of the\n" "/// control byte specify the index (within the same 128-bit half) of \\a __a\n" "/// to copy to the result byte.\n" "/// \\returns A 256-bit integer vector containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_shuffle_epi8(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_ia32_pshufb256((__v32qi)__a, (__v32qi)__b);\n" "}\n" "\n" "/// Shuffles 32-bit integers from the 256-bit vector of [8 x i32] in \\a a\n" "/// according to control information in the integer literal \\a imm, and\n" "/// returns the 256-bit result. 
In effect there are two parallel 128-bit\n" "/// shuffles in the lower and upper halves.\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 to 3\n" "/// j := i*32\n" "/// k := (imm >> i*2)[1:0] * 32\n" "/// result[j+31:j] := a[k+31:k]\n" "/// result[128+j+31:128+j] := a[128+k+31:128+k]\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256i _mm256_shuffle_epi32(__m256i a, const int imm);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPSHUFB instruction.\n" "///\n" "/// \\param a\n" "/// A 256-bit vector of [8 x i32] containing source values.\n" "/// \\param imm\n" "/// An immediate 8-bit value specifying which elements to copy from \\a a.\n" "/// \\a imm[1:0] specifies the index in \\a a for elements 0 and 4 of the\n" "/// result, \\a imm[3:2] specifies the index for elements 1 and 5, and so\n" "/// forth.\n" "/// \\returns A 256-bit vector of [8 x i32] containing the result.\n" "#define _mm256_shuffle_epi32(a, imm) \\\n" " ((__m256i)__builtin_ia32_pshufd256((__v8si)(__m256i)(a), (int)(imm)))\n" "\n" "/// Shuffles 16-bit integers from the 256-bit vector of [16 x i16] in \\a a\n" "/// according to control information in the integer literal \\a imm, and\n" "/// returns the 256-bit result. 
The upper 64 bits of each 128-bit half\n" "/// are shuffled in parallel; the lower 64 bits of each 128-bit half are\n" "/// copied from \\a a unchanged.\n" "///\n" "/// \\code{.operation}\n" "/// result[63:0] := a[63:0]\n" "/// result[191:128] := a[191:128]\n" "/// FOR i := 0 TO 3\n" "/// j := i * 16 + 64\n" "/// k := (imm >> i*2)[1:0] * 16 + 64\n" "/// result[j+15:j] := a[k+15:k]\n" "/// result[128+j+15:128+j] := a[128+k+15:128+k]\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256i _mm256_shufflehi_epi16(__m256i a, const int imm);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPSHUFHW instruction.\n" "///\n" "/// \\param a\n" "/// A 256-bit vector of [16 x i16] containing source values.\n" "/// \\param imm\n" "/// An immediate 8-bit value specifying which elements to copy from \\a a.\n" "/// \\a imm[1:0] specifies the index in \\a a for elements 4 and 8 of the\n" "/// result, \\a imm[3:2] specifies the index for elements 5 and 9, and so\n" "/// forth. Indexes are offset by 4 (so 0 means index 4, and so forth).\n" "/// \\returns A 256-bit vector of [16 x i16] containing the result.\n" "#define _mm256_shufflehi_epi16(a, imm) \\\n" " ((__m256i)__builtin_ia32_pshufhw256((__v16hi)(__m256i)(a), (int)(imm)))\n" "\n" "/// Shuffles 16-bit integers from the 256-bit vector of [16 x i16] \\a a\n" "/// according to control information in the integer literal \\a imm, and\n" "/// returns the 256-bit [16 x i16] result. 
The lower 64 bits of each\n" "/// 128-bit half are shuffled; the upper 64 bits of each 128-bit half are\n" "/// copied from \\a a unchanged.\n" "///\n" "/// \\code{.operation}\n" "/// result[127:64] := a[127:64]\n" "/// result[255:192] := a[255:192]\n" "/// FOR i := 0 TO 3\n" "/// j := i * 16\n" "/// k := (imm >> i*2)[1:0] * 16\n" "/// result[j+15:j] := a[k+15:k]\n" "/// result[128+j+15:128+j] := a[128+k+15:128+k]\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256i _mm256_shufflelo_epi16(__m256i a, const int imm);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPSHUFLW instruction.\n" "///\n" "/// \\param a\n" "/// A 256-bit vector of [16 x i16] to use as a source of data for the\n" "/// result.\n" "/// \\param imm\n" "/// An immediate 8-bit value specifying which elements to copy from \\a a.\n" "/// \\a imm[1:0] specifies the index in \\a a for elements 0 and 8 of the\n" "/// result, \\a imm[3:2] specifies the index for elements 1 and 9, and so\n" "/// forth.\n" "/// \\returns A 256-bit vector of [16 x i16] containing the result.\n" "#define _mm256_shufflelo_epi16(a, imm) \\\n" " ((__m256i)__builtin_ia32_pshuflw256((__v16hi)(__m256i)(a), (int)(imm)))\n" "\n" "/// Sets each byte of the result to the corresponding byte of the 256-bit\n" "/// integer vector in \\a __a, the negative of that byte, or zero, depending\n" "/// on whether the corresponding byte of the 256-bit integer vector in\n" "/// \\a __b is greater than zero, less than zero, or equal to zero,\n" "/// respectively.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSIGNB instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit integer vector.\n" "/// \\param __b\n" "/// A 256-bit integer vector].\n" "/// \\returns A 256-bit integer vector containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_sign_epi8(__m256i __a, __m256i __b)\n" "{\n" " return 
(__m256i)__builtin_ia32_psignb256((__v32qi)__a, (__v32qi)__b);\n" "}\n" "\n" "/// Sets each element of the result to the corresponding element of the\n" "/// 256-bit vector of [16 x i16] in \\a __a, the negative of that element,\n" "/// or zero, depending on whether the corresponding element of the 256-bit\n" "/// vector of [16 x i16] in \\a __b is greater than zero, less than zero, or\n" "/// equal to zero, respectively.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSIGNW instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [16 x i16].\n" "/// \\param __b\n" "/// A 256-bit vector of [16 x i16].\n" "/// \\returns A 256-bit vector of [16 x i16] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_sign_epi16(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_ia32_psignw256((__v16hi)__a, (__v16hi)__b);\n" "}\n" "\n" "/// Sets each element of the result to the corresponding element of the\n" "/// 256-bit vector of [8 x i32] in \\a __a, the negative of that element, or\n" "/// zero, depending on whether the corresponding element of the 256-bit\n" "/// vector of [8 x i32] in \\a __b is greater than zero, less than zero, or\n" "/// equal to zero, respectively.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSIGND instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x i32].\n" "/// \\param __b\n" "/// A 256-bit vector of [8 x i32].\n" "/// \\returns A 256-bit vector of [8 x i32] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_sign_epi32(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_ia32_psignd256((__v8si)__a, (__v8si)__b);\n" "}\n" "\n" "/// Shifts each 128-bit half of the 256-bit integer vector \\a a left by\n" "/// \\a imm bytes, shifting in zero bytes, and returns the result. 
If \\a imm\n" "/// is greater than 15, the returned result is all zeroes.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256i _mm256_slli_si256(__m256i a, const int imm);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPSLLDQ instruction.\n" "///\n" "/// \\param a\n" "/// A 256-bit integer vector to be shifted.\n" "/// \\param imm\n" "/// An unsigned immediate value specifying the shift count (in bytes).\n" "/// \\returns A 256-bit integer vector containing the result.\n" "#define _mm256_slli_si256(a, imm) \\\n" " ((__m256i)__builtin_ia32_pslldqi256_byteshift((__v4di)(__m256i)(a), (int)(imm)))\n" "\n" "/// Shifts each 128-bit half of the 256-bit integer vector \\a a left by\n" "/// \\a imm bytes, shifting in zero bytes, and returns the result. If \\a imm\n" "/// is greater than 15, the returned result is all zeroes.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256i _mm256_bslli_epi128(__m256i a, const int imm);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPSLLDQ instruction.\n" "///\n" "/// \\param a\n" "/// A 256-bit integer vector to be shifted.\n" "/// \\param imm\n" "/// An unsigned immediate value specifying the shift count (in bytes).\n" "/// \\returns A 256-bit integer vector containing the result.\n" "#define _mm256_bslli_epi128(a, imm) \\\n" " ((__m256i)__builtin_ia32_pslldqi256_byteshift((__v4di)(__m256i)(a), (int)(imm)))\n" "\n" "/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \\a __a\n" "/// left by \\a __count bits, shifting in zero bits, and returns the result.\n" "/// If \\a __count is greater than 15, the returned result is all zeroes.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSLLW instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [16 x i16] to be shifted.\n" "/// \\param __count\n" "/// An unsigned integer value specifying the shift count (in bits).\n" "/// \\returns A 256-bit 
vector of [16 x i16] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_slli_epi16(__m256i __a, int __count)\n" "{\n" " return (__m256i)__builtin_ia32_psllwi256((__v16hi)__a, __count);\n" "}\n" "\n" "/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \\a __a\n" "/// left by the number of bits specified by the lower 64 bits of \\a __count,\n" "/// shifting in zero bits, and returns the result. If \\a __count is greater\n" "/// than 15, the returned result is all zeroes.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSLLW instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [16 x i16] to be shifted.\n" "/// \\param __count\n" "/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned\n" "/// shift count (in bits). The upper element is ignored.\n" "/// \\returns A 256-bit vector of [16 x i16] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_sll_epi16(__m256i __a, __m128i __count)\n" "{\n" " return (__m256i)__builtin_ia32_psllw256((__v16hi)__a, (__v8hi)__count);\n" "}\n" "\n" "/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \\a __a\n" "/// left by \\a __count bits, shifting in zero bits, and returns the result.\n" "/// If \\a __count is greater than 31, the returned result is all zeroes.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSLLD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x i32] to be shifted.\n" "/// \\param __count\n" "/// An unsigned integer value specifying the shift count (in bits).\n" "/// \\returns A 256-bit vector of [8 x i32] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_slli_epi32(__m256i __a, int __count)\n" "{\n" " return (__m256i)__builtin_ia32_pslldi256((__v8si)__a, __count);\n" "}\n" "\n" "/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \\a 
__a\n" "/// left by the number of bits given in the lower 64 bits of \\a __count,\n" "/// shifting in zero bits, and returns the result. If \\a __count is greater\n" "/// than 31, the returned result is all zeroes.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSLLD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x i32] to be shifted.\n" "/// \\param __count\n" "/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned\n" "/// shift count (in bits). The upper element is ignored.\n" "/// \\returns A 256-bit vector of [8 x i32] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_sll_epi32(__m256i __a, __m128i __count)\n" "{\n" " return (__m256i)__builtin_ia32_pslld256((__v8si)__a, (__v4si)__count);\n" "}\n" "\n" "/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \\a __a\n" "/// left by \\a __count bits, shifting in zero bits, and returns the result.\n" "/// If \\a __count is greater than 63, the returned result is all zeroes.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSLLQ instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [4 x i64] to be shifted.\n" "/// \\param __count\n" "/// An unsigned integer value specifying the shift count (in bits).\n" "/// \\returns A 256-bit vector of [4 x i64] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_slli_epi64(__m256i __a, int __count)\n" "{\n" " return __builtin_ia32_psllqi256((__v4di)__a, __count);\n" "}\n" "\n" "/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \\a __a\n" "/// left by the number of bits given in the lower 64 bits of \\a __count,\n" "/// shifting in zero bits, and returns the result. 
If \\a __count is greater\n" "/// than 63, the returned result is all zeroes.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSLLQ instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [4 x i64] to be shifted.\n" "/// \\param __count\n" "/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned\n" "/// shift count (in bits). The upper element is ignored.\n" "/// \\returns A 256-bit vector of [4 x i64] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_sll_epi64(__m256i __a, __m128i __count)\n" "{\n" " return __builtin_ia32_psllq256((__v4di)__a, __count);\n" "}\n" "\n" "/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \\a __a\n" "/// right by \\a __count bits, shifting in sign bits, and returns the result.\n" "/// If \\a __count is greater than 15, each element of the result is either\n" "/// 0 or -1 according to the corresponding input sign bit.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSRAW instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [16 x i16] to be shifted.\n" "/// \\param __count\n" "/// An unsigned integer value specifying the shift count (in bits).\n" "/// \\returns A 256-bit vector of [16 x i16] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_srai_epi16(__m256i __a, int __count)\n" "{\n" " return (__m256i)__builtin_ia32_psrawi256((__v16hi)__a, __count);\n" "}\n" "\n" "/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \\a __a\n" "/// right by the number of bits given in the lower 64 bits of \\a __count,\n" "/// shifting in sign bits, and returns the result. 
If \\a __count is greater\n" "/// than 15, each element of the result is either 0 or -1 according to the\n" "/// corresponding input sign bit.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSRAW instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [16 x i16] to be shifted.\n" "/// \\param __count\n" "/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned\n" "/// shift count (in bits). The upper element is ignored.\n" "/// \\returns A 256-bit vector of [16 x i16] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_sra_epi16(__m256i __a, __m128i __count)\n" "{\n" " return (__m256i)__builtin_ia32_psraw256((__v16hi)__a, (__v8hi)__count);\n" "}\n" "\n" "/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \\a __a\n" "/// right by \\a __count bits, shifting in sign bits, and returns the result.\n" "/// If \\a __count is greater than 31, each element of the result is either\n" "/// 0 or -1 according to the corresponding input sign bit.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSRAD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x i32] to be shifted.\n" "/// \\param __count\n" "/// An unsigned integer value specifying the shift count (in bits).\n" "/// \\returns A 256-bit vector of [8 x i32] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_srai_epi32(__m256i __a, int __count)\n" "{\n" " return (__m256i)__builtin_ia32_psradi256((__v8si)__a, __count);\n" "}\n" "\n" "/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \\a __a\n" "/// right by the number of bits given in the lower 64 bits of \\a __count,\n" "/// shifting in sign bits, and returns the result. 
If \\a __count is greater\n" "/// than 31, each element of the result is either 0 or -1 according to the\n" "/// corresponding input sign bit.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSRAD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x i32] to be shifted.\n" "/// \\param __count\n" "/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned\n" "/// shift count (in bits). The upper element is ignored.\n" "/// \\returns A 256-bit vector of [8 x i32] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_sra_epi32(__m256i __a, __m128i __count)\n" "{\n" " return (__m256i)__builtin_ia32_psrad256((__v8si)__a, (__v4si)__count);\n" "}\n" "\n" "/// Shifts each 128-bit half of the 256-bit integer vector in \\a a right by\n" "/// \\a imm bytes, shifting in zero bytes, and returns the result. If\n" "/// \\a imm is greater than 15, the returned result is all zeroes.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256i _mm256_srli_si256(__m256i a, const int imm);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPSRLDQ instruction.\n" "///\n" "/// \\param a\n" "/// A 256-bit integer vector to be shifted.\n" "/// \\param imm\n" "/// An unsigned immediate value specifying the shift count (in bytes).\n" "/// \\returns A 256-bit integer vector containing the result.\n" "#define _mm256_srli_si256(a, imm) \\\n" " ((__m256i)__builtin_ia32_psrldqi256_byteshift((__m256i)(a), (int)(imm)))\n" "\n" "/// Shifts each 128-bit half of the 256-bit integer vector in \\a a right by\n" "/// \\a imm bytes, shifting in zero bytes, and returns the result. 
If\n" "/// \\a imm is greater than 15, the returned result is all zeroes.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256i _mm256_bsrli_epi128(__m256i a, const int imm);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPSRLDQ instruction.\n" "///\n" "/// \\param a\n" "/// A 256-bit integer vector to be shifted.\n" "/// \\param imm\n" "/// An unsigned immediate value specifying the shift count (in bytes).\n" "/// \\returns A 256-bit integer vector containing the result.\n" "#define _mm256_bsrli_epi128(a, imm) \\\n" " ((__m256i)__builtin_ia32_psrldqi256_byteshift((__m256i)(a), (int)(imm)))\n" "\n" "/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \\a __a\n" "/// right by \\a __count bits, shifting in zero bits, and returns the result.\n" "/// If \\a __count is greater than 15, the returned result is all zeroes.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSRLW instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [16 x i16] to be shifted.\n" "/// \\param __count\n" "/// An unsigned integer value specifying the shift count (in bits).\n" "/// \\returns A 256-bit vector of [16 x i16] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_srli_epi16(__m256i __a, int __count)\n" "{\n" " return (__m256i)__builtin_ia32_psrlwi256((__v16hi)__a, __count);\n" "}\n" "\n" "/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \\a __a\n" "/// right by the number of bits given in the lower 64 bits of \\a __count,\n" "/// shifting in zero bits, and returns the result. 
If \\a __count is greater\n" "/// than 15, the returned result is all zeroes.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSRLW instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [16 x i16] to be shifted.\n" "/// \\param __count\n" "/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned\n" "/// shift count (in bits). The upper element is ignored.\n" "/// \\returns A 256-bit vector of [16 x i16] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_srl_epi16(__m256i __a, __m128i __count)\n" "{\n" " return (__m256i)__builtin_ia32_psrlw256((__v16hi)__a, (__v8hi)__count);\n" "}\n" "\n" "/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \\a __a\n" "/// right by \\a __count bits, shifting in zero bits, and returns the result.\n" "/// If \\a __count is greater than 31, the returned result is all zeroes.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSRLD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x i32] to be shifted.\n" "/// \\param __count\n" "/// An unsigned integer value specifying the shift count (in bits).\n" "/// \\returns A 256-bit vector of [8 x i32] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_srli_epi32(__m256i __a, int __count)\n" "{\n" " return (__m256i)__builtin_ia32_psrldi256((__v8si)__a, __count);\n" "}\n" "\n" "/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \\a __a\n" "/// right by the number of bits given in the lower 64 bits of \\a __count,\n" "/// shifting in zero bits, and returns the result. 
If \\a __count is greater\n" "/// than 31, the returned result is all zeroes.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSRLD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x i32] to be shifted.\n" "/// \\param __count\n" "/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned\n" "/// shift count (in bits). The upper element is ignored.\n" "/// \\returns A 256-bit vector of [8 x i32] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_srl_epi32(__m256i __a, __m128i __count)\n" "{\n" " return (__m256i)__builtin_ia32_psrld256((__v8si)__a, (__v4si)__count);\n" "}\n" "\n" "/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \\a __a\n" "/// right by \\a __count bits, shifting in zero bits, and returns the result.\n" "/// If \\a __count is greater than 63, the returned result is all zeroes.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSRLQ instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [4 x i64] to be shifted.\n" "/// \\param __count\n" "/// An unsigned integer value specifying the shift count (in bits).\n" "/// \\returns A 256-bit vector of [4 x i64] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_srli_epi64(__m256i __a, int __count)\n" "{\n" " return __builtin_ia32_psrlqi256((__v4di)__a, __count);\n" "}\n" "\n" "/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \\a __a\n" "/// right by the number of bits given in the lower 64 bits of \\a __count,\n" "/// shifting in zero bits, and returns the result. 
If \\a __count is greater\n" "/// than 63, the returned result is all zeroes.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSRLQ instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [4 x i64] to be shifted.\n" "/// \\param __count\n" "/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned\n" "/// shift count (in bits). The upper element is ignored.\n" "/// \\returns A 256-bit vector of [4 x i64] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_srl_epi64(__m256i __a, __m128i __count)\n" "{\n" " return __builtin_ia32_psrlq256((__v4di)__a, __count);\n" "}\n" "\n" "/// Subtracts 8-bit integers from corresponding bytes of two 256-bit integer\n" "/// vectors. Returns the lower 8 bits of each difference in the\n" "/// corresponding byte of the 256-bit integer vector result (overflow is\n" "/// ignored).\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 31\n" "/// j := i*8\n" "/// result[j+7:j] := __a[j+7:j] - __b[j+7:j]\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSUBB instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit integer vector containing the minuends.\n" "/// \\param __b\n" "/// A 256-bit integer vector containing the subtrahends.\n" "/// \\returns A 256-bit integer vector containing the differences.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_sub_epi8(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)((__v32qu)__a - (__v32qu)__b);\n" "}\n" "\n" "/// Subtracts 16-bit integers from corresponding elements of two 256-bit\n" "/// vectors of [16 x i16]. 
Returns the lower 16 bits of each difference in\n" "/// the corresponding element of the [16 x i16] result (overflow is\n" "/// ignored).\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 15\n" "/// j := i*16\n" "/// result[j+15:j] := __a[j+15:j] - __b[j+15:j]\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSUBW instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [16 x i16] containing the minuends.\n" "/// \\param __b\n" "/// A 256-bit vector of [16 x i16] containing the subtrahends.\n" "/// \\returns A 256-bit vector of [16 x i16] containing the differences.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_sub_epi16(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)((__v16hu)__a - (__v16hu)__b);\n" "}\n" "\n" "/// Subtracts 32-bit integers from corresponding elements of two 256-bit\n" "/// vectors of [8 x i32]. Returns the lower 32 bits of each difference in\n" "/// the corresponding element of the [8 x i32] result (overflow is ignored).\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 7\n" "/// j := i*32\n" "/// result[j+31:j] := __a[j+31:j] - __b[j+31:j]\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSUBD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x i32] containing the minuends.\n" "/// \\param __b\n" "/// A 256-bit vector of [8 x i32] containing the subtrahends.\n" "/// \\returns A 256-bit vector of [8 x i32] containing the differences.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_sub_epi32(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)((__v8su)__a - (__v8su)__b);\n" "}\n" "\n" "/// Subtracts 64-bit integers from corresponding elements of two 256-bit\n" "/// vectors of [4 x i64]. 
Returns the lower 64 bits of each difference in\n" "/// the corresponding element of the [4 x i64] result (overflow is ignored).\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 3\n" "/// j := i*64\n" "/// result[j+63:j] := __a[j+63:j] - __b[j+63:j]\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSUBQ instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [4 x i64] containing the minuends.\n" "/// \\param __b\n" "/// A 256-bit vector of [4 x i64] containing the subtrahends.\n" "/// \\returns A 256-bit vector of [4 x i64] containing the differences.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_sub_epi64(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)((__v4du)__a - (__v4du)__b);\n" "}\n" "\n" "/// Subtracts 8-bit integers from corresponding bytes of two 256-bit integer\n" "/// vectors using signed saturation, and returns each differences in the\n" "/// corresponding byte of the 256-bit integer vector result.\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 31\n" "/// j := i*8\n" "/// result[j+7:j] := SATURATE8(__a[j+7:j] - __b[j+7:j])\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSUBSB instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit integer vector containing the minuends.\n" "/// \\param __b\n" "/// A 256-bit integer vector containing the subtrahends.\n" "/// \\returns A 256-bit integer vector containing the differences.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_subs_epi8(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_elementwise_sub_sat((__v32qs)__a, (__v32qs)__b);\n" "}\n" "\n" "/// Subtracts 16-bit integers from corresponding elements of two 256-bit\n" "/// vectors of [16 x i16] using signed saturation, and returns each\n" "/// difference in the corresponding element of the [16 x i16] result.\n" "///\n" "/// 
\\code{.operation}\n" "/// FOR i := 0 TO 15\n" "/// j := i*16\n" "/// result[j+7:j] := SATURATE16(__a[j+7:j] - __b[j+7:j])\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSUBSW instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [16 x i16] containing the minuends.\n" "/// \\param __b\n" "/// A 256-bit vector of [16 x i16] containing the subtrahends.\n" "/// \\returns A 256-bit vector of [16 x i16] containing the differences.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_subs_epi16(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_elementwise_sub_sat((__v16hi)__a, (__v16hi)__b);\n" "}\n" "\n" "/// Subtracts 8-bit integers from corresponding bytes of two 256-bit integer\n" "/// vectors using unsigned saturation, and returns each difference in the\n" "/// corresponding byte of the 256-bit integer vector result. For each byte,\n" "/// computes result = __a - __b .\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 31\n" "/// j := i*8\n" "/// result[j+7:j] := SATURATE8U(__a[j+7:j] - __b[j+7:j])\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSUBUSB instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit integer vector containing the minuends.\n" "/// \\param __b\n" "/// A 256-bit integer vector containing the subtrahends.\n" "/// \\returns A 256-bit integer vector containing the differences.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_subs_epu8(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_elementwise_sub_sat((__v32qu)__a, (__v32qu)__b);\n" "}\n" "\n" "/// Subtracts 16-bit integers from corresponding elements of two 256-bit\n" "/// vectors of [16 x i16] using unsigned saturation, and returns each\n" "/// difference in the corresponding element of the [16 x i16] result.\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 15\n" "/// j := 
i*16\n" "/// result[j+15:j] := SATURATE16U(__a[j+15:j] - __b[j+15:j])\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSUBUSW instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [16 x i16] containing the minuends.\n" "/// \\param __b\n" "/// A 256-bit vector of [16 x i16] containing the subtrahends.\n" "/// \\returns A 256-bit vector of [16 x i16] containing the differences.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_subs_epu16(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_elementwise_sub_sat((__v16hu)__a, (__v16hu)__b);\n" "}\n" "\n" "/// Unpacks and interleaves 8-bit integers from parts of the 256-bit integer\n" "/// vectors in \\a __a and \\a __b to form the 256-bit result. Specifically,\n" "/// uses the upper 64 bits of each 128-bit half of \\a __a and \\a __b as\n" "/// input; other bits in these parameters are ignored.\n" "///\n" "/// \\code{.operation}\n" "/// result[7:0] := __a[71:64]\n" "/// result[15:8] := __b[71:64]\n" "/// result[23:16] := __a[79:72]\n" "/// result[31:24] := __b[79:72]\n" "/// . . .\n" "/// result[127:120] := __b[127:120]\n" "/// result[135:128] := __a[199:192]\n" "/// . . 
.\n" "/// result[255:248] := __b[255:248]\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPUNPCKHBW instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit integer vector used as the source for the even-numbered bytes\n" "/// of the result.\n" "/// \\param __b\n" "/// A 256-bit integer vector used as the source for the odd-numbered bytes\n" "/// of the result.\n" "/// \\returns A 256-bit integer vector containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_unpackhi_epi8(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_shufflevector((__v32qi)__a, (__v32qi)__b, 8, 32+8, 9, 32+9, 10, 32+10, 11, 32+11, 12, 32+12, 13, 32+13, 14, 32+14, 15, 32+15, 24, 32+24, 25, 32+25, 26, 32+26, 27, 32+27, 28, 32+28, 29, 32+29, 30, 32+30, 31, 32+31);\n" "}\n" "\n" "/// Unpacks and interleaves 16-bit integers from parts of the 256-bit vectors\n" "/// of [16 x i16] in \\a __a and \\a __b to return the resulting 256-bit\n" "/// vector of [16 x i16]. Specifically, uses the upper 64 bits of each\n" "/// 128-bit half of \\a __a and \\a __b as input; other bits in these\n" "/// parameters are ignored.\n" "///\n" "/// \\code{.operation}\n" "/// result[15:0] := __a[79:64]\n" "/// result[31:16] := __b[79:64]\n" "/// result[47:32] := __a[95:80]\n" "/// result[63:48] := __b[95:80]\n" "/// . . .\n" "/// result[127:112] := __b[127:112]\n" "/// result[143:128] := __a[211:196]\n" "/// . . 
.\n" "/// result[255:240] := __b[255:240]\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPUNPCKHWD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [16 x i16] used as the source for the even-numbered\n" "/// elements of the result.\n" "/// \\param __b\n" "/// A 256-bit vector of [16 x i16] used as the source for the odd-numbered\n" "/// elements of the result.\n" "/// \\returns A 256-bit vector of [16 x i16] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_unpackhi_epi16(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)__b, 4, 16+4, 5, 16+5, 6, 16+6, 7, 16+7, 12, 16+12, 13, 16+13, 14, 16+14, 15, 16+15);\n" "}\n" "\n" "/// Unpacks and interleaves 32-bit integers from parts of the 256-bit vectors\n" "/// of [8 x i32] in \\a __a and \\a __b to return the resulting 256-bit vector\n" "/// of [8 x i32]. Specifically, uses the upper 64 bits of each 128-bit half\n" "/// of \\a __a and \\a __b as input; other bits in these parameters are\n" "/// ignored.\n" "///\n" "/// \\code{.operation}\n" "/// result[31:0] := __a[95:64]\n" "/// result[63:32] := __b[95:64]\n" "/// result[95:64] := __a[127:96]\n" "/// result[127:96] := __b[127:96]\n" "/// result[159:128] := __a[223:192]\n" "/// result[191:160] := __b[223:192]\n" "/// result[223:192] := __a[255:224]\n" "/// result[255:224] := __b[255:224]\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPUNPCKHDQ instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x i32] used as the source for the even-numbered\n" "/// elements of the result.\n" "/// \\param __b\n" "/// A 256-bit vector of [8 x i32] used as the source for the odd-numbered\n" "/// elements of the result.\n" "/// \\returns A 256-bit vector of [8 x i32] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" 
"_mm256_unpackhi_epi32(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_shufflevector((__v8si)__a, (__v8si)__b, 2, 8+2, 3, 8+3, 6, 8+6, 7, 8+7);\n" "}\n" "\n" "/// Unpacks and interleaves 64-bit integers from parts of the 256-bit vectors\n" "/// of [4 x i64] in \\a __a and \\a __b to return the resulting 256-bit vector\n" "/// of [4 x i64]. Specifically, uses the upper 64 bits of each 128-bit half\n" "/// of \\a __a and \\a __b as input; other bits in these parameters are\n" "/// ignored.\n" "///\n" "/// \\code{.operation}\n" "/// result[63:0] := __a[127:64]\n" "/// result[127:64] := __b[127:64]\n" "/// result[191:128] := __a[255:192]\n" "/// result[255:192] := __b[255:192]\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPUNPCKHQDQ instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [4 x i64] used as the source for the even-numbered\n" "/// elements of the result.\n" "/// \\param __b\n" "/// A 256-bit vector of [4 x i64] used as the source for the odd-numbered\n" "/// elements of the result.\n" "/// \\returns A 256-bit vector of [4 x i64] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_unpackhi_epi64(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_shufflevector((__v4di)__a, (__v4di)__b, 1, 4+1, 3, 4+3);\n" "}\n" "\n" "/// Unpacks and interleaves 8-bit integers from parts of the 256-bit integer\n" "/// vectors in \\a __a and \\a __b to form the 256-bit result. Specifically,\n" "/// uses the lower 64 bits of each 128-bit half of \\a __a and \\a __b as\n" "/// input; other bits in these parameters are ignored.\n" "///\n" "/// \\code{.operation}\n" "/// result[7:0] := __a[7:0]\n" "/// result[15:8] := __b[7:0]\n" "/// result[23:16] := __a[15:8]\n" "/// result[31:24] := __b[15:8]\n" "/// . . .\n" "/// result[127:120] := __b[63:56]\n" "/// result[135:128] := __a[135:128]\n" "/// . . 
.\n" "/// result[255:248] := __b[191:184]\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPUNPCKLBW instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit integer vector used as the source for the even-numbered bytes\n" "/// of the result.\n" "/// \\param __b\n" "/// A 256-bit integer vector used as the source for the odd-numbered bytes\n" "/// of the result.\n" "/// \\returns A 256-bit integer vector containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_unpacklo_epi8(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_shufflevector((__v32qi)__a, (__v32qi)__b, 0, 32+0, 1, 32+1, 2, 32+2, 3, 32+3, 4, 32+4, 5, 32+5, 6, 32+6, 7, 32+7, 16, 32+16, 17, 32+17, 18, 32+18, 19, 32+19, 20, 32+20, 21, 32+21, 22, 32+22, 23, 32+23);\n" "}\n" "\n" "/// Unpacks and interleaves 16-bit integers from parts of the 256-bit vectors\n" "/// of [16 x i16] in \\a __a and \\a __b to return the resulting 256-bit\n" "/// vector of [16 x i16]. Specifically, uses the lower 64 bits of each\n" "/// 128-bit half of \\a __a and \\a __b as input; other bits in these\n" "/// parameters are ignored.\n" "///\n" "/// \\code{.operation}\n" "/// result[15:0] := __a[15:0]\n" "/// result[31:16] := __b[15:0]\n" "/// result[47:32] := __a[31:16]\n" "/// result[63:48] := __b[31:16]\n" "/// . . .\n" "/// result[127:112] := __b[63:48]\n" "/// result[143:128] := __a[143:128]\n" "/// . . 
.\n" "/// result[255:239] := __b[191:176]\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPUNPCKLWD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [16 x i16] used as the source for the even-numbered\n" "/// elements of the result.\n" "/// \\param __b\n" "/// A 256-bit vector of [16 x i16] used as the source for the odd-numbered\n" "/// elements of the result.\n" "/// \\returns A 256-bit vector of [16 x i16] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_unpacklo_epi16(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)__b, 0, 16+0, 1, 16+1, 2, 16+2, 3, 16+3, 8, 16+8, 9, 16+9, 10, 16+10, 11, 16+11);\n" "}\n" "\n" "/// Unpacks and interleaves 32-bit integers from parts of the 256-bit vectors\n" "/// of [8 x i32] in \\a __a and \\a __b to return the resulting 256-bit vector\n" "/// of [8 x i32]. Specifically, uses the lower 64 bits of each 128-bit half\n" "/// of \\a __a and \\a __b as input; other bits in these parameters are\n" "/// ignored.\n" "///\n" "/// \\code{.operation}\n" "/// result[31:0] := __a[31:0]\n" "/// result[63:32] := __b[31:0]\n" "/// result[95:64] := __a[63:32]\n" "/// result[127:96] := __b[63:32]\n" "/// result[159:128] := __a[159:128]\n" "/// result[191:160] := __b[159:128]\n" "/// result[223:192] := __a[191:160]\n" "/// result[255:224] := __b[191:190]\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPUNPCKLDQ instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x i32] used as the source for the even-numbered\n" "/// elements of the result.\n" "/// \\param __b\n" "/// A 256-bit vector of [8 x i32] used as the source for the odd-numbered\n" "/// elements of the result.\n" "/// \\returns A 256-bit vector of [8 x i32] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" 
"_mm256_unpacklo_epi32(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_shufflevector((__v8si)__a, (__v8si)__b, 0, 8+0, 1, 8+1, 4, 8+4, 5, 8+5);\n" "}\n" "\n" "/// Unpacks and interleaves 64-bit integers from parts of the 256-bit vectors\n" "/// of [4 x i64] in \\a __a and \\a __b to return the resulting 256-bit vector\n" "/// of [4 x i64]. Specifically, uses the lower 64 bits of each 128-bit half\n" "/// of \\a __a and \\a __b as input; other bits in these parameters are\n" "/// ignored.\n" "///\n" "/// \\code{.operation}\n" "/// result[63:0] := __a[63:0]\n" "/// result[127:64] := __b[63:0]\n" "/// result[191:128] := __a[191:128]\n" "/// result[255:192] := __b[191:128]\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPUNPCKLQDQ instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [4 x i64] used as the source for the even-numbered\n" "/// elements of the result.\n" "/// \\param __b\n" "/// A 256-bit vector of [4 x i64] used as the source for the odd-numbered\n" "/// elements of the result.\n" "/// \\returns A 256-bit vector of [4 x i64] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_unpacklo_epi64(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_shufflevector((__v4di)__a, (__v4di)__b, 0, 4+0, 2, 4+2);\n" "}\n" "\n" "/// Computes the bitwise XOR of the 256-bit integer vectors in \\a __a and\n" "/// \\a __b.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPXOR instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit integer vector.\n" "/// \\param __b\n" "/// A 256-bit integer vector.\n" "/// \\returns A 256-bit integer vector containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_xor_si256(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)((__v4du)__a ^ (__v4du)__b);\n" "}\n" "\n" "/// Loads the 256-bit integer vector from memory \\a __V using a 
non-temporal\n" "/// memory hint and returns the vector. \\a __V must be aligned on a 32-byte\n" "/// boundary.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VMOVNTDQA instruction.\n" "///\n" "/// \\param __V\n" "/// A pointer to the 32-byte aligned memory containing the vector to load.\n" "/// \\returns A 256-bit integer vector loaded from memory.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_stream_load_si256(__m256i const *__V)\n" "{\n" " typedef __v4di __v4di_aligned __attribute__((aligned(32)));\n" " return (__m256i)__builtin_nontemporal_load((const __v4di_aligned *)__V);\n" "}\n" "\n" "/// Broadcasts the 32-bit floating-point value from the low element of the\n" "/// 128-bit vector of [4 x float] in \\a __X to all elements of the result's\n" "/// 128-bit vector of [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VBROADCASTSS instruction.\n" "///\n" "/// \\param __X\n" "/// A 128-bit vector of [4 x float] whose low element will be broadcast.\n" "/// \\returns A 128-bit vector of [4 x float] containing the result.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS128\n" "_mm_broadcastss_ps(__m128 __X)\n" "{\n" " return (__m128)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0);\n" "}\n" "\n" "/// Broadcasts the 64-bit floating-point value from the low element of the\n" "/// 128-bit vector of [2 x double] in \\a __a to both elements of the\n" "/// result's 128-bit vector of [2 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c MOVDDUP instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double] whose low element will be broadcast.\n" "/// \\returns A 128-bit vector of [2 x double] containing the result.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS128\n" "_mm_broadcastsd_pd(__m128d __a)\n" "{\n" " return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0);\n" "}\n" "\n" "/// 
Broadcasts the 32-bit floating-point value from the low element of the\n" "/// 128-bit vector of [4 x float] in \\a __X to all elements of the\n" "/// result's 256-bit vector of [8 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VBROADCASTSS instruction.\n" "///\n" "/// \\param __X\n" "/// A 128-bit vector of [4 x float] whose low element will be broadcast.\n" "/// \\returns A 256-bit vector of [8 x float] containing the result.\n" "static __inline__ __m256 __DEFAULT_FN_ATTRS256\n" "_mm256_broadcastss_ps(__m128 __X)\n" "{\n" " return (__m256)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0, 0, 0, 0, 0);\n" "}\n" "\n" "/// Broadcasts the 64-bit floating-point value from the low element of the\n" "/// 128-bit vector of [2 x double] in \\a __X to all elements of the\n" "/// result's 256-bit vector of [4 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VBROADCASTSD instruction.\n" "///\n" "/// \\param __X\n" "/// A 128-bit vector of [2 x double] whose low element will be broadcast.\n" "/// \\returns A 256-bit vector of [4 x double] containing the result.\n" "static __inline__ __m256d __DEFAULT_FN_ATTRS256\n" "_mm256_broadcastsd_pd(__m128d __X)\n" "{\n" " return (__m256d)__builtin_shufflevector((__v2df)__X, (__v2df)__X, 0, 0, 0, 0);\n" "}\n" "\n" "/// Broadcasts the 128-bit integer data from \\a __X to both the lower and\n" "/// upper halves of the 256-bit result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VBROADCASTI128 instruction.\n" "///\n" "/// \\param __X\n" "/// A 128-bit integer vector to be broadcast.\n" "/// \\returns A 256-bit integer vector containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_broadcastsi128_si256(__m128i __X)\n" "{\n" " return (__m256i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 1, 0, 1);\n" "}\n" "\n" "#define _mm_broadcastsi128_si256(X) 
_mm256_broadcastsi128_si256(X)\n" "\n" "/// Merges 32-bit integer elements from either of the two 128-bit vectors of\n" "/// [4 x i32] in \\a V1 or \\a V2 to the result's 128-bit vector of [4 x i32],\n" "/// as specified by the immediate integer operand \\a M.\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 3\n" "/// j := i*32\n" "/// IF M[i] == 0\n" "/// result[31+j:j] := V1[31+j:j]\n" "/// ELSE\n" "/// result[31+j:j] := V2[32+j:j]\n" "/// FI\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm_blend_epi32(__m128i V1, __m128i V2, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPBLENDDD instruction.\n" "///\n" "/// \\param V1\n" "/// A 128-bit vector of [4 x i32] containing source values.\n" "/// \\param V2\n" "/// A 128-bit vector of [4 x i32] containing source values.\n" "/// \\param M\n" "/// An immediate 8-bit integer operand, with bits [3:0] specifying the\n" "/// source for each element of the result. The position of the mask bit\n" "/// corresponds to the index of a copied value. 
When a mask bit is 0, the\n" "/// element is copied from \\a V1; otherwise, it is copied from \\a V2.\n" "/// \\returns A 128-bit vector of [4 x i32] containing the result.\n" "#define _mm_blend_epi32(V1, V2, M) \\\n" " ((__m128i)__builtin_ia32_pblendd128((__v4si)(__m128i)(V1), \\\n" " (__v4si)(__m128i)(V2), (int)(M)))\n" "\n" "/// Merges 32-bit integer elements from either of the two 256-bit vectors of\n" "/// [8 x i32] in \\a V1 or \\a V2 to return a 256-bit vector of [8 x i32],\n" "/// as specified by the immediate integer operand \\a M.\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 7\n" "/// j := i*32\n" "/// IF M[i] == 0\n" "/// result[31+j:j] := V1[31+j:j]\n" "/// ELSE\n" "/// result[31+j:j] := V2[32+j:j]\n" "/// FI\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256i _mm256_blend_epi32(__m256i V1, __m256i V2, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPBLENDDD instruction.\n" "///\n" "/// \\param V1\n" "/// A 256-bit vector of [8 x i32] containing source values.\n" "/// \\param V2\n" "/// A 256-bit vector of [8 x i32] containing source values.\n" "/// \\param M\n" "/// An immediate 8-bit integer operand, with bits [7:0] specifying the\n" "/// source for each element of the result. The position of the mask bit\n" "/// corresponds to the index of a copied value. 
When a mask bit is 0, the\n" "/// element is copied from \\a V1; otherwise, it is is copied from \\a V2.\n" "/// \\returns A 256-bit vector of [8 x i32] containing the result.\n" "#define _mm256_blend_epi32(V1, V2, M) \\\n" " ((__m256i)__builtin_ia32_pblendd256((__v8si)(__m256i)(V1), \\\n" " (__v8si)(__m256i)(V2), (int)(M)))\n" "\n" "/// Broadcasts the low byte from the 128-bit integer vector in \\a __X to all\n" "/// bytes of the 256-bit result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPBROADCASTB instruction.\n" "///\n" "/// \\param __X\n" "/// A 128-bit integer vector whose low byte will be broadcast.\n" "/// \\returns A 256-bit integer vector containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_broadcastb_epi8(__m128i __X)\n" "{\n" " return (__m256i)__builtin_shufflevector((__v16qi)__X, (__v16qi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);\n" "}\n" "\n" "/// Broadcasts the low element from the 128-bit vector of [8 x i16] in \\a __X\n" "/// to all elements of the result's 256-bit vector of [16 x i16].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPBROADCASTW instruction.\n" "///\n" "/// \\param __X\n" "/// A 128-bit vector of [8 x i16] whose low element will be broadcast.\n" "/// \\returns A 256-bit vector of [16 x i16] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_broadcastw_epi16(__m128i __X)\n" "{\n" " return (__m256i)__builtin_shufflevector((__v8hi)__X, (__v8hi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);\n" "}\n" "\n" "/// Broadcasts the low element from the 128-bit vector of [4 x i32] in \\a __X\n" "/// to all elements of the result's 256-bit vector of [8 x i32].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPBROADCASTD instruction.\n" "///\n" "/// \\param __X\n" "/// A 128-bit vector of [4 x i32] 
whose low element will be broadcast.\n" "/// \\returns A 256-bit vector of [8 x i32] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_broadcastd_epi32(__m128i __X)\n" "{\n" " return (__m256i)__builtin_shufflevector((__v4si)__X, (__v4si)__X, 0, 0, 0, 0, 0, 0, 0, 0);\n" "}\n" "\n" "/// Broadcasts the low element from the 128-bit vector of [2 x i64] in \\a __X\n" "/// to all elements of the result's 256-bit vector of [4 x i64].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPBROADCASTQ instruction.\n" "///\n" "/// \\param __X\n" "/// A 128-bit vector of [2 x i64] whose low element will be broadcast.\n" "/// \\returns A 256-bit vector of [4 x i64] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_broadcastq_epi64(__m128i __X)\n" "{\n" " return (__m256i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 0, 0, 0);\n" "}\n" "\n" "/// Broadcasts the low byte from the 128-bit integer vector in \\a __X to all\n" "/// bytes of the 128-bit result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPBROADCASTB instruction.\n" "///\n" "/// \\param __X\n" "/// A 128-bit integer vector whose low byte will be broadcast.\n" "/// \\returns A 128-bit integer vector containing the result.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS128\n" "_mm_broadcastb_epi8(__m128i __X)\n" "{\n" " return (__m128i)__builtin_shufflevector((__v16qi)__X, (__v16qi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);\n" "}\n" "\n" "/// Broadcasts the low element from the 128-bit vector of [8 x i16] in\n" "/// \\a __X to all elements of the result's 128-bit vector of [8 x i16].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPBROADCASTW instruction.\n" "///\n" "/// \\param __X\n" "/// A 128-bit vector of [8 x i16] whose low element will be broadcast.\n" "/// \\returns A 128-bit vector of [8 x i16] containing the 
result.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS128\n" "_mm_broadcastw_epi16(__m128i __X)\n" "{\n" " return (__m128i)__builtin_shufflevector((__v8hi)__X, (__v8hi)__X, 0, 0, 0, 0, 0, 0, 0, 0);\n" "}\n" "\n" "/// Broadcasts the low element from the 128-bit vector of [4 x i32] in \\a __X\n" "/// to all elements of the result's vector of [4 x i32].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPBROADCASTD instruction.\n" "///\n" "/// \\param __X\n" "/// A 128-bit vector of [4 x i32] whose low element will be broadcast.\n" "/// \\returns A 128-bit vector of [4 x i32] containing the result.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS128\n" "_mm_broadcastd_epi32(__m128i __X)\n" "{\n" " return (__m128i)__builtin_shufflevector((__v4si)__X, (__v4si)__X, 0, 0, 0, 0);\n" "}\n" "\n" "/// Broadcasts the low element from the 128-bit vector of [2 x i64] in \\a __X\n" "/// to both elements of the result's 128-bit vector of [2 x i64].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPBROADCASTQ instruction.\n" "///\n" "/// \\param __X\n" "/// A 128-bit vector of [2 x i64] whose low element will be broadcast.\n" "/// \\returns A 128-bit vector of [2 x i64] containing the result.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS128\n" "_mm_broadcastq_epi64(__m128i __X)\n" "{\n" " return (__m128i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 0);\n" "}\n" "\n" "/// Sets the result's 256-bit vector of [8 x i32] to copies of elements of the\n" "/// 256-bit vector of [8 x i32] in \\a __a as specified by indexes in the\n" "/// elements of the 256-bit vector of [8 x i32] in \\a __b.\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 7\n" "/// j := i*32\n" "/// k := __b[j+2:j] * 32\n" "/// result[j+31:j] := __a[k+31:k]\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPERMD instruction.\n" "///\n" "/// \\param __a\n" "/// A 
256-bit vector of [8 x i32] containing the source values.\n" "/// \\param __b\n" "/// A 256-bit vector of [8 x i32] containing indexes of values to use from\n" "/// \\a __a.\n" "/// \\returns A 256-bit vector of [8 x i32] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_permutevar8x32_epi32(__m256i __a, __m256i __b)\n" "{\n" " return (__m256i)__builtin_ia32_permvarsi256((__v8si)__a, (__v8si)__b);\n" "}\n" "\n" "/// Sets the result's 256-bit vector of [4 x double] to copies of elements of\n" "/// the 256-bit vector of [4 x double] in \\a V as specified by the\n" "/// immediate value \\a M.\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 3\n" "/// j := i*64\n" "/// k := (M >> i*2)[1:0] * 64\n" "/// result[j+63:j] := V[k+63:k]\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256d _mm256_permute4x64_pd(__m256d V, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPERMPD instruction.\n" "///\n" "/// \\param V\n" "/// A 256-bit vector of [4 x double] containing the source values.\n" "/// \\param M\n" "/// An immediate 8-bit value specifying which elements to copy from \\a V.\n" "/// \\a M[1:0] specifies the index in \\a a for element 0 of the result,\n" "/// \\a M[3:2] specifies the index for element 1, and so forth.\n" "/// \\returns A 256-bit vector of [4 x double] containing the result.\n" "#define _mm256_permute4x64_pd(V, M) \\\n" " ((__m256d)__builtin_ia32_permdf256((__v4df)(__m256d)(V), (int)(M)))\n" "\n" "/// Sets the result's 256-bit vector of [8 x float] to copies of elements of\n" "/// the 256-bit vector of [8 x float] in \\a __a as specified by indexes in\n" "/// the elements of the 256-bit vector of [8 x i32] in \\a __b.\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 7\n" "/// j := i*32\n" "/// k := __b[j+2:j] * 32\n" "/// result[j+31:j] := __a[k+31:k]\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" 
"///\n" "/// This intrinsic corresponds to the \\c VPERMPS instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x float] containing the source values.\n" "/// \\param __b\n" "/// A 256-bit vector of [8 x i32] containing indexes of values to use from\n" "/// \\a __a.\n" "/// \\returns A 256-bit vector of [8 x float] containing the result.\n" "static __inline__ __m256 __DEFAULT_FN_ATTRS256\n" "_mm256_permutevar8x32_ps(__m256 __a, __m256i __b)\n" "{\n" " return (__m256)__builtin_ia32_permvarsf256((__v8sf)__a, (__v8si)__b);\n" "}\n" "\n" "/// Sets the result's 256-bit vector of [4 x i64] result to copies of elements\n" "/// of the 256-bit vector of [4 x i64] in \\a V as specified by the\n" "/// immediate value \\a M.\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 3\n" "/// j := i*64\n" "/// k := (M >> i*2)[1:0] * 64\n" "/// result[j+63:j] := V[k+63:k]\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256i _mm256_permute4x64_epi64(__m256i V, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPERMQ instruction.\n" "///\n" "/// \\param V\n" "/// A 256-bit vector of [4 x i64] containing the source values.\n" "/// \\param M\n" "/// An immediate 8-bit value specifying which elements to copy from \\a V.\n" "/// \\a M[1:0] specifies the index in \\a a for element 0 of the result,\n" "/// \\a M[3:2] specifies the index for element 1, and so forth.\n" "/// \\returns A 256-bit vector of [4 x i64] containing the result.\n" "#define _mm256_permute4x64_epi64(V, M) \\\n" " ((__m256i)__builtin_ia32_permdi256((__v4di)(__m256i)(V), (int)(M)))\n" "\n" "/// Sets each half of the 256-bit result either to zero or to one of the\n" "/// four possible 128-bit halves of the 256-bit vectors \\a V1 and \\a V2,\n" "/// as specified by the immediate value \\a M.\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 1\n" "/// j := i*128\n" "/// k := M >> (i*4)\n" "/// IF k[3] == 0\n" 
"/// CASE (k[1:0]) OF\n" "/// 0: result[127+j:j] := V1[127:0]\n" "/// 1: result[127+j:j] := V1[255:128]\n" "/// 2: result[127+j:j] := V2[127:0]\n" "/// 3: result[127+j:j] := V2[255:128]\n" "/// ESAC\n" "/// ELSE\n" "/// result[127+j:j] := 0\n" "/// FI\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256i _mm256_permute2x128_si256(__m256i V1, __m256i V2, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPERM2I128 instruction.\n" "///\n" "/// \\param V1\n" "/// A 256-bit integer vector containing source values.\n" "/// \\param V2\n" "/// A 256-bit integer vector containing source values.\n" "/// \\param M\n" "/// An immediate value specifying how to form the result. Bits [3:0]\n" "/// control the lower half of the result, bits [7:4] control the upper half.\n" "/// Within each 4-bit control value, if bit 3 is 1, the result is zero,\n" "/// otherwise bits [1:0] determine the source as follows. \\n\n" "/// 0: the lower half of \\a V1 \\n\n" "/// 1: the upper half of \\a V1 \\n\n" "/// 2: the lower half of \\a V2 \\n\n" "/// 3: the upper half of \\a V2\n" "/// \\returns A 256-bit integer vector containing the result.\n" "#define _mm256_permute2x128_si256(V1, V2, M) \\\n" " ((__m256i)__builtin_ia32_permti256((__m256i)(V1), (__m256i)(V2), (int)(M)))\n" "\n" "/// Extracts half of the 256-bit vector \\a V to the 128-bit result. 
If bit 0\n" "/// of the immediate \\a M is zero, extracts the lower half of the result;\n" "/// otherwise, extracts the upper half.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm256_extracti128_si256(__m256i V, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VEXTRACTI128 instruction.\n" "///\n" "/// \\param V\n" "/// A 256-bit integer vector containing the source values.\n" "/// \\param M\n" "/// An immediate value specifying which half of \\a V to extract.\n" "/// \\returns A 128-bit integer vector containing the result.\n" "#define _mm256_extracti128_si256(V, M) \\\n" " ((__m128i)__builtin_ia32_extract128i256((__v4di)(__m256i)(V), (int)(M)))\n" "\n" "/// Copies the 256-bit vector \\a V1 to the result, then overwrites half of the\n" "/// result with the 128-bit vector \\a V2. If bit 0 of the immediate \\a M\n" "/// is zero, overwrites the lower half of the result; otherwise,\n" "/// overwrites the upper half.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256i _mm256_inserti128_si256(__m256i V1, __m128i V2, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VINSERTI128 instruction.\n" "///\n" "/// \\param V1\n" "/// A 256-bit integer vector containing a source value.\n" "/// \\param V2\n" "/// A 128-bit integer vector containing a source value.\n" "/// \\param M\n" "/// An immediate value specifying where to put \\a V2 in the result.\n" "/// \\returns A 256-bit integer vector containing the result.\n" "#define _mm256_inserti128_si256(V1, V2, M) \\\n" " ((__m256i)__builtin_ia32_insert128i256((__v4di)(__m256i)(V1), \\\n" " (__v2di)(__m128i)(V2), (int)(M)))\n" "\n" "/// Conditionally loads eight 32-bit integer elements from memory \\a __X, if\n" "/// the most significant bit of the corresponding element in the mask\n" "/// \\a __M is set; otherwise, sets that element of the result to zero.\n" "/// Returns the 256-bit [8 x i32] result.\n" "///\n" "/// 
\\code{.operation}\n" "/// FOR i := 0 TO 7\n" "/// j := i*32\n" "/// IF __M[j+31] == 1\n" "/// result[j+31:j] := Load32(__X+(i*4))\n" "/// ELSE\n" "/// result[j+31:j] := 0\n" "/// FI\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPMASKMOVD instruction.\n" "///\n" "/// \\param __X\n" "/// A pointer to the memory used for loading values.\n" "/// \\param __M\n" "/// A 256-bit vector of [8 x i32] containing the mask bits.\n" "/// \\returns A 256-bit vector of [8 x i32] containing the loaded or zeroed\n" "/// elements.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_maskload_epi32(int const *__X, __m256i __M)\n" "{\n" " return (__m256i)__builtin_ia32_maskloadd256((const __v8si *)__X, (__v8si)__M);\n" "}\n" "\n" "/// Conditionally loads four 64-bit integer elements from memory \\a __X, if\n" "/// the most significant bit of the corresponding element in the mask\n" "/// \\a __M is set; otherwise, sets that element of the result to zero.\n" "/// Returns the 256-bit [4 x i64] result.\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 3\n" "/// j := i*64\n" "/// IF __M[j+63] == 1\n" "/// result[j+63:j] := Load64(__X+(i*8))\n" "/// ELSE\n" "/// result[j+63:j] := 0\n" "/// FI\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPMASKMOVQ instruction.\n" "///\n" "/// \\param __X\n" "/// A pointer to the memory used for loading values.\n" "/// \\param __M\n" "/// A 256-bit vector of [4 x i64] containing the mask bits.\n" "/// \\returns A 256-bit vector of [4 x i64] containing the loaded or zeroed\n" "/// elements.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_maskload_epi64(long long const *__X, __m256i __M)\n" "{\n" " return (__m256i)__builtin_ia32_maskloadq256((const __v4di *)__X, (__v4di)__M);\n" "}\n" "\n" "/// Conditionally loads four 32-bit integer elements from memory \\a __X, if\n" "/// the most 
significant bit of the corresponding element in the mask\n" "/// \\a __M is set; otherwise, sets that element of the result to zero.\n" "/// Returns the 128-bit [4 x i32] result.\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 3\n" "/// j := i*32\n" "/// IF __M[j+31] == 1\n" "/// result[j+31:j] := Load32(__X+(i*4))\n" "/// ELSE\n" "/// result[j+31:j] := 0\n" "/// FI\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPMASKMOVD instruction.\n" "///\n" "/// \\param __X\n" "/// A pointer to the memory used for loading values.\n" "/// \\param __M\n" "/// A 128-bit vector of [4 x i32] containing the mask bits.\n" "/// \\returns A 128-bit vector of [4 x i32] containing the loaded or zeroed\n" "/// elements.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS128\n" "_mm_maskload_epi32(int const *__X, __m128i __M)\n" "{\n" " return (__m128i)__builtin_ia32_maskloadd((const __v4si *)__X, (__v4si)__M);\n" "}\n" "\n" "/// Conditionally loads two 64-bit integer elements from memory \\a __X, if\n" "/// the most significant bit of the corresponding element in the mask\n" "/// \\a __M is set; otherwise, sets that element of the result to zero.\n" "/// Returns the 128-bit [2 x i64] result.\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 1\n" "/// j := i*64\n" "/// IF __M[j+63] == 1\n" "/// result[j+63:j] := Load64(__X+(i*8))\n" "/// ELSE\n" "/// result[j+63:j] := 0\n" "/// FI\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPMASKMOVQ instruction.\n" "///\n" "/// \\param __X\n" "/// A pointer to the memory used for loading values.\n" "/// \\param __M\n" "/// A 128-bit vector of [2 x i64] containing the mask bits.\n" "/// \\returns A 128-bit vector of [2 x i64] containing the loaded or zeroed\n" "/// elements.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS128\n" "_mm_maskload_epi64(long long const *__X, __m128i __M)\n" "{\n" " return 
(__m128i)__builtin_ia32_maskloadq((const __v2di *)__X, (__v2di)__M);\n" "}\n" "\n" "/// Conditionally stores eight 32-bit integer elements from the 256-bit vector\n" "/// of [8 x i32] in \\a __Y to memory \\a __X, if the most significant bit of\n" "/// the corresponding element in the mask \\a __M is set; otherwise, the\n" "/// memory element is unchanged.\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 7\n" "/// j := i*32\n" "/// IF __M[j+31] == 1\n" "/// Store32(__X+(i*4), __Y[j+31:j])\n" "/// FI\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPMASKMOVD instruction.\n" "///\n" "/// \\param __X\n" "/// A pointer to the memory used for storing values.\n" "/// \\param __M\n" "/// A 256-bit vector of [8 x i32] containing the mask bits.\n" "/// \\param __Y\n" "/// A 256-bit vector of [8 x i32] containing the values to store.\n" "static __inline__ void __DEFAULT_FN_ATTRS256\n" "_mm256_maskstore_epi32(int *__X, __m256i __M, __m256i __Y)\n" "{\n" " __builtin_ia32_maskstored256((__v8si *)__X, (__v8si)__M, (__v8si)__Y);\n" "}\n" "\n" "/// Conditionally stores four 64-bit integer elements from the 256-bit vector\n" "/// of [4 x i64] in \\a __Y to memory \\a __X, if the most significant bit of\n" "/// the corresponding element in the mask \\a __M is set; otherwise, the\n" "/// memory element is unchanged.\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 3\n" "/// j := i*64\n" "/// IF __M[j+63] == 1\n" "/// Store64(__X+(i*8), __Y[j+63:j])\n" "/// FI\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPMASKMOVQ instruction.\n" "///\n" "/// \\param __X\n" "/// A pointer to the memory used for storing values.\n" "/// \\param __M\n" "/// A 256-bit vector of [4 x i64] containing the mask bits.\n" "/// \\param __Y\n" "/// A 256-bit vector of [4 x i64] containing the values to store.\n" "static __inline__ void 
__DEFAULT_FN_ATTRS256\n" "_mm256_maskstore_epi64(long long *__X, __m256i __M, __m256i __Y)\n" "{\n" " __builtin_ia32_maskstoreq256((__v4di *)__X, (__v4di)__M, (__v4di)__Y);\n" "}\n" "\n" "/// Conditionally stores four 32-bit integer elements from the 128-bit vector\n" "/// of [4 x i32] in \\a __Y to memory \\a __X, if the most significant bit of\n" "/// the corresponding element in the mask \\a __M is set; otherwise, the\n" "/// memory element is unchanged.\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 3\n" "/// j := i*32\n" "/// IF __M[j+31] == 1\n" "/// Store32(__X+(i*4), __Y[j+31:j])\n" "/// FI\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPMASKMOVD instruction.\n" "///\n" "/// \\param __X\n" "/// A pointer to the memory used for storing values.\n" "/// \\param __M\n" "/// A 128-bit vector of [4 x i32] containing the mask bits.\n" "/// \\param __Y\n" "/// A 128-bit vector of [4 x i32] containing the values to store.\n" "static __inline__ void __DEFAULT_FN_ATTRS128\n" "_mm_maskstore_epi32(int *__X, __m128i __M, __m128i __Y)\n" "{\n" " __builtin_ia32_maskstored((__v4si *)__X, (__v4si)__M, (__v4si)__Y);\n" "}\n" "\n" "/// Conditionally stores two 64-bit integer elements from the 128-bit vector\n" "/// of [2 x i64] in \\a __Y to memory \\a __X, if the most significant bit of\n" "/// the corresponding element in the mask \\a __M is set; otherwise, the\n" "/// memory element is unchanged.\n" "///\n" "/// \\code{.operation}\n" "/// FOR i := 0 TO 1\n" "/// j := i*64\n" "/// IF __M[j+63] == 1\n" "/// Store64(__X+(i*8), __Y[j+63:j])\n" "/// FI\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPMASKMOVQ instruction.\n" "///\n" "/// \\param __X\n" "/// A pointer to the memory used for storing values.\n" "/// \\param __M\n" "/// A 128-bit vector of [2 x i64] containing the mask bits.\n" "/// \\param __Y\n" "/// A 128-bit 
vector of [2 x i64] containing the values to store.\n" "static __inline__ void __DEFAULT_FN_ATTRS128\n" "_mm_maskstore_epi64(long long *__X, __m128i __M, __m128i __Y)\n" "{\n" " __builtin_ia32_maskstoreq(( __v2di *)__X, (__v2di)__M, (__v2di)__Y);\n" "}\n" "\n" "/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \\a __X\n" "/// left by the number of bits given in the corresponding element of the\n" "/// 256-bit vector of [8 x i32] in \\a __Y, shifting in zero bits, and\n" "/// returns the result. If the shift count for any element is greater than\n" "/// 31, the result for that element is zero.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSLLVD instruction.\n" "///\n" "/// \\param __X\n" "/// A 256-bit vector of [8 x i32] to be shifted.\n" "/// \\param __Y\n" "/// A 256-bit vector of [8 x i32] containing the unsigned shift counts (in\n" "/// bits).\n" "/// \\returns A 256-bit vector of [8 x i32] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_sllv_epi32(__m256i __X, __m256i __Y)\n" "{\n" " return (__m256i)__builtin_ia32_psllv8si((__v8si)__X, (__v8si)__Y);\n" "}\n" "\n" "/// Shifts each 32-bit element of the 128-bit vector of [4 x i32] in \\a __X\n" "/// left by the number of bits given in the corresponding element of the\n" "/// 128-bit vector of [4 x i32] in \\a __Y, shifting in zero bits, and\n" "/// returns the result. 
If the shift count for any element is greater than\n" "/// 31, the result for that element is zero.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSLLVD instruction.\n" "///\n" "/// \\param __X\n" "/// A 128-bit vector of [4 x i32] to be shifted.\n" "/// \\param __Y\n" "/// A 128-bit vector of [4 x i32] containing the unsigned shift counts (in\n" "/// bits).\n" "/// \\returns A 128-bit vector of [4 x i32] containing the result.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS128\n" "_mm_sllv_epi32(__m128i __X, __m128i __Y)\n" "{\n" " return (__m128i)__builtin_ia32_psllv4si((__v4si)__X, (__v4si)__Y);\n" "}\n" "\n" "/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \\a __X\n" "/// left by the number of bits given in the corresponding element of the\n" "/// 128-bit vector of [4 x i64] in \\a __Y, shifting in zero bits, and\n" "/// returns the result. If the shift count for any element is greater than\n" "/// 63, the result for that element is zero.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSLLVQ instruction.\n" "///\n" "/// \\param __X\n" "/// A 256-bit vector of [4 x i64] to be shifted.\n" "/// \\param __Y\n" "/// A 256-bit vector of [4 x i64] containing the unsigned shift counts (in\n" "/// bits).\n" "/// \\returns A 256-bit vector of [4 x i64] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_sllv_epi64(__m256i __X, __m256i __Y)\n" "{\n" " return (__m256i)__builtin_ia32_psllv4di((__v4di)__X, (__v4di)__Y);\n" "}\n" "\n" "/// Shifts each 64-bit element of the 128-bit vector of [2 x i64] in \\a __X\n" "/// left by the number of bits given in the corresponding element of the\n" "/// 128-bit vector of [2 x i64] in \\a __Y, shifting in zero bits, and\n" "/// returns the result. 
If the shift count for any element is greater than\n" "/// 63, the result for that element is zero.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSLLVQ instruction.\n" "///\n" "/// \\param __X\n" "/// A 128-bit vector of [2 x i64] to be shifted.\n" "/// \\param __Y\n" "/// A 128-bit vector of [2 x i64] containing the unsigned shift counts (in\n" "/// bits).\n" "/// \\returns A 128-bit vector of [2 x i64] containing the result.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS128\n" "_mm_sllv_epi64(__m128i __X, __m128i __Y)\n" "{\n" " return (__m128i)__builtin_ia32_psllv2di((__v2di)__X, (__v2di)__Y);\n" "}\n" "\n" "/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \\a __X\n" "/// right by the number of bits given in the corresponding element of the\n" "/// 256-bit vector of [8 x i32] in \\a __Y, shifting in sign bits, and\n" "/// returns the result. If the shift count for any element is greater than\n" "/// 31, the result for that element is 0 or -1 according to the sign bit\n" "/// for that element.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSRAVD instruction.\n" "///\n" "/// \\param __X\n" "/// A 256-bit vector of [8 x i32] to be shifted.\n" "/// \\param __Y\n" "/// A 256-bit vector of [8 x i32] containing the unsigned shift counts (in\n" "/// bits).\n" "/// \\returns A 256-bit vector of [8 x i32] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_srav_epi32(__m256i __X, __m256i __Y)\n" "{\n" " return (__m256i)__builtin_ia32_psrav8si((__v8si)__X, (__v8si)__Y);\n" "}\n" "\n" "/// Shifts each 32-bit element of the 128-bit vector of [4 x i32] in \\a __X\n" "/// right by the number of bits given in the corresponding element of the\n" "/// 128-bit vector of [4 x i32] in \\a __Y, shifting in sign bits, and\n" "/// returns the result. 
If the shift count for any element is greater than\n" "/// 31, the result for that element is 0 or -1 according to the sign bit\n" "/// for that element.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSRAVD instruction.\n" "///\n" "/// \\param __X\n" "/// A 128-bit vector of [4 x i32] to be shifted.\n" "/// \\param __Y\n" "/// A 128-bit vector of [4 x i32] containing the unsigned shift counts (in\n" "/// bits).\n" "/// \\returns A 128-bit vector of [4 x i32] containing the result.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS128\n" "_mm_srav_epi32(__m128i __X, __m128i __Y)\n" "{\n" " return (__m128i)__builtin_ia32_psrav4si((__v4si)__X, (__v4si)__Y);\n" "}\n" "\n" "/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \\a __X\n" "/// right by the number of bits given in the corresponding element of the\n" "/// 256-bit vector of [8 x i32] in \\a __Y, shifting in zero bits, and\n" "/// returns the result. If the shift count for any element is greater than\n" "/// 31, the result for that element is zero.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSRLVD instruction.\n" "///\n" "/// \\param __X\n" "/// A 256-bit vector of [8 x i32] to be shifted.\n" "/// \\param __Y\n" "/// A 256-bit vector of [8 x i32] containing the unsigned shift counts (in\n" "/// bits).\n" "/// \\returns A 256-bit vector of [8 x i32] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_srlv_epi32(__m256i __X, __m256i __Y)\n" "{\n" " return (__m256i)__builtin_ia32_psrlv8si((__v8si)__X, (__v8si)__Y);\n" "}\n" "\n" "/// Shifts each 32-bit element of the 128-bit vector of [4 x i32] in \\a __X\n" "/// right by the number of bits given in the corresponding element of the\n" "/// 128-bit vector of [4 x i32] in \\a __Y, shifting in zero bits, and\n" "/// returns the result. 
If the shift count for any element is greater than\n" "/// 31, the result for that element is zero.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSRLVD instruction.\n" "///\n" "/// \\param __X\n" "/// A 128-bit vector of [4 x i32] to be shifted.\n" "/// \\param __Y\n" "/// A 128-bit vector of [4 x i32] containing the unsigned shift counts (in\n" "/// bits).\n" "/// \\returns A 128-bit vector of [4 x i32] containing the result.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS128\n" "_mm_srlv_epi32(__m128i __X, __m128i __Y)\n" "{\n" " return (__m128i)__builtin_ia32_psrlv4si((__v4si)__X, (__v4si)__Y);\n" "}\n" "\n" "/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \\a __X\n" "/// right by the number of bits given in the corresponding element of the\n" "/// 128-bit vector of [4 x i64] in \\a __Y, shifting in zero bits, and\n" "/// returns the result. If the shift count for any element is greater than\n" "/// 63, the result for that element is zero.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSRLVQ instruction.\n" "///\n" "/// \\param __X\n" "/// A 256-bit vector of [4 x i64] to be shifted.\n" "/// \\param __Y\n" "/// A 256-bit vector of [4 x i64] containing the unsigned shift counts (in\n" "/// bits).\n" "/// \\returns A 256-bit vector of [4 x i64] containing the result.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_srlv_epi64(__m256i __X, __m256i __Y)\n" "{\n" " return (__m256i)__builtin_ia32_psrlv4di((__v4di)__X, (__v4di)__Y);\n" "}\n" "\n" "/// Shifts each 64-bit element of the 128-bit vector of [2 x i64] in \\a __X\n" "/// right by the number of bits given in the corresponding element of the\n" "/// 128-bit vector of [2 x i64] in \\a __Y, shifting in zero bits, and\n" "/// returns the result. 
If the shift count for any element is greater than\n" "/// 63, the result for that element is zero.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSRLVQ instruction.\n" "///\n" "/// \\param __X\n" "/// A 128-bit vector of [2 x i64] to be shifted.\n" "/// \\param __Y\n" "/// A 128-bit vector of [2 x i64] containing the unsigned shift counts (in\n" "/// bits).\n" "/// \\returns A 128-bit vector of [2 x i64] containing the result.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS128\n" "_mm_srlv_epi64(__m128i __X, __m128i __Y)\n" "{\n" " return (__m128i)__builtin_ia32_psrlv2di((__v2di)__X, (__v2di)__Y);\n" "}\n" "\n" "/// Conditionally gathers two 64-bit floating-point values, either from the\n" "/// 128-bit vector of [2 x double] in \\a a, or from memory \\a m using scaled\n" "/// indexes from the 128-bit vector of [4 x i32] in \\a i. The 128-bit vector\n" "/// of [2 x double] in \\a mask determines the source for each element.\n" "///\n" "/// \\code{.operation}\n" "/// FOR element := 0 to 1\n" "/// j := element*64\n" "/// k := element*32\n" "/// IF mask[j+63] == 0\n" "/// result[j+63:j] := a[j+63:j]\n" "/// ELSE\n" "/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s)\n" "/// FI\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128d _mm_mask_i32gather_pd(__m128d a, const double *m, __m128i i,\n" "/// __m128d mask, const int s);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VGATHERDPD instruction.\n" "///\n" "/// \\param a\n" "/// A 128-bit vector of [2 x double] used as the source when a mask bit is\n" "/// zero.\n" "/// \\param m\n" "/// A pointer to the memory used for loading values.\n" "/// \\param i\n" "/// A 128-bit vector of [4 x i32] containing signed indexes into \\a m. Only\n" "/// the first two elements are used.\n" "/// \\param mask\n" "/// A 128-bit vector of [2 x double] containing the mask. 
The most\n" "/// significant bit of each element in the mask vector represents the mask\n" "/// bits. If a mask bit is zero, the corresponding value from vector \\a a\n" "/// is gathered; otherwise the value is loaded from memory.\n" "/// \\param s\n" "/// A literal constant scale factor for the indexes in \\a i. Must be\n" "/// 1, 2, 4, or 8.\n" "/// \\returns A 128-bit vector of [2 x double] containing the gathered values.\n" "#define _mm_mask_i32gather_pd(a, m, i, mask, s) \\\n" " ((__m128d)__builtin_ia32_gatherd_pd((__v2df)(__m128i)(a), \\\n" " (double const *)(m), \\\n" " (__v4si)(__m128i)(i), \\\n" " (__v2df)(__m128d)(mask), (s)))\n" "\n" "/// Conditionally gathers four 64-bit floating-point values, either from the\n" "/// 256-bit vector of [4 x double] in \\a a, or from memory \\a m using scaled\n" "/// indexes from the 128-bit vector of [4 x i32] in \\a i. The 256-bit vector\n" "/// of [4 x double] in \\a mask determines the source for each element.\n" "///\n" "/// \\code{.operation}\n" "/// FOR element := 0 to 3\n" "/// j := element*64\n" "/// k := element*32\n" "/// IF mask[j+63] == 0\n" "/// result[j+63:j] := a[j+63:j]\n" "/// ELSE\n" "/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s)\n" "/// FI\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256d _mm256_mask_i32gather_pd(__m256d a, const double *m, __m128i i,\n" "/// __m256d mask, const int s);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VGATHERDPD instruction.\n" "///\n" "/// \\param a\n" "/// A 256-bit vector of [4 x double] used as the source when a mask bit is\n" "/// zero.\n" "/// \\param m\n" "/// A pointer to the memory used for loading values.\n" "/// \\param i\n" "/// A 128-bit vector of [4 x i32] containing signed indexes into \\a m.\n" "/// \\param mask\n" "/// A 256-bit vector of [4 x double] containing the mask. 
The most\n" "/// significant bit of each element in the mask vector represents the mask\n" "/// bits. If a mask bit is zero, the corresponding value from vector \\a a\n" "/// is gathered; otherwise the value is loaded from memory.\n" "/// \\param s\n" "/// A literal constant scale factor for the indexes in \\a i. Must be\n" "/// 1, 2, 4, or 8.\n" "/// \\returns A 256-bit vector of [4 x double] containing the gathered values.\n" "#define _mm256_mask_i32gather_pd(a, m, i, mask, s) \\\n" " ((__m256d)__builtin_ia32_gatherd_pd256((__v4df)(__m256d)(a), \\\n" " (double const *)(m), \\\n" " (__v4si)(__m128i)(i), \\\n" " (__v4df)(__m256d)(mask), (s)))\n" "\n" "/// Conditionally gathers two 64-bit floating-point values, either from the\n" "/// 128-bit vector of [2 x double] in \\a a, or from memory \\a m using scaled\n" "/// indexes from the 128-bit vector of [2 x i64] in \\a i. The 128-bit vector\n" "/// of [2 x double] in \\a mask determines the source for each element.\n" "///\n" "/// \\code{.operation}\n" "/// FOR element := 0 to 1\n" "/// j := element*64\n" "/// k := element*64\n" "/// IF mask[j+63] == 0\n" "/// result[j+63:j] := a[j+63:j]\n" "/// ELSE\n" "/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s)\n" "/// FI\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128d _mm_mask_i64gather_pd(__m128d a, const double *m, __m128i i,\n" "/// __m128d mask, const int s);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VGATHERQPD instruction.\n" "///\n" "/// \\param a\n" "/// A 128-bit vector of [2 x double] used as the source when a mask bit is\n" "/// zero.\n" "/// \\param m\n" "/// A pointer to the memory used for loading values.\n" "/// \\param i\n" "/// A 128-bit vector of [2 x i64] containing signed indexes into \\a m.\n" "/// \\param mask\n" "/// A 128-bit vector of [2 x double] containing the mask. 
The most\n" "/// significant bit of each element in the mask vector represents the mask\n" "/// bits. If a mask bit is zero, the corresponding value from vector \\a a\n" "/// is gathered; otherwise the value is loaded from memory.\n" "/// \\param s\n" "/// A literal constant scale factor for the indexes in \\a i. Must be\n" "/// 1, 2, 4, or 8.\n" "/// \\returns A 128-bit vector of [2 x double] containing the gathered values.\n" "#define _mm_mask_i64gather_pd(a, m, i, mask, s) \\\n" " ((__m128d)__builtin_ia32_gatherq_pd((__v2df)(__m128d)(a), \\\n" " (double const *)(m), \\\n" " (__v2di)(__m128i)(i), \\\n" " (__v2df)(__m128d)(mask), (s)))\n" "\n" "/// Conditionally gathers four 64-bit floating-point values, either from the\n" "/// 256-bit vector of [4 x double] in \\a a, or from memory \\a m using scaled\n" "/// indexes from the 256-bit vector of [4 x i64] in \\a i. The 256-bit vector\n" "/// of [4 x double] in \\a mask determines the source for each element.\n" "///\n" "/// \\code{.operation}\n" "/// FOR element := 0 to 3\n" "/// j := element*64\n" "/// k := element*64\n" "/// IF mask[j+63] == 0\n" "/// result[j+63:j] := a[j+63:j]\n" "/// ELSE\n" "/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s)\n" "/// FI\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256d _mm256_mask_i64gather_pd(__m256d a, const double *m, __m256i i,\n" "/// __m256d mask, const int s);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VGATHERQPD instruction.\n" "///\n" "/// \\param a\n" "/// A 256-bit vector of [4 x double] used as the source when a mask bit is\n" "/// zero.\n" "/// \\param m\n" "/// A pointer to the memory used for loading values.\n" "/// \\param i\n" "/// A 256-bit vector of [4 x i64] containing signed indexes into \\a m.\n" "/// \\param mask\n" "/// A 256-bit vector of [4 x double] containing the mask. 
The most\n" "/// significant bit of each element in the mask vector represents the mask\n" "/// bits. If a mask bit is zero, the corresponding value from vector \\a a\n" "/// is gathered; otherwise the value is loaded from memory.\n" "/// \\param s\n" "/// A literal constant scale factor for the indexes in \\a i. Must be\n" "/// 1, 2, 4, or 8.\n" "/// \\returns A 256-bit vector of [4 x double] containing the gathered values.\n" "#define _mm256_mask_i64gather_pd(a, m, i, mask, s) \\\n" " ((__m256d)__builtin_ia32_gatherq_pd256((__v4df)(__m256d)(a), \\\n" " (double const *)(m), \\\n" " (__v4di)(__m256i)(i), \\\n" " (__v4df)(__m256d)(mask), (s)))\n" "\n" "/// Conditionally gathers four 32-bit floating-point values, either from the\n" "/// 128-bit vector of [4 x float] in \\a a, or from memory \\a m using scaled\n" "/// indexes from the 128-bit vector of [4 x i32] in \\a i. The 128-bit vector\n" "/// of [4 x float] in \\a mask determines the source for each element.\n" "///\n" "/// \\code{.operation}\n" "/// FOR element := 0 to 3\n" "/// j := element*32\n" "/// k := element*32\n" "/// IF mask[j+31] == 0\n" "/// result[j+31:j] := a[j+31:j]\n" "/// ELSE\n" "/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s)\n" "/// FI\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128 _mm_mask_i32gather_ps(__m128 a, const float *m, __m128i i,\n" "/// __m128 mask, const int s);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VGATHERDPS instruction.\n" "///\n" "/// \\param a\n" "/// A 128-bit vector of [4 x float] used as the source when a mask bit is\n" "/// zero.\n" "/// \\param m\n" "/// A pointer to the memory used for loading values.\n" "/// \\param i\n" "/// A 128-bit vector of [4 x i32] containing signed indexes into \\a m.\n" "/// \\param mask\n" "/// A 128-bit vector of [4 x float] containing the mask. The most\n" "/// significant bit of each element in the mask vector represents the mask\n" "/// bits. 
If a mask bit is zero, the corresponding value from vector \\a a\n" "/// is gathered; otherwise the value is loaded from memory.\n" "/// \\param s\n" "/// A literal constant scale factor for the indexes in \\a i. Must be\n" "/// 1, 2, 4, or 8.\n" "/// \\returns A 128-bit vector of [4 x float] containing the gathered values.\n" "#define _mm_mask_i32gather_ps(a, m, i, mask, s) \\\n" " ((__m128)__builtin_ia32_gatherd_ps((__v4sf)(__m128)(a), \\\n" " (float const *)(m), \\\n" " (__v4si)(__m128i)(i), \\\n" " (__v4sf)(__m128)(mask), (s)))\n" "\n" "/// Conditionally gathers eight 32-bit floating-point values, either from the\n" "/// 256-bit vector of [8 x float] in \\a a, or from memory \\a m using scaled\n" "/// indexes from the 256-bit vector of [8 x i32] in \\a i. The 256-bit vector\n" "/// of [8 x float] in \\a mask determines the source for each element.\n" "///\n" "/// \\code{.operation}\n" "/// FOR element := 0 to 7\n" "/// j := element*32\n" "/// k := element*32\n" "/// IF mask[j+31] == 0\n" "/// result[j+31:j] := a[j+31:j]\n" "/// ELSE\n" "/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s)\n" "/// FI\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256 _mm256_mask_i32gather_ps(__m256 a, const float *m, __m256i i,\n" "/// __m256 mask, const int s);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VGATHERDPS instruction.\n" "///\n" "/// \\param a\n" "/// A 256-bit vector of [8 x float] used as the source when a mask bit is\n" "/// zero.\n" "/// \\param m\n" "/// A pointer to the memory used for loading values.\n" "/// \\param i\n" "/// A 256-bit vector of [8 x i32] containing signed indexes into \\a m.\n" "/// \\param mask\n" "/// A 256-bit vector of [8 x float] containing the mask. The most\n" "/// significant bit of each element in the mask vector represents the mask\n" "/// bits. 
If a mask bit is zero, the corresponding value from vector \\a a\n" "/// is gathered; otherwise the value is loaded from memory.\n" "/// \\param s\n" "/// A literal constant scale factor for the indexes in \\a i. Must be\n" "/// 1, 2, 4, or 8.\n" "/// \\returns A 256-bit vector of [8 x float] containing the gathered values.\n" "#define _mm256_mask_i32gather_ps(a, m, i, mask, s) \\\n" " ((__m256)__builtin_ia32_gatherd_ps256((__v8sf)(__m256)(a), \\\n" " (float const *)(m), \\\n" " (__v8si)(__m256i)(i), \\\n" " (__v8sf)(__m256)(mask), (s)))\n" "\n" "/// Conditionally gathers two 32-bit floating-point values, either from the\n" "/// 128-bit vector of [4 x float] in \\a a, or from memory \\a m using scaled\n" "/// indexes from the 128-bit vector of [2 x i64] in \\a i. The 128-bit vector\n" "/// of [4 x float] in \\a mask determines the source for the lower two\n" "/// elements. The upper two elements of the result are zeroed.\n" "///\n" "/// \\code{.operation}\n" "/// FOR element := 0 to 1\n" "/// j := element*32\n" "/// k := element*64\n" "/// IF mask[j+31] == 0\n" "/// result[j+31:j] := a[j+31:j]\n" "/// ELSE\n" "/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s)\n" "/// FI\n" "/// ENDFOR\n" "/// result[127:64] := 0\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128 _mm_mask_i64gather_ps(__m128 a, const float *m, __m128i i,\n" "/// __m128 mask, const int s);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VGATHERQPS instruction.\n" "///\n" "/// \\param a\n" "/// A 128-bit vector of [4 x float] used as the source when a mask bit is\n" "/// zero. Only the first two elements are used.\n" "/// \\param m\n" "/// A pointer to the memory used for loading values.\n" "/// \\param i\n" "/// A 128-bit vector of [2 x i64] containing signed indexes into \\a m.\n" "/// \\param mask\n" "/// A 128-bit vector of [4 x float] containing the mask. 
The most\n" "/// significant bit of each element in the mask vector represents the mask\n" "/// bits. If a mask bit is zero, the corresponding value from vector \\a a\n" "/// is gathered; otherwise the value is loaded from memory. Only the first\n" "/// two elements are used.\n" "/// \\param s\n" "/// A literal constant scale factor for the indexes in \\a i. Must be\n" "/// 1, 2, 4, or 8.\n" "/// \\returns A 128-bit vector of [4 x float] containing the gathered values.\n" "#define _mm_mask_i64gather_ps(a, m, i, mask, s) \\\n" " ((__m128)__builtin_ia32_gatherq_ps((__v4sf)(__m128)(a), \\\n" " (float const *)(m), \\\n" " (__v2di)(__m128i)(i), \\\n" " (__v4sf)(__m128)(mask), (s)))\n" "\n" "/// Conditionally gathers four 32-bit floating-point values, either from the\n" "/// 128-bit vector of [4 x float] in \\a a, or from memory \\a m using scaled\n" "/// indexes from the 256-bit vector of [4 x i64] in \\a i. The 128-bit vector\n" "/// of [4 x float] in \\a mask determines the source for each element.\n" "///\n" "/// \\code{.operation}\n" "/// FOR element := 0 to 3\n" "/// j := element*32\n" "/// k := element*64\n" "/// IF mask[j+31] == 0\n" "/// result[j+31:j] := a[j+31:j]\n" "/// ELSE\n" "/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s)\n" "/// FI\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128 _mm256_mask_i64gather_ps(__m128 a, const float *m, __m256i i,\n" "/// __m128 mask, const int s);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VGATHERQPS instruction.\n" "///\n" "/// \\param a\n" "/// A 128-bit vector of [4 x float] used as the source when a mask bit is\n" "/// zero.\n" "/// \\param m\n" "/// A pointer to the memory used for loading values.\n" "/// \\param i\n" "/// A 256-bit vector of [4 x i64] containing signed indexes into \\a m.\n" "/// \\param mask\n" "/// A 128-bit vector of [4 x float] containing the mask. 
The most\n" "/// significant bit of each element in the mask vector represents the mask\n" "/// bits. If a mask bit is zero, the corresponding value from vector \\a a\n" "/// is gathered; otherwise the value is loaded from memory.\n" "/// \\param s\n" "/// A literal constant scale factor for the indexes in \\a i. Must be\n" "/// 1, 2, 4, or 8.\n" "/// \\returns A 128-bit vector of [4 x float] containing the gathered values.\n" "#define _mm256_mask_i64gather_ps(a, m, i, mask, s) \\\n" " ((__m128)__builtin_ia32_gatherq_ps256((__v4sf)(__m128)(a), \\\n" " (float const *)(m), \\\n" " (__v4di)(__m256i)(i), \\\n" " (__v4sf)(__m128)(mask), (s)))\n" "\n" "/// Conditionally gathers four 32-bit integer values, either from the\n" "/// 128-bit vector of [4 x i32] in \\a a, or from memory \\a m using scaled\n" "/// indexes from the 128-bit vector of [4 x i32] in \\a i. The 128-bit vector\n" "/// of [4 x i32] in \\a mask determines the source for each element.\n" "///\n" "/// \\code{.operation}\n" "/// FOR element := 0 to 3\n" "/// j := element*32\n" "/// k := element*32\n" "/// IF mask[j+31] == 0\n" "/// result[j+31:j] := a[j+31:j]\n" "/// ELSE\n" "/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s)\n" "/// FI\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm_mask_i32gather_epi32(__m128i a, const int *m, __m128i i,\n" "/// __m128i mask, const int s);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPGATHERDD instruction.\n" "///\n" "/// \\param a\n" "/// A 128-bit vector of [4 x i32] used as the source when a mask bit is\n" "/// zero.\n" "/// \\param m\n" "/// A pointer to the memory used for loading values.\n" "/// \\param i\n" "/// A 128-bit vector of [4 x i32] containing signed indexes into \\a m.\n" "/// \\param mask\n" "/// A 128-bit vector of [4 x i32] containing the mask. The most significant\n" "/// bit of each element in the mask vector represents the mask bits. 
If a\n" "/// mask bit is zero, the corresponding value from vector \\a a is gathered;\n" "/// otherwise the value is loaded from memory.\n" "/// \\param s\n" "/// A literal constant scale factor for the indexes in \\a i. Must be\n" "/// 1, 2, 4, or 8.\n" "/// \\returns A 128-bit vector of [4 x i32] containing the gathered values.\n" "#define _mm_mask_i32gather_epi32(a, m, i, mask, s) \\\n" " ((__m128i)__builtin_ia32_gatherd_d((__v4si)(__m128i)(a), \\\n" " (int const *)(m), \\\n" " (__v4si)(__m128i)(i), \\\n" " (__v4si)(__m128i)(mask), (s)))\n" "\n" "/// Conditionally gathers eight 32-bit integer values, either from the\n" "/// 256-bit vector of [8 x i32] in \\a a, or from memory \\a m using scaled\n" "/// indexes from the 256-bit vector of [8 x i32] in \\a i. The 256-bit vector\n" "/// of [8 x i32] in \\a mask determines the source for each element.\n" "///\n" "/// \\code{.operation}\n" "/// FOR element := 0 to 7\n" "/// j := element*32\n" "/// k := element*32\n" "/// IF mask[j+31] == 0\n" "/// result[j+31:j] := a[j+31:j]\n" "/// ELSE\n" "/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s)\n" "/// FI\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256i _mm256_mask_i32gather_epi32(__m256i a, const int *m, __m256i i,\n" "/// __m256i mask, const int s);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPGATHERDD instruction.\n" "///\n" "/// \\param a\n" "/// A 256-bit vector of [8 x i32] used as the source when a mask bit is\n" "/// zero.\n" "/// \\param m\n" "/// A pointer to the memory used for loading values.\n" "/// \\param i\n" "/// A 256-bit vector of [8 x i32] containing signed indexes into \\a m.\n" "/// \\param mask\n" "/// A 256-bit vector of [8 x i32] containing the mask. The most significant\n" "/// bit of each element in the mask vector represents the mask bits. 
If a\n" "/// mask bit is zero, the corresponding value from vector \\a a is gathered;\n" "/// otherwise the value is loaded from memory.\n" "/// \\param s\n" "/// A literal constant scale factor for the indexes in \\a i. Must be\n" "/// 1, 2, 4, or 8.\n" "/// \\returns A 256-bit vector of [8 x i32] containing the gathered values.\n" "#define _mm256_mask_i32gather_epi32(a, m, i, mask, s) \\\n" " ((__m256i)__builtin_ia32_gatherd_d256((__v8si)(__m256i)(a), \\\n" " (int const *)(m), \\\n" " (__v8si)(__m256i)(i), \\\n" " (__v8si)(__m256i)(mask), (s)))\n" "\n" "/// Conditionally gathers two 32-bit integer values, either from the\n" "/// 128-bit vector of [4 x i32] in \\a a, or from memory \\a m using scaled\n" "/// indexes from the 128-bit vector of [2 x i64] in \\a i. The 128-bit vector\n" "/// of [4 x i32] in \\a mask determines the source for the lower two\n" "/// elements. The upper two elements of the result are zeroed.\n" "///\n" "/// \\code{.operation}\n" "/// FOR element := 0 to 1\n" "/// j := element*32\n" "/// k := element*64\n" "/// IF mask[j+31] == 0\n" "/// result[j+31:j] := a[j+31:j]\n" "/// ELSE\n" "/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s)\n" "/// FI\n" "/// ENDFOR\n" "/// result[127:64] := 0\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm_mask_i64gather_epi32(__m128i a, const int *m, __m128i i,\n" "/// __m128i mask, const int s);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPGATHERQD instruction.\n" "///\n" "/// \\param a\n" "/// A 128-bit vector of [4 x i32] used as the source when a mask bit is\n" "/// zero. Only the first two elements are used.\n" "/// \\param m\n" "/// A pointer to the memory used for loading values.\n" "/// \\param i\n" "/// A 128-bit vector of [2 x i64] containing indexes into \\a m.\n" "/// \\param mask\n" "/// A 128-bit vector of [4 x i32] containing the mask. 
The most significant\n" "/// bit of each element in the mask vector represents the mask bits. If a\n" "/// mask bit is zero, the corresponding value from vector \\a a is gathered;\n" "/// otherwise the value is loaded from memory. Only the first two elements\n" "/// are used.\n" "/// \\param s\n" "/// A literal constant scale factor for the indexes in \\a i. Must be\n" "/// 1, 2, 4, or 8.\n" "/// \\returns A 128-bit vector of [4 x i32] containing the gathered values.\n" "#define _mm_mask_i64gather_epi32(a, m, i, mask, s) \\\n" " ((__m128i)__builtin_ia32_gatherq_d((__v4si)(__m128i)(a), \\\n" " (int const *)(m), \\\n" " (__v2di)(__m128i)(i), \\\n" " (__v4si)(__m128i)(mask), (s)))\n" "\n" "/// Conditionally gathers four 32-bit integer values, either from the\n" "/// 128-bit vector of [4 x i32] in \\a a, or from memory \\a m using scaled\n" "/// indexes from the 256-bit vector of [4 x i64] in \\a i. The 128-bit vector\n" "/// of [4 x i32] in \\a mask determines the source for each element.\n" "///\n" "/// \\code{.operation}\n" "/// FOR element := 0 to 3\n" "/// j := element*32\n" "/// k := element*64\n" "/// IF mask[j+31] == 0\n" "/// result[j+31:j] := a[j+31:j]\n" "/// ELSE\n" "/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s)\n" "/// FI\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm256_mask_i64gather_epi32(__m128i a, const int *m, __m256i i,\n" "/// __m128i mask, const int s);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPGATHERQD instruction.\n" "///\n" "/// \\param a\n" "/// A 128-bit vector of [4 x i32] used as the source when a mask bit is\n" "/// zero.\n" "/// \\param m\n" "/// A pointer to the memory used for loading values.\n" "/// \\param i\n" "/// A 256-bit vector of [4 x i64] containing signed indexes into \\a m.\n" "/// \\param mask\n" "/// A 128-bit vector of [4 x i32] containing the mask. 
The most significant\n" "/// bit of each element in the mask vector represents the mask bits. If a\n" "/// mask bit is zero, the corresponding value from vector \\a a is gathered;\n" "/// otherwise the value is loaded from memory.\n" "/// \\param s\n" "/// A literal constant scale factor for the indexes in \\a i. Must be\n" "/// 1, 2, 4, or 8.\n" "/// \\returns A 128-bit vector of [4 x i32] containing the gathered values.\n" "#define _mm256_mask_i64gather_epi32(a, m, i, mask, s) \\\n" " ((__m128i)__builtin_ia32_gatherq_d256((__v4si)(__m128i)(a), \\\n" " (int const *)(m), \\\n" " (__v4di)(__m256i)(i), \\\n" " (__v4si)(__m128i)(mask), (s)))\n" "\n" "/// Conditionally gathers two 64-bit integer values, either from the\n" "/// 128-bit vector of [2 x i64] in \\a a, or from memory \\a m using scaled\n" "/// indexes from the 128-bit vector of [4 x i32] in \\a i. The 128-bit vector\n" "/// of [2 x i64] in \\a mask determines the source for each element.\n" "///\n" "/// \\code{.operation}\n" "/// FOR element := 0 to 1\n" "/// j := element*64\n" "/// k := element*32\n" "/// IF mask[j+63] == 0\n" "/// result[j+63:j] := a[j+63:j]\n" "/// ELSE\n" "/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s)\n" "/// FI\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm_mask_i32gather_epi64(__m128i a, const long long *m, __m128i i,\n" "/// __m128i mask, const int s);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPGATHERDQ instruction.\n" "///\n" "/// \\param a\n" "/// A 128-bit vector of [2 x i64] used as the source when a mask bit is\n" "/// zero.\n" "/// \\param m\n" "/// A pointer to the memory used for loading values.\n" "/// \\param i\n" "/// A 128-bit vector of [4 x i32] containing signed indexes into \\a m. Only\n" "/// the first two elements are used.\n" "/// \\param mask\n" "/// A 128-bit vector of [2 x i64] containing the mask. 
The most significant\n" "/// bit of each element in the mask vector represents the mask bits. If a\n" "/// mask bit is zero, the corresponding value from vector \\a a is gathered;\n" "/// otherwise the value is loaded from memory.\n" "/// \\param s\n" "/// A literal constant scale factor for the indexes in \\a i. Must be\n" "/// 1, 2, 4, or 8.\n" "/// \\returns A 128-bit vector of [2 x i64] containing the gathered values.\n" "#define _mm_mask_i32gather_epi64(a, m, i, mask, s) \\\n" " ((__m128i)__builtin_ia32_gatherd_q((__v2di)(__m128i)(a), \\\n" " (long long const *)(m), \\\n" " (__v4si)(__m128i)(i), \\\n" " (__v2di)(__m128i)(mask), (s)))\n" "\n" "/// Conditionally gathers four 64-bit integer values, either from the\n" "/// 256-bit vector of [4 x i64] in \\a a, or from memory \\a m using scaled\n" "/// indexes from the 128-bit vector of [4 x i32] in \\a i. The 256-bit vector\n" "/// of [4 x i64] in \\a mask determines the source for each element.\n" "///\n" "/// \\code{.operation}\n" "/// FOR element := 0 to 3\n" "/// j := element*64\n" "/// k := element*32\n" "/// IF mask[j+63] == 0\n" "/// result[j+63:j] := a[j+63:j]\n" "/// ELSE\n" "/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s)\n" "/// FI\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256i _mm256_mask_i32gather_epi64(__m256i a, const long long *m,\n" "/// __m128i i, __m256i mask, const int s);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPGATHERDQ instruction.\n" "///\n" "/// \\param a\n" "/// A 256-bit vector of [4 x i64] used as the source when a mask bit is\n" "/// zero.\n" "/// \\param m\n" "/// A pointer to the memory used for loading values.\n" "/// \\param i\n" "/// A 128-bit vector of [4 x i32] containing signed indexes into \\a m.\n" "/// \\param mask\n" "/// A 256-bit vector of [4 x i64] containing the mask. The most significant\n" "/// bit of each element in the mask vector represents the mask bits. 
If a\n" "/// mask bit is zero, the corresponding value from vector \\a a is gathered;\n" "/// otherwise the value is loaded from memory.\n" "/// \\param s\n" "/// A literal constant scale factor for the indexes in \\a i. Must be\n" "/// 1, 2, 4, or 8.\n" "/// \\returns A 256-bit vector of [4 x i64] containing the gathered values.\n" "#define _mm256_mask_i32gather_epi64(a, m, i, mask, s) \\\n" " ((__m256i)__builtin_ia32_gatherd_q256((__v4di)(__m256i)(a), \\\n" " (long long const *)(m), \\\n" " (__v4si)(__m128i)(i), \\\n" " (__v4di)(__m256i)(mask), (s)))\n" "\n" "/// Conditionally gathers two 64-bit integer values, either from the\n" "/// 128-bit vector of [2 x i64] in \\a a, or from memory \\a m using scaled\n" "/// indexes from the 128-bit vector of [2 x i64] in \\a i. The 128-bit vector\n" "/// of [2 x i64] in \\a mask determines the source for each element.\n" "///\n" "/// \\code{.operation}\n" "/// FOR element := 0 to 1\n" "/// j := element*64\n" "/// k := element*64\n" "/// IF mask[j+63] == 0\n" "/// result[j+63:j] := a[j+63:j]\n" "/// ELSE\n" "/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s)\n" "/// FI\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm_mask_i64gather_epi64(__m128i a, const long long *m, __m128i i,\n" "/// __m128i mask, const int s);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPGATHERQQ instruction.\n" "///\n" "/// \\param a\n" "/// A 128-bit vector of [2 x i64] used as the source when a mask bit is\n" "/// zero.\n" "/// \\param m\n" "/// A pointer to the memory used for loading values.\n" "/// \\param i\n" "/// A 128-bit vector of [2 x i64] containing signed indexes into \\a m.\n" "/// \\param mask\n" "/// A 128-bit vector of [2 x i64] containing the mask. The most significant\n" "/// bit of each element in the mask vector represents the mask bits. 
If a\n" "/// mask bit is zero, the corresponding value from vector \\a a is gathered;\n" "/// otherwise the value is loaded from memory.\n" "/// \\param s\n" "/// A literal constant scale factor for the indexes in \\a i. Must be\n" "/// 1, 2, 4, or 8.\n" "/// \\returns A 128-bit vector of [2 x i64] containing the gathered values.\n" "#define _mm_mask_i64gather_epi64(a, m, i, mask, s) \\\n" " ((__m128i)__builtin_ia32_gatherq_q((__v2di)(__m128i)(a), \\\n" " (long long const *)(m), \\\n" " (__v2di)(__m128i)(i), \\\n" " (__v2di)(__m128i)(mask), (s)))\n" "\n" "/// Conditionally gathers four 64-bit integer values, either from the\n" "/// 256-bit vector of [4 x i64] in \\a a, or from memory \\a m using scaled\n" "/// indexes from the 256-bit vector of [4 x i64] in \\a i. The 256-bit vector\n" "/// of [4 x i64] in \\a mask determines the source for each element.\n" "///\n" "/// \\code{.operation}\n" "/// FOR element := 0 to 3\n" "/// j := element*64\n" "/// k := element*64\n" "/// IF mask[j+63] == 0\n" "/// result[j+63:j] := a[j+63:j]\n" "/// ELSE\n" "/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s)\n" "/// FI\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256i _mm256_mask_i64gather_epi64(__m256i a, const long long *m,\n" "/// __m256i i, __m256i mask, const int s);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPGATHERQQ instruction.\n" "///\n" "/// \\param a\n" "/// A 256-bit vector of [4 x i64] used as the source when a mask bit is\n" "/// zero.\n" "/// \\param m\n" "/// A pointer to the memory used for loading values.\n" "/// \\param i\n" "/// A 256-bit vector of [4 x i64] containing signed indexes into \\a m.\n" "/// \\param mask\n" "/// A 256-bit vector of [4 x i64] containing the mask. The most significant\n" "/// bit of each element in the mask vector represents the mask bits. 
If a\n" "/// mask bit is zero, the corresponding value from vector \\a a is gathered;\n" "/// otherwise the value is loaded from memory.\n" "/// \\param s\n" "/// A literal constant scale factor for the indexes in \\a i. Must be\n" "/// 1, 2, 4, or 8.\n" "/// \\returns A 256-bit vector of [4 x i64] containing the gathered values.\n" "#define _mm256_mask_i64gather_epi64(a, m, i, mask, s) \\\n" " ((__m256i)__builtin_ia32_gatherq_q256((__v4di)(__m256i)(a), \\\n" " (long long const *)(m), \\\n" " (__v4di)(__m256i)(i), \\\n" " (__v4di)(__m256i)(mask), (s)))\n" "\n" "/// Gathers two 64-bit floating-point values from memory \\a m using scaled\n" "/// indexes from the 128-bit vector of [4 x i32] in \\a i.\n" "///\n" "/// \\code{.operation}\n" "/// FOR element := 0 to 1\n" "/// j := element*64\n" "/// k := element*32\n" "/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s)\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128d _mm_i32gather_pd(const double *m, __m128i i, const int s);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VGATHERDPD instruction.\n" "///\n" "/// \\param m\n" "/// A pointer to the memory used for loading values.\n" "/// \\param i\n" "/// A 128-bit vector of [4 x i32] containing signed indexes into \\a m. Only\n" "/// the first two elements are used.\n" "/// \\param s\n" "/// A literal constant scale factor for the indexes in \\a i. 
Must be\n" "/// 1, 2, 4, or 8.\n" "/// \\returns A 128-bit vector of [2 x double] containing the gathered values.\n" "#define _mm_i32gather_pd(m, i, s) \\\n" " ((__m128d)__builtin_ia32_gatherd_pd((__v2df)_mm_undefined_pd(), \\\n" " (double const *)(m), \\\n" " (__v4si)(__m128i)(i), \\\n" " (__v2df)_mm_cmpeq_pd(_mm_setzero_pd(), \\\n" " _mm_setzero_pd()), \\\n" " (s)))\n" "\n" "/// Gathers four 64-bit floating-point values from memory \\a m using scaled\n" "/// indexes from the 128-bit vector of [4 x i32] in \\a i.\n" "///\n" "/// \\code{.operation}\n" "/// FOR element := 0 to 3\n" "/// j := element*64\n" "/// k := element*32\n" "/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s)\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256d _mm256_i32gather_pd(const double *m, __m128i i, const int s);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VGATHERDPD instruction.\n" "///\n" "/// \\param m\n" "/// A pointer to the memory used for loading values.\n" "/// \\param i\n" "/// A 128-bit vector of [4 x i32] containing signed indexes into \\a m.\n" "/// \\param s\n" "/// A literal constant scale factor for the indexes in \\a i. 
Must be\n" "/// 1, 2, 4, or 8.\n" "/// \\returns A 256-bit vector of [4 x double] containing the gathered values.\n" "#define _mm256_i32gather_pd(m, i, s) \\\n" " ((__m256d)__builtin_ia32_gatherd_pd256((__v4df)_mm256_undefined_pd(), \\\n" " (double const *)(m), \\\n" " (__v4si)(__m128i)(i), \\\n" " (__v4df)_mm256_cmp_pd(_mm256_setzero_pd(), \\\n" " _mm256_setzero_pd(), \\\n" " _CMP_EQ_OQ), \\\n" " (s)))\n" "\n" "/// Gathers two 64-bit floating-point values from memory \\a m using scaled\n" "/// indexes from the 128-bit vector of [2 x i64] in \\a i.\n" "///\n" "/// \\code{.operation}\n" "/// FOR element := 0 to 1\n" "/// j := element*64\n" "/// k := element*64\n" "/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s)\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128d _mm_i64gather_pd(const double *m, __m128i i, const int s);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VGATHERQPD instruction.\n" "///\n" "/// \\param m\n" "/// A pointer to the memory used for loading values.\n" "/// \\param i\n" "/// A 128-bit vector of [2 x i64] containing signed indexes into \\a m.\n" "/// \\param s\n" "/// A literal constant scale factor for the indexes in \\a i. 
Must be\n" "/// 1, 2, 4, or 8.\n" "/// \\returns A 128-bit vector of [2 x double] containing the gathered values.\n" "#define _mm_i64gather_pd(m, i, s) \\\n" " ((__m128d)__builtin_ia32_gatherq_pd((__v2df)_mm_undefined_pd(), \\\n" " (double const *)(m), \\\n" " (__v2di)(__m128i)(i), \\\n" " (__v2df)_mm_cmpeq_pd(_mm_setzero_pd(), \\\n" " _mm_setzero_pd()), \\\n" " (s)))\n" "\n" "/// Gathers four 64-bit floating-point values from memory \\a m using scaled\n" "/// indexes from the 256-bit vector of [4 x i64] in \\a i.\n" "///\n" "/// \\code{.operation}\n" "/// FOR element := 0 to 3\n" "/// j := element*64\n" "/// k := element*64\n" "/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s)\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256d _mm256_i64gather_pd(const double *m, __m256i i, const int s);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VGATHERQPD instruction.\n" "///\n" "/// \\param m\n" "/// A pointer to the memory used for loading values.\n" "/// \\param i\n" "/// A 256-bit vector of [4 x i64] containing signed indexes into \\a m.\n" "/// \\param s\n" "/// A literal constant scale factor for the indexes in \\a i. 
Must be\n" "/// 1, 2, 4, or 8.\n" "/// \\returns A 256-bit vector of [4 x double] containing the gathered values.\n" "#define _mm256_i64gather_pd(m, i, s) \\\n" " ((__m256d)__builtin_ia32_gatherq_pd256((__v4df)_mm256_undefined_pd(), \\\n" " (double const *)(m), \\\n" " (__v4di)(__m256i)(i), \\\n" " (__v4df)_mm256_cmp_pd(_mm256_setzero_pd(), \\\n" " _mm256_setzero_pd(), \\\n" " _CMP_EQ_OQ), \\\n" " (s)))\n" "\n" "/// Gathers four 32-bit floating-point values from memory \\a m using scaled\n" "/// indexes from the 128-bit vector of [4 x i32] in \\a i.\n" "///\n" "/// \\code{.operation}\n" "/// FOR element := 0 to 3\n" "/// j := element*32\n" "/// k := element*32\n" "/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s)\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128 _mm_i32gather_ps(const float *m, __m128i i, const int s);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VGATHERDPS instruction.\n" "///\n" "/// \\param m\n" "/// A pointer to the memory used for loading values.\n" "/// \\param i\n" "/// A 128-bit vector of [4 x i32] containing signed indexes into \\a m.\n" "/// \\param s\n" "/// A literal constant scale factor for the indexes in \\a i. 
Must be\n" "/// 1, 2, 4, or 8.\n" "/// \\returns A 128-bit vector of [4 x float] containing the gathered values.\n" "#define _mm_i32gather_ps(m, i, s) \\\n" " ((__m128)__builtin_ia32_gatherd_ps((__v4sf)_mm_undefined_ps(), \\\n" " (float const *)(m), \\\n" " (__v4si)(__m128i)(i), \\\n" " (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \\\n" " _mm_setzero_ps()), \\\n" " (s)))\n" "\n" "/// Gathers eight 32-bit floating-point values from memory \\a m using scaled\n" "/// indexes from the 256-bit vector of [8 x i32] in \\a i.\n" "///\n" "/// \\code{.operation}\n" "/// FOR element := 0 to 7\n" "/// j := element*32\n" "/// k := element*32\n" "/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s)\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256 _mm256_i32gather_ps(const float *m, __m256i i, const int s);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VGATHERDPS instruction.\n" "///\n" "/// \\param m\n" "/// A pointer to the memory used for loading values.\n" "/// \\param i\n" "/// A 256-bit vector of [8 x i32] containing signed indexes into \\a m.\n" "/// \\param s\n" "/// A literal constant scale factor for the indexes in \\a i. Must be\n" "/// 1, 2, 4, or 8.\n" "/// \\returns A 256-bit vector of [8 x float] containing the gathered values.\n" "#define _mm256_i32gather_ps(m, i, s) \\\n" " ((__m256)__builtin_ia32_gatherd_ps256((__v8sf)_mm256_undefined_ps(), \\\n" " (float const *)(m), \\\n" " (__v8si)(__m256i)(i), \\\n" " (__v8sf)_mm256_cmp_ps(_mm256_setzero_ps(), \\\n" " _mm256_setzero_ps(), \\\n" " _CMP_EQ_OQ), \\\n" " (s)))\n" "\n" "/// Gathers two 32-bit floating-point values from memory \\a m using scaled\n" "/// indexes from the 128-bit vector of [2 x i64] in \\a i. 
The upper two\n" "/// elements of the result are zeroed.\n" "///\n" "/// \\code{.operation}\n" "/// FOR element := 0 to 1\n" "/// j := element*32\n" "/// k := element*64\n" "/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s)\n" "/// ENDFOR\n" "/// result[127:64] := 0\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128 _mm_i64gather_ps(const float *m, __m128i i, const int s);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VGATHERQPS instruction.\n" "///\n" "/// \\param m\n" "/// A pointer to the memory used for loading values.\n" "/// \\param i\n" "/// A 128-bit vector of [2 x i64] containing signed indexes into \\a m.\n" "/// \\param s\n" "/// A literal constant scale factor for the indexes in \\a i. Must be\n" "/// 1, 2, 4, or 8.\n" "/// \\returns A 128-bit vector of [4 x float] containing the gathered values.\n" "#define _mm_i64gather_ps(m, i, s) \\\n" " ((__m128)__builtin_ia32_gatherq_ps((__v4sf)_mm_undefined_ps(), \\\n" " (float const *)(m), \\\n" " (__v2di)(__m128i)(i), \\\n" " (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \\\n" " _mm_setzero_ps()), \\\n" " (s)))\n" "\n" "/// Gathers four 32-bit floating-point values from memory \\a m using scaled\n" "/// indexes from the 256-bit vector of [4 x i64] in \\a i.\n" "///\n" "/// \\code{.operation}\n" "/// FOR element := 0 to 3\n" "/// j := element*32\n" "/// k := element*64\n" "/// result[j+31:j] := Load32(m + SignExtend(i[k+64:k])*s)\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128 _mm256_i64gather_ps(const float *m, __m256i i, const int s);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VGATHERQPS instruction.\n" "///\n" "/// \\param m\n" "/// A pointer to the memory used for loading values.\n" "/// \\param i\n" "/// A 256-bit vector of [4 x i64] containing signed indexes into \\a m.\n" "/// \\param s\n" "/// A literal constant scale factor for the indexes in \\a i. 
Must be\n" "/// 1, 2, 4, or 8.\n" "/// \\returns A 128-bit vector of [4 x float] containing the gathered values.\n" "#define _mm256_i64gather_ps(m, i, s) \\\n" " ((__m128)__builtin_ia32_gatherq_ps256((__v4sf)_mm_undefined_ps(), \\\n" " (float const *)(m), \\\n" " (__v4di)(__m256i)(i), \\\n" " (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \\\n" " _mm_setzero_ps()), \\\n" " (s)))\n" "\n" "/// Gathers four 32-bit floating-point values from memory \\a m using scaled\n" "/// indexes from the 128-bit vector of [4 x i32] in \\a i.\n" "///\n" "/// \\code{.operation}\n" "/// FOR element := 0 to 3\n" "/// j := element*32\n" "/// k := element*32\n" "/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s)\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm_i32gather_epi32(const int *m, __m128i i, const int s);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPGATHERDD instruction.\n" "///\n" "/// \\param m\n" "/// A pointer to the memory used for loading values.\n" "/// \\param i\n" "/// A 128-bit vector of [4 x i32] containing signed indexes into \\a m.\n" "/// \\param s\n" "/// A literal constant scale factor for the indexes in \\a i. 
Must be\n" "/// 1, 2, 4, or 8.\n" "/// \\returns A 128-bit vector of [4 x i32] containing the gathered values.\n" "#define _mm_i32gather_epi32(m, i, s) \\\n" " ((__m128i)__builtin_ia32_gatherd_d((__v4si)_mm_undefined_si128(), \\\n" " (int const *)(m), (__v4si)(__m128i)(i), \\\n" " (__v4si)_mm_set1_epi32(-1), (s)))\n" "\n" "/// Gathers eight 32-bit floating-point values from memory \\a m using scaled\n" "/// indexes from the 256-bit vector of [8 x i32] in \\a i.\n" "///\n" "/// \\code{.operation}\n" "/// FOR element := 0 to 7\n" "/// j := element*32\n" "/// k := element*32\n" "/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s)\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256i _mm256_i32gather_epi32(const int *m, __m256i i, const int s);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPGATHERDD instruction.\n" "///\n" "/// \\param m\n" "/// A pointer to the memory used for loading values.\n" "/// \\param i\n" "/// A 256-bit vector of [8 x i32] containing signed indexes into \\a m.\n" "/// \\param s\n" "/// A literal constant scale factor for the indexes in \\a i. Must be\n" "/// 1, 2, 4, or 8.\n" "/// \\returns A 256-bit vector of [8 x i32] containing the gathered values.\n" "#define _mm256_i32gather_epi32(m, i, s) \\\n" " ((__m256i)__builtin_ia32_gatherd_d256((__v8si)_mm256_undefined_si256(), \\\n" " (int const *)(m), (__v8si)(__m256i)(i), \\\n" " (__v8si)_mm256_set1_epi32(-1), (s)))\n" "\n" "/// Gathers two 32-bit integer values from memory \\a m using scaled indexes\n" "/// from the 128-bit vector of [2 x i64] in \\a i. 
The upper two elements\n" "/// of the result are zeroed.\n" "///\n" "/// \\code{.operation}\n" "/// FOR element := 0 to 1\n" "/// j := element*32\n" "/// k := element*64\n" "/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s)\n" "/// ENDFOR\n" "/// result[127:64] := 0\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm_i64gather_epi32(const int *m, __m128i i, const int s);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPGATHERQD instruction.\n" "///\n" "/// \\param m\n" "/// A pointer to the memory used for loading values.\n" "/// \\param i\n" "/// A 128-bit vector of [2 x i64] containing signed indexes into \\a m.\n" "/// \\param s\n" "/// A literal constant scale factor for the indexes in \\a i. Must be\n" "/// 1, 2, 4, or 8.\n" "/// \\returns A 128-bit vector of [4 x i32] containing the gathered values.\n" "#define _mm_i64gather_epi32(m, i, s) \\\n" " ((__m128i)__builtin_ia32_gatherq_d((__v4si)_mm_undefined_si128(), \\\n" " (int const *)(m), (__v2di)(__m128i)(i), \\\n" " (__v4si)_mm_set1_epi32(-1), (s)))\n" "\n" "/// Gathers four 32-bit integer values from memory \\a m using scaled indexes\n" "/// from the 256-bit vector of [4 x i64] in \\a i.\n" "///\n" "/// \\code{.operation}\n" "/// FOR element := 0 to 3\n" "/// j := element*32\n" "/// k := element*64\n" "/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s)\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm256_i64gather_epi32(const int *m, __m256i i, const int s);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPGATHERQD instruction.\n" "///\n" "/// \\param m\n" "/// A pointer to the memory used for loading values.\n" "/// \\param i\n" "/// A 256-bit vector of [4 x i64] containing signed indexes into \\a m.\n" "/// \\param s\n" "/// A literal constant scale factor for the indexes in \\a i. 
Must be\n" "/// 1, 2, 4, or 8.\n" "/// \\returns A 128-bit vector of [4 x i32] containing the gathered values.\n" "#define _mm256_i64gather_epi32(m, i, s) \\\n" " ((__m128i)__builtin_ia32_gatherq_d256((__v4si)_mm_undefined_si128(), \\\n" " (int const *)(m), (__v4di)(__m256i)(i), \\\n" " (__v4si)_mm_set1_epi32(-1), (s)))\n" "\n" "/// Gathers two 64-bit integer values from memory \\a m using scaled indexes\n" "/// from the 128-bit vector of [4 x i32] in \\a i.\n" "///\n" "/// \\code{.operation}\n" "/// FOR element := 0 to 1\n" "/// j := element*64\n" "/// k := element*32\n" "/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s)\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm_i32gather_epi64(const long long *m, __m128i i, const int s);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPGATHERDQ instruction.\n" "///\n" "/// \\param m\n" "/// A pointer to the memory used for loading values.\n" "/// \\param i\n" "/// A 128-bit vector of [4 x i32] containing signed indexes into \\a m. Only\n" "/// the first two elements are used.\n" "/// \\param s\n" "/// A literal constant scale factor for the indexes in \\a i. 
Must be\n" "/// 1, 2, 4, or 8.\n" "/// \\returns A 128-bit vector of [2 x i64] containing the gathered values.\n" "#define _mm_i32gather_epi64(m, i, s) \\\n" " ((__m128i)__builtin_ia32_gatherd_q((__v2di)_mm_undefined_si128(), \\\n" " (long long const *)(m), \\\n" " (__v4si)(__m128i)(i), \\\n" " (__v2di)_mm_set1_epi64x(-1), (s)))\n" "\n" "/// Gathers four 64-bit integer values from memory \\a m using scaled indexes\n" "/// from the 128-bit vector of [4 x i32] in \\a i.\n" "///\n" "/// \\code{.operation}\n" "/// FOR element := 0 to 3\n" "/// j := element*64\n" "/// k := element*32\n" "/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s)\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256i _mm256_i32gather_epi64(const long long *m, __m128i i, const int s);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPGATHERDQ instruction.\n" "///\n" "/// \\param m\n" "/// A pointer to the memory used for loading values.\n" "/// \\param i\n" "/// A 128-bit vector of [4 x i32] containing signed indexes into \\a m.\n" "/// \\param s\n" "/// A literal constant scale factor for the indexes in \\a i. 
Must be\n" "/// 1, 2, 4, or 8.\n" "/// \\returns A 256-bit vector of [4 x i64] containing the gathered values.\n" "#define _mm256_i32gather_epi64(m, i, s) \\\n" " ((__m256i)__builtin_ia32_gatherd_q256((__v4di)_mm256_undefined_si256(), \\\n" " (long long const *)(m), \\\n" " (__v4si)(__m128i)(i), \\\n" " (__v4di)_mm256_set1_epi64x(-1), (s)))\n" "\n" "/// Gathers two 64-bit integer values from memory \\a m using scaled indexes\n" "/// from the 128-bit vector of [2 x i64] in \\a i.\n" "///\n" "/// \\code{.operation}\n" "/// FOR element := 0 to 1\n" "/// j := element*64\n" "/// k := element*64\n" "/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s)\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm_i64gather_epi64(const long long *m, __m128i i, const int s);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPGATHERQQ instruction.\n" "///\n" "/// \\param m\n" "/// A pointer to the memory used for loading values.\n" "/// \\param i\n" "/// A 128-bit vector of [2 x i64] containing signed indexes into \\a m.\n" "/// \\param s\n" "/// A literal constant scale factor for the indexes in \\a i. 
Must be\n" "/// 1, 2, 4, or 8.\n" "/// \\returns A 128-bit vector of [2 x i64] containing the gathered values.\n" "#define _mm_i64gather_epi64(m, i, s) \\\n" " ((__m128i)__builtin_ia32_gatherq_q((__v2di)_mm_undefined_si128(), \\\n" " (long long const *)(m), \\\n" " (__v2di)(__m128i)(i), \\\n" " (__v2di)_mm_set1_epi64x(-1), (s)))\n" "\n" "/// Gathers four 64-bit integer values from memory \\a m using scaled indexes\n" "/// from the 256-bit vector of [4 x i64] in \\a i.\n" "///\n" "/// \\code{.operation}\n" "/// FOR element := 0 to 3\n" "/// j := element*64\n" "/// k := element*64\n" "/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s)\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256i _mm256_i64gather_epi64(const long long *m, __m256i i, const int s);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPGATHERQQ instruction.\n" "///\n" "/// \\param m\n" "/// A pointer to the memory used for loading values.\n" "/// \\param i\n" "/// A 256-bit vector of [4 x i64] containing signed indexes into \\a m.\n" "/// \\param s\n" "/// A literal constant scale factor for the indexes in \\a i. 
Must be\n" "/// 1, 2, 4, or 8.\n" "/// \\returns A 256-bit vector of [4 x i64] containing the gathered values.\n" "#define _mm256_i64gather_epi64(m, i, s) \\\n" " ((__m256i)__builtin_ia32_gatherq_q256((__v4di)_mm256_undefined_si256(), \\\n" " (long long const *)(m), \\\n" " (__v4di)(__m256i)(i), \\\n" " (__v4di)_mm256_set1_epi64x(-1), (s)))\n" "\n" "#undef __DEFAULT_FN_ATTRS256\n" "#undef __DEFAULT_FN_ATTRS128\n" "\n" "#endif /* __AVX2INTRIN_H */\n" "" } , { "/builtins/avxifmaintrin.h" , "/*===----------------- avxifmaintrin.h - IFMA intrinsics -------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __AVXIFMAINTRIN_H\n" "#define __AVXIFMAINTRIN_H\n" "\n" "/* Define the default attributes for the functions in this file. */\n" "#define __DEFAULT_FN_ATTRS128 \\\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"avxifma\"), \\\n" " __min_vector_width__(128)))\n" "#define __DEFAULT_FN_ATTRS256 \\\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"avxifma\"), \\\n" " __min_vector_width__(256)))\n" "\n" "// must vex-encoding\n" "\n" "/// Multiply packed unsigned 52-bit integers in each 64-bit element of \\a __Y\n" "/// and \\a __Z to form a 104-bit intermediate result. 
Add the high 52-bit\n" "/// unsigned integer from the intermediate result with the corresponding\n" "/// unsigned 64-bit integer in \\a __X, and store the results in \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i\n" "/// _mm_madd52hi_avx_epu64 (__m128i __X, __m128i __Y, __m128i __Z)\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPMADD52HUQ instruction.\n" "///\n" "/// \\return\n" "/// return __m128i dst.\n" "/// \\param __X\n" "/// A 128-bit vector of [2 x i64]\n" "/// \\param __Y\n" "/// A 128-bit vector of [2 x i64]\n" "/// \\param __Z\n" "/// A 128-bit vector of [2 x i64]\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 1\n" "/// i := j*64\n" "/// tmp[127:0] := ZeroExtend64(__Y[i+51:i]) * ZeroExtend64(__Z[i+51:i])\n" "/// dst[i+63:i] := __X[i+63:i] + ZeroExtend64(tmp[103:52])\n" "/// ENDFOR\n" "/// dst[MAX:128] := 0\n" "/// \\endcode\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS128\n" "_mm_madd52hi_avx_epu64(__m128i __X, __m128i __Y, __m128i __Z) {\n" " return (__m128i)__builtin_ia32_vpmadd52huq128((__v2di)__X, (__v2di)__Y,\n" " (__v2di)__Z);\n" "}\n" "\n" "/// Multiply packed unsigned 52-bit integers in each 64-bit element of \\a __Y\n" "/// and \\a __Z to form a 104-bit intermediate result. 
Add the high 52-bit\n" "/// unsigned integer from the intermediate result with the corresponding\n" "/// unsigned 64-bit integer in \\a __X, and store the results in \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256i\n" "/// _mm256_madd52hi_avx_epu64 (__m256i __X, __m256i __Y, __m256i __Z)\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPMADD52HUQ instruction.\n" "///\n" "/// \\return\n" "/// return __m256i dst.\n" "/// \\param __X\n" "/// A 256-bit vector of [4 x i64]\n" "/// \\param __Y\n" "/// A 256-bit vector of [4 x i64]\n" "/// \\param __Z\n" "/// A 256-bit vector of [4 x i64]\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 3\n" "/// i := j*64\n" "/// tmp[127:0] := ZeroExtend64(__Y[i+51:i]) * ZeroExtend64(__Z[i+51:i])\n" "/// dst[i+63:i] := __X[i+63:i] + ZeroExtend64(tmp[103:52])\n" "/// ENDFOR\n" "/// dst[MAX:256] := 0\n" "/// \\endcode\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_madd52hi_avx_epu64(__m256i __X, __m256i __Y, __m256i __Z) {\n" " return (__m256i)__builtin_ia32_vpmadd52huq256((__v4di)__X, (__v4di)__Y,\n" " (__v4di)__Z);\n" "}\n" "\n" "/// Multiply packed unsigned 52-bit integers in each 64-bit element of \\a __Y\n" "/// and \\a __Z to form a 104-bit intermediate result. 
Add the low 52-bit\n" "/// unsigned integer from the intermediate result with the corresponding\n" "/// unsigned 64-bit integer in \\a __X, and store the results in \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i\n" "/// _mm_madd52lo_avx_epu64 (__m128i __X, __m128i __Y, __m128i __Z)\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPMADD52LUQ instruction.\n" "///\n" "/// \\return\n" "/// return __m128i dst.\n" "/// \\param __X\n" "/// A 128-bit vector of [2 x i64]\n" "/// \\param __Y\n" "/// A 128-bit vector of [2 x i64]\n" "/// \\param __Z\n" "/// A 128-bit vector of [2 x i64]\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 1\n" "/// i := j*64\n" "/// tmp[127:0] := ZeroExtend64(__Y[i+51:i]) * ZeroExtend64(__Z[i+51:i])\n" "/// dst[i+63:i] := __X[i+63:i] + ZeroExtend64(tmp[51:0])\n" "/// ENDFOR\n" "/// dst[MAX:128] := 0\n" "/// \\endcode\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS128\n" "_mm_madd52lo_avx_epu64(__m128i __X, __m128i __Y, __m128i __Z) {\n" " return (__m128i)__builtin_ia32_vpmadd52luq128((__v2di)__X, (__v2di)__Y,\n" " (__v2di)__Z);\n" "}\n" "\n" "/// Multiply packed unsigned 52-bit integers in each 64-bit element of \\a __Y\n" "/// and \\a __Z to form a 104-bit intermediate result. 
Add the low 52-bit\n" "/// unsigned integer from the intermediate result with the corresponding\n" "/// unsigned 64-bit integer in \\a __X, and store the results in \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256i\n" "/// _mm256_madd52lo_avx_epu64 (__m256i __X, __m256i __Y, __m256i __Z)\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPMADD52LUQ instruction.\n" "///\n" "/// \\return\n" "/// return __m256i dst.\n" "/// \\param __X\n" "/// A 256-bit vector of [4 x i64]\n" "/// \\param __Y\n" "/// A 256-bit vector of [4 x i64]\n" "/// \\param __Z\n" "/// A 256-bit vector of [4 x i64]\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 3\n" "/// i := j*64\n" "/// tmp[127:0] := ZeroExtend64(__Y[i+51:i]) * ZeroExtend64(__Z[i+51:i])\n" "/// dst[i+63:i] := __X[i+63:i] + ZeroExtend64(tmp[51:0])\n" "/// ENDFOR\n" "/// dst[MAX:256] := 0\n" "/// \\endcode\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_madd52lo_avx_epu64(__m256i __X, __m256i __Y, __m256i __Z) {\n" " return (__m256i)__builtin_ia32_vpmadd52luq256((__v4di)__X, (__v4di)__Y,\n" " (__v4di)__Z);\n" "}\n" "#undef __DEFAULT_FN_ATTRS128\n" "#undef __DEFAULT_FN_ATTRS256\n" "\n" "#endif // __AVXIFMAINTRIN_H\n" "" } , { "/builtins/avxintrin.h" , "/*===---- avxintrin.h - AVX intrinsics -------------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __AVXINTRIN_H\n" "#define __AVXINTRIN_H\n" "\n" "typedef double __v4df __attribute__ ((__vector_size__ (32)));\n" "typedef float __v8sf __attribute__ ((__vector_size__ (32)));\n" "typedef long long __v4di 
__attribute__ ((__vector_size__ (32)));\n" "typedef int __v8si __attribute__ ((__vector_size__ (32)));\n" "typedef short __v16hi __attribute__ ((__vector_size__ (32)));\n" "typedef char __v32qi __attribute__ ((__vector_size__ (32)));\n" "\n" "/* Unsigned types */\n" "typedef unsigned long long __v4du __attribute__ ((__vector_size__ (32)));\n" "typedef unsigned int __v8su __attribute__ ((__vector_size__ (32)));\n" "typedef unsigned short __v16hu __attribute__ ((__vector_size__ (32)));\n" "typedef unsigned char __v32qu __attribute__ ((__vector_size__ (32)));\n" "\n" "/* We need an explicitly signed variant for char. Note that this shouldn't\n" " * appear in the interface though. */\n" "typedef signed char __v32qs __attribute__((__vector_size__(32)));\n" "\n" "typedef float __m256 __attribute__ ((__vector_size__ (32), __aligned__(32)));\n" "typedef double __m256d __attribute__((__vector_size__(32), __aligned__(32)));\n" "typedef long long __m256i __attribute__((__vector_size__(32), __aligned__(32)));\n" "\n" "typedef float __m256_u __attribute__ ((__vector_size__ (32), __aligned__(1)));\n" "typedef double __m256d_u __attribute__((__vector_size__(32), __aligned__(1)));\n" "typedef long long __m256i_u __attribute__((__vector_size__(32), __aligned__(1)));\n" "\n" "#ifdef __SSE2__\n" "/* Both _Float16 and __bf16 require SSE2 being enabled. */\n" "typedef _Float16 __v16hf __attribute__((__vector_size__(32), __aligned__(32)));\n" "typedef _Float16 __m256h __attribute__((__vector_size__(32), __aligned__(32)));\n" "typedef _Float16 __m256h_u __attribute__((__vector_size__(32), __aligned__(1)));\n" "\n" "typedef __bf16 __v16bf __attribute__((__vector_size__(32), __aligned__(32)));\n" "typedef __bf16 __m256bh __attribute__((__vector_size__(32), __aligned__(32)));\n" "#endif\n" "\n" "/* Define the default attributes for the functions in this file. 
*/\n" "#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__(\"avx\"), __min_vector_width__(256)))\n" "#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__(\"avx\"), __min_vector_width__(128)))\n" "\n" "/* Arithmetic */\n" "/// Adds two 256-bit vectors of [4 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VADDPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [4 x double] containing one of the source operands.\n" "/// \\param __b\n" "/// A 256-bit vector of [4 x double] containing one of the source operands.\n" "/// \\returns A 256-bit vector of [4 x double] containing the sums of both\n" "/// operands.\n" "static __inline __m256d __DEFAULT_FN_ATTRS\n" "_mm256_add_pd(__m256d __a, __m256d __b)\n" "{\n" " return (__m256d)((__v4df)__a+(__v4df)__b);\n" "}\n" "\n" "/// Adds two 256-bit vectors of [8 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VADDPS instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x float] containing one of the source operands.\n" "/// \\param __b\n" "/// A 256-bit vector of [8 x float] containing one of the source operands.\n" "/// \\returns A 256-bit vector of [8 x float] containing the sums of both\n" "/// operands.\n" "static __inline __m256 __DEFAULT_FN_ATTRS\n" "_mm256_add_ps(__m256 __a, __m256 __b)\n" "{\n" " return (__m256)((__v8sf)__a+(__v8sf)__b);\n" "}\n" "\n" "/// Subtracts two 256-bit vectors of [4 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VSUBPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [4 x double] containing the minuend.\n" "/// \\param __b\n" "/// A 256-bit vector of [4 x double] containing the subtrahend.\n" "/// \\returns A 256-bit vector of [4 x double] containing the differences between\n" "/// both operands.\n" "static __inline __m256d __DEFAULT_FN_ATTRS\n" 
"_mm256_sub_pd(__m256d __a, __m256d __b)\n" "{\n" " return (__m256d)((__v4df)__a-(__v4df)__b);\n" "}\n" "\n" "/// Subtracts two 256-bit vectors of [8 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VSUBPS instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x float] containing the minuend.\n" "/// \\param __b\n" "/// A 256-bit vector of [8 x float] containing the subtrahend.\n" "/// \\returns A 256-bit vector of [8 x float] containing the differences between\n" "/// both operands.\n" "static __inline __m256 __DEFAULT_FN_ATTRS\n" "_mm256_sub_ps(__m256 __a, __m256 __b)\n" "{\n" " return (__m256)((__v8sf)__a-(__v8sf)__b);\n" "}\n" "\n" "/// Adds the even-indexed values and subtracts the odd-indexed values of\n" "/// two 256-bit vectors of [4 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VADDSUBPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [4 x double] containing the left source operand.\n" "/// \\param __b\n" "/// A 256-bit vector of [4 x double] containing the right source operand.\n" "/// \\returns A 256-bit vector of [4 x double] containing the alternating sums\n" "/// and differences between both operands.\n" "static __inline __m256d __DEFAULT_FN_ATTRS\n" "_mm256_addsub_pd(__m256d __a, __m256d __b)\n" "{\n" " return (__m256d)__builtin_ia32_addsubpd256((__v4df)__a, (__v4df)__b);\n" "}\n" "\n" "/// Adds the even-indexed values and subtracts the odd-indexed values of\n" "/// two 256-bit vectors of [8 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VADDSUBPS instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x float] containing the left source operand.\n" "/// \\param __b\n" "/// A 256-bit vector of [8 x float] containing the right source operand.\n" "/// \\returns A 256-bit vector of [8 x float] containing the alternating sums and\n" "/// differences between both operands.\n" 
"static __inline __m256 __DEFAULT_FN_ATTRS\n" "_mm256_addsub_ps(__m256 __a, __m256 __b)\n" "{\n" " return (__m256)__builtin_ia32_addsubps256((__v8sf)__a, (__v8sf)__b);\n" "}\n" "\n" "/// Divides two 256-bit vectors of [4 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VDIVPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [4 x double] containing the dividend.\n" "/// \\param __b\n" "/// A 256-bit vector of [4 x double] containing the divisor.\n" "/// \\returns A 256-bit vector of [4 x double] containing the quotients of both\n" "/// operands.\n" "static __inline __m256d __DEFAULT_FN_ATTRS\n" "_mm256_div_pd(__m256d __a, __m256d __b)\n" "{\n" " return (__m256d)((__v4df)__a/(__v4df)__b);\n" "}\n" "\n" "/// Divides two 256-bit vectors of [8 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VDIVPS instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x float] containing the dividend.\n" "/// \\param __b\n" "/// A 256-bit vector of [8 x float] containing the divisor.\n" "/// \\returns A 256-bit vector of [8 x float] containing the quotients of both\n" "/// operands.\n" "static __inline __m256 __DEFAULT_FN_ATTRS\n" "_mm256_div_ps(__m256 __a, __m256 __b)\n" "{\n" " return (__m256)((__v8sf)__a/(__v8sf)__b);\n" "}\n" "\n" "/// Compares two 256-bit vectors of [4 x double] and returns the greater\n" "/// of each pair of values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMAXPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [4 x double] containing one of the operands.\n" "/// \\param __b\n" "/// A 256-bit vector of [4 x double] containing one of the operands.\n" "/// \\returns A 256-bit vector of [4 x double] containing the maximum values\n" "/// between both operands.\n" "static __inline __m256d __DEFAULT_FN_ATTRS\n" "_mm256_max_pd(__m256d __a, __m256d __b)\n" "{\n" " return 
(__m256d)__builtin_ia32_maxpd256((__v4df)__a, (__v4df)__b);\n" "}\n" "\n" "/// Compares two 256-bit vectors of [8 x float] and returns the greater\n" "/// of each pair of values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMAXPS instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x float] containing one of the operands.\n" "/// \\param __b\n" "/// A 256-bit vector of [8 x float] containing one of the operands.\n" "/// \\returns A 256-bit vector of [8 x float] containing the maximum values\n" "/// between both operands.\n" "static __inline __m256 __DEFAULT_FN_ATTRS\n" "_mm256_max_ps(__m256 __a, __m256 __b)\n" "{\n" " return (__m256)__builtin_ia32_maxps256((__v8sf)__a, (__v8sf)__b);\n" "}\n" "\n" "/// Compares two 256-bit vectors of [4 x double] and returns the lesser\n" "/// of each pair of values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMINPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [4 x double] containing one of the operands.\n" "/// \\param __b\n" "/// A 256-bit vector of [4 x double] containing one of the operands.\n" "/// \\returns A 256-bit vector of [4 x double] containing the minimum values\n" "/// between both operands.\n" "static __inline __m256d __DEFAULT_FN_ATTRS\n" "_mm256_min_pd(__m256d __a, __m256d __b)\n" "{\n" " return (__m256d)__builtin_ia32_minpd256((__v4df)__a, (__v4df)__b);\n" "}\n" "\n" "/// Compares two 256-bit vectors of [8 x float] and returns the lesser\n" "/// of each pair of values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMINPS instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x float] containing one of the operands.\n" "/// \\param __b\n" "/// A 256-bit vector of [8 x float] containing one of the operands.\n" "/// \\returns A 256-bit vector of [8 x float] containing the minimum values\n" "/// between both operands.\n" "static __inline __m256 
__DEFAULT_FN_ATTRS\n" "_mm256_min_ps(__m256 __a, __m256 __b)\n" "{\n" " return (__m256)__builtin_ia32_minps256((__v8sf)__a, (__v8sf)__b);\n" "}\n" "\n" "/// Multiplies two 256-bit vectors of [4 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMULPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [4 x double] containing one of the operands.\n" "/// \\param __b\n" "/// A 256-bit vector of [4 x double] containing one of the operands.\n" "/// \\returns A 256-bit vector of [4 x double] containing the products of both\n" "/// operands.\n" "static __inline __m256d __DEFAULT_FN_ATTRS\n" "_mm256_mul_pd(__m256d __a, __m256d __b)\n" "{\n" " return (__m256d)((__v4df)__a * (__v4df)__b);\n" "}\n" "\n" "/// Multiplies two 256-bit vectors of [8 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMULPS instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x float] containing one of the operands.\n" "/// \\param __b\n" "/// A 256-bit vector of [8 x float] containing one of the operands.\n" "/// \\returns A 256-bit vector of [8 x float] containing the products of both\n" "/// operands.\n" "static __inline __m256 __DEFAULT_FN_ATTRS\n" "_mm256_mul_ps(__m256 __a, __m256 __b)\n" "{\n" " return (__m256)((__v8sf)__a * (__v8sf)__b);\n" "}\n" "\n" "/// Calculates the square roots of the values in a 256-bit vector of\n" "/// [4 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VSQRTPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [4 x double].\n" "/// \\returns A 256-bit vector of [4 x double] containing the square roots of the\n" "/// values in the operand.\n" "static __inline __m256d __DEFAULT_FN_ATTRS\n" "_mm256_sqrt_pd(__m256d __a)\n" "{\n" " return (__m256d)__builtin_ia32_sqrtpd256((__v4df)__a);\n" "}\n" "\n" "/// Calculates the square roots of the values in a 256-bit vector of\n" "/// [8 x float].\n" 
"///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VSQRTPS instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x float].\n" "/// \\returns A 256-bit vector of [8 x float] containing the square roots of the\n" "/// values in the operand.\n" "static __inline __m256 __DEFAULT_FN_ATTRS\n" "_mm256_sqrt_ps(__m256 __a)\n" "{\n" " return (__m256)__builtin_ia32_sqrtps256((__v8sf)__a);\n" "}\n" "\n" "/// Calculates the reciprocal square roots of the values in a 256-bit\n" "/// vector of [8 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VRSQRTPS instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x float].\n" "/// \\returns A 256-bit vector of [8 x float] containing the reciprocal square\n" "/// roots of the values in the operand.\n" "static __inline __m256 __DEFAULT_FN_ATTRS\n" "_mm256_rsqrt_ps(__m256 __a)\n" "{\n" " return (__m256)__builtin_ia32_rsqrtps256((__v8sf)__a);\n" "}\n" "\n" "/// Calculates the reciprocals of the values in a 256-bit vector of\n" "/// [8 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VRCPPS instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x float].\n" "/// \\returns A 256-bit vector of [8 x float] containing the reciprocals of the\n" "/// values in the operand.\n" "static __inline __m256 __DEFAULT_FN_ATTRS\n" "_mm256_rcp_ps(__m256 __a)\n" "{\n" " return (__m256)__builtin_ia32_rcpps256((__v8sf)__a);\n" "}\n" "\n" "/// Rounds the values in a 256-bit vector of [4 x double] as specified\n" "/// by the byte operand. 
The source values are rounded to integer values and\n" "/// returned as 64-bit double-precision floating-point values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256d _mm256_round_pd(__m256d V, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VROUNDPD instruction.\n" "///\n" "/// \\param V\n" "/// A 256-bit vector of [4 x double].\n" "/// \\param M\n" "/// An integer value that specifies the rounding operation. \\n\n" "/// Bits [7:4] are reserved. \\n\n" "/// Bit [3] is a precision exception value: \\n\n" "/// 0: A normal PE exception is used. \\n\n" "/// 1: The PE field is not updated. \\n\n" "/// Bit [2] is the rounding control source: \\n\n" "/// 0: Use bits [1:0] of \\a M. \\n\n" "/// 1: Use the current MXCSR setting. \\n\n" "/// Bits [1:0] contain the rounding control definition: \\n\n" "/// 00: Nearest. \\n\n" "/// 01: Downward (toward negative infinity). \\n\n" "/// 10: Upward (toward positive infinity). \\n\n" "/// 11: Truncated.\n" "/// \\returns A 256-bit vector of [4 x double] containing the rounded values.\n" "#define _mm256_round_pd(V, M) \\\n" " ((__m256d)__builtin_ia32_roundpd256((__v4df)(__m256d)(V), (M)))\n" "\n" "/// Rounds the values stored in a 256-bit vector of [8 x float] as\n" "/// specified by the byte operand. The source values are rounded to integer\n" "/// values and returned as floating-point values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256 _mm256_round_ps(__m256 V, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VROUNDPS instruction.\n" "///\n" "/// \\param V\n" "/// A 256-bit vector of [8 x float].\n" "/// \\param M\n" "/// An integer value that specifies the rounding operation. \\n\n" "/// Bits [7:4] are reserved. \\n\n" "/// Bit [3] is a precision exception value: \\n\n" "/// 0: A normal PE exception is used. \\n\n" "/// 1: The PE field is not updated. 
\\n\n" "/// Bit [2] is the rounding control source: \\n\n" "/// 0: Use bits [1:0] of \\a M. \\n\n" "/// 1: Use the current MXCSR setting. \\n\n" "/// Bits [1:0] contain the rounding control definition: \\n\n" "/// 00: Nearest. \\n\n" "/// 01: Downward (toward negative infinity). \\n\n" "/// 10: Upward (toward positive infinity). \\n\n" "/// 11: Truncated.\n" "/// \\returns A 256-bit vector of [8 x float] containing the rounded values.\n" "#define _mm256_round_ps(V, M) \\\n" " ((__m256)__builtin_ia32_roundps256((__v8sf)(__m256)(V), (M)))\n" "\n" "/// Rounds up the values stored in a 256-bit vector of [4 x double]. The\n" "/// source values are rounded up to integer values and returned as 64-bit\n" "/// double-precision floating-point values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256d _mm256_ceil_pd(__m256d V);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VROUNDPD instruction.\n" "///\n" "/// \\param V\n" "/// A 256-bit vector of [4 x double].\n" "/// \\returns A 256-bit vector of [4 x double] containing the rounded up values.\n" "#define _mm256_ceil_pd(V) _mm256_round_pd((V), _MM_FROUND_CEIL)\n" "\n" "/// Rounds down the values stored in a 256-bit vector of [4 x double].\n" "/// The source values are rounded down to integer values and returned as\n" "/// 64-bit double-precision floating-point values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256d _mm256_floor_pd(__m256d V);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VROUNDPD instruction.\n" "///\n" "/// \\param V\n" "/// A 256-bit vector of [4 x double].\n" "/// \\returns A 256-bit vector of [4 x double] containing the rounded down\n" "/// values.\n" "#define _mm256_floor_pd(V) _mm256_round_pd((V), _MM_FROUND_FLOOR)\n" "\n" "/// Rounds up the values stored in a 256-bit vector of [8 x float]. 
The\n" "/// source values are rounded up to integer values and returned as\n" "/// floating-point values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256 _mm256_ceil_ps(__m256 V);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VROUNDPS instruction.\n" "///\n" "/// \\param V\n" "/// A 256-bit vector of [8 x float].\n" "/// \\returns A 256-bit vector of [8 x float] containing the rounded up values.\n" "#define _mm256_ceil_ps(V) _mm256_round_ps((V), _MM_FROUND_CEIL)\n" "\n" "/// Rounds down the values stored in a 256-bit vector of [8 x float]. The\n" "/// source values are rounded down to integer values and returned as\n" "/// floating-point values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256 _mm256_floor_ps(__m256 V);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VROUNDPS instruction.\n" "///\n" "/// \\param V\n" "/// A 256-bit vector of [8 x float].\n" "/// \\returns A 256-bit vector of [8 x float] containing the rounded down values.\n" "#define _mm256_floor_ps(V) _mm256_round_ps((V), _MM_FROUND_FLOOR)\n" "\n" "/* Logical */\n" "/// Performs a bitwise AND of two 256-bit vectors of [4 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VANDPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [4 x double] containing one of the source operands.\n" "/// \\param __b\n" "/// A 256-bit vector of [4 x double] containing one of the source operands.\n" "/// \\returns A 256-bit vector of [4 x double] containing the bitwise AND of the\n" "/// values between both operands.\n" "static __inline __m256d __DEFAULT_FN_ATTRS\n" "_mm256_and_pd(__m256d __a, __m256d __b)\n" "{\n" " return (__m256d)((__v4du)__a & (__v4du)__b);\n" "}\n" "\n" "/// Performs a bitwise AND of two 256-bit vectors of [8 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VANDPS instruction.\n" "///\n" "/// \\param __a\n" "/// A 
256-bit vector of [8 x float] containing one of the source operands.\n" "/// \\param __b\n" "/// A 256-bit vector of [8 x float] containing one of the source operands.\n" "/// \\returns A 256-bit vector of [8 x float] containing the bitwise AND of the\n" "/// values between both operands.\n" "static __inline __m256 __DEFAULT_FN_ATTRS\n" "_mm256_and_ps(__m256 __a, __m256 __b)\n" "{\n" " return (__m256)((__v8su)__a & (__v8su)__b);\n" "}\n" "\n" "/// Performs a bitwise AND of two 256-bit vectors of [4 x double], using\n" "/// the one's complement of the values contained in the first source operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VANDNPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [4 x double] containing the left source operand. The\n" "/// one's complement of this value is used in the bitwise AND.\n" "/// \\param __b\n" "/// A 256-bit vector of [4 x double] containing the right source operand.\n" "/// \\returns A 256-bit vector of [4 x double] containing the bitwise AND of the\n" "/// values of the second operand and the one's complement of the first\n" "/// operand.\n" "static __inline __m256d __DEFAULT_FN_ATTRS\n" "_mm256_andnot_pd(__m256d __a, __m256d __b)\n" "{\n" " return (__m256d)(~(__v4du)__a & (__v4du)__b);\n" "}\n" "\n" "/// Performs a bitwise AND of two 256-bit vectors of [8 x float], using\n" "/// the one's complement of the values contained in the first source operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VANDNPS instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x float] containing the left source operand. 
The\n" "/// one's complement of this value is used in the bitwise AND.\n" "/// \\param __b\n" "/// A 256-bit vector of [8 x float] containing the right source operand.\n" "/// \\returns A 256-bit vector of [8 x float] containing the bitwise AND of the\n" "/// values of the second operand and the one's complement of the first\n" "/// operand.\n" "static __inline __m256 __DEFAULT_FN_ATTRS\n" "_mm256_andnot_ps(__m256 __a, __m256 __b)\n" "{\n" " return (__m256)(~(__v8su)__a & (__v8su)__b);\n" "}\n" "\n" "/// Performs a bitwise OR of two 256-bit vectors of [4 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VORPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [4 x double] containing one of the source operands.\n" "/// \\param __b\n" "/// A 256-bit vector of [4 x double] containing one of the source operands.\n" "/// \\returns A 256-bit vector of [4 x double] containing the bitwise OR of the\n" "/// values between both operands.\n" "static __inline __m256d __DEFAULT_FN_ATTRS\n" "_mm256_or_pd(__m256d __a, __m256d __b)\n" "{\n" " return (__m256d)((__v4du)__a | (__v4du)__b);\n" "}\n" "\n" "/// Performs a bitwise OR of two 256-bit vectors of [8 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VORPS instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x float] containing one of the source operands.\n" "/// \\param __b\n" "/// A 256-bit vector of [8 x float] containing one of the source operands.\n" "/// \\returns A 256-bit vector of [8 x float] containing the bitwise OR of the\n" "/// values between both operands.\n" "static __inline __m256 __DEFAULT_FN_ATTRS\n" "_mm256_or_ps(__m256 __a, __m256 __b)\n" "{\n" " return (__m256)((__v8su)__a | (__v8su)__b);\n" "}\n" "\n" "/// Performs a bitwise XOR of two 256-bit vectors of [4 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VXORPD instruction.\n" "///\n" "/// 
\\param __a\n" "/// A 256-bit vector of [4 x double] containing one of the source operands.\n" "/// \\param __b\n" "/// A 256-bit vector of [4 x double] containing one of the source operands.\n" "/// \\returns A 256-bit vector of [4 x double] containing the bitwise XOR of the\n" "/// values between both operands.\n" "static __inline __m256d __DEFAULT_FN_ATTRS\n" "_mm256_xor_pd(__m256d __a, __m256d __b)\n" "{\n" " return (__m256d)((__v4du)__a ^ (__v4du)__b);\n" "}\n" "\n" "/// Performs a bitwise XOR of two 256-bit vectors of [8 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VXORPS instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x float] containing one of the source operands.\n" "/// \\param __b\n" "/// A 256-bit vector of [8 x float] containing one of the source operands.\n" "/// \\returns A 256-bit vector of [8 x float] containing the bitwise XOR of the\n" "/// values between both operands.\n" "static __inline __m256 __DEFAULT_FN_ATTRS\n" "_mm256_xor_ps(__m256 __a, __m256 __b)\n" "{\n" " return (__m256)((__v8su)__a ^ (__v8su)__b);\n" "}\n" "\n" "/* Horizontal arithmetic */\n" "/// Horizontally adds the adjacent pairs of values contained in two\n" "/// 256-bit vectors of [4 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VHADDPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [4 x double] containing one of the source operands.\n" "/// The horizontal sums of the values are returned in the even-indexed\n" "/// elements of a vector of [4 x double].\n" "/// \\param __b\n" "/// A 256-bit vector of [4 x double] containing one of the source operands.\n" "/// The horizontal sums of the values are returned in the odd-indexed\n" "/// elements of a vector of [4 x double].\n" "/// \\returns A 256-bit vector of [4 x double] containing the horizontal sums of\n" "/// both operands.\n" "static __inline __m256d __DEFAULT_FN_ATTRS\n" 
"_mm256_hadd_pd(__m256d __a, __m256d __b)\n" "{\n" " return (__m256d)__builtin_ia32_haddpd256((__v4df)__a, (__v4df)__b);\n" "}\n" "\n" "/// Horizontally adds the adjacent pairs of values contained in two\n" "/// 256-bit vectors of [8 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VHADDPS instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x float] containing one of the source operands.\n" "/// The horizontal sums of the values are returned in the elements with\n" "/// index 0, 1, 4, 5 of a vector of [8 x float].\n" "/// \\param __b\n" "/// A 256-bit vector of [8 x float] containing one of the source operands.\n" "/// The horizontal sums of the values are returned in the elements with\n" "/// index 2, 3, 6, 7 of a vector of [8 x float].\n" "/// \\returns A 256-bit vector of [8 x float] containing the horizontal sums of\n" "/// both operands.\n" "static __inline __m256 __DEFAULT_FN_ATTRS\n" "_mm256_hadd_ps(__m256 __a, __m256 __b)\n" "{\n" " return (__m256)__builtin_ia32_haddps256((__v8sf)__a, (__v8sf)__b);\n" "}\n" "\n" "/// Horizontally subtracts the adjacent pairs of values contained in two\n" "/// 256-bit vectors of [4 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VHSUBPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [4 x double] containing one of the source operands.\n" "/// The horizontal differences between the values are returned in the\n" "/// even-indexed elements of a vector of [4 x double].\n" "/// \\param __b\n" "/// A 256-bit vector of [4 x double] containing one of the source operands.\n" "/// The horizontal differences between the values are returned in the\n" "/// odd-indexed elements of a vector of [4 x double].\n" "/// \\returns A 256-bit vector of [4 x double] containing the horizontal\n" "/// differences of both operands.\n" "static __inline __m256d __DEFAULT_FN_ATTRS\n" "_mm256_hsub_pd(__m256d __a, __m256d __b)\n" 
"{\n" " return (__m256d)__builtin_ia32_hsubpd256((__v4df)__a, (__v4df)__b);\n" "}\n" "\n" "/// Horizontally subtracts the adjacent pairs of values contained in two\n" "/// 256-bit vectors of [8 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VHSUBPS instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x float] containing one of the source operands.\n" "/// The horizontal differences between the values are returned in the\n" "/// elements with index 0, 1, 4, 5 of a vector of [8 x float].\n" "/// \\param __b\n" "/// A 256-bit vector of [8 x float] containing one of the source operands.\n" "/// The horizontal differences between the values are returned in the\n" "/// elements with index 2, 3, 6, 7 of a vector of [8 x float].\n" "/// \\returns A 256-bit vector of [8 x float] containing the horizontal\n" "/// differences of both operands.\n" "static __inline __m256 __DEFAULT_FN_ATTRS\n" "_mm256_hsub_ps(__m256 __a, __m256 __b)\n" "{\n" " return (__m256)__builtin_ia32_hsubps256((__v8sf)__a, (__v8sf)__b);\n" "}\n" "\n" "/* Vector permutations */\n" "/// Copies the values in a 128-bit vector of [2 x double] as specified\n" "/// by the 128-bit integer vector operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPERMILPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double].\n" "/// \\param __c\n" "/// A 128-bit integer vector operand specifying how the values are to be\n" "/// copied. \\n\n" "/// Bit [1]: \\n\n" "/// 0: Bits [63:0] of the source are copied to bits [63:0] of the returned\n" "/// vector. \\n\n" "/// 1: Bits [127:64] of the source are copied to bits [63:0] of the\n" "/// returned vector. \\n\n" "/// Bit [65]: \\n\n" "/// 0: Bits [63:0] of the source are copied to bits [127:64] of the\n" "/// returned vector. 
\\n\n" "/// 1: Bits [127:64] of the source are copied to bits [127:64] of the\n" "/// returned vector.\n" "/// \\returns A 128-bit vector of [2 x double] containing the copied values.\n" "static __inline __m128d __DEFAULT_FN_ATTRS128\n" "_mm_permutevar_pd(__m128d __a, __m128i __c)\n" "{\n" " return (__m128d)__builtin_ia32_vpermilvarpd((__v2df)__a, (__v2di)__c);\n" "}\n" "\n" "/// Copies the values in a 256-bit vector of [4 x double] as specified\n" "/// by the 256-bit integer vector operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPERMILPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [4 x double].\n" "/// \\param __c\n" "/// A 256-bit integer vector operand specifying how the values are to be\n" "/// copied. \\n\n" "/// Bit [1]: \\n\n" "/// 0: Bits [63:0] of the source are copied to bits [63:0] of the returned\n" "/// vector. \\n\n" "/// 1: Bits [127:64] of the source are copied to bits [63:0] of the\n" "/// returned vector. \\n\n" "/// Bit [65]: \\n\n" "/// 0: Bits [63:0] of the source are copied to bits [127:64] of the\n" "/// returned vector. \\n\n" "/// 1: Bits [127:64] of the source are copied to bits [127:64] of the\n" "/// returned vector. \\n\n" "/// Bit [129]: \\n\n" "/// 0: Bits [191:128] of the source are copied to bits [191:128] of the\n" "/// returned vector. \\n\n" "/// 1: Bits [255:192] of the source are copied to bits [191:128] of the\n" "/// returned vector. \\n\n" "/// Bit [193]: \\n\n" "/// 0: Bits [191:128] of the source are copied to bits [255:192] of the\n" "/// returned vector. 
\\n\n" "/// 1: Bits [255:192] of the source are copied to bits [255:192] of the\n" "/// returned vector.\n" "/// \\returns A 256-bit vector of [4 x double] containing the copied values.\n" "static __inline __m256d __DEFAULT_FN_ATTRS\n" "_mm256_permutevar_pd(__m256d __a, __m256i __c)\n" "{\n" " return (__m256d)__builtin_ia32_vpermilvarpd256((__v4df)__a, (__v4di)__c);\n" "}\n" "\n" "/// Copies the values stored in a 128-bit vector of [4 x float] as\n" "/// specified by the 128-bit integer vector operand.\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPERMILPS instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float].\n" "/// \\param __c\n" "/// A 128-bit integer vector operand specifying how the values are to be\n" "/// copied. \\n\n" "/// Bits [1:0]: \\n\n" "/// 00: Bits [31:0] of the source are copied to bits [31:0] of the\n" "/// returned vector. \\n\n" "/// 01: Bits [63:32] of the source are copied to bits [31:0] of the\n" "/// returned vector. \\n\n" "/// 10: Bits [95:64] of the source are copied to bits [31:0] of the\n" "/// returned vector. \\n\n" "/// 11: Bits [127:96] of the source are copied to bits [31:0] of the\n" "/// returned vector. \\n\n" "/// Bits [33:32]: \\n\n" "/// 00: Bits [31:0] of the source are copied to bits [63:32] of the\n" "/// returned vector. \\n\n" "/// 01: Bits [63:32] of the source are copied to bits [63:32] of the\n" "/// returned vector. \\n\n" "/// 10: Bits [95:64] of the source are copied to bits [63:32] of the\n" "/// returned vector. \\n\n" "/// 11: Bits [127:96] of the source are copied to bits [63:32] of the\n" "/// returned vector. \\n\n" "/// Bits [65:64]: \\n\n" "/// 00: Bits [31:0] of the source are copied to bits [95:64] of the\n" "/// returned vector. \\n\n" "/// 01: Bits [63:32] of the source are copied to bits [95:64] of the\n" "/// returned vector. \\n\n" "/// 10: Bits [95:64] of the source are copied to bits [95:64] of the\n" "/// returned vector. 
\\n\n" "/// 11: Bits [127:96] of the source are copied to bits [95:64] of the\n" "/// returned vector. \\n\n" "/// Bits [97:96]: \\n\n" "/// 00: Bits [31:0] of the source are copied to bits [127:96] of the\n" "/// returned vector. \\n\n" "/// 01: Bits [63:32] of the source are copied to bits [127:96] of the\n" "/// returned vector. \\n\n" "/// 10: Bits [95:64] of the source are copied to bits [127:96] of the\n" "/// returned vector. \\n\n" "/// 11: Bits [127:96] of the source are copied to bits [127:96] of the\n" "/// returned vector.\n" "/// \\returns A 128-bit vector of [4 x float] containing the copied values.\n" "static __inline __m128 __DEFAULT_FN_ATTRS128\n" "_mm_permutevar_ps(__m128 __a, __m128i __c)\n" "{\n" " return (__m128)__builtin_ia32_vpermilvarps((__v4sf)__a, (__v4si)__c);\n" "}\n" "\n" "/// Copies the values stored in a 256-bit vector of [8 x float] as\n" "/// specified by the 256-bit integer vector operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPERMILPS instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x float].\n" "/// \\param __c\n" "/// A 256-bit integer vector operand specifying how the values are to be\n" "/// copied. \\n\n" "/// Bits [1:0]: \\n\n" "/// 00: Bits [31:0] of the source are copied to bits [31:0] of the\n" "/// returned vector. \\n\n" "/// 01: Bits [63:32] of the source are copied to bits [31:0] of the\n" "/// returned vector. \\n\n" "/// 10: Bits [95:64] of the source are copied to bits [31:0] of the\n" "/// returned vector. \\n\n" "/// 11: Bits [127:96] of the source are copied to bits [31:0] of the\n" "/// returned vector. \\n\n" "/// Bits [33:32]: \\n\n" "/// 00: Bits [31:0] of the source are copied to bits [63:32] of the\n" "/// returned vector. \\n\n" "/// 01: Bits [63:32] of the source are copied to bits [63:32] of the\n" "/// returned vector. \\n\n" "/// 10: Bits [95:64] of the source are copied to bits [63:32] of the\n" "/// returned vector. 
\\n\n" "/// 11: Bits [127:96] of the source are copied to bits [63:32] of the\n" "/// returned vector. \\n\n" "/// Bits [65:64]: \\n\n" "/// 00: Bits [31:0] of the source are copied to bits [95:64] of the\n" "/// returned vector. \\n\n" "/// 01: Bits [63:32] of the source are copied to bits [95:64] of the\n" "/// returned vector. \\n\n" "/// 10: Bits [95:64] of the source are copied to bits [95:64] of the\n" "/// returned vector. \\n\n" "/// 11: Bits [127:96] of the source are copied to bits [95:64] of the\n" "/// returned vector. \\n\n" "/// Bits [97:96]: \\n\n" "/// 00: Bits [31:0] of the source are copied to bits [127:96] of the\n" "/// returned vector. \\n\n" "/// 01: Bits [63:32] of the source are copied to bits [127:96] of the\n" "/// returned vector. \\n\n" "/// 10: Bits [95:64] of the source are copied to bits [127:96] of the\n" "/// returned vector. \\n\n" "/// 11: Bits [127:96] of the source are copied to bits [127:96] of the\n" "/// returned vector. \\n\n" "/// Bits [129:128]: \\n\n" "/// 00: Bits [159:128] of the source are copied to bits [159:128] of the\n" "/// returned vector. \\n\n" "/// 01: Bits [191:160] of the source are copied to bits [159:128] of the\n" "/// returned vector. \\n\n" "/// 10: Bits [223:192] of the source are copied to bits [159:128] of the\n" "/// returned vector. \\n\n" "/// 11: Bits [255:224] of the source are copied to bits [159:128] of the\n" "/// returned vector. \\n\n" "/// Bits [161:160]: \\n\n" "/// 00: Bits [159:128] of the source are copied to bits [191:160] of the\n" "/// returned vector. \\n\n" "/// 01: Bits [191:160] of the source are copied to bits [191:160] of the\n" "/// returned vector. \\n\n" "/// 10: Bits [223:192] of the source are copied to bits [191:160] of the\n" "/// returned vector. \\n\n" "/// 11: Bits [255:224] of the source are copied to bits [191:160] of the\n" "/// returned vector. 
\\n\n" "/// Bits [193:192]: \\n\n" "/// 00: Bits [159:128] of the source are copied to bits [223:192] of the\n" "/// returned vector. \\n\n" "/// 01: Bits [191:160] of the source are copied to bits [223:192] of the\n" "/// returned vector. \\n\n" "/// 10: Bits [223:192] of the source are copied to bits [223:192] of the\n" "/// returned vector. \\n\n" "/// 11: Bits [255:224] of the source are copied to bits [223:192] of the\n" "/// returned vector. \\n\n" "/// Bits [225:224]: \\n\n" "/// 00: Bits [159:128] of the source are copied to bits [255:224] of the\n" "/// returned vector. \\n\n" "/// 01: Bits [191:160] of the source are copied to bits [255:224] of the\n" "/// returned vector. \\n\n" "/// 10: Bits [223:192] of the source are copied to bits [255:224] of the\n" "/// returned vector. \\n\n" "/// 11: Bits [255:224] of the source are copied to bits [255:224] of the\n" "/// returned vector.\n" "/// \\returns A 256-bit vector of [8 x float] containing the copied values.\n" "static __inline __m256 __DEFAULT_FN_ATTRS\n" "_mm256_permutevar_ps(__m256 __a, __m256i __c)\n" "{\n" " return (__m256)__builtin_ia32_vpermilvarps256((__v8sf)__a, (__v8si)__c);\n" "}\n" "\n" "/// Copies the values in a 128-bit vector of [2 x double] as specified\n" "/// by the immediate integer operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128d _mm_permute_pd(__m128d A, const int C);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VPERMILPD instruction.\n" "///\n" "/// \\param A\n" "/// A 128-bit vector of [2 x double].\n" "/// \\param C\n" "/// An immediate integer operand specifying how the values are to be\n" "/// copied. \\n\n" "/// Bit [0]: \\n\n" "/// 0: Bits [63:0] of the source are copied to bits [63:0] of the returned\n" "/// vector. \\n\n" "/// 1: Bits [127:64] of the source are copied to bits [63:0] of the\n" "/// returned vector. 
\\n\n" "/// Bit [1]: \\n\n" "/// 0: Bits [63:0] of the source are copied to bits [127:64] of the\n" "/// returned vector. \\n\n" "/// 1: Bits [127:64] of the source are copied to bits [127:64] of the\n" "/// returned vector.\n" "/// \\returns A 128-bit vector of [2 x double] containing the copied values.\n" "#define _mm_permute_pd(A, C) \\\n" " ((__m128d)__builtin_ia32_vpermilpd((__v2df)(__m128d)(A), (int)(C)))\n" "\n" "/// Copies the values in a 256-bit vector of [4 x double] as specified by\n" "/// the immediate integer operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256d _mm256_permute_pd(__m256d A, const int C);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VPERMILPD instruction.\n" "///\n" "/// \\param A\n" "/// A 256-bit vector of [4 x double].\n" "/// \\param C\n" "/// An immediate integer operand specifying how the values are to be\n" "/// copied. \\n\n" "/// Bit [0]: \\n\n" "/// 0: Bits [63:0] of the source are copied to bits [63:0] of the returned\n" "/// vector. \\n\n" "/// 1: Bits [127:64] of the source are copied to bits [63:0] of the\n" "/// returned vector. \\n\n" "/// Bit [1]: \\n\n" "/// 0: Bits [63:0] of the source are copied to bits [127:64] of the\n" "/// returned vector. \\n\n" "/// 1: Bits [127:64] of the source are copied to bits [127:64] of the\n" "/// returned vector. \\n\n" "/// Bit [2]: \\n\n" "/// 0: Bits [191:128] of the source are copied to bits [191:128] of the\n" "/// returned vector. \\n\n" "/// 1: Bits [255:192] of the source are copied to bits [191:128] of the\n" "/// returned vector. \\n\n" "/// Bit [3]: \\n\n" "/// 0: Bits [191:128] of the source are copied to bits [255:192] of the\n" "/// returned vector. 
\\n\n" "/// 1: Bits [255:192] of the source are copied to bits [255:192] of the\n" "/// returned vector.\n" "/// \\returns A 256-bit vector of [4 x double] containing the copied values.\n" "#define _mm256_permute_pd(A, C) \\\n" " ((__m256d)__builtin_ia32_vpermilpd256((__v4df)(__m256d)(A), (int)(C)))\n" "\n" "/// Copies the values in a 128-bit vector of [4 x float] as specified by\n" "/// the immediate integer operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128 _mm_permute_ps(__m128 A, const int C);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VPERMILPS instruction.\n" "///\n" "/// \\param A\n" "/// A 128-bit vector of [4 x float].\n" "/// \\param C\n" "/// An immediate integer operand specifying how the values are to be\n" "/// copied. \\n\n" "/// Bits [1:0]: \\n\n" "/// 00: Bits [31:0] of the source are copied to bits [31:0] of the\n" "/// returned vector. \\n\n" "/// 01: Bits [63:32] of the source are copied to bits [31:0] of the\n" "/// returned vector. \\n\n" "/// 10: Bits [95:64] of the source are copied to bits [31:0] of the\n" "/// returned vector. \\n\n" "/// 11: Bits [127:96] of the source are copied to bits [31:0] of the\n" "/// returned vector. \\n\n" "/// Bits [3:2]: \\n\n" "/// 00: Bits [31:0] of the source are copied to bits [63:32] of the\n" "/// returned vector. \\n\n" "/// 01: Bits [63:32] of the source are copied to bits [63:32] of the\n" "/// returned vector. \\n\n" "/// 10: Bits [95:64] of the source are copied to bits [63:32] of the\n" "/// returned vector. \\n\n" "/// 11: Bits [127:96] of the source are copied to bits [63:32] of the\n" "/// returned vector. \\n\n" "/// Bits [5:4]: \\n\n" "/// 00: Bits [31:0] of the source are copied to bits [95:64] of the\n" "/// returned vector. \\n\n" "/// 01: Bits [63:32] of the source are copied to bits [95:64] of the\n" "/// returned vector. \\n\n" "/// 10: Bits [95:64] of the source are copied to bits [95:64] of the\n" "/// returned vector. 
\\n\n" "/// 11: Bits [127:96] of the source are copied to bits [95:64] of the\n" "/// returned vector. \\n\n" "/// Bits [7:6]: \\n\n" "/// 00: Bits [31:0] of the source are copied to bits [127:96] of the\n" "/// returned vector. \\n\n" "/// 01: Bits [63:32] of the source are copied to bits [127:96] of the\n" "/// returned vector. \\n\n" "/// 10: Bits [95:64] of the source are copied to bits [127:96] of the\n" "/// returned vector. \\n\n" "/// 11: Bits [127:96] of the source are copied to bits [127:96] of the\n" "/// returned vector.\n" "/// \\returns A 128-bit vector of [4 x float] containing the copied values.\n" "#define _mm_permute_ps(A, C) \\\n" " ((__m128)__builtin_ia32_vpermilps((__v4sf)(__m128)(A), (int)(C)))\n" "\n" "/// Copies the values in a 256-bit vector of [8 x float] as specified by\n" "/// the immediate integer operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256 _mm256_permute_ps(__m256 A, const int C);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VPERMILPS instruction.\n" "///\n" "/// \\param A\n" "/// A 256-bit vector of [8 x float].\n" "/// \\param C\n" "/// An immediate integer operand specifying how the values are to be\n" "/// copied. \\n\n" "/// Bits [1:0]: \\n\n" "/// 00: Bits [31:0] of the source are copied to bits [31:0] of the\n" "/// returned vector. \\n\n" "/// 01: Bits [63:32] of the source are copied to bits [31:0] of the\n" "/// returned vector. \\n\n" "/// 10: Bits [95:64] of the source are copied to bits [31:0] of the\n" "/// returned vector. \\n\n" "/// 11: Bits [127:96] of the source are copied to bits [31:0] of the\n" "/// returned vector. \\n\n" "/// Bits [3:2]: \\n\n" "/// 00: Bits [31:0] of the source are copied to bits [63:32] of the\n" "/// returned vector. \\n\n" "/// 01: Bits [63:32] of the source are copied to bits [63:32] of the\n" "/// returned vector. \\n\n" "/// 10: Bits [95:64] of the source are copied to bits [63:32] of the\n" "/// returned vector. 
\\n\n" "/// 11: Bits [127:96] of the source are copied to bits [63:32] of the\n" "/// returned vector. \\n\n" "/// Bits [5:4]: \\n\n" "/// 00: Bits [31:0] of the source are copied to bits [95:64] of the\n" "/// returned vector. \\n\n" "/// 01: Bits [63:32] of the source are copied to bits [95:64] of the\n" "/// returned vector. \\n\n" "/// 10: Bits [95:64] of the source are copied to bits [95:64] of the\n" "/// returned vector. \\n\n" "/// 11: Bits [127:96] of the source are copied to bits [95:64] of the\n" "/// returned vector. \\n\n" "/// Bits [7:6]: \\n\n" "/// 00: Bits [31:0] of the source are copied to bits [127:96] of the\n" "/// returned vector. \\n\n" "/// 01: Bits [63:32] of the source are copied to bits [127:96] of the\n" "/// returned vector. \\n\n" "/// 10: Bits [95:64] of the source are copied to bits [127:96] of the\n" "/// returned vector. \\n\n" "/// 11: Bits [127:96] of the source are copied to bits [127:96] of the\n" "/// returned vector. \\n\n" "/// Bits [1:0]: \\n\n" "/// 00: Bits [159:128] of the source are copied to bits [159:128] of the\n" "/// returned vector. \\n\n" "/// 01: Bits [191:160] of the source are copied to bits [159:128] of the\n" "/// returned vector. \\n\n" "/// 10: Bits [223:192] of the source are copied to bits [159:128] of the\n" "/// returned vector. \\n\n" "/// 11: Bits [255:224] of the source are copied to bits [159:128] of the\n" "/// returned vector. \\n\n" "/// Bits [3:2]: \\n\n" "/// 00: Bits [159:128] of the source are copied to bits [191:160] of the\n" "/// returned vector. \\n\n" "/// 01: Bits [191:160] of the source are copied to bits [191:160] of the\n" "/// returned vector. \\n\n" "/// 10: Bits [223:192] of the source are copied to bits [191:160] of the\n" "/// returned vector. \\n\n" "/// 11: Bits [255:224] of the source are copied to bits [191:160] of the\n" "/// returned vector. \\n\n" "/// Bits [5:4]: \\n\n" "/// 00: Bits [159:128] of the source are copied to bits [223:192] of the\n" "/// returned vector. 
\\n\n" "/// 01: Bits [191:160] of the source are copied to bits [223:192] of the\n" "/// returned vector. \\n\n" "/// 10: Bits [223:192] of the source are copied to bits [223:192] of the\n" "/// returned vector. \\n\n" "/// 11: Bits [255:224] of the source are copied to bits [223:192] of the\n" "/// returned vector. \\n\n" "/// Bits [7:6]: \\n\n" "/// 00: Bits [159:128] of the source are copied to bits [255:224] of the\n" "/// returned vector. \\n\n" "/// 01: Bits [191:160] of the source are copied to bits [255:224] of the\n" "/// returned vector. \\n\n" "/// 10: Bits [223:192] of the source are copied to bits [255:224] of the\n" "/// returned vector. \\n\n" "/// 11: Bits [255:224] of the source are copied to bits [255:224] of the\n" "/// returned vector.\n" "/// \\returns A 256-bit vector of [8 x float] containing the copied values.\n" "#define _mm256_permute_ps(A, C) \\\n" " ((__m256)__builtin_ia32_vpermilps256((__v8sf)(__m256)(A), (int)(C)))\n" "\n" "/// Permutes 128-bit data values stored in two 256-bit vectors of\n" "/// [4 x double], as specified by the immediate integer operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256d _mm256_permute2f128_pd(__m256d V1, __m256d V2, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VPERM2F128 instruction.\n" "///\n" "/// \\param V1\n" "/// A 256-bit vector of [4 x double].\n" "/// \\param V2\n" "/// A 256-bit vector of [4 x double.\n" "/// \\param M\n" "/// An immediate integer operand specifying how the values are to be\n" "/// permuted. \\n\n" "/// Bits [1:0]: \\n\n" "/// 00: Bits [127:0] of operand \\a V1 are copied to bits [127:0] of the\n" "/// destination. \\n\n" "/// 01: Bits [255:128] of operand \\a V1 are copied to bits [127:0] of the\n" "/// destination. \\n\n" "/// 10: Bits [127:0] of operand \\a V2 are copied to bits [127:0] of the\n" "/// destination. \\n\n" "/// 11: Bits [255:128] of operand \\a V2 are copied to bits [127:0] of the\n" "/// destination. 
\\n\n" "/// Bits [5:4]: \\n\n" "/// 00: Bits [127:0] of operand \\a V1 are copied to bits [255:128] of the\n" "/// destination. \\n\n" "/// 01: Bits [255:128] of operand \\a V1 are copied to bits [255:128] of the\n" "/// destination. \\n\n" "/// 10: Bits [127:0] of operand \\a V2 are copied to bits [255:128] of the\n" "/// destination. \\n\n" "/// 11: Bits [255:128] of operand \\a V2 are copied to bits [255:128] of the\n" "/// destination.\n" "/// \\returns A 256-bit vector of [4 x double] containing the copied values.\n" "#define _mm256_permute2f128_pd(V1, V2, M) \\\n" " ((__m256d)__builtin_ia32_vperm2f128_pd256((__v4df)(__m256d)(V1), \\\n" " (__v4df)(__m256d)(V2), (int)(M)))\n" "\n" "/// Permutes 128-bit data values stored in two 256-bit vectors of\n" "/// [8 x float], as specified by the immediate integer operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256 _mm256_permute2f128_ps(__m256 V1, __m256 V2, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VPERM2F128 instruction.\n" "///\n" "/// \\param V1\n" "/// A 256-bit vector of [8 x float].\n" "/// \\param V2\n" "/// A 256-bit vector of [8 x float].\n" "/// \\param M\n" "/// An immediate integer operand specifying how the values are to be\n" "/// permuted. \\n\n" "/// Bits [1:0]: \\n\n" "/// 00: Bits [127:0] of operand \\a V1 are copied to bits [127:0] of the\n" "/// destination. \\n\n" "/// 01: Bits [255:128] of operand \\a V1 are copied to bits [127:0] of the\n" "/// destination. \\n\n" "/// 10: Bits [127:0] of operand \\a V2 are copied to bits [127:0] of the\n" "/// destination. \\n\n" "/// 11: Bits [255:128] of operand \\a V2 are copied to bits [127:0] of the\n" "/// destination. \\n\n" "/// Bits [5:4]: \\n\n" "/// 00: Bits [127:0] of operand \\a V1 are copied to bits [255:128] of the\n" "/// destination. \\n\n" "/// 01: Bits [255:128] of operand \\a V1 are copied to bits [255:128] of the\n" "/// destination. 
\\n\n" "/// 10: Bits [127:0] of operand \\a V2 are copied to bits [255:128] of the\n" "/// destination. \\n\n" "/// 11: Bits [255:128] of operand \\a V2 are copied to bits [255:128] of the\n" "/// destination.\n" "/// \\returns A 256-bit vector of [8 x float] containing the copied values.\n" "#define _mm256_permute2f128_ps(V1, V2, M) \\\n" " ((__m256)__builtin_ia32_vperm2f128_ps256((__v8sf)(__m256)(V1), \\\n" " (__v8sf)(__m256)(V2), (int)(M)))\n" "\n" "/// Permutes 128-bit data values stored in two 256-bit integer vectors,\n" "/// as specified by the immediate integer operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256i _mm256_permute2f128_si256(__m256i V1, __m256i V2, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VPERM2F128 instruction.\n" "///\n" "/// \\param V1\n" "/// A 256-bit integer vector.\n" "/// \\param V2\n" "/// A 256-bit integer vector.\n" "/// \\param M\n" "/// An immediate integer operand specifying how the values are to be copied.\n" "/// Bits [1:0]: \\n\n" "/// 00: Bits [127:0] of operand \\a V1 are copied to bits [127:0] of the\n" "/// destination. \\n\n" "/// 01: Bits [255:128] of operand \\a V1 are copied to bits [127:0] of the\n" "/// destination. \\n\n" "/// 10: Bits [127:0] of operand \\a V2 are copied to bits [127:0] of the\n" "/// destination. \\n\n" "/// 11: Bits [255:128] of operand \\a V2 are copied to bits [127:0] of the\n" "/// destination. \\n\n" "/// Bits [5:4]: \\n\n" "/// 00: Bits [127:0] of operand \\a V1 are copied to bits [255:128] of the\n" "/// destination. \\n\n" "/// 01: Bits [255:128] of operand \\a V1 are copied to bits [255:128] of the\n" "/// destination. \\n\n" "/// 10: Bits [127:0] of operand \\a V2 are copied to bits [255:128] of the\n" "/// destination. 
\\n\n" "/// 11: Bits [255:128] of operand \\a V2 are copied to bits [255:128] of the\n" "/// destination.\n" "/// \\returns A 256-bit integer vector containing the copied values.\n" "#define _mm256_permute2f128_si256(V1, V2, M) \\\n" " ((__m256i)__builtin_ia32_vperm2f128_si256((__v8si)(__m256i)(V1), \\\n" " (__v8si)(__m256i)(V2), (int)(M)))\n" "\n" "/* Vector Blend */\n" "/// Merges 64-bit double-precision data values stored in either of the\n" "/// two 256-bit vectors of [4 x double], as specified by the immediate\n" "/// integer operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256d _mm256_blend_pd(__m256d V1, __m256d V2, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VBLENDPD instruction.\n" "///\n" "/// \\param V1\n" "/// A 256-bit vector of [4 x double].\n" "/// \\param V2\n" "/// A 256-bit vector of [4 x double].\n" "/// \\param M\n" "/// An immediate integer operand, with mask bits [3:0] specifying how the\n" "/// values are to be copied. The position of the mask bit corresponds to the\n" "/// index of a copied value. When a mask bit is 0, the corresponding 64-bit\n" "/// element in operand \\a V1 is copied to the same position in the\n" "/// destination. 
When a mask bit is 1, the corresponding 64-bit element in\n" "/// operand \\a V2 is copied to the same position in the destination.\n" "/// \\returns A 256-bit vector of [4 x double] containing the copied values.\n" "#define _mm256_blend_pd(V1, V2, M) \\\n" " ((__m256d)__builtin_ia32_blendpd256((__v4df)(__m256d)(V1), \\\n" " (__v4df)(__m256d)(V2), (int)(M)))\n" "\n" "/// Merges 32-bit single-precision data values stored in either of the\n" "/// two 256-bit vectors of [8 x float], as specified by the immediate\n" "/// integer operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256 _mm256_blend_ps(__m256 V1, __m256 V2, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VBLENDPS instruction.\n" "///\n" "/// \\param V1\n" "/// A 256-bit vector of [8 x float].\n" "/// \\param V2\n" "/// A 256-bit vector of [8 x float].\n" "/// \\param M\n" "/// An immediate integer operand, with mask bits [7:0] specifying how the\n" "/// values are to be copied. The position of the mask bit corresponds to the\n" "/// index of a copied value. When a mask bit is 0, the corresponding 32-bit\n" "/// element in operand \\a V1 is copied to the same position in the\n" "/// destination. 
When a mask bit is 1, the corresponding 32-bit element in\n" "/// operand \\a V2 is copied to the same position in the destination.\n" "/// \\returns A 256-bit vector of [8 x float] containing the copied values.\n" "#define _mm256_blend_ps(V1, V2, M) \\\n" " ((__m256)__builtin_ia32_blendps256((__v8sf)(__m256)(V1), \\\n" " (__v8sf)(__m256)(V2), (int)(M)))\n" "\n" "/// Merges 64-bit double-precision data values stored in either of the\n" "/// two 256-bit vectors of [4 x double], as specified by the 256-bit vector\n" "/// operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VBLENDVPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [4 x double].\n" "/// \\param __b\n" "/// A 256-bit vector of [4 x double].\n" "/// \\param __c\n" "/// A 256-bit vector operand, with mask bits 255, 191, 127, and 63 specifying\n" "/// how the values are to be copied. The position of the mask bit corresponds\n" "/// to the most significant bit of a copied value. When a mask bit is 0, the\n" "/// corresponding 64-bit element in operand \\a __a is copied to the same\n" "/// position in the destination. 
When a mask bit is 1, the corresponding\n" "/// 64-bit element in operand \\a __b is copied to the same position in the\n" "/// destination.\n" "/// \\returns A 256-bit vector of [4 x double] containing the copied values.\n" "static __inline __m256d __DEFAULT_FN_ATTRS\n" "_mm256_blendv_pd(__m256d __a, __m256d __b, __m256d __c)\n" "{\n" " return (__m256d)__builtin_ia32_blendvpd256(\n" " (__v4df)__a, (__v4df)__b, (__v4df)__c);\n" "}\n" "\n" "/// Merges 32-bit single-precision data values stored in either of the\n" "/// two 256-bit vectors of [8 x float], as specified by the 256-bit vector\n" "/// operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VBLENDVPS instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x float].\n" "/// \\param __b\n" "/// A 256-bit vector of [8 x float].\n" "/// \\param __c\n" "/// A 256-bit vector operand, with mask bits 255, 223, 191, 159, 127, 95, 63,\n" "/// and 31 specifying how the values are to be copied. The position of the\n" "/// mask bit corresponds to the most significant bit of a copied value. When\n" "/// a mask bit is 0, the corresponding 32-bit element in operand \\a __a is\n" "/// copied to the same position in the destination. 
When a mask bit is 1, the\n" "/// corresponding 32-bit element in operand \\a __b is copied to the same\n" "/// position in the destination.\n" "/// \\returns A 256-bit vector of [8 x float] containing the copied values.\n" "static __inline __m256 __DEFAULT_FN_ATTRS\n" "_mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)\n" "{\n" " return (__m256)__builtin_ia32_blendvps256(\n" " (__v8sf)__a, (__v8sf)__b, (__v8sf)__c);\n" "}\n" "\n" "/* Vector Dot Product */\n" "/// Computes two dot products in parallel, using the lower and upper\n" "/// halves of two [8 x float] vectors as input to the two computations, and\n" "/// returning the two dot products in the lower and upper halves of the\n" "/// [8 x float] result.\n" "///\n" "/// The immediate integer operand controls which input elements will\n" "/// contribute to the dot product, and where the final results are returned.\n" "/// In general, for each dot product, the four corresponding elements of the\n" "/// input vectors are multiplied; the first two and second two products are\n" "/// summed, then the two sums are added to form the final result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256 _mm256_dp_ps(__m256 V1, __m256 V2, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VDPPS instruction.\n" "///\n" "/// \\param V1\n" "/// A vector of [8 x float] values, treated as two [4 x float] vectors.\n" "/// \\param V2\n" "/// A vector of [8 x float] values, treated as two [4 x float] vectors.\n" "/// \\param M\n" "/// An immediate integer argument. Bits [7:4] determine which elements of\n" "/// the input vectors are used, with bit [4] corresponding to the lowest\n" "/// element and bit [7] corresponding to the highest element of each [4 x\n" "/// float] subvector. If a bit is set, the corresponding elements from the\n" "/// two input vectors are used as an input for dot product; otherwise that\n" "/// input is treated as zero. 
Bits [3:0] determine which elements of the\n" "/// result will receive a copy of the final dot product, with bit [0]\n" "/// corresponding to the lowest element and bit [3] corresponding to the\n" "/// highest element of each [4 x float] subvector. If a bit is set, the dot\n" "/// product is returned in the corresponding element; otherwise that element\n" "/// is set to zero. The bitmask is applied in the same way to each of the\n" "/// two parallel dot product computations.\n" "/// \\returns A 256-bit vector of [8 x float] containing the two dot products.\n" "#define _mm256_dp_ps(V1, V2, M) \\\n" " ((__m256)__builtin_ia32_dpps256((__v8sf)(__m256)(V1), \\\n" " (__v8sf)(__m256)(V2), (M)))\n" "\n" "/* Vector shuffle */\n" "/// Selects 8 float values from the 256-bit operands of [8 x float], as\n" "/// specified by the immediate value operand.\n" "///\n" "/// The four selected elements in each operand are copied to the destination\n" "/// according to the bits specified in the immediate operand. The selected\n" "/// elements from the first 256-bit operand are copied to bits [63:0] and\n" "/// bits [191:128] of the destination, and the selected elements from the\n" "/// second 256-bit operand are copied to bits [127:64] and bits [255:192] of\n" "/// the destination. For example, if bits [7:0] of the immediate operand\n" "/// contain a value of 0xFF, the 256-bit destination vector would contain the\n" "/// following values: b[7], b[7], a[7], a[7], b[3], b[3], a[3], a[3].\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256 _mm256_shuffle_ps(__m256 a, __m256 b, const int mask);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VSHUFPS instruction.\n" "///\n" "/// \\param a\n" "/// A 256-bit vector of [8 x float]. 
The four selected elements in this\n" "/// operand are copied to bits [63:0] and bits [191:128] in the destination,\n" "/// according to the bits specified in the immediate operand.\n" "/// \\param b\n" "/// A 256-bit vector of [8 x float]. The four selected elements in this\n" "/// operand are copied to bits [127:64] and bits [255:192] in the\n" "/// destination, according to the bits specified in the immediate operand.\n" "/// \\param mask\n" "/// An immediate value containing an 8-bit value specifying which elements to\n" "/// copy from \\a a and \\a b \\n.\n" "/// Bits [3:0] specify the values copied from operand \\a a. \\n\n" "/// Bits [7:4] specify the values copied from operand \\a b. \\n\n" "/// The destinations within the 256-bit destination are assigned values as\n" "/// follows, according to the bit value assignments described below: \\n\n" "/// Bits [1:0] are used to assign values to bits [31:0] and [159:128] in the\n" "/// destination. \\n\n" "/// Bits [3:2] are used to assign values to bits [63:32] and [191:160] in the\n" "/// destination. \\n\n" "/// Bits [5:4] are used to assign values to bits [95:64] and [223:192] in the\n" "/// destination. \\n\n" "/// Bits [7:6] are used to assign values to bits [127:96] and [255:224] in\n" "/// the destination. \\n\n" "/// Bit value assignments: \\n\n" "/// 00: Bits [31:0] and [159:128] are copied from the selected operand. \\n\n" "/// 01: Bits [63:32] and [191:160] are copied from the selected operand. \\n\n" "/// 10: Bits [95:64] and [223:192] are copied from the selected operand. \\n\n" "/// 11: Bits [127:96] and [255:224] are copied from the selected operand. 
\\n\n" "/// Note: To generate a mask, you can use the \\c _MM_SHUFFLE macro.\n" "/// _MM_SHUFFLE(b6, b4, b2, b0) can create an 8-bit mask of the form\n" "/// [b6, b4, b2, b0].\n" "/// \\returns A 256-bit vector of [8 x float] containing the shuffled values.\n" "#define _mm256_shuffle_ps(a, b, mask) \\\n" " ((__m256)__builtin_ia32_shufps256((__v8sf)(__m256)(a), \\\n" " (__v8sf)(__m256)(b), (int)(mask)))\n" "\n" "/// Selects four double-precision values from the 256-bit operands of\n" "/// [4 x double], as specified by the immediate value operand.\n" "///\n" "/// The selected elements from the first 256-bit operand are copied to bits\n" "/// [63:0] and bits [191:128] in the destination, and the selected elements\n" "/// from the second 256-bit operand are copied to bits [127:64] and bits\n" "/// [255:192] in the destination. For example, if bits [3:0] of the immediate\n" "/// operand contain a value of 0xF, the 256-bit destination vector would\n" "/// contain the following values: b[3], a[3], b[1], a[1].\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256d _mm256_shuffle_pd(__m256d a, __m256d b, const int mask);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VSHUFPD instruction.\n" "///\n" "/// \\param a\n" "/// A 256-bit vector of [4 x double].\n" "/// \\param b\n" "/// A 256-bit vector of [4 x double].\n" "/// \\param mask\n" "/// An immediate value containing 8-bit values specifying which elements to\n" "/// copy from \\a a and \\a b: \\n\n" "/// Bit [0]=0: Bits [63:0] are copied from \\a a to bits [63:0] of the\n" "/// destination. \\n\n" "/// Bit [0]=1: Bits [127:64] are copied from \\a a to bits [63:0] of the\n" "/// destination. \\n\n" "/// Bit [1]=0: Bits [63:0] are copied from \\a b to bits [127:64] of the\n" "/// destination. \\n\n" "/// Bit [1]=1: Bits [127:64] are copied from \\a b to bits [127:64] of the\n" "/// destination. 
\\n\n" "/// Bit [2]=0: Bits [191:128] are copied from \\a a to bits [191:128] of the\n" "/// destination. \\n\n" "/// Bit [2]=1: Bits [255:192] are copied from \\a a to bits [191:128] of the\n" "/// destination. \\n\n" "/// Bit [3]=0: Bits [191:128] are copied from \\a b to bits [255:192] of the\n" "/// destination. \\n\n" "/// Bit [3]=1: Bits [255:192] are copied from \\a b to bits [255:192] of the\n" "/// destination.\n" "/// \\returns A 256-bit vector of [4 x double] containing the shuffled values.\n" "#define _mm256_shuffle_pd(a, b, mask) \\\n" " ((__m256d)__builtin_ia32_shufpd256((__v4df)(__m256d)(a), \\\n" " (__v4df)(__m256d)(b), (int)(mask)))\n" "\n" "/* Compare */\n" "#define _CMP_EQ_OQ 0x00 /* Equal (ordered, non-signaling) */\n" "#define _CMP_LT_OS 0x01 /* Less-than (ordered, signaling) */\n" "#define _CMP_LE_OS 0x02 /* Less-than-or-equal (ordered, signaling) */\n" "#define _CMP_UNORD_Q 0x03 /* Unordered (non-signaling) */\n" "#define _CMP_NEQ_UQ 0x04 /* Not-equal (unordered, non-signaling) */\n" "#define _CMP_NLT_US 0x05 /* Not-less-than (unordered, signaling) */\n" "#define _CMP_NLE_US 0x06 /* Not-less-than-or-equal (unordered, signaling) */\n" "#define _CMP_ORD_Q 0x07 /* Ordered (non-signaling) */\n" "#define _CMP_EQ_UQ 0x08 /* Equal (unordered, non-signaling) */\n" "#define _CMP_NGE_US 0x09 /* Not-greater-than-or-equal (unordered, signaling) */\n" "#define _CMP_NGT_US 0x0a /* Not-greater-than (unordered, signaling) */\n" "#define _CMP_FALSE_OQ 0x0b /* False (ordered, non-signaling) */\n" "#define _CMP_NEQ_OQ 0x0c /* Not-equal (ordered, non-signaling) */\n" "#define _CMP_GE_OS 0x0d /* Greater-than-or-equal (ordered, signaling) */\n" "#define _CMP_GT_OS 0x0e /* Greater-than (ordered, signaling) */\n" "#define _CMP_TRUE_UQ 0x0f /* True (unordered, non-signaling) */\n" "#define _CMP_EQ_OS 0x10 /* Equal (ordered, signaling) */\n" "#define _CMP_LT_OQ 0x11 /* Less-than (ordered, non-signaling) */\n" "#define _CMP_LE_OQ 0x12 /* Less-than-or-equal (ordered, 
non-signaling) */\n" "#define _CMP_UNORD_S 0x13 /* Unordered (signaling) */\n" "#define _CMP_NEQ_US 0x14 /* Not-equal (unordered, signaling) */\n" "#define _CMP_NLT_UQ 0x15 /* Not-less-than (unordered, non-signaling) */\n" "#define _CMP_NLE_UQ 0x16 /* Not-less-than-or-equal (unordered, non-signaling) */\n" "#define _CMP_ORD_S 0x17 /* Ordered (signaling) */\n" "#define _CMP_EQ_US 0x18 /* Equal (unordered, signaling) */\n" "#define _CMP_NGE_UQ 0x19 /* Not-greater-than-or-equal (unordered, non-signaling) */\n" "#define _CMP_NGT_UQ 0x1a /* Not-greater-than (unordered, non-signaling) */\n" "#define _CMP_FALSE_OS 0x1b /* False (ordered, signaling) */\n" "#define _CMP_NEQ_OS 0x1c /* Not-equal (ordered, signaling) */\n" "#define _CMP_GE_OQ 0x1d /* Greater-than-or-equal (ordered, non-signaling) */\n" "#define _CMP_GT_OQ 0x1e /* Greater-than (ordered, non-signaling) */\n" "#define _CMP_TRUE_US 0x1f /* True (unordered, signaling) */\n" "\n" "/// Compares each of the corresponding double-precision values of two\n" "/// 128-bit vectors of [2 x double], using the operation specified by the\n" "/// immediate integer operand.\n" "///\n" "/// Returns a [2 x double] vector consisting of two doubles corresponding to\n" "/// the two comparison results: zero if the comparison is false, and all 1's\n" "/// if the comparison is true.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128d _mm_cmp_pd(__m128d a, __m128d b, const int c);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VCMPPD instruction.\n" "///\n" "/// \\param a\n" "/// A 128-bit vector of [2 x double].\n" "/// \\param b\n" "/// A 128-bit vector of [2 x double].\n" "/// \\param c\n" "/// An immediate integer operand, with bits [4:0] specifying which comparison\n" "/// operation to use: \\n\n" "/// 0x00: Equal (ordered, non-signaling) \\n\n" "/// 0x01: Less-than (ordered, signaling) \\n\n" "/// 0x02: Less-than-or-equal (ordered, signaling) \\n\n" "/// 0x03: Unordered (non-signaling) \\n\n" 
"/// 0x04: Not-equal (unordered, non-signaling) \\n\n" "/// 0x05: Not-less-than (unordered, signaling) \\n\n" "/// 0x06: Not-less-than-or-equal (unordered, signaling) \\n\n" "/// 0x07: Ordered (non-signaling) \\n\n" "/// 0x08: Equal (unordered, non-signaling) \\n\n" "/// 0x09: Not-greater-than-or-equal (unordered, signaling) \\n\n" "/// 0x0A: Not-greater-than (unordered, signaling) \\n\n" "/// 0x0B: False (ordered, non-signaling) \\n\n" "/// 0x0C: Not-equal (ordered, non-signaling) \\n\n" "/// 0x0D: Greater-than-or-equal (ordered, signaling) \\n\n" "/// 0x0E: Greater-than (ordered, signaling) \\n\n" "/// 0x0F: True (unordered, non-signaling) \\n\n" "/// 0x10: Equal (ordered, signaling) \\n\n" "/// 0x11: Less-than (ordered, non-signaling) \\n\n" "/// 0x12: Less-than-or-equal (ordered, non-signaling) \\n\n" "/// 0x13: Unordered (signaling) \\n\n" "/// 0x14: Not-equal (unordered, signaling) \\n\n" "/// 0x15: Not-less-than (unordered, non-signaling) \\n\n" "/// 0x16: Not-less-than-or-equal (unordered, non-signaling) \\n\n" "/// 0x17: Ordered (signaling) \\n\n" "/// 0x18: Equal (unordered, signaling) \\n\n" "/// 0x19: Not-greater-than-or-equal (unordered, non-signaling) \\n\n" "/// 0x1A: Not-greater-than (unordered, non-signaling) \\n\n" "/// 0x1B: False (ordered, signaling) \\n\n" "/// 0x1C: Not-equal (ordered, signaling) \\n\n" "/// 0x1D: Greater-than-or-equal (ordered, non-signaling) \\n\n" "/// 0x1E: Greater-than (ordered, non-signaling) \\n\n" "/// 0x1F: True (unordered, signaling)\n" "/// \\returns A 128-bit vector of [2 x double] containing the comparison results.\n" "#define _mm_cmp_pd(a, b, c) \\\n" " ((__m128d)__builtin_ia32_cmppd((__v2df)(__m128d)(a), \\\n" " (__v2df)(__m128d)(b), (c)))\n" "\n" "/// Compares each of the corresponding values of two 128-bit vectors of\n" "/// [4 x float], using the operation specified by the immediate integer\n" "/// operand.\n" "///\n" "/// Returns a [4 x float] vector consisting of four floats corresponding to\n" "/// the 
four comparison results: zero if the comparison is false, and all 1's\n" "/// if the comparison is true.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128 _mm_cmp_ps(__m128 a, __m128 b, const int c);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VCMPPS instruction.\n" "///\n" "/// \\param a\n" "/// A 128-bit vector of [4 x float].\n" "/// \\param b\n" "/// A 128-bit vector of [4 x float].\n" "/// \\param c\n" "/// An immediate integer operand, with bits [4:0] specifying which comparison\n" "/// operation to use: \\n\n" "/// 0x00: Equal (ordered, non-signaling) \\n\n" "/// 0x01: Less-than (ordered, signaling) \\n\n" "/// 0x02: Less-than-or-equal (ordered, signaling) \\n\n" "/// 0x03: Unordered (non-signaling) \\n\n" "/// 0x04: Not-equal (unordered, non-signaling) \\n\n" "/// 0x05: Not-less-than (unordered, signaling) \\n\n" "/// 0x06: Not-less-than-or-equal (unordered, signaling) \\n\n" "/// 0x07: Ordered (non-signaling) \\n\n" "/// 0x08: Equal (unordered, non-signaling) \\n\n" "/// 0x09: Not-greater-than-or-equal (unordered, signaling) \\n\n" "/// 0x0A: Not-greater-than (unordered, signaling) \\n\n" "/// 0x0B: False (ordered, non-signaling) \\n\n" "/// 0x0C: Not-equal (ordered, non-signaling) \\n\n" "/// 0x0D: Greater-than-or-equal (ordered, signaling) \\n\n" "/// 0x0E: Greater-than (ordered, signaling) \\n\n" "/// 0x0F: True (unordered, non-signaling) \\n\n" "/// 0x10: Equal (ordered, signaling) \\n\n" "/// 0x11: Less-than (ordered, non-signaling) \\n\n" "/// 0x12: Less-than-or-equal (ordered, non-signaling) \\n\n" "/// 0x13: Unordered (signaling) \\n\n" "/// 0x14: Not-equal (unordered, signaling) \\n\n" "/// 0x15: Not-less-than (unordered, non-signaling) \\n\n" "/// 0x16: Not-less-than-or-equal (unordered, non-signaling) \\n\n" "/// 0x17: Ordered (signaling) \\n\n" "/// 0x18: Equal (unordered, signaling) \\n\n" "/// 0x19: Not-greater-than-or-equal (unordered, non-signaling) \\n\n" "/// 0x1A: Not-greater-than (unordered, 
non-signaling) \\n\n" "/// 0x1B: False (ordered, signaling) \\n\n" "/// 0x1C: Not-equal (ordered, signaling) \\n\n" "/// 0x1D: Greater-than-or-equal (ordered, non-signaling) \\n\n" "/// 0x1E: Greater-than (ordered, non-signaling) \\n\n" "/// 0x1F: True (unordered, signaling)\n" "/// \\returns A 128-bit vector of [4 x float] containing the comparison results.\n" "#define _mm_cmp_ps(a, b, c) \\\n" " ((__m128)__builtin_ia32_cmpps((__v4sf)(__m128)(a), \\\n" " (__v4sf)(__m128)(b), (c)))\n" "\n" "/// Compares each of the corresponding double-precision values of two\n" "/// 256-bit vectors of [4 x double], using the operation specified by the\n" "/// immediate integer operand.\n" "///\n" "/// Returns a [4 x double] vector consisting of four doubles corresponding to\n" "/// the four comparison results: zero if the comparison is false, and all 1's\n" "/// if the comparison is true.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256d _mm256_cmp_pd(__m256d a, __m256d b, const int c);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VCMPPD instruction.\n" "///\n" "/// \\param a\n" "/// A 256-bit vector of [4 x double].\n" "/// \\param b\n" "/// A 256-bit vector of [4 x double].\n" "/// \\param c\n" "/// An immediate integer operand, with bits [4:0] specifying which comparison\n" "/// operation to use: \\n\n" "/// 0x00: Equal (ordered, non-signaling) \\n\n" "/// 0x01: Less-than (ordered, signaling) \\n\n" "/// 0x02: Less-than-or-equal (ordered, signaling) \\n\n" "/// 0x03: Unordered (non-signaling) \\n\n" "/// 0x04: Not-equal (unordered, non-signaling) \\n\n" "/// 0x05: Not-less-than (unordered, signaling) \\n\n" "/// 0x06: Not-less-than-or-equal (unordered, signaling) \\n\n" "/// 0x07: Ordered (non-signaling) \\n\n" "/// 0x08: Equal (unordered, non-signaling) \\n\n" "/// 0x09: Not-greater-than-or-equal (unordered, signaling) \\n\n" "/// 0x0A: Not-greater-than (unordered, signaling) \\n\n" "/// 0x0B: False (ordered, non-signaling) \\n\n" "/// 
0x0C: Not-equal (ordered, non-signaling) \\n\n" "/// 0x0D: Greater-than-or-equal (ordered, signaling) \\n\n" "/// 0x0E: Greater-than (ordered, signaling) \\n\n" "/// 0x0F: True (unordered, non-signaling) \\n\n" "/// 0x10: Equal (ordered, signaling) \\n\n" "/// 0x11: Less-than (ordered, non-signaling) \\n\n" "/// 0x12: Less-than-or-equal (ordered, non-signaling) \\n\n" "/// 0x13: Unordered (signaling) \\n\n" "/// 0x14: Not-equal (unordered, signaling) \\n\n" "/// 0x15: Not-less-than (unordered, non-signaling) \\n\n" "/// 0x16: Not-less-than-or-equal (unordered, non-signaling) \\n\n" "/// 0x17: Ordered (signaling) \\n\n" "/// 0x18: Equal (unordered, signaling) \\n\n" "/// 0x19: Not-greater-than-or-equal (unordered, non-signaling) \\n\n" "/// 0x1A: Not-greater-than (unordered, non-signaling) \\n\n" "/// 0x1B: False (ordered, signaling) \\n\n" "/// 0x1C: Not-equal (ordered, signaling) \\n\n" "/// 0x1D: Greater-than-or-equal (ordered, non-signaling) \\n\n" "/// 0x1E: Greater-than (ordered, non-signaling) \\n\n" "/// 0x1F: True (unordered, signaling)\n" "/// \\returns A 256-bit vector of [4 x double] containing the comparison results.\n" "#define _mm256_cmp_pd(a, b, c) \\\n" " ((__m256d)__builtin_ia32_cmppd256((__v4df)(__m256d)(a), \\\n" " (__v4df)(__m256d)(b), (c)))\n" "\n" "/// Compares each of the corresponding values of two 256-bit vectors of\n" "/// [8 x float], using the operation specified by the immediate integer\n" "/// operand.\n" "///\n" "/// Returns a [8 x float] vector consisting of eight floats corresponding to\n" "/// the eight comparison results: zero if the comparison is false, and all\n" "/// 1's if the comparison is true.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256 _mm256_cmp_ps(__m256 a, __m256 b, const int c);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VCMPPS instruction.\n" "///\n" "/// \\param a\n" "/// A 256-bit vector of [8 x float].\n" "/// \\param b\n" "/// A 256-bit vector of [8 x float].\n" 
"/// \\param c\n" "/// An immediate integer operand, with bits [4:0] specifying which comparison\n" "/// operation to use: \\n\n" "/// 0x00: Equal (ordered, non-signaling) \\n\n" "/// 0x01: Less-than (ordered, signaling) \\n\n" "/// 0x02: Less-than-or-equal (ordered, signaling) \\n\n" "/// 0x03: Unordered (non-signaling) \\n\n" "/// 0x04: Not-equal (unordered, non-signaling) \\n\n" "/// 0x05: Not-less-than (unordered, signaling) \\n\n" "/// 0x06: Not-less-than-or-equal (unordered, signaling) \\n\n" "/// 0x07: Ordered (non-signaling) \\n\n" "/// 0x08: Equal (unordered, non-signaling) \\n\n" "/// 0x09: Not-greater-than-or-equal (unordered, signaling) \\n\n" "/// 0x0A: Not-greater-than (unordered, signaling) \\n\n" "/// 0x0B: False (ordered, non-signaling) \\n\n" "/// 0x0C: Not-equal (ordered, non-signaling) \\n\n" "/// 0x0D: Greater-than-or-equal (ordered, signaling) \\n\n" "/// 0x0E: Greater-than (ordered, signaling) \\n\n" "/// 0x0F: True (unordered, non-signaling) \\n\n" "/// 0x10: Equal (ordered, signaling) \\n\n" "/// 0x11: Less-than (ordered, non-signaling) \\n\n" "/// 0x12: Less-than-or-equal (ordered, non-signaling) \\n\n" "/// 0x13: Unordered (signaling) \\n\n" "/// 0x14: Not-equal (unordered, signaling) \\n\n" "/// 0x15: Not-less-than (unordered, non-signaling) \\n\n" "/// 0x16: Not-less-than-or-equal (unordered, non-signaling) \\n\n" "/// 0x17: Ordered (signaling) \\n\n" "/// 0x18: Equal (unordered, signaling) \\n\n" "/// 0x19: Not-greater-than-or-equal (unordered, non-signaling) \\n\n" "/// 0x1A: Not-greater-than (unordered, non-signaling) \\n\n" "/// 0x1B: False (ordered, signaling) \\n\n" "/// 0x1C: Not-equal (ordered, signaling) \\n\n" "/// 0x1D: Greater-than-or-equal (ordered, non-signaling) \\n\n" "/// 0x1E: Greater-than (ordered, non-signaling) \\n\n" "/// 0x1F: True (unordered, signaling)\n" "/// \\returns A 256-bit vector of [8 x float] containing the comparison results.\n" "#define _mm256_cmp_ps(a, b, c) \\\n" " 
((__m256)__builtin_ia32_cmpps256((__v8sf)(__m256)(a), \\\n" " (__v8sf)(__m256)(b), (c)))\n" "\n" "/// Compares each of the corresponding scalar double-precision values of\n" "/// two 128-bit vectors of [2 x double], using the operation specified by the\n" "/// immediate integer operand.\n" "///\n" "/// If the result is true, all 64 bits of the destination vector are set;\n" "/// otherwise they are cleared.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128d _mm_cmp_sd(__m128d a, __m128d b, const int c);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VCMPSD instruction.\n" "///\n" "/// \\param a\n" "/// A 128-bit vector of [2 x double].\n" "/// \\param b\n" "/// A 128-bit vector of [2 x double].\n" "/// \\param c\n" "/// An immediate integer operand, with bits [4:0] specifying which comparison\n" "/// operation to use: \\n\n" "/// 0x00: Equal (ordered, non-signaling) \\n\n" "/// 0x01: Less-than (ordered, signaling) \\n\n" "/// 0x02: Less-than-or-equal (ordered, signaling) \\n\n" "/// 0x03: Unordered (non-signaling) \\n\n" "/// 0x04: Not-equal (unordered, non-signaling) \\n\n" "/// 0x05: Not-less-than (unordered, signaling) \\n\n" "/// 0x06: Not-less-than-or-equal (unordered, signaling) \\n\n" "/// 0x07: Ordered (non-signaling) \\n\n" "/// 0x08: Equal (unordered, non-signaling) \\n\n" "/// 0x09: Not-greater-than-or-equal (unordered, signaling) \\n\n" "/// 0x0A: Not-greater-than (unordered, signaling) \\n\n" "/// 0x0B: False (ordered, non-signaling) \\n\n" "/// 0x0C: Not-equal (ordered, non-signaling) \\n\n" "/// 0x0D: Greater-than-or-equal (ordered, signaling) \\n\n" "/// 0x0E: Greater-than (ordered, signaling) \\n\n" "/// 0x0F: True (unordered, non-signaling) \\n\n" "/// 0x10: Equal (ordered, signaling) \\n\n" "/// 0x11: Less-than (ordered, non-signaling) \\n\n" "/// 0x12: Less-than-or-equal (ordered, non-signaling) \\n\n" "/// 0x13: Unordered (signaling) \\n\n" "/// 0x14: Not-equal (unordered, signaling) \\n\n" "/// 0x15: 
Not-less-than (unordered, non-signaling) \\n\n" "/// 0x16: Not-less-than-or-equal (unordered, non-signaling) \\n\n" "/// 0x17: Ordered (signaling) \\n\n" "/// 0x18: Equal (unordered, signaling) \\n\n" "/// 0x19: Not-greater-than-or-equal (unordered, non-signaling) \\n\n" "/// 0x1A: Not-greater-than (unordered, non-signaling) \\n\n" "/// 0x1B: False (ordered, signaling) \\n\n" "/// 0x1C: Not-equal (ordered, signaling) \\n\n" "/// 0x1D: Greater-than-or-equal (ordered, non-signaling) \\n\n" "/// 0x1E: Greater-than (ordered, non-signaling) \\n\n" "/// 0x1F: True (unordered, signaling)\n" "/// \\returns A 128-bit vector of [2 x double] containing the comparison results.\n" "#define _mm_cmp_sd(a, b, c) \\\n" " ((__m128d)__builtin_ia32_cmpsd((__v2df)(__m128d)(a), \\\n" " (__v2df)(__m128d)(b), (c)))\n" "\n" "/// Compares each of the corresponding scalar values of two 128-bit\n" "/// vectors of [4 x float], using the operation specified by the immediate\n" "/// integer operand.\n" "///\n" "/// If the result is true, all 32 bits of the destination vector are set;\n" "/// otherwise they are cleared.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128 _mm_cmp_ss(__m128 a, __m128 b, const int c);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VCMPSS instruction.\n" "///\n" "/// \\param a\n" "/// A 128-bit vector of [4 x float].\n" "/// \\param b\n" "/// A 128-bit vector of [4 x float].\n" "/// \\param c\n" "/// An immediate integer operand, with bits [4:0] specifying which comparison\n" "/// operation to use: \\n\n" "/// 0x00: Equal (ordered, non-signaling) \\n\n" "/// 0x01: Less-than (ordered, signaling) \\n\n" "/// 0x02: Less-than-or-equal (ordered, signaling) \\n\n" "/// 0x03: Unordered (non-signaling) \\n\n" "/// 0x04: Not-equal (unordered, non-signaling) \\n\n" "/// 0x05: Not-less-than (unordered, signaling) \\n\n" "/// 0x06: Not-less-than-or-equal (unordered, signaling) \\n\n" "/// 0x07: Ordered (non-signaling) \\n\n" "/// 0x08: Equal 
(unordered, non-signaling) \\n\n" "/// 0x09: Not-greater-than-or-equal (unordered, signaling) \\n\n" "/// 0x0A: Not-greater-than (unordered, signaling) \\n\n" "/// 0x0B: False (ordered, non-signaling) \\n\n" "/// 0x0C: Not-equal (ordered, non-signaling) \\n\n" "/// 0x0D: Greater-than-or-equal (ordered, signaling) \\n\n" "/// 0x0E: Greater-than (ordered, signaling) \\n\n" "/// 0x0F: True (unordered, non-signaling) \\n\n" "/// 0x10: Equal (ordered, signaling) \\n\n" "/// 0x11: Less-than (ordered, non-signaling) \\n\n" "/// 0x12: Less-than-or-equal (ordered, non-signaling) \\n\n" "/// 0x13: Unordered (signaling) \\n\n" "/// 0x14: Not-equal (unordered, signaling) \\n\n" "/// 0x15: Not-less-than (unordered, non-signaling) \\n\n" "/// 0x16: Not-less-than-or-equal (unordered, non-signaling) \\n\n" "/// 0x17: Ordered (signaling) \\n\n" "/// 0x18: Equal (unordered, signaling) \\n\n" "/// 0x19: Not-greater-than-or-equal (unordered, non-signaling) \\n\n" "/// 0x1A: Not-greater-than (unordered, non-signaling) \\n\n" "/// 0x1B: False (ordered, signaling) \\n\n" "/// 0x1C: Not-equal (ordered, signaling) \\n\n" "/// 0x1D: Greater-than-or-equal (ordered, non-signaling) \\n\n" "/// 0x1E: Greater-than (ordered, non-signaling) \\n\n" "/// 0x1F: True (unordered, signaling)\n" "/// \\returns A 128-bit vector of [4 x float] containing the comparison results.\n" "#define _mm_cmp_ss(a, b, c) \\\n" " ((__m128)__builtin_ia32_cmpss((__v4sf)(__m128)(a), \\\n" " (__v4sf)(__m128)(b), (c)))\n" "\n" "/// Takes a [8 x i32] vector and returns the vector element value\n" "/// indexed by the immediate constant operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// int _mm256_extract_epi32(__m256i X, const int N);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VEXTRACTF128+COMPOSITE \n" "/// instruction.\n" "///\n" "/// \\param X\n" "/// A 256-bit vector of [8 x i32].\n" "/// \\param N\n" "/// An immediate integer operand with bits [2:0] determining which vector\n" 
"/// element is extracted and returned.\n" "/// \\returns A 32-bit integer containing the extracted 32 bits of extended\n" "/// packed data.\n" "#define _mm256_extract_epi32(X, N) \\\n" " ((int)__builtin_ia32_vec_ext_v8si((__v8si)(__m256i)(X), (int)(N)))\n" "\n" "/// Takes a [16 x i16] vector and returns the vector element value\n" "/// indexed by the immediate constant operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// int _mm256_extract_epi16(__m256i X, const int N);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VEXTRACTF128+COMPOSITE \n" "/// instruction.\n" "///\n" "/// \\param X\n" "/// A 256-bit integer vector of [16 x i16].\n" "/// \\param N\n" "/// An immediate integer operand with bits [3:0] determining which vector\n" "/// element is extracted and returned.\n" "/// \\returns A 32-bit integer containing the extracted 16 bits of zero extended\n" "/// packed data.\n" "#define _mm256_extract_epi16(X, N) \\\n" " ((int)(unsigned short)__builtin_ia32_vec_ext_v16hi((__v16hi)(__m256i)(X), \\\n" " (int)(N)))\n" "\n" "/// Takes a [32 x i8] vector and returns the vector element value\n" "/// indexed by the immediate constant operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// int _mm256_extract_epi8(__m256i X, const int N);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VEXTRACTF128+COMPOSITE \n" "/// instruction.\n" "///\n" "/// \\param X\n" "/// A 256-bit integer vector of [32 x i8].\n" "/// \\param N\n" "/// An immediate integer operand with bits [4:0] determining which vector\n" "/// element is extracted and returned.\n" "/// \\returns A 32-bit integer containing the extracted 8 bits of zero extended\n" "/// packed data.\n" "#define _mm256_extract_epi8(X, N) \\\n" " ((int)(unsigned char)__builtin_ia32_vec_ext_v32qi((__v32qi)(__m256i)(X), \\\n" " (int)(N)))\n" "\n" "#ifdef __x86_64__\n" "/// Takes a [4 x i64] vector and returns the vector element value\n" "/// indexed by the immediate 
constant operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// long long _mm256_extract_epi64(__m256i X, const int N);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VEXTRACTF128+COMPOSITE \n" "/// instruction.\n" "///\n" "/// \\param X\n" "/// A 256-bit integer vector of [4 x i64].\n" "/// \\param N\n" "/// An immediate integer operand with bits [1:0] determining which vector\n" "/// element is extracted and returned.\n" "/// \\returns A 64-bit integer containing the extracted 64 bits of extended\n" "/// packed data.\n" "#define _mm256_extract_epi64(X, N) \\\n" " ((long long)__builtin_ia32_vec_ext_v4di((__v4di)(__m256i)(X), (int)(N)))\n" "#endif\n" "\n" "/// Takes a [8 x i32] vector and replaces the vector element value\n" "/// indexed by the immediate constant operand by a new value. Returns the\n" "/// modified vector.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256i _mm256_insert_epi32(__m256i X, int I, const int N);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VINSERTF128+COMPOSITE \n" "/// instruction.\n" "///\n" "/// \\param X\n" "/// A vector of [8 x i32] to be used by the insert operation.\n" "/// \\param I\n" "/// An integer value. The replacement value for the insert operation.\n" "/// \\param N\n" "/// An immediate integer specifying the index of the vector element to be\n" "/// replaced.\n" "/// \\returns A copy of vector \\a X, after replacing its element indexed by\n" "/// \\a N with \\a I.\n" "#define _mm256_insert_epi32(X, I, N) \\\n" " ((__m256i)__builtin_ia32_vec_set_v8si((__v8si)(__m256i)(X), \\\n" " (int)(I), (int)(N)))\n" "\n" "\n" "/// Takes a [16 x i16] vector and replaces the vector element value\n" "/// indexed by the immediate constant operand with a new value. 
Returns the\n" "/// modified vector.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256i _mm256_insert_epi16(__m256i X, int I, const int N);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VINSERTF128+COMPOSITE \n" "/// instruction.\n" "///\n" "/// \\param X\n" "/// A vector of [16 x i16] to be used by the insert operation.\n" "/// \\param I\n" "/// An i16 integer value. The replacement value for the insert operation.\n" "/// \\param N\n" "/// An immediate integer specifying the index of the vector element to be\n" "/// replaced.\n" "/// \\returns A copy of vector \\a X, after replacing its element indexed by\n" "/// \\a N with \\a I.\n" "#define _mm256_insert_epi16(X, I, N) \\\n" " ((__m256i)__builtin_ia32_vec_set_v16hi((__v16hi)(__m256i)(X), \\\n" " (int)(I), (int)(N)))\n" "\n" "/// Takes a [32 x i8] vector and replaces the vector element value\n" "/// indexed by the immediate constant operand with a new value. Returns the\n" "/// modified vector.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256i _mm256_insert_epi8(__m256i X, int I, const int N);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VINSERTF128+COMPOSITE \n" "/// instruction.\n" "///\n" "/// \\param X\n" "/// A vector of [32 x i8] to be used by the insert operation.\n" "/// \\param I\n" "/// An i8 integer value. The replacement value for the insert operation.\n" "/// \\param N\n" "/// An immediate integer specifying the index of the vector element to be\n" "/// replaced.\n" "/// \\returns A copy of vector \\a X, after replacing its element indexed by\n" "/// \\a N with \\a I.\n" "#define _mm256_insert_epi8(X, I, N) \\\n" " ((__m256i)__builtin_ia32_vec_set_v32qi((__v32qi)(__m256i)(X), \\\n" " (int)(I), (int)(N)))\n" "\n" "#ifdef __x86_64__\n" "/// Takes a [4 x i64] vector and replaces the vector element value\n" "/// indexed by the immediate constant operand with a new value. 
Returns the\n" "/// modified vector.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256i _mm256_insert_epi64(__m256i X, int I, const int N);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VINSERTF128+COMPOSITE \n" "/// instruction.\n" "///\n" "/// \\param X\n" "/// A vector of [4 x i64] to be used by the insert operation.\n" "/// \\param I\n" "/// A 64-bit integer value. The replacement value for the insert operation.\n" "/// \\param N\n" "/// An immediate integer specifying the index of the vector element to be\n" "/// replaced.\n" "/// \\returns A copy of vector \\a X, after replacing its element indexed by\n" "/// \\a N with \\a I.\n" "#define _mm256_insert_epi64(X, I, N) \\\n" " ((__m256i)__builtin_ia32_vec_set_v4di((__v4di)(__m256i)(X), \\\n" " (long long)(I), (int)(N)))\n" "#endif\n" "\n" "/* Conversion */\n" "/// Converts a vector of [4 x i32] into a vector of [4 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCVTDQ2PD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector of [4 x i32].\n" "/// \\returns A 256-bit vector of [4 x double] containing the converted values.\n" "static __inline __m256d __DEFAULT_FN_ATTRS\n" "_mm256_cvtepi32_pd(__m128i __a)\n" "{\n" " return (__m256d)__builtin_convertvector((__v4si)__a, __v4df);\n" "}\n" "\n" "/// Converts a vector of [8 x i32] into a vector of [8 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCVTDQ2PS instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit integer vector.\n" "/// \\returns A 256-bit vector of [8 x float] containing the converted values.\n" "static __inline __m256 __DEFAULT_FN_ATTRS\n" "_mm256_cvtepi32_ps(__m256i __a)\n" "{\n" " return (__m256)__builtin_convertvector((__v8si)__a, __v8sf);\n" "}\n" "\n" "/// Converts a 256-bit vector of [4 x double] into a 128-bit vector of\n" "/// [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic 
corresponds to the VCVTPD2PS instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [4 x double].\n" "/// \\returns A 128-bit vector of [4 x float] containing the converted values.\n" "static __inline __m128 __DEFAULT_FN_ATTRS\n" "_mm256_cvtpd_ps(__m256d __a)\n" "{\n" " return (__m128)__builtin_ia32_cvtpd2ps256((__v4df) __a);\n" "}\n" "\n" "/// Converts a vector of [8 x float] into a vector of [8 x i32].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCVTPS2DQ instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x float].\n" "/// \\returns A 256-bit integer vector containing the converted values.\n" "static __inline __m256i __DEFAULT_FN_ATTRS\n" "_mm256_cvtps_epi32(__m256 __a)\n" "{\n" " return (__m256i)__builtin_ia32_cvtps2dq256((__v8sf) __a);\n" "}\n" "\n" "/// Converts a 128-bit vector of [4 x float] into a 256-bit vector of [4\n" "/// x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCVTPS2PD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float].\n" "/// \\returns A 256-bit vector of [4 x double] containing the converted values.\n" "static __inline __m256d __DEFAULT_FN_ATTRS\n" "_mm256_cvtps_pd(__m128 __a)\n" "{\n" " return (__m256d)__builtin_convertvector((__v4sf)__a, __v4df);\n" "}\n" "\n" "/// Converts a 256-bit vector of [4 x double] into a 128-bit vector of [4\n" "/// x i32], truncating the result by rounding towards zero when it is\n" "/// inexact.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCVTTPD2DQ instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [4 x double].\n" "/// \\returns A 128-bit integer vector containing the converted values.\n" "static __inline __m128i __DEFAULT_FN_ATTRS\n" "_mm256_cvttpd_epi32(__m256d __a)\n" "{\n" " return (__m128i)__builtin_ia32_cvttpd2dq256((__v4df) __a);\n" "}\n" "\n" "/// Converts a 256-bit vector of [4 x double] into a 128-bit 
vector of [4\n" "/// x i32]. When a conversion is inexact, the value returned is rounded\n" "/// according to the rounding control bits in the MXCSR register.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCVTPD2DQ instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [4 x double].\n" "/// \\returns A 128-bit integer vector containing the converted values.\n" "static __inline __m128i __DEFAULT_FN_ATTRS\n" "_mm256_cvtpd_epi32(__m256d __a)\n" "{\n" " return (__m128i)__builtin_ia32_cvtpd2dq256((__v4df) __a);\n" "}\n" "\n" "/// Converts a vector of [8 x float] into a vector of [8 x i32],\n" "/// truncating the result by rounding towards zero when it is inexact.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCVTTPS2DQ instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x float].\n" "/// \\returns A 256-bit integer vector containing the converted values.\n" "static __inline __m256i __DEFAULT_FN_ATTRS\n" "_mm256_cvttps_epi32(__m256 __a)\n" "{\n" " return (__m256i)__builtin_ia32_cvttps2dq256((__v8sf) __a);\n" "}\n" "\n" "/// Returns the first element of the input vector of [4 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic is a utility function and does not correspond to a specific\n" "/// instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [4 x double].\n" "/// \\returns A 64 bit double containing the first element of the input vector.\n" "static __inline double __DEFAULT_FN_ATTRS\n" "_mm256_cvtsd_f64(__m256d __a)\n" "{\n" " return __a[0];\n" "}\n" "\n" "/// Returns the first element of the input vector of [8 x i32].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic is a utility function and does not correspond to a specific\n" "/// instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x i32].\n" "/// \\returns A 32 bit integer containing the first element of the input vector.\n" "static __inline int 
__DEFAULT_FN_ATTRS\n" "_mm256_cvtsi256_si32(__m256i __a)\n" "{\n" " __v8si __b = (__v8si)__a;\n" " return __b[0];\n" "}\n" "\n" "/// Returns the first element of the input vector of [8 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic is a utility function and does not correspond to a specific\n" "/// instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x float].\n" "/// \\returns A 32 bit float containing the first element of the input vector.\n" "static __inline float __DEFAULT_FN_ATTRS\n" "_mm256_cvtss_f32(__m256 __a)\n" "{\n" " return __a[0];\n" "}\n" "\n" "/* Vector replicate */\n" "/// Moves and duplicates odd-indexed values from a 256-bit vector of\n" "/// [8 x float] to float values in a 256-bit vector of [8 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVSHDUP instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x float]. \\n\n" "/// Bits [255:224] of \\a __a are written to bits [255:224] and [223:192] of\n" "/// the return value. \\n\n" "/// Bits [191:160] of \\a __a are written to bits [191:160] and [159:128] of\n" "/// the return value. \\n\n" "/// Bits [127:96] of \\a __a are written to bits [127:96] and [95:64] of the\n" "/// return value. \\n\n" "/// Bits [63:32] of \\a __a are written to bits [63:32] and [31:0] of the\n" "/// return value.\n" "/// \\returns A 256-bit vector of [8 x float] containing the moved and duplicated\n" "/// values.\n" "static __inline __m256 __DEFAULT_FN_ATTRS\n" "_mm256_movehdup_ps(__m256 __a)\n" "{\n" " return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 1, 1, 3, 3, 5, 5, 7, 7);\n" "}\n" "\n" "/// Moves and duplicates even-indexed values from a 256-bit vector of\n" "/// [8 x float] to float values in a 256-bit vector of [8 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVSLDUP instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x float]. 
\\n\n" "/// Bits [223:192] of \\a __a are written to bits [255:224] and [223:192] of\n" "/// the return value. \\n\n" "/// Bits [159:128] of \\a __a are written to bits [191:160] and [159:128] of\n" "/// the return value. \\n\n" "/// Bits [95:64] of \\a __a are written to bits [127:96] and [95:64] of the\n" "/// return value. \\n\n" "/// Bits [31:0] of \\a __a are written to bits [63:32] and [31:0] of the\n" "/// return value.\n" "/// \\returns A 256-bit vector of [8 x float] containing the moved and duplicated\n" "/// values.\n" "static __inline __m256 __DEFAULT_FN_ATTRS\n" "_mm256_moveldup_ps(__m256 __a)\n" "{\n" " return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 0, 0, 2, 2, 4, 4, 6, 6);\n" "}\n" "\n" "/// Moves and duplicates double-precision floating point values from a\n" "/// 256-bit vector of [4 x double] to double-precision values in a 256-bit\n" "/// vector of [4 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVDDUP instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [4 x double]. \\n\n" "/// Bits [63:0] of \\a __a are written to bits [127:64] and [63:0] of the\n" "/// return value. \\n\n" "/// Bits [191:128] of \\a __a are written to bits [255:192] and [191:128] of\n" "/// the return value.\n" "/// \\returns A 256-bit vector of [4 x double] containing the moved and\n" "/// duplicated values.\n" "static __inline __m256d __DEFAULT_FN_ATTRS\n" "_mm256_movedup_pd(__m256d __a)\n" "{\n" " return __builtin_shufflevector((__v4df)__a, (__v4df)__a, 0, 0, 2, 2);\n" "}\n" "\n" "/* Unpack and Interleave */\n" "/// Unpacks the odd-indexed vector elements from two 256-bit vectors of\n" "/// [4 x double] and interleaves them into a 256-bit vector of [4 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VUNPCKHPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit floating-point vector of [4 x double]. 
\\n\n" "/// Bits [127:64] are written to bits [63:0] of the return value. \\n\n" "/// Bits [255:192] are written to bits [191:128] of the return value. \\n\n" "/// \\param __b\n" "/// A 256-bit floating-point vector of [4 x double]. \\n\n" "/// Bits [127:64] are written to bits [127:64] of the return value. \\n\n" "/// Bits [255:192] are written to bits [255:192] of the return value. \\n\n" "/// \\returns A 256-bit vector of [4 x double] containing the interleaved values.\n" "static __inline __m256d __DEFAULT_FN_ATTRS\n" "_mm256_unpackhi_pd(__m256d __a, __m256d __b)\n" "{\n" " return __builtin_shufflevector((__v4df)__a, (__v4df)__b, 1, 5, 1+2, 5+2);\n" "}\n" "\n" "/// Unpacks the even-indexed vector elements from two 256-bit vectors of\n" "/// [4 x double] and interleaves them into a 256-bit vector of [4 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VUNPCKLPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit floating-point vector of [4 x double]. \\n\n" "/// Bits [63:0] are written to bits [63:0] of the return value. \\n\n" "/// Bits [191:128] are written to bits [191:128] of the return value.\n" "/// \\param __b\n" "/// A 256-bit floating-point vector of [4 x double]. \\n\n" "/// Bits [63:0] are written to bits [127:64] of the return value. \\n\n" "/// Bits [191:128] are written to bits [255:192] of the return value. 
\\n\n" "/// \\returns A 256-bit vector of [4 x double] containing the interleaved values.\n" "static __inline __m256d __DEFAULT_FN_ATTRS\n" "_mm256_unpacklo_pd(__m256d __a, __m256d __b)\n" "{\n" " return __builtin_shufflevector((__v4df)__a, (__v4df)__b, 0, 4, 0+2, 4+2);\n" "}\n" "\n" "/// Unpacks the 32-bit vector elements 2, 3, 6 and 7 from each of the\n" "/// two 256-bit vectors of [8 x float] and interleaves them into a 256-bit\n" "/// vector of [8 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VUNPCKHPS instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x float]. \\n\n" "/// Bits [95:64] are written to bits [31:0] of the return value. \\n\n" "/// Bits [127:96] are written to bits [95:64] of the return value. \\n\n" "/// Bits [223:192] are written to bits [159:128] of the return value. \\n\n" "/// Bits [255:224] are written to bits [223:192] of the return value.\n" "/// \\param __b\n" "/// A 256-bit vector of [8 x float]. \\n\n" "/// Bits [95:64] are written to bits [63:32] of the return value. \\n\n" "/// Bits [127:96] are written to bits [127:96] of the return value. \\n\n" "/// Bits [223:192] are written to bits [191:160] of the return value. \\n\n" "/// Bits [255:224] are written to bits [255:224] of the return value.\n" "/// \\returns A 256-bit vector of [8 x float] containing the interleaved values.\n" "static __inline __m256 __DEFAULT_FN_ATTRS\n" "_mm256_unpackhi_ps(__m256 __a, __m256 __b)\n" "{\n" " return __builtin_shufflevector((__v8sf)__a, (__v8sf)__b, 2, 10, 2+1, 10+1, 6, 14, 6+1, 14+1);\n" "}\n" "\n" "/// Unpacks the 32-bit vector elements 0, 1, 4 and 5 from each of the\n" "/// two 256-bit vectors of [8 x float] and interleaves them into a 256-bit\n" "/// vector of [8 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VUNPCKLPS instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x float]. 
\\n\n" "/// Bits [31:0] are written to bits [31:0] of the return value. \\n\n" "/// Bits [63:32] are written to bits [95:64] of the return value. \\n\n" "/// Bits [159:128] are written to bits [159:128] of the return value. \\n\n" "/// Bits [191:160] are written to bits [223:192] of the return value.\n" "/// \\param __b\n" "/// A 256-bit vector of [8 x float]. \\n\n" "/// Bits [31:0] are written to bits [63:32] of the return value. \\n\n" "/// Bits [63:32] are written to bits [127:96] of the return value. \\n\n" "/// Bits [159:128] are written to bits [191:160] of the return value. \\n\n" "/// Bits [191:160] are written to bits [255:224] of the return value.\n" "/// \\returns A 256-bit vector of [8 x float] containing the interleaved values.\n" "static __inline __m256 __DEFAULT_FN_ATTRS\n" "_mm256_unpacklo_ps(__m256 __a, __m256 __b)\n" "{\n" " return __builtin_shufflevector((__v8sf)__a, (__v8sf)__b, 0, 8, 0+1, 8+1, 4, 12, 4+1, 12+1);\n" "}\n" "\n" "/* Bit Test */\n" "/// Given two 128-bit floating-point vectors of [2 x double], perform an\n" "/// element-by-element comparison of the double-precision element in the\n" "/// first source vector and the corresponding element in the second source\n" "/// vector.\n" "///\n" "/// The EFLAGS register is updated as follows: \\n\n" "/// If there is at least one pair of double-precision elements where the\n" "/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the\n" "/// ZF flag is set to 1. \\n\n" "/// If there is at least one pair of double-precision elements where the\n" "/// sign-bit of the first element is 0 and the sign-bit of the second element\n" "/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. 
\\n\n" "/// This intrinsic returns the value of the ZF flag.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VTESTPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double].\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double].\n" "/// \\returns the ZF flag in the EFLAGS register.\n" "static __inline int __DEFAULT_FN_ATTRS128\n" "_mm_testz_pd(__m128d __a, __m128d __b)\n" "{\n" " return __builtin_ia32_vtestzpd((__v2df)__a, (__v2df)__b);\n" "}\n" "\n" "/// Given two 128-bit floating-point vectors of [2 x double], perform an\n" "/// element-by-element comparison of the double-precision element in the\n" "/// first source vector and the corresponding element in the second source\n" "/// vector.\n" "///\n" "/// The EFLAGS register is updated as follows: \\n\n" "/// If there is at least one pair of double-precision elements where the\n" "/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the\n" "/// ZF flag is set to 1. \\n\n" "/// If there is at least one pair of double-precision elements where the\n" "/// sign-bit of the first element is 0 and the sign-bit of the second element\n" "/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. 
\\n\n" "/// This intrinsic returns the value of the CF flag.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VTESTPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double].\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double].\n" "/// \\returns the CF flag in the EFLAGS register.\n" "static __inline int __DEFAULT_FN_ATTRS128\n" "_mm_testc_pd(__m128d __a, __m128d __b)\n" "{\n" " return __builtin_ia32_vtestcpd((__v2df)__a, (__v2df)__b);\n" "}\n" "\n" "/// Given two 128-bit floating-point vectors of [2 x double], perform an\n" "/// element-by-element comparison of the double-precision element in the\n" "/// first source vector and the corresponding element in the second source\n" "/// vector.\n" "///\n" "/// The EFLAGS register is updated as follows: \\n\n" "/// If there is at least one pair of double-precision elements where the\n" "/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the\n" "/// ZF flag is set to 1. \\n\n" "/// If there is at least one pair of double-precision elements where the\n" "/// sign-bit of the first element is 0 and the sign-bit of the second element\n" "/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. 
\\n\n" "/// This intrinsic returns 1 if both the ZF and CF flags are set to 0,\n" "/// otherwise it returns 0.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VTESTPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double].\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double].\n" "/// \\returns 1 if both the ZF and CF flags are set to 0, otherwise returns 0.\n" "static __inline int __DEFAULT_FN_ATTRS128\n" "_mm_testnzc_pd(__m128d __a, __m128d __b)\n" "{\n" " return __builtin_ia32_vtestnzcpd((__v2df)__a, (__v2df)__b);\n" "}\n" "\n" "/// Given two 128-bit floating-point vectors of [4 x float], perform an\n" "/// element-by-element comparison of the single-precision element in the\n" "/// first source vector and the corresponding element in the second source\n" "/// vector.\n" "///\n" "/// The EFLAGS register is updated as follows: \\n\n" "/// If there is at least one pair of single-precision elements where the\n" "/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the\n" "/// ZF flag is set to 1. \\n\n" "/// If there is at least one pair of single-precision elements where the\n" "/// sign-bit of the first element is 0 and the sign-bit of the second element\n" "/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. 
\\n\n" "/// This intrinsic returns the value of the ZF flag.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VTESTPS instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float].\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float].\n" "/// \\returns the ZF flag.\n" "static __inline int __DEFAULT_FN_ATTRS128\n" "_mm_testz_ps(__m128 __a, __m128 __b)\n" "{\n" " return __builtin_ia32_vtestzps((__v4sf)__a, (__v4sf)__b);\n" "}\n" "\n" "/// Given two 128-bit floating-point vectors of [4 x float], perform an\n" "/// element-by-element comparison of the single-precision element in the\n" "/// first source vector and the corresponding element in the second source\n" "/// vector.\n" "///\n" "/// The EFLAGS register is updated as follows: \\n\n" "/// If there is at least one pair of single-precision elements where the\n" "/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the\n" "/// ZF flag is set to 1. \\n\n" "/// If there is at least one pair of single-precision elements where the\n" "/// sign-bit of the first element is 0 and the sign-bit of the second element\n" "/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. 
\\n\n" "/// This intrinsic returns the value of the CF flag.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VTESTPS instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float].\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float].\n" "/// \\returns the CF flag.\n" "static __inline int __DEFAULT_FN_ATTRS128\n" "_mm_testc_ps(__m128 __a, __m128 __b)\n" "{\n" " return __builtin_ia32_vtestcps((__v4sf)__a, (__v4sf)__b);\n" "}\n" "\n" "/// Given two 128-bit floating-point vectors of [4 x float], perform an\n" "/// element-by-element comparison of the single-precision element in the\n" "/// first source vector and the corresponding element in the second source\n" "/// vector.\n" "///\n" "/// The EFLAGS register is updated as follows: \\n\n" "/// If there is at least one pair of single-precision elements where the\n" "/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the\n" "/// ZF flag is set to 1. \\n\n" "/// If there is at least one pair of single-precision elements where the\n" "/// sign-bit of the first element is 0 and the sign-bit of the second element\n" "/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. 
\\n\n" "/// This intrinsic returns 1 if both the ZF and CF flags are set to 0,\n" "/// otherwise it returns 0.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VTESTPS instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float].\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float].\n" "/// \\returns 1 if both the ZF and CF flags are set to 0, otherwise returns 0.\n" "static __inline int __DEFAULT_FN_ATTRS128\n" "_mm_testnzc_ps(__m128 __a, __m128 __b)\n" "{\n" " return __builtin_ia32_vtestnzcps((__v4sf)__a, (__v4sf)__b);\n" "}\n" "\n" "/// Given two 256-bit floating-point vectors of [4 x double], perform an\n" "/// element-by-element comparison of the double-precision elements in the\n" "/// first source vector and the corresponding elements in the second source\n" "/// vector.\n" "///\n" "/// The EFLAGS register is updated as follows: \\n\n" "/// If there is at least one pair of double-precision elements where the\n" "/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the\n" "/// ZF flag is set to 1. \\n\n" "/// If there is at least one pair of double-precision elements where the\n" "/// sign-bit of the first element is 0 and the sign-bit of the second element\n" "/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. 
\\n\n" "/// This intrinsic returns the value of the ZF flag.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VTESTPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [4 x double].\n" "/// \\param __b\n" "/// A 256-bit vector of [4 x double].\n" "/// \\returns the ZF flag.\n" "static __inline int __DEFAULT_FN_ATTRS\n" "_mm256_testz_pd(__m256d __a, __m256d __b)\n" "{\n" " return __builtin_ia32_vtestzpd256((__v4df)__a, (__v4df)__b);\n" "}\n" "\n" "/// Given two 256-bit floating-point vectors of [4 x double], perform an\n" "/// element-by-element comparison of the double-precision elements in the\n" "/// first source vector and the corresponding elements in the second source\n" "/// vector.\n" "///\n" "/// The EFLAGS register is updated as follows: \\n\n" "/// If there is at least one pair of double-precision elements where the\n" "/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the\n" "/// ZF flag is set to 1. \\n\n" "/// If there is at least one pair of double-precision elements where the\n" "/// sign-bit of the first element is 0 and the sign-bit of the second element\n" "/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. 
\\n\n" "/// This intrinsic returns the value of the CF flag.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VTESTPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [4 x double].\n" "/// \\param __b\n" "/// A 256-bit vector of [4 x double].\n" "/// \\returns the CF flag.\n" "static __inline int __DEFAULT_FN_ATTRS\n" "_mm256_testc_pd(__m256d __a, __m256d __b)\n" "{\n" " return __builtin_ia32_vtestcpd256((__v4df)__a, (__v4df)__b);\n" "}\n" "\n" "/// Given two 256-bit floating-point vectors of [4 x double], perform an\n" "/// element-by-element comparison of the double-precision elements in the\n" "/// first source vector and the corresponding elements in the second source\n" "/// vector.\n" "///\n" "/// The EFLAGS register is updated as follows: \\n\n" "/// If there is at least one pair of double-precision elements where the\n" "/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the\n" "/// ZF flag is set to 1. \\n\n" "/// If there is at least one pair of double-precision elements where the\n" "/// sign-bit of the first element is 0 and the sign-bit of the second element\n" "/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. 
\\n\n" "/// This intrinsic returns 1 if both the ZF and CF flags are set to 0,\n" "/// otherwise it returns 0.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VTESTPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [4 x double].\n" "/// \\param __b\n" "/// A 256-bit vector of [4 x double].\n" "/// \\returns 1 if both the ZF and CF flags are set to 0, otherwise returns 0.\n" "static __inline int __DEFAULT_FN_ATTRS\n" "_mm256_testnzc_pd(__m256d __a, __m256d __b)\n" "{\n" " return __builtin_ia32_vtestnzcpd256((__v4df)__a, (__v4df)__b);\n" "}\n" "\n" "/// Given two 256-bit floating-point vectors of [8 x float], perform an\n" "/// element-by-element comparison of the single-precision element in the\n" "/// first source vector and the corresponding element in the second source\n" "/// vector.\n" "///\n" "/// The EFLAGS register is updated as follows: \\n\n" "/// If there is at least one pair of single-precision elements where the\n" "/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the\n" "/// ZF flag is set to 1. \\n\n" "/// If there is at least one pair of single-precision elements where the\n" "/// sign-bit of the first element is 0 and the sign-bit of the second element\n" "/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. 
\\n\n" "/// This intrinsic returns the value of the ZF flag.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VTESTPS instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x float].\n" "/// \\param __b\n" "/// A 256-bit vector of [8 x float].\n" "/// \\returns the ZF flag.\n" "static __inline int __DEFAULT_FN_ATTRS\n" "_mm256_testz_ps(__m256 __a, __m256 __b)\n" "{\n" " return __builtin_ia32_vtestzps256((__v8sf)__a, (__v8sf)__b);\n" "}\n" "\n" "/// Given two 256-bit floating-point vectors of [8 x float], perform an\n" "/// element-by-element comparison of the single-precision element in the\n" "/// first source vector and the corresponding element in the second source\n" "/// vector.\n" "///\n" "/// The EFLAGS register is updated as follows: \\n\n" "/// If there is at least one pair of single-precision elements where the\n" "/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the\n" "/// ZF flag is set to 1. \\n\n" "/// If there is at least one pair of single-precision elements where the\n" "/// sign-bit of the first element is 0 and the sign-bit of the second element\n" "/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. 
\\n\n" "/// This intrinsic returns the value of the CF flag.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VTESTPS instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x float].\n" "/// \\param __b\n" "/// A 256-bit vector of [8 x float].\n" "/// \\returns the CF flag.\n" "static __inline int __DEFAULT_FN_ATTRS\n" "_mm256_testc_ps(__m256 __a, __m256 __b)\n" "{\n" " return __builtin_ia32_vtestcps256((__v8sf)__a, (__v8sf)__b);\n" "}\n" "\n" "/// Given two 256-bit floating-point vectors of [8 x float], perform an\n" "/// element-by-element comparison of the single-precision elements in the\n" "/// first source vector and the corresponding elements in the second source\n" "/// vector.\n" "///\n" "/// The EFLAGS register is updated as follows: \\n\n" "/// If there is at least one pair of single-precision elements where the\n" "/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the\n" "/// ZF flag is set to 1. \\n\n" "/// If there is at least one pair of single-precision elements where the\n" "/// sign-bit of the first element is 0 and the sign-bit of the second element\n" "/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. 
\\n\n" "/// This intrinsic returns 1 if both the ZF and CF flags are set to 0,\n" "/// otherwise it returns 0.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VTESTPS instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x float].\n" "/// \\param __b\n" "/// A 256-bit vector of [8 x float].\n" "/// \\returns 1 if both the ZF and CF flags are set to 0, otherwise returns 0.\n" "static __inline int __DEFAULT_FN_ATTRS\n" "_mm256_testnzc_ps(__m256 __a, __m256 __b)\n" "{\n" " return __builtin_ia32_vtestnzcps256((__v8sf)__a, (__v8sf)__b);\n" "}\n" "\n" "/// Given two 256-bit integer vectors, perform a bit-by-bit comparison\n" "/// of the two source vectors.\n" "///\n" "/// The EFLAGS register is updated as follows: \\n\n" "/// If there is at least one pair of bits where both bits are 1, the ZF flag\n" "/// is set to 0. Otherwise the ZF flag is set to 1. \\n\n" "/// If there is at least one pair of bits where the bit from the first source\n" "/// vector is 0 and the bit from the second source vector is 1, the CF flag\n" "/// is set to 0. Otherwise the CF flag is set to 1. \\n\n" "/// This intrinsic returns the value of the ZF flag.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPTEST instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit integer vector.\n" "/// \\param __b\n" "/// A 256-bit integer vector.\n" "/// \\returns the ZF flag.\n" "static __inline int __DEFAULT_FN_ATTRS\n" "_mm256_testz_si256(__m256i __a, __m256i __b)\n" "{\n" " return __builtin_ia32_ptestz256((__v4di)__a, (__v4di)__b);\n" "}\n" "\n" "/// Given two 256-bit integer vectors, perform a bit-by-bit comparison\n" "/// of the two source vectors.\n" "///\n" "/// The EFLAGS register is updated as follows: \\n\n" "/// If there is at least one pair of bits where both bits are 1, the ZF flag\n" "/// is set to 0. Otherwise the ZF flag is set to 1. 
\\n\n" "/// If there is at least one pair of bits where the bit from the first source\n" "/// vector is 0 and the bit from the second source vector is 1, the CF flag\n" "/// is set to 0. Otherwise the CF flag is set to 1. \\n\n" "/// This intrinsic returns the value of the CF flag.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPTEST instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit integer vector.\n" "/// \\param __b\n" "/// A 256-bit integer vector.\n" "/// \\returns the CF flag.\n" "static __inline int __DEFAULT_FN_ATTRS\n" "_mm256_testc_si256(__m256i __a, __m256i __b)\n" "{\n" " return __builtin_ia32_ptestc256((__v4di)__a, (__v4di)__b);\n" "}\n" "\n" "/// Given two 256-bit integer vectors, perform a bit-by-bit comparison\n" "/// of the two source vectors.\n" "///\n" "/// The EFLAGS register is updated as follows: \\n\n" "/// If there is at least one pair of bits where both bits are 1, the ZF flag\n" "/// is set to 0. Otherwise the ZF flag is set to 1. \\n\n" "/// If there is at least one pair of bits where the bit from the first source\n" "/// vector is 0 and the bit from the second source vector is 1, the CF flag\n" "/// is set to 0. Otherwise the CF flag is set to 1. 
\\n\n" "/// This intrinsic returns 1 if both the ZF and CF flags are set to 0,\n" "/// otherwise it returns 0.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPTEST instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit integer vector.\n" "/// \\param __b\n" "/// A 256-bit integer vector.\n" "/// \\returns 1 if both the ZF and CF flags are set to 0, otherwise returns 0.\n" "static __inline int __DEFAULT_FN_ATTRS\n" "_mm256_testnzc_si256(__m256i __a, __m256i __b)\n" "{\n" " return __builtin_ia32_ptestnzc256((__v4di)__a, (__v4di)__b);\n" "}\n" "\n" "/* Vector extract sign mask */\n" "/// Extracts the sign bits of double-precision floating point elements\n" "/// in a 256-bit vector of [4 x double] and writes them to the lower order\n" "/// bits of the return value.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVMSKPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [4 x double] containing the double-precision\n" "/// floating point values with sign bits to be extracted.\n" "/// \\returns The sign bits from the operand, written to bits [3:0].\n" "static __inline int __DEFAULT_FN_ATTRS\n" "_mm256_movemask_pd(__m256d __a)\n" "{\n" " return __builtin_ia32_movmskpd256((__v4df)__a);\n" "}\n" "\n" "/// Extracts the sign bits of single-precision floating point elements\n" "/// in a 256-bit vector of [8 x float] and writes them to the lower order\n" "/// bits of the return value.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVMSKPS instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x float] containing the single-precision floating\n" "/// point values with sign bits to be extracted.\n" "/// \\returns The sign bits from the operand, written to bits [7:0].\n" "static __inline int __DEFAULT_FN_ATTRS\n" "_mm256_movemask_ps(__m256 __a)\n" "{\n" " return __builtin_ia32_movmskps256((__v8sf)__a);\n" "}\n" "\n" "/* Vector __zero */\n" 
"/// Zeroes the contents of all XMM or YMM registers.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VZEROALL instruction.\n" "static __inline void __attribute__((__always_inline__, __nodebug__, __target__(\"avx\")))\n" "_mm256_zeroall(void)\n" "{\n" " __builtin_ia32_vzeroall();\n" "}\n" "\n" "/// Zeroes the upper 128 bits (bits 255:128) of all YMM registers.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VZEROUPPER instruction.\n" "static __inline void __attribute__((__always_inline__, __nodebug__, __target__(\"avx\")))\n" "_mm256_zeroupper(void)\n" "{\n" " __builtin_ia32_vzeroupper();\n" "}\n" "\n" "/* Vector load with broadcast */\n" "/// Loads a scalar single-precision floating point value from the\n" "/// specified address pointed to by \\a __a and broadcasts it to the elements\n" "/// of a [4 x float] vector.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VBROADCASTSS instruction.\n" "///\n" "/// \\param __a\n" "/// The single-precision floating point value to be broadcast.\n" "/// \\returns A 128-bit vector of [4 x float] whose 32-bit elements are set\n" "/// equal to the broadcast value.\n" "static __inline __m128 __DEFAULT_FN_ATTRS128\n" "_mm_broadcast_ss(float const *__a)\n" "{\n" " struct __mm_broadcast_ss_struct {\n" " float __f;\n" " } __attribute__((__packed__, __may_alias__));\n" " float __f = ((const struct __mm_broadcast_ss_struct*)__a)->__f;\n" " return __extension__ (__m128){ __f, __f, __f, __f };\n" "}\n" "\n" "/// Loads a scalar double-precision floating point value from the\n" "/// specified address pointed to by \\a __a and broadcasts it to the elements\n" "/// of a [4 x double] vector.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VBROADCASTSD instruction.\n" "///\n" "/// \\param __a\n" "/// The double-precision floating point value to be broadcast.\n" "/// \\returns A 256-bit vector of [4 x double] whose 
64-bit elements are set\n" "/// equal to the broadcast value.\n" "static __inline __m256d __DEFAULT_FN_ATTRS\n" "_mm256_broadcast_sd(double const *__a)\n" "{\n" " struct __mm256_broadcast_sd_struct {\n" " double __d;\n" " } __attribute__((__packed__, __may_alias__));\n" " double __d = ((const struct __mm256_broadcast_sd_struct*)__a)->__d;\n" " return __extension__ (__m256d)(__v4df){ __d, __d, __d, __d };\n" "}\n" "\n" "/// Loads a scalar single-precision floating point value from the\n" "/// specified address pointed to by \\a __a and broadcasts it to the elements\n" "/// of a [8 x float] vector.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VBROADCASTSS instruction.\n" "///\n" "/// \\param __a\n" "/// The single-precision floating point value to be broadcast.\n" "/// \\returns A 256-bit vector of [8 x float] whose 32-bit elements are set\n" "/// equal to the broadcast value.\n" "static __inline __m256 __DEFAULT_FN_ATTRS\n" "_mm256_broadcast_ss(float const *__a)\n" "{\n" " struct __mm256_broadcast_ss_struct {\n" " float __f;\n" " } __attribute__((__packed__, __may_alias__));\n" " float __f = ((const struct __mm256_broadcast_ss_struct*)__a)->__f;\n" " return __extension__ (__m256)(__v8sf){ __f, __f, __f, __f, __f, __f, __f, __f };\n" "}\n" "\n" "/// Loads the data from a 128-bit vector of [2 x double] from the\n" "/// specified address pointed to by \\a __a and broadcasts it to 128-bit\n" "/// elements in a 256-bit vector of [4 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VBROADCASTF128 instruction.\n" "///\n" "/// \\param __a\n" "/// The 128-bit vector of [2 x double] to be broadcast.\n" "/// \\returns A 256-bit vector of [4 x double] whose 128-bit elements are set\n" "/// equal to the broadcast value.\n" "static __inline __m256d __DEFAULT_FN_ATTRS\n" "_mm256_broadcast_pd(__m128d const *__a)\n" "{\n" " __m128d __b = _mm_loadu_pd((const double *)__a);\n" " return 
(__m256d)__builtin_shufflevector((__v2df)__b, (__v2df)__b,\n" " 0, 1, 0, 1);\n" "}\n" "\n" "/// Loads the data from a 128-bit vector of [4 x float] from the\n" "/// specified address pointed to by \\a __a and broadcasts it to 128-bit\n" "/// elements in a 256-bit vector of [8 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VBROADCASTF128 instruction.\n" "///\n" "/// \\param __a\n" "/// The 128-bit vector of [4 x float] to be broadcast.\n" "/// \\returns A 256-bit vector of [8 x float] whose 128-bit elements are set\n" "/// equal to the broadcast value.\n" "static __inline __m256 __DEFAULT_FN_ATTRS\n" "_mm256_broadcast_ps(__m128 const *__a)\n" "{\n" " __m128 __b = _mm_loadu_ps((const float *)__a);\n" " return (__m256)__builtin_shufflevector((__v4sf)__b, (__v4sf)__b,\n" " 0, 1, 2, 3, 0, 1, 2, 3);\n" "}\n" "\n" "/* SIMD load ops */\n" "/// Loads 4 double-precision floating point values from a 32-byte aligned\n" "/// memory location pointed to by \\a __p into a vector of [4 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVAPD instruction.\n" "///\n" "/// \\param __p\n" "/// A 32-byte aligned pointer to a memory location containing\n" "/// double-precision floating point values.\n" "/// \\returns A 256-bit vector of [4 x double] containing the moved values.\n" "static __inline __m256d __DEFAULT_FN_ATTRS\n" "_mm256_load_pd(double const *__p)\n" "{\n" " return *(const __m256d *)__p;\n" "}\n" "\n" "/// Loads 8 single-precision floating point values from a 32-byte aligned\n" "/// memory location pointed to by \\a __p into a vector of [8 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVAPS instruction.\n" "///\n" "/// \\param __p\n" "/// A 32-byte aligned pointer to a memory location containing float values.\n" "/// \\returns A 256-bit vector of [8 x float] containing the moved values.\n" "static __inline __m256 __DEFAULT_FN_ATTRS\n" 
"_mm256_load_ps(float const *__p)\n" "{\n" " return *(const __m256 *)__p;\n" "}\n" "\n" "/// Loads 4 double-precision floating point values from an unaligned\n" "/// memory location pointed to by \\a __p into a vector of [4 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVUPD instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a memory location containing double-precision floating\n" "/// point values.\n" "/// \\returns A 256-bit vector of [4 x double] containing the moved values.\n" "static __inline __m256d __DEFAULT_FN_ATTRS\n" "_mm256_loadu_pd(double const *__p)\n" "{\n" " struct __loadu_pd {\n" " __m256d_u __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " return ((const struct __loadu_pd*)__p)->__v;\n" "}\n" "\n" "/// Loads 8 single-precision floating point values from an unaligned\n" "/// memory location pointed to by \\a __p into a vector of [8 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVUPS instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a memory location containing single-precision floating\n" "/// point values.\n" "/// \\returns A 256-bit vector of [8 x float] containing the moved values.\n" "static __inline __m256 __DEFAULT_FN_ATTRS\n" "_mm256_loadu_ps(float const *__p)\n" "{\n" " struct __loadu_ps {\n" " __m256_u __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " return ((const struct __loadu_ps*)__p)->__v;\n" "}\n" "\n" "/// Loads 256 bits of integer data from a 32-byte aligned memory\n" "/// location pointed to by \\a __p into elements of a 256-bit integer vector.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVDQA instruction.\n" "///\n" "/// \\param __p\n" "/// A 32-byte aligned pointer to a 256-bit integer vector containing integer\n" "/// values.\n" "/// \\returns A 256-bit integer vector containing the moved values.\n" "static __inline __m256i __DEFAULT_FN_ATTRS\n" 
"_mm256_load_si256(__m256i const *__p)\n" "{\n" " return *__p;\n" "}\n" "\n" "/// Loads 256 bits of integer data from an unaligned memory location\n" "/// pointed to by \\a __p into a 256-bit integer vector.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVDQU instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a 256-bit integer vector containing integer values.\n" "/// \\returns A 256-bit integer vector containing the moved values.\n" "static __inline __m256i __DEFAULT_FN_ATTRS\n" "_mm256_loadu_si256(__m256i_u const *__p)\n" "{\n" " struct __loadu_si256 {\n" " __m256i_u __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " return ((const struct __loadu_si256*)__p)->__v;\n" "}\n" "\n" "/// Loads 256 bits of integer data from an unaligned memory location\n" "/// pointed to by \\a __p into a 256-bit integer vector. This intrinsic may\n" "/// perform better than \\c _mm256_loadu_si256 when the data crosses a cache\n" "/// line boundary.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VLDDQU instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a 256-bit integer vector containing integer values.\n" "/// \\returns A 256-bit integer vector containing the moved values.\n" "static __inline __m256i __DEFAULT_FN_ATTRS\n" "_mm256_lddqu_si256(__m256i_u const *__p)\n" "{\n" " return (__m256i)__builtin_ia32_lddqu256((char const *)__p);\n" "}\n" "\n" "/* SIMD store ops */\n" "/// Stores double-precision floating point values from a 256-bit vector\n" "/// of [4 x double] to a 32-byte aligned memory location pointed to by\n" "/// \\a __p.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVAPD instruction.\n" "///\n" "/// \\param __p\n" "/// A 32-byte aligned pointer to a memory location that will receive the\n" "/// double-precision floaing point values.\n" "/// \\param __a\n" "/// A 256-bit vector of [4 x double] containing the values to be moved.\n" "static 
__inline void __DEFAULT_FN_ATTRS\n" "_mm256_store_pd(double *__p, __m256d __a)\n" "{\n" " *(__m256d *)__p = __a;\n" "}\n" "\n" "/// Stores single-precision floating point values from a 256-bit vector\n" "/// of [8 x float] to a 32-byte aligned memory location pointed to by \\a __p.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVAPS instruction.\n" "///\n" "/// \\param __p\n" "/// A 32-byte aligned pointer to a memory location that will receive the\n" "/// float values.\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x float] containing the values to be moved.\n" "static __inline void __DEFAULT_FN_ATTRS\n" "_mm256_store_ps(float *__p, __m256 __a)\n" "{\n" " *(__m256 *)__p = __a;\n" "}\n" "\n" "/// Stores double-precision floating point values from a 256-bit vector\n" "/// of [4 x double] to an unaligned memory location pointed to by \\a __p.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVUPD instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a memory location that will receive the double-precision\n" "/// floating point values.\n" "/// \\param __a\n" "/// A 256-bit vector of [4 x double] containing the values to be moved.\n" "static __inline void __DEFAULT_FN_ATTRS\n" "_mm256_storeu_pd(double *__p, __m256d __a)\n" "{\n" " struct __storeu_pd {\n" " __m256d_u __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " ((struct __storeu_pd*)__p)->__v = __a;\n" "}\n" "\n" "/// Stores single-precision floating point values from a 256-bit vector\n" "/// of [8 x float] to an unaligned memory location pointed to by \\a __p.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVUPS instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a memory location that will receive the float values.\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x float] containing the values to be moved.\n" "static __inline void __DEFAULT_FN_ATTRS\n" "_mm256_storeu_ps(float 
*__p, __m256 __a)\n" "{\n" " struct __storeu_ps {\n" " __m256_u __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " ((struct __storeu_ps*)__p)->__v = __a;\n" "}\n" "\n" "/// Stores integer values from a 256-bit integer vector to a 32-byte\n" "/// aligned memory location pointed to by \\a __p.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVDQA instruction.\n" "///\n" "/// \\param __p\n" "/// A 32-byte aligned pointer to a memory location that will receive the\n" "/// integer values.\n" "/// \\param __a\n" "/// A 256-bit integer vector containing the values to be moved.\n" "static __inline void __DEFAULT_FN_ATTRS\n" "_mm256_store_si256(__m256i *__p, __m256i __a)\n" "{\n" " *__p = __a;\n" "}\n" "\n" "/// Stores integer values from a 256-bit integer vector to an unaligned\n" "/// memory location pointed to by \\a __p.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVDQU instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a memory location that will receive the integer values.\n" "/// \\param __a\n" "/// A 256-bit integer vector containing the values to be moved.\n" "static __inline void __DEFAULT_FN_ATTRS\n" "_mm256_storeu_si256(__m256i_u *__p, __m256i __a)\n" "{\n" " struct __storeu_si256 {\n" " __m256i_u __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " ((struct __storeu_si256*)__p)->__v = __a;\n" "}\n" "\n" "/* Conditional load ops */\n" "/// Conditionally loads double-precision floating point elements from a\n" "/// memory location pointed to by \\a __p into a 128-bit vector of\n" "/// [2 x double], depending on the mask bits associated with each data\n" "/// element.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMASKMOVPD instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a memory location that contains the double-precision\n" "/// floating point values.\n" "/// \\param __m\n" "/// A 128-bit integer vector containing 
the mask. The most significant bit of\n" "/// each data element represents the mask bits. If a mask bit is zero, the\n" "/// corresponding value in the memory location is not loaded and the\n" "/// corresponding field in the return value is set to zero.\n" "/// \\returns A 128-bit vector of [2 x double] containing the loaded values.\n" "static __inline __m128d __DEFAULT_FN_ATTRS128\n" "_mm_maskload_pd(double const *__p, __m128i __m)\n" "{\n" " return (__m128d)__builtin_ia32_maskloadpd((const __v2df *)__p, (__v2di)__m);\n" "}\n" "\n" "/// Conditionally loads double-precision floating point elements from a\n" "/// memory location pointed to by \\a __p into a 256-bit vector of\n" "/// [4 x double], depending on the mask bits associated with each data\n" "/// element.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMASKMOVPD instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a memory location that contains the double-precision\n" "/// floating point values.\n" "/// \\param __m\n" "/// A 256-bit integer vector of [4 x quadword] containing the mask. The most\n" "/// significant bit of each quadword element represents the mask bits. 
If a\n" "/// mask bit is zero, the corresponding value in the memory location is not\n" "/// loaded and the corresponding field in the return value is set to zero.\n" "/// \\returns A 256-bit vector of [4 x double] containing the loaded values.\n" "static __inline __m256d __DEFAULT_FN_ATTRS\n" "_mm256_maskload_pd(double const *__p, __m256i __m)\n" "{\n" " return (__m256d)__builtin_ia32_maskloadpd256((const __v4df *)__p,\n" " (__v4di)__m);\n" "}\n" "\n" "/// Conditionally loads single-precision floating point elements from a\n" "/// memory location pointed to by \\a __p into a 128-bit vector of\n" "/// [4 x float], depending on the mask bits associated with each data\n" "/// element.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMASKMOVPS instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a memory location that contains the single-precision\n" "/// floating point values.\n" "/// \\param __m\n" "/// A 128-bit integer vector containing the mask. The most significant bit of\n" "/// each data element represents the mask bits. 
If a mask bit is zero, the\n" "/// corresponding value in the memory location is not loaded and the\n" "/// corresponding field in the return value is set to zero.\n" "/// \\returns A 128-bit vector of [4 x float] containing the loaded values.\n" "static __inline __m128 __DEFAULT_FN_ATTRS128\n" "_mm_maskload_ps(float const *__p, __m128i __m)\n" "{\n" " return (__m128)__builtin_ia32_maskloadps((const __v4sf *)__p, (__v4si)__m);\n" "}\n" "\n" "/// Conditionally loads single-precision floating point elements from a\n" "/// memory location pointed to by \\a __p into a 256-bit vector of\n" "/// [8 x float], depending on the mask bits associated with each data\n" "/// element.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMASKMOVPS instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a memory location that contains the single-precision\n" "/// floating point values.\n" "/// \\param __m\n" "/// A 256-bit integer vector of [8 x dword] containing the mask. The most\n" "/// significant bit of each dword element represents the mask bits. 
If a mask\n" "/// bit is zero, the corresponding value in the memory location is not loaded\n" "/// and the corresponding field in the return value is set to zero.\n" "/// \\returns A 256-bit vector of [8 x float] containing the loaded values.\n" "static __inline __m256 __DEFAULT_FN_ATTRS\n" "_mm256_maskload_ps(float const *__p, __m256i __m)\n" "{\n" " return (__m256)__builtin_ia32_maskloadps256((const __v8sf *)__p, (__v8si)__m);\n" "}\n" "\n" "/* Conditional store ops */\n" "/// Moves single-precision floating point values from a 256-bit vector\n" "/// of [8 x float] to a memory location pointed to by \\a __p, according to\n" "/// the specified mask.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMASKMOVPS instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a memory location that will receive the float values.\n" "/// \\param __m\n" "/// A 256-bit integer vector of [8 x dword] containing the mask. The most\n" "/// significant bit of each dword element in the mask vector represents the\n" "/// mask bits. If a mask bit is zero, the corresponding value from vector\n" "/// \\a __a is not stored and the corresponding field in the memory location\n" "/// pointed to by \\a __p is not changed.\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x float] containing the values to be stored.\n" "static __inline void __DEFAULT_FN_ATTRS\n" "_mm256_maskstore_ps(float *__p, __m256i __m, __m256 __a)\n" "{\n" " __builtin_ia32_maskstoreps256((__v8sf *)__p, (__v8si)__m, (__v8sf)__a);\n" "}\n" "\n" "/// Moves double-precision values from a 128-bit vector of [2 x double]\n" "/// to a memory location pointed to by \\a __p, according to the specified\n" "/// mask.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMASKMOVPD instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a memory location that will receive the float values.\n" "/// \\param __m\n" "/// A 128-bit integer vector containing the mask. 
The most significant bit of\n" "/// each field in the mask vector represents the mask bits. If a mask bit is\n" "/// zero, the corresponding value from vector \\a __a is not stored and the\n" "/// corresponding field in the memory location pointed to by \\a __p is not\n" "/// changed.\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double] containing the values to be stored.\n" "static __inline void __DEFAULT_FN_ATTRS128\n" "_mm_maskstore_pd(double *__p, __m128i __m, __m128d __a)\n" "{\n" " __builtin_ia32_maskstorepd((__v2df *)__p, (__v2di)__m, (__v2df)__a);\n" "}\n" "\n" "/// Moves double-precision values from a 256-bit vector of [4 x double]\n" "/// to a memory location pointed to by \\a __p, according to the specified\n" "/// mask.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMASKMOVPD instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a memory location that will receive the float values.\n" "/// \\param __m\n" "/// A 256-bit integer vector of [4 x quadword] containing the mask. The most\n" "/// significant bit of each quadword element in the mask vector represents\n" "/// the mask bits. 
If a mask bit is zero, the corresponding value from vector\n" "/// __a is not stored and the corresponding field in the memory location\n" "/// pointed to by \\a __p is not changed.\n" "/// \\param __a\n" "/// A 256-bit vector of [4 x double] containing the values to be stored.\n" "static __inline void __DEFAULT_FN_ATTRS\n" "_mm256_maskstore_pd(double *__p, __m256i __m, __m256d __a)\n" "{\n" " __builtin_ia32_maskstorepd256((__v4df *)__p, (__v4di)__m, (__v4df)__a);\n" "}\n" "\n" "/// Moves single-precision floating point values from a 128-bit vector\n" "/// of [4 x float] to a memory location pointed to by \\a __p, according to\n" "/// the specified mask.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMASKMOVPS instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a memory location that will receive the float values.\n" "/// \\param __m\n" "/// A 128-bit integer vector containing the mask. The most significant bit of\n" "/// each field in the mask vector represents the mask bits. If a mask bit is\n" "/// zero, the corresponding value from vector __a is not stored and the\n" "/// corresponding field in the memory location pointed to by \\a __p is not\n" "/// changed.\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float] containing the values to be stored.\n" "static __inline void __DEFAULT_FN_ATTRS128\n" "_mm_maskstore_ps(float *__p, __m128i __m, __m128 __a)\n" "{\n" " __builtin_ia32_maskstoreps((__v4sf *)__p, (__v4si)__m, (__v4sf)__a);\n" "}\n" "\n" "/* Cacheability support ops */\n" "/// Moves integer data from a 256-bit integer vector to a 32-byte\n" "/// aligned memory location. 
To minimize caching, the data is flagged as\n" "/// non-temporal (unlikely to be used again soon).\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVNTDQ instruction.\n" "///\n" "/// \\param __a\n" "/// A pointer to a 32-byte aligned memory location that will receive the\n" "/// integer values.\n" "/// \\param __b\n" "/// A 256-bit integer vector containing the values to be moved.\n" "static __inline void __DEFAULT_FN_ATTRS\n" "_mm256_stream_si256(__m256i *__a, __m256i __b)\n" "{\n" " typedef __v4di __v4di_aligned __attribute__((aligned(32)));\n" " __builtin_nontemporal_store((__v4di_aligned)__b, (__v4di_aligned*)__a);\n" "}\n" "\n" "/// Moves double-precision values from a 256-bit vector of [4 x double]\n" "/// to a 32-byte aligned memory location. To minimize caching, the data is\n" "/// flagged as non-temporal (unlikely to be used again soon).\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVNTPD instruction.\n" "///\n" "/// \\param __a\n" "/// A pointer to a 32-byte aligned memory location that will receive the\n" "/// double-precision floating-point values.\n" "/// \\param __b\n" "/// A 256-bit vector of [4 x double] containing the values to be moved.\n" "static __inline void __DEFAULT_FN_ATTRS\n" "_mm256_stream_pd(double *__a, __m256d __b)\n" "{\n" " typedef __v4df __v4df_aligned __attribute__((aligned(32)));\n" " __builtin_nontemporal_store((__v4df_aligned)__b, (__v4df_aligned*)__a);\n" "}\n" "\n" "/// Moves single-precision floating point values from a 256-bit vector\n" "/// of [8 x float] to a 32-byte aligned memory location. 
To minimize\n" "/// caching, the data is flagged as non-temporal (unlikely to be used again\n" "/// soon).\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVNTPS instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a 32-byte aligned memory location that will receive the\n" "/// single-precision floating point values.\n" "/// \\param __a\n" "/// A 256-bit vector of [8 x float] containing the values to be moved.\n" "static __inline void __DEFAULT_FN_ATTRS\n" "_mm256_stream_ps(float *__p, __m256 __a)\n" "{\n" " typedef __v8sf __v8sf_aligned __attribute__((aligned(32)));\n" " __builtin_nontemporal_store((__v8sf_aligned)__a, (__v8sf_aligned*)__p);\n" "}\n" "\n" "/* Create vectors */\n" "/// Create a 256-bit vector of [4 x double] with undefined values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic has no corresponding instruction.\n" "///\n" "/// \\returns A 256-bit vector of [4 x double] containing undefined values.\n" "static __inline__ __m256d __DEFAULT_FN_ATTRS\n" "_mm256_undefined_pd(void)\n" "{\n" " return (__m256d)__builtin_ia32_undef256();\n" "}\n" "\n" "/// Create a 256-bit vector of [8 x float] with undefined values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic has no corresponding instruction.\n" "///\n" "/// \\returns A 256-bit vector of [8 x float] containing undefined values.\n" "static __inline__ __m256 __DEFAULT_FN_ATTRS\n" "_mm256_undefined_ps(void)\n" "{\n" " return (__m256)__builtin_ia32_undef256();\n" "}\n" "\n" "/// Create a 256-bit integer vector with undefined values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic has no corresponding instruction.\n" "///\n" "/// \\returns A 256-bit integer vector containing undefined values.\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS\n" "_mm256_undefined_si256(void)\n" "{\n" " return (__m256i)__builtin_ia32_undef256();\n" "}\n" "\n" "/// Constructs a 256-bit floating-point vector of [4 x double]\n" "/// initialized with 
the specified double-precision floating-point values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VUNPCKLPD+VINSERTF128 \n" "/// instruction.\n" "///\n" "/// \\param __a\n" "/// A double-precision floating-point value used to initialize bits [255:192]\n" "/// of the result.\n" "/// \\param __b\n" "/// A double-precision floating-point value used to initialize bits [191:128]\n" "/// of the result.\n" "/// \\param __c\n" "/// A double-precision floating-point value used to initialize bits [127:64]\n" "/// of the result.\n" "/// \\param __d\n" "/// A double-precision floating-point value used to initialize bits [63:0]\n" "/// of the result.\n" "/// \\returns An initialized 256-bit floating-point vector of [4 x double].\n" "static __inline __m256d __DEFAULT_FN_ATTRS\n" "_mm256_set_pd(double __a, double __b, double __c, double __d)\n" "{\n" " return __extension__ (__m256d){ __d, __c, __b, __a };\n" "}\n" "\n" "/// Constructs a 256-bit floating-point vector of [8 x float] initialized\n" "/// with the specified single-precision floating-point values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic is a utility function and does not correspond to a specific\n" "/// instruction.\n" "///\n" "/// \\param __a\n" "/// A single-precision floating-point value used to initialize bits [255:224]\n" "/// of the result.\n" "/// \\param __b\n" "/// A single-precision floating-point value used to initialize bits [223:192]\n" "/// of the result.\n" "/// \\param __c\n" "/// A single-precision floating-point value used to initialize bits [191:160]\n" "/// of the result.\n" "/// \\param __d\n" "/// A single-precision floating-point value used to initialize bits [159:128]\n" "/// of the result.\n" "/// \\param __e\n" "/// A single-precision floating-point value used to initialize bits [127:96]\n" "/// of the result.\n" "/// \\param __f\n" "/// A single-precision floating-point value used to initialize bits [95:64]\n" "/// of the result.\n" 
"/// \\param __g\n" "/// A single-precision floating-point value used to initialize bits [63:32]\n" "/// of the result.\n" "/// \\param __h\n" "/// A single-precision floating-point value used to initialize bits [31:0]\n" "/// of the result.\n" "/// \\returns An initialized 256-bit floating-point vector of [8 x float].\n" "static __inline __m256 __DEFAULT_FN_ATTRS\n" "_mm256_set_ps(float __a, float __b, float __c, float __d,\n" " float __e, float __f, float __g, float __h)\n" "{\n" " return __extension__ (__m256){ __h, __g, __f, __e, __d, __c, __b, __a };\n" "}\n" "\n" "/// Constructs a 256-bit integer vector initialized with the specified\n" "/// 32-bit integral values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic is a utility function and does not correspond to a specific\n" "/// instruction.\n" "///\n" "/// \\param __i0\n" "/// A 32-bit integral value used to initialize bits [255:224] of the result.\n" "/// \\param __i1\n" "/// A 32-bit integral value used to initialize bits [223:192] of the result.\n" "/// \\param __i2\n" "/// A 32-bit integral value used to initialize bits [191:160] of the result.\n" "/// \\param __i3\n" "/// A 32-bit integral value used to initialize bits [159:128] of the result.\n" "/// \\param __i4\n" "/// A 32-bit integral value used to initialize bits [127:96] of the result.\n" "/// \\param __i5\n" "/// A 32-bit integral value used to initialize bits [95:64] of the result.\n" "/// \\param __i6\n" "/// A 32-bit integral value used to initialize bits [63:32] of the result.\n" "/// \\param __i7\n" "/// A 32-bit integral value used to initialize bits [31:0] of the result.\n" "/// \\returns An initialized 256-bit integer vector.\n" "static __inline __m256i __DEFAULT_FN_ATTRS\n" "_mm256_set_epi32(int __i0, int __i1, int __i2, int __i3,\n" " int __i4, int __i5, int __i6, int __i7)\n" "{\n" " return __extension__ (__m256i)(__v8si){ __i7, __i6, __i5, __i4, __i3, __i2, __i1, __i0 };\n" "}\n" "\n" "/// Constructs a 256-bit integer 
vector initialized with the specified\n" "/// 16-bit integral values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic is a utility function and does not correspond to a specific\n" "/// instruction.\n" "///\n" "/// \\param __w15\n" "/// A 16-bit integral value used to initialize bits [255:240] of the result.\n" "/// \\param __w14\n" "/// A 16-bit integral value used to initialize bits [239:224] of the result.\n" "/// \\param __w13\n" "/// A 16-bit integral value used to initialize bits [223:208] of the result.\n" "/// \\param __w12\n" "/// A 16-bit integral value used to initialize bits [207:192] of the result.\n" "/// \\param __w11\n" "/// A 16-bit integral value used to initialize bits [191:176] of the result.\n" "/// \\param __w10\n" "/// A 16-bit integral value used to initialize bits [175:160] of the result.\n" "/// \\param __w09\n" "/// A 16-bit integral value used to initialize bits [159:144] of the result.\n" "/// \\param __w08\n" "/// A 16-bit integral value used to initialize bits [143:128] of the result.\n" "/// \\param __w07\n" "/// A 16-bit integral value used to initialize bits [127:112] of the result.\n" "/// \\param __w06\n" "/// A 16-bit integral value used to initialize bits [111:96] of the result.\n" "/// \\param __w05\n" "/// A 16-bit integral value used to initialize bits [95:80] of the result.\n" "/// \\param __w04\n" "/// A 16-bit integral value used to initialize bits [79:64] of the result.\n" "/// \\param __w03\n" "/// A 16-bit integral value used to initialize bits [63:48] of the result.\n" "/// \\param __w02\n" "/// A 16-bit integral value used to initialize bits [47:32] of the result.\n" "/// \\param __w01\n" "/// A 16-bit integral value used to initialize bits [31:16] of the result.\n" "/// \\param __w00\n" "/// A 16-bit integral value used to initialize bits [15:0] of the result.\n" "/// \\returns An initialized 256-bit integer vector.\n" "static __inline __m256i __DEFAULT_FN_ATTRS\n" "_mm256_set_epi16(short __w15, short 
__w14, short __w13, short __w12,\n" " short __w11, short __w10, short __w09, short __w08,\n" " short __w07, short __w06, short __w05, short __w04,\n" " short __w03, short __w02, short __w01, short __w00)\n" "{\n" " return __extension__ (__m256i)(__v16hi){ __w00, __w01, __w02, __w03, __w04, __w05, __w06,\n" " __w07, __w08, __w09, __w10, __w11, __w12, __w13, __w14, __w15 };\n" "}\n" "\n" "/// Constructs a 256-bit integer vector initialized with the specified\n" "/// 8-bit integral values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic is a utility function and does not correspond to a specific\n" "/// instruction.\n" "///\n" "/// \\param __b31\n" "/// An 8-bit integral value used to initialize bits [255:248] of the result.\n" "/// \\param __b30\n" "/// An 8-bit integral value used to initialize bits [247:240] of the result.\n" "/// \\param __b29\n" "/// An 8-bit integral value used to initialize bits [239:232] of the result.\n" "/// \\param __b28\n" "/// An 8-bit integral value used to initialize bits [231:224] of the result.\n" "/// \\param __b27\n" "/// An 8-bit integral value used to initialize bits [223:216] of the result.\n" "/// \\param __b26\n" "/// An 8-bit integral value used to initialize bits [215:208] of the result.\n" "/// \\param __b25\n" "/// An 8-bit integral value used to initialize bits [207:200] of the result.\n" "/// \\param __b24\n" "/// An 8-bit integral value used to initialize bits [199:192] of the result.\n" "/// \\param __b23\n" "/// An 8-bit integral value used to initialize bits [191:184] of the result.\n" "/// \\param __b22\n" "/// An 8-bit integral value used to initialize bits [183:176] of the result.\n" "/// \\param __b21\n" "/// An 8-bit integral value used to initialize bits [175:168] of the result.\n" "/// \\param __b20\n" "/// An 8-bit integral value used to initialize bits [167:160] of the result.\n" "/// \\param __b19\n" "/// An 8-bit integral value used to initialize bits [159:152] of the result.\n" "/// \\param 
__b18\n" "/// An 8-bit integral value used to initialize bits [151:144] of the result.\n" "/// \\param __b17\n" "/// An 8-bit integral value used to initialize bits [143:136] of the result.\n" "/// \\param __b16\n" "/// An 8-bit integral value used to initialize bits [135:128] of the result.\n" "/// \\param __b15\n" "/// An 8-bit integral value used to initialize bits [127:120] of the result.\n" "/// \\param __b14\n" "/// An 8-bit integral value used to initialize bits [119:112] of the result.\n" "/// \\param __b13\n" "/// An 8-bit integral value used to initialize bits [111:104] of the result.\n" "/// \\param __b12\n" "/// An 8-bit integral value used to initialize bits [103:96] of the result.\n" "/// \\param __b11\n" "/// An 8-bit integral value used to initialize bits [95:88] of the result.\n" "/// \\param __b10\n" "/// An 8-bit integral value used to initialize bits [87:80] of the result.\n" "/// \\param __b09\n" "/// An 8-bit integral value used to initialize bits [79:72] of the result.\n" "/// \\param __b08\n" "/// An 8-bit integral value used to initialize bits [71:64] of the result.\n" "/// \\param __b07\n" "/// An 8-bit integral value used to initialize bits [63:56] of the result.\n" "/// \\param __b06\n" "/// An 8-bit integral value used to initialize bits [55:48] of the result.\n" "/// \\param __b05\n" "/// An 8-bit integral value used to initialize bits [47:40] of the result.\n" "/// \\param __b04\n" "/// An 8-bit integral value used to initialize bits [39:32] of the result.\n" "/// \\param __b03\n" "/// An 8-bit integral value used to initialize bits [31:24] of the result.\n" "/// \\param __b02\n" "/// An 8-bit integral value used to initialize bits [23:16] of the result.\n" "/// \\param __b01\n" "/// An 8-bit integral value used to initialize bits [15:8] of the result.\n" "/// \\param __b00\n" "/// An 8-bit integral value used to initialize bits [7:0] of the result.\n" "/// \\returns An initialized 256-bit integer vector.\n" "static __inline __m256i 
__DEFAULT_FN_ATTRS\n" "_mm256_set_epi8(char __b31, char __b30, char __b29, char __b28,\n" " char __b27, char __b26, char __b25, char __b24,\n" " char __b23, char __b22, char __b21, char __b20,\n" " char __b19, char __b18, char __b17, char __b16,\n" " char __b15, char __b14, char __b13, char __b12,\n" " char __b11, char __b10, char __b09, char __b08,\n" " char __b07, char __b06, char __b05, char __b04,\n" " char __b03, char __b02, char __b01, char __b00)\n" "{\n" " return __extension__ (__m256i)(__v32qi){\n" " __b00, __b01, __b02, __b03, __b04, __b05, __b06, __b07,\n" " __b08, __b09, __b10, __b11, __b12, __b13, __b14, __b15,\n" " __b16, __b17, __b18, __b19, __b20, __b21, __b22, __b23,\n" " __b24, __b25, __b26, __b27, __b28, __b29, __b30, __b31\n" " };\n" "}\n" "\n" "/// Constructs a 256-bit integer vector initialized with the specified\n" "/// 64-bit integral values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPUNPCKLQDQ+VINSERTF128 \n" "/// instruction.\n" "///\n" "/// \\param __a\n" "/// A 64-bit integral value used to initialize bits [255:192] of the result.\n" "/// \\param __b\n" "/// A 64-bit integral value used to initialize bits [191:128] of the result.\n" "/// \\param __c\n" "/// A 64-bit integral value used to initialize bits [127:64] of the result.\n" "/// \\param __d\n" "/// A 64-bit integral value used to initialize bits [63:0] of the result.\n" "/// \\returns An initialized 256-bit integer vector.\n" "static __inline __m256i __DEFAULT_FN_ATTRS\n" "_mm256_set_epi64x(long long __a, long long __b, long long __c, long long __d)\n" "{\n" " return __extension__ (__m256i)(__v4di){ __d, __c, __b, __a };\n" "}\n" "\n" "/* Create vectors with elements in reverse order */\n" "/// Constructs a 256-bit floating-point vector of [4 x double],\n" "/// initialized in reverse order with the specified double-precision\n" "/// floating-point values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the 
VUNPCKLPD+VINSERTF128 \n" "/// instruction.\n" "///\n" "/// \\param __a\n" "/// A double-precision floating-point value used to initialize bits [63:0]\n" "/// of the result.\n" "/// \\param __b\n" "/// A double-precision floating-point value used to initialize bits [127:64]\n" "/// of the result.\n" "/// \\param __c\n" "/// A double-precision floating-point value used to initialize bits [191:128]\n" "/// of the result.\n" "/// \\param __d\n" "/// A double-precision floating-point value used to initialize bits [255:192]\n" "/// of the result.\n" "/// \\returns An initialized 256-bit floating-point vector of [4 x double].\n" "static __inline __m256d __DEFAULT_FN_ATTRS\n" "_mm256_setr_pd(double __a, double __b, double __c, double __d)\n" "{\n" " return _mm256_set_pd(__d, __c, __b, __a);\n" "}\n" "\n" "/// Constructs a 256-bit floating-point vector of [8 x float],\n" "/// initialized in reverse order with the specified single-precision\n" "/// float-point values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic is a utility function and does not correspond to a specific\n" "/// instruction.\n" "///\n" "/// \\param __a\n" "/// A single-precision floating-point value used to initialize bits [31:0]\n" "/// of the result.\n" "/// \\param __b\n" "/// A single-precision floating-point value used to initialize bits [63:32]\n" "/// of the result.\n" "/// \\param __c\n" "/// A single-precision floating-point value used to initialize bits [95:64]\n" "/// of the result.\n" "/// \\param __d\n" "/// A single-precision floating-point value used to initialize bits [127:96]\n" "/// of the result.\n" "/// \\param __e\n" "/// A single-precision floating-point value used to initialize bits [159:128]\n" "/// of the result.\n" "/// \\param __f\n" "/// A single-precision floating-point value used to initialize bits [191:160]\n" "/// of the result.\n" "/// \\param __g\n" "/// A single-precision floating-point value used to initialize bits [223:192]\n" "/// of the result.\n" "/// 
\\param __h\n" "/// A single-precision floating-point value used to initialize bits [255:224]\n" "/// of the result.\n" "/// \\returns An initialized 256-bit floating-point vector of [8 x float].\n" "static __inline __m256 __DEFAULT_FN_ATTRS\n" "_mm256_setr_ps(float __a, float __b, float __c, float __d,\n" " float __e, float __f, float __g, float __h)\n" "{\n" " return _mm256_set_ps(__h, __g, __f, __e, __d, __c, __b, __a);\n" "}\n" "\n" "/// Constructs a 256-bit integer vector, initialized in reverse order\n" "/// with the specified 32-bit integral values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic is a utility function and does not correspond to a specific\n" "/// instruction.\n" "///\n" "/// \\param __i0\n" "/// A 32-bit integral value used to initialize bits [31:0] of the result.\n" "/// \\param __i1\n" "/// A 32-bit integral value used to initialize bits [63:32] of the result.\n" "/// \\param __i2\n" "/// A 32-bit integral value used to initialize bits [95:64] of the result.\n" "/// \\param __i3\n" "/// A 32-bit integral value used to initialize bits [127:96] of the result.\n" "/// \\param __i4\n" "/// A 32-bit integral value used to initialize bits [159:128] of the result.\n" "/// \\param __i5\n" "/// A 32-bit integral value used to initialize bits [191:160] of the result.\n" "/// \\param __i6\n" "/// A 32-bit integral value used to initialize bits [223:192] of the result.\n" "/// \\param __i7\n" "/// A 32-bit integral value used to initialize bits [255:224] of the result.\n" "/// \\returns An initialized 256-bit integer vector.\n" "static __inline __m256i __DEFAULT_FN_ATTRS\n" "_mm256_setr_epi32(int __i0, int __i1, int __i2, int __i3,\n" " int __i4, int __i5, int __i6, int __i7)\n" "{\n" " return _mm256_set_epi32(__i7, __i6, __i5, __i4, __i3, __i2, __i1, __i0);\n" "}\n" "\n" "/// Constructs a 256-bit integer vector, initialized in reverse order\n" "/// with the specified 16-bit integral values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// 
This intrinsic is a utility function and does not correspond to a specific\n" "/// instruction.\n" "///\n" "/// \\param __w15\n" "/// A 16-bit integral value used to initialize bits [15:0] of the result.\n" "/// \\param __w14\n" "/// A 16-bit integral value used to initialize bits [31:16] of the result.\n" "/// \\param __w13\n" "/// A 16-bit integral value used to initialize bits [47:32] of the result.\n" "/// \\param __w12\n" "/// A 16-bit integral value used to initialize bits [63:48] of the result.\n" "/// \\param __w11\n" "/// A 16-bit integral value used to initialize bits [79:64] of the result.\n" "/// \\param __w10\n" "/// A 16-bit integral value used to initialize bits [95:80] of the result.\n" "/// \\param __w09\n" "/// A 16-bit integral value used to initialize bits [111:96] of the result.\n" "/// \\param __w08\n" "/// A 16-bit integral value used to initialize bits [127:112] of the result.\n" "/// \\param __w07\n" "/// A 16-bit integral value used to initialize bits [143:128] of the result.\n" "/// \\param __w06\n" "/// A 16-bit integral value used to initialize bits [159:144] of the result.\n" "/// \\param __w05\n" "/// A 16-bit integral value used to initialize bits [175:160] of the result.\n" "/// \\param __w04\n" "/// A 16-bit integral value used to initialize bits [191:176] of the result.\n" "/// \\param __w03\n" "/// A 16-bit integral value used to initialize bits [207:192] of the result.\n" "/// \\param __w02\n" "/// A 16-bit integral value used to initialize bits [223:208] of the result.\n" "/// \\param __w01\n" "/// A 16-bit integral value used to initialize bits [239:224] of the result.\n" "/// \\param __w00\n" "/// A 16-bit integral value used to initialize bits [255:240] of the result.\n" "/// \\returns An initialized 256-bit integer vector.\n" "static __inline __m256i __DEFAULT_FN_ATTRS\n" "_mm256_setr_epi16(short __w15, short __w14, short __w13, short __w12,\n" " short __w11, short __w10, short __w09, short __w08,\n" " short __w07, short 
__w06, short __w05, short __w04,\n" " short __w03, short __w02, short __w01, short __w00)\n" "{\n" " return _mm256_set_epi16(__w00, __w01, __w02, __w03,\n" " __w04, __w05, __w06, __w07,\n" " __w08, __w09, __w10, __w11,\n" " __w12, __w13, __w14, __w15);\n" "}\n" "\n" "/// Constructs a 256-bit integer vector, initialized in reverse order\n" "/// with the specified 8-bit integral values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic is a utility function and does not correspond to a specific\n" "/// instruction.\n" "///\n" "/// \\param __b31\n" "/// An 8-bit integral value used to initialize bits [7:0] of the result.\n" "/// \\param __b30\n" "/// An 8-bit integral value used to initialize bits [15:8] of the result.\n" "/// \\param __b29\n" "/// An 8-bit integral value used to initialize bits [23:16] of the result.\n" "/// \\param __b28\n" "/// An 8-bit integral value used to initialize bits [31:24] of the result.\n" "/// \\param __b27\n" "/// An 8-bit integral value used to initialize bits [39:32] of the result.\n" "/// \\param __b26\n" "/// An 8-bit integral value used to initialize bits [47:40] of the result.\n" "/// \\param __b25\n" "/// An 8-bit integral value used to initialize bits [55:48] of the result.\n" "/// \\param __b24\n" "/// An 8-bit integral value used to initialize bits [63:56] of the result.\n" "/// \\param __b23\n" "/// An 8-bit integral value used to initialize bits [71:64] of the result.\n" "/// \\param __b22\n" "/// An 8-bit integral value used to initialize bits [79:72] of the result.\n" "/// \\param __b21\n" "/// An 8-bit integral value used to initialize bits [87:80] of the result.\n" "/// \\param __b20\n" "/// An 8-bit integral value used to initialize bits [95:88] of the result.\n" "/// \\param __b19\n" "/// An 8-bit integral value used to initialize bits [103:96] of the result.\n" "/// \\param __b18\n" "/// An 8-bit integral value used to initialize bits [111:104] of the result.\n" "/// \\param __b17\n" "/// An 8-bit integral 
value used to initialize bits [119:112] of the result.\n" "/// \\param __b16\n" "/// An 8-bit integral value used to initialize bits [127:120] of the result.\n" "/// \\param __b15\n" "/// An 8-bit integral value used to initialize bits [135:128] of the result.\n" "/// \\param __b14\n" "/// An 8-bit integral value used to initialize bits [143:136] of the result.\n" "/// \\param __b13\n" "/// An 8-bit integral value used to initialize bits [151:144] of the result.\n" "/// \\param __b12\n" "/// An 8-bit integral value used to initialize bits [159:152] of the result.\n" "/// \\param __b11\n" "/// An 8-bit integral value used to initialize bits [167:160] of the result.\n" "/// \\param __b10\n" "/// An 8-bit integral value used to initialize bits [175:168] of the result.\n" "/// \\param __b09\n" "/// An 8-bit integral value used to initialize bits [183:176] of the result.\n" "/// \\param __b08\n" "/// An 8-bit integral value used to initialize bits [191:184] of the result.\n" "/// \\param __b07\n" "/// An 8-bit integral value used to initialize bits [199:192] of the result.\n" "/// \\param __b06\n" "/// An 8-bit integral value used to initialize bits [207:200] of the result.\n" "/// \\param __b05\n" "/// An 8-bit integral value used to initialize bits [215:208] of the result.\n" "/// \\param __b04\n" "/// An 8-bit integral value used to initialize bits [223:216] of the result.\n" "/// \\param __b03\n" "/// An 8-bit integral value used to initialize bits [231:224] of the result.\n" "/// \\param __b02\n" "/// An 8-bit integral value used to initialize bits [239:232] of the result.\n" "/// \\param __b01\n" "/// An 8-bit integral value used to initialize bits [247:240] of the result.\n" "/// \\param __b00\n" "/// An 8-bit integral value used to initialize bits [255:248] of the result.\n" "/// \\returns An initialized 256-bit integer vector.\n" "static __inline __m256i __DEFAULT_FN_ATTRS\n" "_mm256_setr_epi8(char __b31, char __b30, char __b29, char __b28,\n" " char __b27, 
char __b26, char __b25, char __b24,\n" " char __b23, char __b22, char __b21, char __b20,\n" " char __b19, char __b18, char __b17, char __b16,\n" " char __b15, char __b14, char __b13, char __b12,\n" " char __b11, char __b10, char __b09, char __b08,\n" " char __b07, char __b06, char __b05, char __b04,\n" " char __b03, char __b02, char __b01, char __b00)\n" "{\n" " return _mm256_set_epi8(__b00, __b01, __b02, __b03, __b04, __b05, __b06, __b07,\n" " __b08, __b09, __b10, __b11, __b12, __b13, __b14, __b15,\n" " __b16, __b17, __b18, __b19, __b20, __b21, __b22, __b23,\n" " __b24, __b25, __b26, __b27, __b28, __b29, __b30, __b31);\n" "}\n" "\n" "/// Constructs a 256-bit integer vector, initialized in reverse order\n" "/// with the specified 64-bit integral values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPUNPCKLQDQ+VINSERTF128 \n" "/// instruction.\n" "///\n" "/// \\param __a\n" "/// A 64-bit integral value used to initialize bits [63:0] of the result.\n" "/// \\param __b\n" "/// A 64-bit integral value used to initialize bits [127:64] of the result.\n" "/// \\param __c\n" "/// A 64-bit integral value used to initialize bits [191:128] of the result.\n" "/// \\param __d\n" "/// A 64-bit integral value used to initialize bits [255:192] of the result.\n" "/// \\returns An initialized 256-bit integer vector.\n" "static __inline __m256i __DEFAULT_FN_ATTRS\n" "_mm256_setr_epi64x(long long __a, long long __b, long long __c, long long __d)\n" "{\n" " return _mm256_set_epi64x(__d, __c, __b, __a);\n" "}\n" "\n" "/* Create vectors with repeated elements */\n" "/// Constructs a 256-bit floating-point vector of [4 x double], with each\n" "/// of the four double-precision floating-point vector elements set to the\n" "/// specified double-precision floating-point value.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVDDUP+VINSERTF128 instruction.\n" "///\n" "/// \\param __w\n" "/// A double-precision floating-point 
value used to initialize each vector\n" "/// element of the result.\n" "/// \\returns An initialized 256-bit floating-point vector of [4 x double].\n" "static __inline __m256d __DEFAULT_FN_ATTRS\n" "_mm256_set1_pd(double __w)\n" "{\n" " return _mm256_set_pd(__w, __w, __w, __w);\n" "}\n" "\n" "/// Constructs a 256-bit floating-point vector of [8 x float], with each\n" "/// of the eight single-precision floating-point vector elements set to the\n" "/// specified single-precision floating-point value.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPERMILPS+VINSERTF128 \n" "/// instruction.\n" "///\n" "/// \\param __w\n" "/// A single-precision floating-point value used to initialize each vector\n" "/// element of the result.\n" "/// \\returns An initialized 256-bit floating-point vector of [8 x float].\n" "static __inline __m256 __DEFAULT_FN_ATTRS\n" "_mm256_set1_ps(float __w)\n" "{\n" " return _mm256_set_ps(__w, __w, __w, __w, __w, __w, __w, __w);\n" "}\n" "\n" "/// Constructs a 256-bit integer vector of [8 x i32], with each of the\n" "/// 32-bit integral vector elements set to the specified 32-bit integral\n" "/// value.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPERMILPS+VINSERTF128 \n" "/// instruction.\n" "///\n" "/// \\param __i\n" "/// A 32-bit integral value used to initialize each vector element of the\n" "/// result.\n" "/// \\returns An initialized 256-bit integer vector of [8 x i32].\n" "static __inline __m256i __DEFAULT_FN_ATTRS\n" "_mm256_set1_epi32(int __i)\n" "{\n" " return _mm256_set_epi32(__i, __i, __i, __i, __i, __i, __i, __i);\n" "}\n" "\n" "/// Constructs a 256-bit integer vector of [16 x i16], with each of the\n" "/// 16-bit integral vector elements set to the specified 16-bit integral\n" "/// value.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPSHUFB+VINSERTF128 instruction.\n" "///\n" "/// \\param __w\n" "/// A 16-bit integral value used 
to initialize each vector element of the\n" "/// result.\n" "/// \\returns An initialized 256-bit integer vector of [16 x i16].\n" "static __inline __m256i __DEFAULT_FN_ATTRS\n" "_mm256_set1_epi16(short __w)\n" "{\n" " return _mm256_set_epi16(__w, __w, __w, __w, __w, __w, __w, __w,\n" " __w, __w, __w, __w, __w, __w, __w, __w);\n" "}\n" "\n" "/// Constructs a 256-bit integer vector of [32 x i8], with each of the\n" "/// 8-bit integral vector elements set to the specified 8-bit integral value.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPSHUFB+VINSERTF128 instruction.\n" "///\n" "/// \\param __b\n" "/// An 8-bit integral value used to initialize each vector element of the\n" "/// result.\n" "/// \\returns An initialized 256-bit integer vector of [32 x i8].\n" "static __inline __m256i __DEFAULT_FN_ATTRS\n" "_mm256_set1_epi8(char __b)\n" "{\n" " return _mm256_set_epi8(__b, __b, __b, __b, __b, __b, __b, __b,\n" " __b, __b, __b, __b, __b, __b, __b, __b,\n" " __b, __b, __b, __b, __b, __b, __b, __b,\n" " __b, __b, __b, __b, __b, __b, __b, __b);\n" "}\n" "\n" "/// Constructs a 256-bit integer vector of [4 x i64], with each of the\n" "/// 64-bit integral vector elements set to the specified 64-bit integral\n" "/// value.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVDDUP+VINSERTF128 instruction.\n" "///\n" "/// \\param __q\n" "/// A 64-bit integral value used to initialize each vector element of the\n" "/// result.\n" "/// \\returns An initialized 256-bit integer vector of [4 x i64].\n" "static __inline __m256i __DEFAULT_FN_ATTRS\n" "_mm256_set1_epi64x(long long __q)\n" "{\n" " return _mm256_set_epi64x(__q, __q, __q, __q);\n" "}\n" "\n" "/* Create __zeroed vectors */\n" "/// Constructs a 256-bit floating-point vector of [4 x double] with all\n" "/// vector elements initialized to zero.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VXORPS instruction.\n" "///\n" "/// 
\\returns A 256-bit vector of [4 x double] with all elements set to zero.\n" "static __inline __m256d __DEFAULT_FN_ATTRS\n" "_mm256_setzero_pd(void)\n" "{\n" " return __extension__ (__m256d){ 0.0, 0.0, 0.0, 0.0 };\n" "}\n" "\n" "/// Constructs a 256-bit floating-point vector of [8 x float] with all\n" "/// vector elements initialized to zero.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VXORPS instruction.\n" "///\n" "/// \\returns A 256-bit vector of [8 x float] with all elements set to zero.\n" "static __inline __m256 __DEFAULT_FN_ATTRS\n" "_mm256_setzero_ps(void)\n" "{\n" " return __extension__ (__m256){ 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f };\n" "}\n" "\n" "/// Constructs a 256-bit integer vector initialized to zero.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VXORPS instruction.\n" "///\n" "/// \\returns A 256-bit integer vector initialized to zero.\n" "static __inline __m256i __DEFAULT_FN_ATTRS\n" "_mm256_setzero_si256(void)\n" "{\n" " return __extension__ (__m256i)(__v4di){ 0, 0, 0, 0 };\n" "}\n" "\n" "/* Cast between vector types */\n" "/// Casts a 256-bit floating-point vector of [4 x double] into a 256-bit\n" "/// floating-point vector of [8 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic has no corresponding instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit floating-point vector of [4 x double].\n" "/// \\returns A 256-bit floating-point vector of [8 x float] containing the same\n" "/// bitwise pattern as the parameter.\n" "static __inline __m256 __DEFAULT_FN_ATTRS\n" "_mm256_castpd_ps(__m256d __a)\n" "{\n" " return (__m256)__a;\n" "}\n" "\n" "/// Casts a 256-bit floating-point vector of [4 x double] into a 256-bit\n" "/// integer vector.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic has no corresponding instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit floating-point vector of [4 x double].\n" "/// \\returns A 256-bit 
integer vector containing the same bitwise pattern as the\n" "/// parameter.\n" "static __inline __m256i __DEFAULT_FN_ATTRS\n" "_mm256_castpd_si256(__m256d __a)\n" "{\n" " return (__m256i)__a;\n" "}\n" "\n" "/// Casts a 256-bit floating-point vector of [8 x float] into a 256-bit\n" "/// floating-point vector of [4 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic has no corresponding instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit floating-point vector of [8 x float].\n" "/// \\returns A 256-bit floating-point vector of [4 x double] containing the same\n" "/// bitwise pattern as the parameter.\n" "static __inline __m256d __DEFAULT_FN_ATTRS\n" "_mm256_castps_pd(__m256 __a)\n" "{\n" " return (__m256d)__a;\n" "}\n" "\n" "/// Casts a 256-bit floating-point vector of [8 x float] into a 256-bit\n" "/// integer vector.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic has no corresponding instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit floating-point vector of [8 x float].\n" "/// \\returns A 256-bit integer vector containing the same bitwise pattern as the\n" "/// parameter.\n" "static __inline __m256i __DEFAULT_FN_ATTRS\n" "_mm256_castps_si256(__m256 __a)\n" "{\n" " return (__m256i)__a;\n" "}\n" "\n" "/// Casts a 256-bit integer vector into a 256-bit floating-point vector\n" "/// of [8 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic has no corresponding instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit integer vector.\n" "/// \\returns A 256-bit floating-point vector of [8 x float] containing the same\n" "/// bitwise pattern as the parameter.\n" "static __inline __m256 __DEFAULT_FN_ATTRS\n" "_mm256_castsi256_ps(__m256i __a)\n" "{\n" " return (__m256)__a;\n" "}\n" "\n" "/// Casts a 256-bit integer vector into a 256-bit floating-point vector\n" "/// of [4 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic has no corresponding instruction.\n" "///\n" "/// \\param 
__a\n" "/// A 256-bit integer vector.\n" "/// \\returns A 256-bit floating-point vector of [4 x double] containing the same\n" "/// bitwise pattern as the parameter.\n" "static __inline __m256d __DEFAULT_FN_ATTRS\n" "_mm256_castsi256_pd(__m256i __a)\n" "{\n" " return (__m256d)__a;\n" "}\n" "\n" "/// Returns the lower 128 bits of a 256-bit floating-point vector of\n" "/// [4 x double] as a 128-bit floating-point vector of [2 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic has no corresponding instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit floating-point vector of [4 x double].\n" "/// \\returns A 128-bit floating-point vector of [2 x double] containing the\n" "/// lower 128 bits of the parameter.\n" "static __inline __m128d __DEFAULT_FN_ATTRS\n" "_mm256_castpd256_pd128(__m256d __a)\n" "{\n" " return __builtin_shufflevector((__v4df)__a, (__v4df)__a, 0, 1);\n" "}\n" "\n" "/// Returns the lower 128 bits of a 256-bit floating-point vector of\n" "/// [8 x float] as a 128-bit floating-point vector of [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic has no corresponding instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit floating-point vector of [8 x float].\n" "/// \\returns A 128-bit floating-point vector of [4 x float] containing the\n" "/// lower 128 bits of the parameter.\n" "static __inline __m128 __DEFAULT_FN_ATTRS\n" "_mm256_castps256_ps128(__m256 __a)\n" "{\n" " return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 0, 1, 2, 3);\n" "}\n" "\n" "/// Truncates a 256-bit integer vector into a 128-bit integer vector.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic has no corresponding instruction.\n" "///\n" "/// \\param __a\n" "/// A 256-bit integer vector.\n" "/// \\returns A 128-bit integer vector containing the lower 128 bits of the\n" "/// parameter.\n" "static __inline __m128i __DEFAULT_FN_ATTRS\n" "_mm256_castsi256_si128(__m256i __a)\n" "{\n" " return 
__builtin_shufflevector((__v4di)__a, (__v4di)__a, 0, 1);\n" "}\n" "\n" "/// Constructs a 256-bit floating-point vector of [4 x double] from a\n" "/// 128-bit floating-point vector of [2 x double].\n" "///\n" "/// The lower 128 bits contain the value of the source vector. The contents\n" "/// of the upper 128 bits are undefined.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic has no corresponding instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double].\n" "/// \\returns A 256-bit floating-point vector of [4 x double]. The lower 128 bits\n" "/// contain the value of the parameter. The contents of the upper 128 bits\n" "/// are undefined.\n" "static __inline __m256d __DEFAULT_FN_ATTRS\n" "_mm256_castpd128_pd256(__m128d __a)\n" "{\n" " return __builtin_shufflevector(\n" " (__v2df)__a, (__v2df)__builtin_nondeterministic_value(__a), 0, 1, 2, 3);\n" "}\n" "\n" "/// Constructs a 256-bit floating-point vector of [8 x float] from a\n" "/// 128-bit floating-point vector of [4 x float].\n" "///\n" "/// The lower 128 bits contain the value of the source vector. The contents\n" "/// of the upper 128 bits are undefined.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic has no corresponding instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float].\n" "/// \\returns A 256-bit floating-point vector of [8 x float]. The lower 128 bits\n" "/// contain the value of the parameter. The contents of the upper 128 bits\n" "/// are undefined.\n" "static __inline __m256 __DEFAULT_FN_ATTRS\n" "_mm256_castps128_ps256(__m128 __a)\n" "{\n" " return __builtin_shufflevector((__v4sf)__a,\n" " (__v4sf)__builtin_nondeterministic_value(__a),\n" " 0, 1, 2, 3, 4, 5, 6, 7);\n" "}\n" "\n" "/// Constructs a 256-bit integer vector from a 128-bit integer vector.\n" "///\n" "/// The lower 128 bits contain the value of the source vector. 
The contents\n" "/// of the upper 128 bits are undefined.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic has no corresponding instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector.\n" "/// \\returns A 256-bit integer vector. The lower 128 bits contain the value of\n" "/// the parameter. The contents of the upper 128 bits are undefined.\n" "static __inline __m256i __DEFAULT_FN_ATTRS\n" "_mm256_castsi128_si256(__m128i __a)\n" "{\n" " return __builtin_shufflevector(\n" " (__v2di)__a, (__v2di)__builtin_nondeterministic_value(__a), 0, 1, 2, 3);\n" "}\n" "\n" "/// Constructs a 256-bit floating-point vector of [4 x double] from a\n" "/// 128-bit floating-point vector of [2 x double]. The lower 128 bits\n" "/// contain the value of the source vector. The upper 128 bits are set\n" "/// to zero.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic has no corresponding instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double].\n" "/// \\returns A 256-bit floating-point vector of [4 x double]. The lower 128 bits\n" "/// contain the value of the parameter. The upper 128 bits are set to zero.\n" "static __inline __m256d __DEFAULT_FN_ATTRS\n" "_mm256_zextpd128_pd256(__m128d __a)\n" "{\n" " return __builtin_shufflevector((__v2df)__a, (__v2df)_mm_setzero_pd(), 0, 1, 2, 3);\n" "}\n" "\n" "/// Constructs a 256-bit floating-point vector of [8 x float] from a\n" "/// 128-bit floating-point vector of [4 x float]. The lower 128 bits contain\n" "/// the value of the source vector. The upper 128 bits are set to zero.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic has no corresponding instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float].\n" "/// \\returns A 256-bit floating-point vector of [8 x float]. The lower 128 bits\n" "/// contain the value of the parameter. 
The upper 128 bits are set to zero.\n" "static __inline __m256 __DEFAULT_FN_ATTRS\n" "_mm256_zextps128_ps256(__m128 __a)\n" "{\n" " return __builtin_shufflevector((__v4sf)__a, (__v4sf)_mm_setzero_ps(), 0, 1, 2, 3, 4, 5, 6, 7);\n" "}\n" "\n" "/// Constructs a 256-bit integer vector from a 128-bit integer vector.\n" "/// The lower 128 bits contain the value of the source vector. The upper\n" "/// 128 bits are set to zero.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic has no corresponding instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector.\n" "/// \\returns A 256-bit integer vector. The lower 128 bits contain the value of\n" "/// the parameter. The upper 128 bits are set to zero.\n" "static __inline __m256i __DEFAULT_FN_ATTRS\n" "_mm256_zextsi128_si256(__m128i __a)\n" "{\n" " return __builtin_shufflevector((__v2di)__a, (__v2di)_mm_setzero_si128(), 0, 1, 2, 3);\n" "}\n" "\n" "/*\n" " Vector insert.\n" " We use macros rather than inlines because we only want to accept\n" " invocations where the immediate M is a constant expression.\n" "*/\n" "/// Constructs a new 256-bit vector of [8 x float] by first duplicating\n" "/// a 256-bit vector of [8 x float] given in the first parameter, and then\n" "/// replacing either the upper or the lower 128 bits with the contents of a\n" "/// 128-bit vector of [4 x float] in the second parameter.\n" "///\n" "/// The immediate integer parameter determines between the upper or the lower\n" "/// 128 bits.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256 _mm256_insertf128_ps(__m256 V1, __m128 V2, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VINSERTF128 instruction.\n" "///\n" "/// \\param V1\n" "/// A 256-bit vector of [8 x float]. 
This vector is copied to the result\n" "/// first, and then either the upper or the lower 128 bits of the result will\n" "/// be replaced by the contents of \\a V2.\n" "/// \\param V2\n" "/// A 128-bit vector of [4 x float]. The contents of this parameter are\n" "/// written to either the upper or the lower 128 bits of the result depending\n" "/// on the value of parameter \\a M.\n" "/// \\param M\n" "/// An immediate integer. The least significant bit determines how the values\n" "/// from the two parameters are interleaved: \\n\n" "/// If bit [0] of \\a M is 0, \\a V2 are copied to bits [127:0] of the result,\n" "/// and bits [255:128] of \\a V1 are copied to bits [255:128] of the\n" "/// result. \\n\n" "/// If bit [0] of \\a M is 1, \\a V2 are copied to bits [255:128] of the\n" "/// result, and bits [127:0] of \\a V1 are copied to bits [127:0] of the\n" "/// result.\n" "/// \\returns A 256-bit vector of [8 x float] containing the interleaved values.\n" "#define _mm256_insertf128_ps(V1, V2, M) \\\n" " ((__m256)__builtin_ia32_vinsertf128_ps256((__v8sf)(__m256)(V1), \\\n" " (__v4sf)(__m128)(V2), (int)(M)))\n" "\n" "/// Constructs a new 256-bit vector of [4 x double] by first duplicating\n" "/// a 256-bit vector of [4 x double] given in the first parameter, and then\n" "/// replacing either the upper or the lower 128 bits with the contents of a\n" "/// 128-bit vector of [2 x double] in the second parameter.\n" "///\n" "/// The immediate integer parameter determines between the upper or the lower\n" "/// 128 bits.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256d _mm256_insertf128_pd(__m256d V1, __m128d V2, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VINSERTF128 instruction.\n" "///\n" "/// \\param V1\n" "/// A 256-bit vector of [4 x double]. 
This vector is copied to the result\n" "/// first, and then either the upper or the lower 128 bits of the result will\n" "/// be replaced by the contents of \\a V2.\n" "/// \\param V2\n" "/// A 128-bit vector of [2 x double]. The contents of this parameter are\n" "/// written to either the upper or the lower 128 bits of the result depending\n" "/// on the value of parameter \\a M.\n" "/// \\param M\n" "/// An immediate integer. The least significant bit determines how the values\n" "/// from the two parameters are interleaved: \\n\n" "/// If bit [0] of \\a M is 0, \\a V2 are copied to bits [127:0] of the result,\n" "/// and bits [255:128] of \\a V1 are copied to bits [255:128] of the\n" "/// result. \\n\n" "/// If bit [0] of \\a M is 1, \\a V2 are copied to bits [255:128] of the\n" "/// result, and bits [127:0] of \\a V1 are copied to bits [127:0] of the\n" "/// result.\n" "/// \\returns A 256-bit vector of [4 x double] containing the interleaved values.\n" "#define _mm256_insertf128_pd(V1, V2, M) \\\n" " ((__m256d)__builtin_ia32_vinsertf128_pd256((__v4df)(__m256d)(V1), \\\n" " (__v2df)(__m128d)(V2), (int)(M)))\n" "\n" "/// Constructs a new 256-bit integer vector by first duplicating a\n" "/// 256-bit integer vector given in the first parameter, and then replacing\n" "/// either the upper or the lower 128 bits with the contents of a 128-bit\n" "/// integer vector in the second parameter.\n" "///\n" "/// The immediate integer parameter determines between the upper or the lower\n" "/// 128 bits.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256i _mm256_insertf128_si256(__m256i V1, __m128i V2, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VINSERTF128 instruction.\n" "///\n" "/// \\param V1\n" "/// A 256-bit integer vector. 
This vector is copied to the result first, and\n" "/// then either the upper or the lower 128 bits of the result will be\n" "/// replaced by the contents of \\a V2.\n" "/// \\param V2\n" "/// A 128-bit integer vector. The contents of this parameter are written to\n" "/// either the upper or the lower 128 bits of the result depending on the\n" "/// value of parameter \\a M.\n" "/// \\param M\n" "/// An immediate integer. The least significant bit determines how the values\n" "/// from the two parameters are interleaved: \\n\n" "/// If bit [0] of \\a M is 0, \\a V2 are copied to bits [127:0] of the result,\n" "/// and bits [255:128] of \\a V1 are copied to bits [255:128] of the\n" "/// result. \\n\n" "/// If bit [0] of \\a M is 1, \\a V2 are copied to bits [255:128] of the\n" "/// result, and bits [127:0] of \\a V1 are copied to bits [127:0] of the\n" "/// result.\n" "/// \\returns A 256-bit integer vector containing the interleaved values.\n" "#define _mm256_insertf128_si256(V1, V2, M) \\\n" " ((__m256i)__builtin_ia32_vinsertf128_si256((__v8si)(__m256i)(V1), \\\n" " (__v4si)(__m128i)(V2), (int)(M)))\n" "\n" "/*\n" " Vector extract.\n" " We use macros rather than inlines because we only want to accept\n" " invocations where the immediate M is a constant expression.\n" "*/\n" "/// Extracts either the upper or the lower 128 bits from a 256-bit vector\n" "/// of [8 x float], as determined by the immediate integer parameter, and\n" "/// returns the extracted bits as a 128-bit vector of [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128 _mm256_extractf128_ps(__m256 V, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VEXTRACTF128 instruction.\n" "///\n" "/// \\param V\n" "/// A 256-bit vector of [8 x float].\n" "/// \\param M\n" "/// An immediate integer. 
The least significant bit determines which bits are\n" "/// extracted from the first parameter: \\n\n" "/// If bit [0] of \\a M is 0, bits [127:0] of \\a V are copied to the\n" "/// result. \\n\n" "/// If bit [0] of \\a M is 1, bits [255:128] of \\a V are copied to the result.\n" "/// \\returns A 128-bit vector of [4 x float] containing the extracted bits.\n" "#define _mm256_extractf128_ps(V, M) \\\n" " ((__m128)__builtin_ia32_vextractf128_ps256((__v8sf)(__m256)(V), (int)(M)))\n" "\n" "/// Extracts either the upper or the lower 128 bits from a 256-bit vector\n" "/// of [4 x double], as determined by the immediate integer parameter, and\n" "/// returns the extracted bits as a 128-bit vector of [2 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128d _mm256_extractf128_pd(__m256d V, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VEXTRACTF128 instruction.\n" "///\n" "/// \\param V\n" "/// A 256-bit vector of [4 x double].\n" "/// \\param M\n" "/// An immediate integer. The least significant bit determines which bits are\n" "/// extracted from the first parameter: \\n\n" "/// If bit [0] of \\a M is 0, bits [127:0] of \\a V are copied to the\n" "/// result. 
\\n\n" "/// If bit [0] of \\a M is 1, bits [255:128] of \\a V are copied to the result.\n" "/// \\returns A 128-bit vector of [2 x double] containing the extracted bits.\n" "#define _mm256_extractf128_pd(V, M) \\\n" " ((__m128d)__builtin_ia32_vextractf128_pd256((__v4df)(__m256d)(V), (int)(M)))\n" "\n" "/// Extracts either the upper or the lower 128 bits from a 256-bit\n" "/// integer vector, as determined by the immediate integer parameter, and\n" "/// returns the extracted bits as a 128-bit integer vector.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm256_extractf128_si256(__m256i V, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VEXTRACTF128 instruction.\n" "///\n" "/// \\param V\n" "/// A 256-bit integer vector.\n" "/// \\param M\n" "/// An immediate integer. The least significant bit determines which bits are\n" "/// extracted from the first parameter: \\n\n" "/// If bit [0] of \\a M is 0, bits [127:0] of \\a V are copied to the\n" "/// result. 
\\n\n" "/// If bit [0] of \\a M is 1, bits [255:128] of \\a V are copied to the result.\n" "/// \\returns A 128-bit integer vector containing the extracted bits.\n" "#define _mm256_extractf128_si256(V, M) \\\n" " ((__m128i)__builtin_ia32_vextractf128_si256((__v8si)(__m256i)(V), (int)(M)))\n" "\n" "/// Constructs a 256-bit floating-point vector of [8 x float] by\n" "/// concatenating two 128-bit floating-point vectors of [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VINSERTF128 instruction.\n" "///\n" "/// \\param __hi\n" "/// A 128-bit floating-point vector of [4 x float] to be copied to the upper\n" "/// 128 bits of the result.\n" "/// \\param __lo\n" "/// A 128-bit floating-point vector of [4 x float] to be copied to the lower\n" "/// 128 bits of the result.\n" "/// \\returns A 256-bit floating-point vector of [8 x float] containing the\n" "/// concatenated result.\n" "static __inline __m256 __DEFAULT_FN_ATTRS\n" "_mm256_set_m128 (__m128 __hi, __m128 __lo)\n" "{\n" " return (__m256) __builtin_shufflevector((__v4sf)__lo, (__v4sf)__hi, 0, 1, 2, 3, 4, 5, 6, 7);\n" "}\n" "\n" "/// Constructs a 256-bit floating-point vector of [4 x double] by\n" "/// concatenating two 128-bit floating-point vectors of [2 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VINSERTF128 instruction.\n" "///\n" "/// \\param __hi\n" "/// A 128-bit floating-point vector of [2 x double] to be copied to the upper\n" "/// 128 bits of the result.\n" "/// \\param __lo\n" "/// A 128-bit floating-point vector of [2 x double] to be copied to the lower\n" "/// 128 bits of the result.\n" "/// \\returns A 256-bit floating-point vector of [4 x double] containing the\n" "/// concatenated result.\n" "static __inline __m256d __DEFAULT_FN_ATTRS\n" "_mm256_set_m128d (__m128d __hi, __m128d __lo)\n" "{\n" " return (__m256d) __builtin_shufflevector((__v2df)__lo, (__v2df)__hi, 0, 1, 2, 3);\n" "}\n" "\n" "/// Constructs a 
256-bit integer vector by concatenating two 128-bit\n" "/// integer vectors.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VINSERTF128 instruction.\n" "///\n" "/// \\param __hi\n" "/// A 128-bit integer vector to be copied to the upper 128 bits of the\n" "/// result.\n" "/// \\param __lo\n" "/// A 128-bit integer vector to be copied to the lower 128 bits of the\n" "/// result.\n" "/// \\returns A 256-bit integer vector containing the concatenated result.\n" "static __inline __m256i __DEFAULT_FN_ATTRS\n" "_mm256_set_m128i (__m128i __hi, __m128i __lo)\n" "{\n" " return (__m256i) __builtin_shufflevector((__v2di)__lo, (__v2di)__hi, 0, 1, 2, 3);\n" "}\n" "\n" "/// Constructs a 256-bit floating-point vector of [8 x float] by\n" "/// concatenating two 128-bit floating-point vectors of [4 x float]. This is\n" "/// similar to _mm256_set_m128, but the order of the input parameters is\n" "/// swapped.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VINSERTF128 instruction.\n" "///\n" "/// \\param __lo\n" "/// A 128-bit floating-point vector of [4 x float] to be copied to the lower\n" "/// 128 bits of the result.\n" "/// \\param __hi\n" "/// A 128-bit floating-point vector of [4 x float] to be copied to the upper\n" "/// 128 bits of the result.\n" "/// \\returns A 256-bit floating-point vector of [8 x float] containing the\n" "/// concatenated result.\n" "static __inline __m256 __DEFAULT_FN_ATTRS\n" "_mm256_setr_m128 (__m128 __lo, __m128 __hi)\n" "{\n" " return _mm256_set_m128(__hi, __lo);\n" "}\n" "\n" "/// Constructs a 256-bit floating-point vector of [4 x double] by\n" "/// concatenating two 128-bit floating-point vectors of [2 x double]. 
This is\n" "/// similar to _mm256_set_m128d, but the order of the input parameters is\n" "/// swapped.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VINSERTF128 instruction.\n" "///\n" "/// \\param __lo\n" "/// A 128-bit floating-point vector of [2 x double] to be copied to the lower\n" "/// 128 bits of the result.\n" "/// \\param __hi\n" "/// A 128-bit floating-point vector of [2 x double] to be copied to the upper\n" "/// 128 bits of the result.\n" "/// \\returns A 256-bit floating-point vector of [4 x double] containing the\n" "/// concatenated result.\n" "static __inline __m256d __DEFAULT_FN_ATTRS\n" "_mm256_setr_m128d (__m128d __lo, __m128d __hi)\n" "{\n" " return (__m256d)_mm256_set_m128d(__hi, __lo);\n" "}\n" "\n" "/// Constructs a 256-bit integer vector by concatenating two 128-bit\n" "/// integer vectors. This is similar to _mm256_set_m128i, but the order of\n" "/// the input parameters is swapped.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VINSERTF128 instruction.\n" "///\n" "/// \\param __lo\n" "/// A 128-bit integer vector to be copied to the lower 128 bits of the\n" "/// result.\n" "/// \\param __hi\n" "/// A 128-bit integer vector to be copied to the upper 128 bits of the\n" "/// result.\n" "/// \\returns A 256-bit integer vector containing the concatenated result.\n" "static __inline __m256i __DEFAULT_FN_ATTRS\n" "_mm256_setr_m128i (__m128i __lo, __m128i __hi)\n" "{\n" " return (__m256i)_mm256_set_m128i(__hi, __lo);\n" "}\n" "\n" "/* SIMD load ops (unaligned) */\n" "/// Loads two 128-bit floating-point vectors of [4 x float] from\n" "/// unaligned memory locations and constructs a 256-bit floating-point vector\n" "/// of [8 x float] by concatenating the two 128-bit vectors.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to load instructions followed by the\n" "/// VINSERTF128 instruction.\n" "///\n" "/// \\param __addr_hi\n" "/// A pointer to a 128-bit 
memory location containing 4 consecutive\n" "/// single-precision floating-point values. These values are to be copied to\n" "/// bits[255:128] of the result. The address of the memory location does not\n" "/// have to be aligned.\n" "/// \\param __addr_lo\n" "/// A pointer to a 128-bit memory location containing 4 consecutive\n" "/// single-precision floating-point values. These values are to be copied to\n" "/// bits[127:0] of the result. The address of the memory location does not\n" "/// have to be aligned.\n" "/// \\returns A 256-bit floating-point vector of [8 x float] containing the\n" "/// concatenated result.\n" "static __inline __m256 __DEFAULT_FN_ATTRS\n" "_mm256_loadu2_m128(float const *__addr_hi, float const *__addr_lo)\n" "{\n" " return _mm256_set_m128(_mm_loadu_ps(__addr_hi), _mm_loadu_ps(__addr_lo));\n" "}\n" "\n" "/// Loads two 128-bit floating-point vectors of [2 x double] from\n" "/// unaligned memory locations and constructs a 256-bit floating-point vector\n" "/// of [4 x double] by concatenating the two 128-bit vectors.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to load instructions followed by the\n" "/// VINSERTF128 instruction.\n" "///\n" "/// \\param __addr_hi\n" "/// A pointer to a 128-bit memory location containing two consecutive\n" "/// double-precision floating-point values. These values are to be copied to\n" "/// bits[255:128] of the result. The address of the memory location does not\n" "/// have to be aligned.\n" "/// \\param __addr_lo\n" "/// A pointer to a 128-bit memory location containing two consecutive\n" "/// double-precision floating-point values. These values are to be copied to\n" "/// bits[127:0] of the result. 
The address of the memory location does not\n" "/// have to be aligned.\n" "/// \\returns A 256-bit floating-point vector of [4 x double] containing the\n" "/// concatenated result.\n" "static __inline __m256d __DEFAULT_FN_ATTRS\n" "_mm256_loadu2_m128d(double const *__addr_hi, double const *__addr_lo)\n" "{\n" " return _mm256_set_m128d(_mm_loadu_pd(__addr_hi), _mm_loadu_pd(__addr_lo));\n" "}\n" "\n" "/// Loads two 128-bit integer vectors from unaligned memory locations and\n" "/// constructs a 256-bit integer vector by concatenating the two 128-bit\n" "/// vectors.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to load instructions followed by the\n" "/// VINSERTF128 instruction.\n" "///\n" "/// \\param __addr_hi\n" "/// A pointer to a 128-bit memory location containing a 128-bit integer\n" "/// vector. This vector is to be copied to bits[255:128] of the result. The\n" "/// address of the memory location does not have to be aligned.\n" "/// \\param __addr_lo\n" "/// A pointer to a 128-bit memory location containing a 128-bit integer\n" "/// vector. This vector is to be copied to bits[127:0] of the result. The\n" "/// address of the memory location does not have to be aligned.\n" "/// \\returns A 256-bit integer vector containing the concatenated result.\n" "static __inline __m256i __DEFAULT_FN_ATTRS\n" "_mm256_loadu2_m128i(__m128i_u const *__addr_hi, __m128i_u const *__addr_lo)\n" "{\n" " return _mm256_set_m128i(_mm_loadu_si128(__addr_hi), _mm_loadu_si128(__addr_lo));\n" "}\n" "\n" "/* SIMD store ops (unaligned) */\n" "/// Stores the upper and lower 128 bits of a 256-bit floating-point\n" "/// vector of [8 x float] into two different unaligned memory locations.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VEXTRACTF128 instruction and the\n" "/// store instructions.\n" "///\n" "/// \\param __addr_hi\n" "/// A pointer to a 128-bit memory location. 
Bits[255:128] of \\a __a are to be\n" "/// copied to this memory location. The address of this memory location does\n" "/// not have to be aligned.\n" "/// \\param __addr_lo\n" "/// A pointer to a 128-bit memory location. Bits[127:0] of \\a __a are to be\n" "/// copied to this memory location. The address of this memory location does\n" "/// not have to be aligned.\n" "/// \\param __a\n" "/// A 256-bit floating-point vector of [8 x float].\n" "static __inline void __DEFAULT_FN_ATTRS\n" "_mm256_storeu2_m128(float *__addr_hi, float *__addr_lo, __m256 __a)\n" "{\n" " __m128 __v128;\n" "\n" " __v128 = _mm256_castps256_ps128(__a);\n" " _mm_storeu_ps(__addr_lo, __v128);\n" " __v128 = _mm256_extractf128_ps(__a, 1);\n" " _mm_storeu_ps(__addr_hi, __v128);\n" "}\n" "\n" "/// Stores the upper and lower 128 bits of a 256-bit floating-point\n" "/// vector of [4 x double] into two different unaligned memory locations.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VEXTRACTF128 instruction and the\n" "/// store instructions.\n" "///\n" "/// \\param __addr_hi\n" "/// A pointer to a 128-bit memory location. Bits[255:128] of \\a __a are to be\n" "/// copied to this memory location. The address of this memory location does\n" "/// not have to be aligned.\n" "/// \\param __addr_lo\n" "/// A pointer to a 128-bit memory location. Bits[127:0] of \\a __a are to be\n" "/// copied to this memory location. 
The address of this memory location does\n" "/// not have to be aligned.\n" "/// \\param __a\n" "/// A 256-bit floating-point vector of [4 x double].\n" "static __inline void __DEFAULT_FN_ATTRS\n" "_mm256_storeu2_m128d(double *__addr_hi, double *__addr_lo, __m256d __a)\n" "{\n" " __m128d __v128;\n" "\n" " __v128 = _mm256_castpd256_pd128(__a);\n" " _mm_storeu_pd(__addr_lo, __v128);\n" " __v128 = _mm256_extractf128_pd(__a, 1);\n" " _mm_storeu_pd(__addr_hi, __v128);\n" "}\n" "\n" "/// Stores the upper and lower 128 bits of a 256-bit integer vector into\n" "/// two different unaligned memory locations.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VEXTRACTF128 instruction and the\n" "/// store instructions.\n" "///\n" "/// \\param __addr_hi\n" "/// A pointer to a 128-bit memory location. Bits[255:128] of \\a __a are to be\n" "/// copied to this memory location. The address of this memory location does\n" "/// not have to be aligned.\n" "/// \\param __addr_lo\n" "/// A pointer to a 128-bit memory location. Bits[127:0] of \\a __a are to be\n" "/// copied to this memory location. 
The address of this memory location does\n" "/// not have to be aligned.\n" "/// \\param __a\n" "/// A 256-bit integer vector.\n" "static __inline void __DEFAULT_FN_ATTRS\n" "_mm256_storeu2_m128i(__m128i_u *__addr_hi, __m128i_u *__addr_lo, __m256i __a)\n" "{\n" " __m128i __v128;\n" "\n" " __v128 = _mm256_castsi256_si128(__a);\n" " _mm_storeu_si128(__addr_lo, __v128);\n" " __v128 = _mm256_extractf128_si256(__a, 1);\n" " _mm_storeu_si128(__addr_hi, __v128);\n" "}\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "#undef __DEFAULT_FN_ATTRS128\n" "\n" "#endif /* __AVXINTRIN_H */\n" "" } , { "/builtins/avxneconvertintrin.h" , "/*===-------------- avxneconvertintrin.h - AVXNECONVERT --------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __IMMINTRIN_H\n" "#error \\\n" " \"Never use directly; include instead.\"\n" "#endif // __IMMINTRIN_H\n" "\n" "#ifdef __SSE2__\n" "\n" "#ifndef __AVXNECONVERTINTRIN_H\n" "#define __AVXNECONVERTINTRIN_H\n" "\n" "/* Define the default attributes for the functions in this file. 
*/\n" "#define __DEFAULT_FN_ATTRS128 \\\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"avxneconvert\"), \\\n" " __min_vector_width__(128)))\n" "#define __DEFAULT_FN_ATTRS256 \\\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"avxneconvert\"), \\\n" " __min_vector_width__(256)))\n" "\n" "/// Convert scalar BF16 (16-bit) floating-point element\n" "/// stored at memory locations starting at location \\a __A to a\n" "/// single-precision (32-bit) floating-point, broadcast it to packed\n" "/// single-precision (32-bit) floating-point elements, and store the results in\n" "/// \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// _mm_bcstnebf16_ps(const void *__A);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VBCSTNEBF162PS instruction.\n" "///\n" "/// \\param __A\n" "/// A pointer to a 16-bit memory location. The address of the memory\n" "/// location does not have to be aligned.\n" "/// \\returns\n" "/// A 128-bit vector of [4 x float].\n" "///\n" "/// \\code{.operation}\n" "/// b := Convert_BF16_To_FP32(MEM[__A+15:__A])\n" "/// FOR j := 0 to 3\n" "/// m := j*32\n" "/// dst[m+31:m] := b\n" "/// ENDFOR\n" "/// dst[MAX:128] := 0\n" "/// \\endcode\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS128\n" "_mm_bcstnebf16_ps(const void *__A) {\n" " return (__m128)__builtin_ia32_vbcstnebf162ps128((const __bf16 *)__A);\n" "}\n" "\n" "/// Convert scalar BF16 (16-bit) floating-point element\n" "/// stored at memory locations starting at location \\a __A to a\n" "/// single-precision (32-bit) floating-point, broadcast it to packed\n" "/// single-precision (32-bit) floating-point elements, and store the results in\n" "/// \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// _mm256_bcstnebf16_ps(const void *__A);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VBCSTNEBF162PS instruction.\n" "///\n" "/// \\param __A\n" "/// A pointer to a 16-bit memory location. 
The address of the memory\n" "/// location does not have to be aligned.\n" "/// \\returns\n" "/// A 256-bit vector of [8 x float].\n" "///\n" "/// \\code{.operation}\n" "/// b := Convert_BF16_To_FP32(MEM[__A+15:__A])\n" "/// FOR j := 0 to 7\n" "/// m := j*32\n" "/// dst[m+31:m] := b\n" "/// ENDFOR\n" "/// dst[MAX:256] := 0\n" "/// \\endcode\n" "static __inline__ __m256 __DEFAULT_FN_ATTRS256\n" "_mm256_bcstnebf16_ps(const void *__A) {\n" " return (__m256)__builtin_ia32_vbcstnebf162ps256((const __bf16 *)__A);\n" "}\n" "\n" "/// Convert scalar half-precision (16-bit) floating-point element\n" "/// stored at memory locations starting at location \\a __A to a\n" "/// single-precision (32-bit) floating-point, broadcast it to packed\n" "/// single-precision (32-bit) floating-point elements, and store the results in\n" "/// \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// _mm_bcstnesh_ps(const void *__A);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VBCSTNESH2PS instruction.\n" "///\n" "/// \\param __A\n" "/// A pointer to a 16-bit memory location. 
The address of the memory\n" "/// location does not have to be aligned.\n" "/// \\returns\n" "/// A 128-bit vector of [4 x float].\n" "///\n" "/// \\code{.operation}\n" "/// b := Convert_FP16_To_FP32(MEM[__A+15:__A])\n" "/// FOR j := 0 to 3\n" "/// m := j*32\n" "/// dst[m+31:m] := b\n" "/// ENDFOR\n" "/// dst[MAX:128] := 0\n" "/// \\endcode\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS128\n" "_mm_bcstnesh_ps(const void *__A) {\n" " return (__m128)__builtin_ia32_vbcstnesh2ps128((const _Float16 *)__A);\n" "}\n" "\n" "/// Convert scalar half-precision (16-bit) floating-point element\n" "/// stored at memory locations starting at location \\a __A to a\n" "/// single-precision (32-bit) floating-point, broadcast it to packed\n" "/// single-precision (32-bit) floating-point elements, and store the results in\n" "/// \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// _mm256_bcstnesh_ps(const void *__A);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VBCSTNESH2PS instruction.\n" "///\n" "/// \\param __A\n" "/// A pointer to a 16-bit memory location. 
The address of the memory\n" "/// location does not have to be aligned.\n" "/// \\returns\n" "/// A 256-bit vector of [8 x float].\n" "///\n" "/// \\code{.operation}\n" "/// b := Convert_FP16_To_FP32(MEM[__A+15:__A])\n" "/// FOR j := 0 to 7\n" "/// m := j*32\n" "/// dst[m+31:m] := b\n" "/// ENDFOR\n" "/// dst[MAX:256] := 0\n" "/// \\endcode\n" "static __inline__ __m256 __DEFAULT_FN_ATTRS256\n" "_mm256_bcstnesh_ps(const void *__A) {\n" " return (__m256)__builtin_ia32_vbcstnesh2ps256((const _Float16 *)__A);\n" "}\n" "\n" "/// Convert packed BF16 (16-bit) floating-point even-indexed elements\n" "/// stored at memory locations starting at location \\a __A to packed\n" "/// single-precision (32-bit) floating-point elements, and store the results in\n" "/// \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// _mm_cvtneebf16_ps(const __m128bh *__A);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VCVTNEEBF162PS instruction.\n" "///\n" "/// \\param __A\n" "/// A pointer to a 128-bit memory location containing 8 consecutive\n" "/// BF16 (16-bit) floating-point values.\n" "/// \\returns\n" "/// A 128-bit vector of [4 x float].\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 3\n" "/// k := j*2\n" "/// i := k*16\n" "/// m := j*32\n" "/// dst[m+31:m] := Convert_BF16_To_FP32(MEM[__A+i+15:__A+i])\n" "/// ENDFOR\n" "/// dst[MAX:128] := 0\n" "/// \\endcode\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS128\n" "_mm_cvtneebf16_ps(const __m128bh *__A) {\n" " return (__m128)__builtin_ia32_vcvtneebf162ps128((const __v8bf *)__A);\n" "}\n" "\n" "/// Convert packed BF16 (16-bit) floating-point even-indexed elements\n" "/// stored at memory locations starting at location \\a __A to packed\n" "/// single-precision (32-bit) floating-point elements, and store the results in\n" "/// \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// _mm256_cvtneebf16_ps(const __m256bh *__A);\n" "/// \\endcode\n" "///\n" "/// This intrinsic 
corresponds to the \\c VCVTNEEBF162PS instruction.\n" "///\n" "/// \\param __A\n" "/// A pointer to a 256-bit memory location containing 16 consecutive\n" "/// BF16 (16-bit) floating-point values.\n" "/// \\returns\n" "/// A 256-bit vector of [8 x float].\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 7\n" "/// k := j*2\n" "/// i := k*16\n" "/// m := j*32\n" "/// dst[m+31:m] := Convert_BF16_To_FP32(MEM[__A+i+15:__A+i])\n" "/// ENDFOR\n" "/// dst[MAX:256] := 0\n" "/// \\endcode\n" "static __inline__ __m256 __DEFAULT_FN_ATTRS256\n" "_mm256_cvtneebf16_ps(const __m256bh *__A) {\n" " return (__m256)__builtin_ia32_vcvtneebf162ps256((const __v16bf *)__A);\n" "}\n" "\n" "/// Convert packed half-precision (16-bit) floating-point even-indexed elements\n" "/// stored at memory locations starting at location \\a __A to packed\n" "/// single-precision (32-bit) floating-point elements, and store the results in\n" "/// \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// _mm_cvtneeph_ps(const __m128h *__A);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VCVTNEEPH2PS instruction.\n" "///\n" "/// \\param __A\n" "/// A pointer to a 128-bit memory location containing 8 consecutive\n" "/// half-precision (16-bit) floating-point values.\n" "/// \\returns\n" "/// A 128-bit vector of [4 x float].\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 3\n" "/// k := j*2\n" "/// i := k*16\n" "/// m := j*32\n" "/// dst[m+31:m] := Convert_FP16_To_FP32(MEM[__A+i+15:__A+i])\n" "/// ENDFOR\n" "/// dst[MAX:128] := 0\n" "/// \\endcode\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS128\n" "_mm_cvtneeph_ps(const __m128h *__A) {\n" " return (__m128)__builtin_ia32_vcvtneeph2ps128((const __v8hf *)__A);\n" "}\n" "\n" "/// Convert packed half-precision (16-bit) floating-point even-indexed elements\n" "/// stored at memory locations starting at location \\a __A to packed\n" "/// single-precision (32-bit) floating-point elements, and store the results 
in\n" "/// \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// _mm256_cvtneeph_ps(const __m256h *__A);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VCVTNEEPH2PS instruction.\n" "///\n" "/// \\param __A\n" "/// A pointer to a 256-bit memory location containing 16 consecutive\n" "/// half-precision (16-bit) floating-point values.\n" "/// \\returns\n" "/// A 256-bit vector of [8 x float].\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 7\n" "/// k := j*2\n" "/// i := k*16\n" "/// m := j*32\n" "/// dst[m+31:m] := Convert_FP16_To_FP32(MEM[__A+i+15:__A+i])\n" "/// ENDFOR\n" "/// dst[MAX:256] := 0\n" "/// \\endcode\n" "static __inline__ __m256 __DEFAULT_FN_ATTRS256\n" "_mm256_cvtneeph_ps(const __m256h *__A) {\n" " return (__m256)__builtin_ia32_vcvtneeph2ps256((const __v16hf *)__A);\n" "}\n" "\n" "/// Convert packed BF16 (16-bit) floating-point odd-indexed elements\n" "/// stored at memory locations starting at location \\a __A to packed\n" "/// single-precision (32-bit) floating-point elements, and store the results in\n" "/// \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// _mm_cvtneobf16_ps(const __m128bh *__A);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VCVTNEOBF162PS instruction.\n" "///\n" "/// \\param __A\n" "/// A pointer to a 128-bit memory location containing 8 consecutive\n" "/// BF16 (16-bit) floating-point values.\n" "/// \\returns\n" "/// A 128-bit vector of [4 x float].\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 3\n" "/// k := j*2+1\n" "/// i := k*16\n" "/// m := j*32\n" "/// dst[m+31:m] := Convert_BF16_To_FP32(MEM[__A+i+15:__A+i])\n" "/// ENDFOR\n" "/// dst[MAX:128] := 0\n" "/// \\endcode\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS128\n" "_mm_cvtneobf16_ps(const __m128bh *__A) {\n" " return (__m128)__builtin_ia32_vcvtneobf162ps128((const __v8bf *)__A);\n" "}\n" "\n" "/// Convert packed BF16 (16-bit) floating-point odd-indexed elements\n" 
"/// stored at memory locations starting at location \\a __A to packed\n" "/// single-precision (32-bit) floating-point elements, and store the results in\n" "/// \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// _mm256_cvtneobf16_ps(const __m256bh *__A);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VCVTNEOBF162PS instruction.\n" "///\n" "/// \\param __A\n" "/// A pointer to a 256-bit memory location containing 16 consecutive\n" "/// BF16 (16-bit) floating-point values.\n" "/// \\returns\n" "/// A 256-bit vector of [8 x float].\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 7\n" "/// k := j*2+1\n" "/// i := k*16\n" "/// m := j*32\n" "/// dst[m+31:m] := Convert_BF16_To_FP32(MEM[__A+i+15:__A+i])\n" "/// ENDFOR\n" "/// dst[MAX:256] := 0\n" "/// \\endcode\n" "static __inline__ __m256 __DEFAULT_FN_ATTRS256\n" "_mm256_cvtneobf16_ps(const __m256bh *__A) {\n" " return (__m256)__builtin_ia32_vcvtneobf162ps256((const __v16bf *)__A);\n" "}\n" "\n" "/// Convert packed half-precision (16-bit) floating-point odd-indexed elements\n" "/// stored at memory locations starting at location \\a __A to packed\n" "/// single-precision (32-bit) floating-point elements, and store the results in\n" "/// \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// _mm_cvtneoph_ps(const __m128h *__A);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VCVTNEOPH2PS instruction.\n" "///\n" "/// \\param __A\n" "/// A pointer to a 128-bit memory location containing 8 consecutive\n" "/// half-precision (16-bit) floating-point values.\n" "/// \\returns\n" "/// A 128-bit vector of [4 x float].\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 3\n" "/// k := j*2+1\n" "/// i := k*16\n" "/// m := j*32\n" "/// dst[m+31:m] := Convert_FP16_To_FP32(MEM[__A+i+15:__A+i])\n" "/// ENDFOR\n" "/// dst[MAX:128] := 0\n" "/// \\endcode\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS128\n" "_mm_cvtneoph_ps(const __m128h 
*__A) {\n" " return (__m128)__builtin_ia32_vcvtneoph2ps128((const __v8hf *)__A);\n" "}\n" "\n" "/// Convert packed half-precision (16-bit) floating-point odd-indexed elements\n" "/// stored at memory locations starting at location \\a __A to packed\n" "/// single-precision (32-bit) floating-point elements, and store the results in\n" "/// \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// _mm256_cvtneoph_ps(const __m256h *__A);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VCVTNEOPH2PS instruction.\n" "///\n" "/// \\param __A\n" "/// A pointer to a 256-bit memory location containing 16 consecutive\n" "/// half-precision (16-bit) floating-point values.\n" "/// \\returns\n" "/// A 256-bit vector of [8 x float].\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 7\n" "/// k := j*2+1\n" "/// i := k*16\n" "/// m := j*32\n" "/// dst[m+31:m] := Convert_FP16_To_FP32(MEM[__A+i+15:__A+i])\n" "/// ENDFOR\n" "/// dst[MAX:256] := 0\n" "/// \\endcode\n" "static __inline__ __m256 __DEFAULT_FN_ATTRS256\n" "_mm256_cvtneoph_ps(const __m256h *__A) {\n" " return (__m256)__builtin_ia32_vcvtneoph2ps256((const __v16hf *)__A);\n" "}\n" "\n" "/// Convert packed single-precision (32-bit) floating-point elements in \\a __A\n" "/// to packed BF16 (16-bit) floating-point elements, and store the results in \\a\n" "/// dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// _mm_cvtneps_avx_pbh(__m128 __A);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VCVTNEPS2BF16 instruction.\n" "///\n" "/// \\param __A\n" "/// A 128-bit vector of [4 x float].\n" "/// \\returns\n" "/// A 128-bit vector of [8 x bfloat].\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 3\n" "/// dst.word[j] := Convert_FP32_To_BF16(__A.fp32[j])\n" "/// ENDFOR\n" "/// dst[MAX:128] := 0\n" "/// \\endcode\n" "static __inline__ __m128bh __DEFAULT_FN_ATTRS128\n" "_mm_cvtneps_avx_pbh(__m128 __A) {\n" " return 
(__m128bh)__builtin_ia32_vcvtneps2bf16128((__v4sf)__A);\n" "}\n" "\n" "/// Convert packed single-precision (32-bit) floating-point elements in \\a __A\n" "/// to packed BF16 (16-bit) floating-point elements, and store the results in \\a\n" "/// dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// _mm256_cvtneps_avx_pbh(__m256 __A);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VCVTNEPS2BF16 instruction.\n" "///\n" "/// \\param __A\n" "/// A 256-bit vector of [8 x float].\n" "/// \\returns\n" "/// A 128-bit vector of [8 x bfloat].\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 7\n" "/// dst.word[j] := Convert_FP32_To_BF16(a.fp32[j])\n" "/// ENDFOR\n" "/// dst[MAX:128] := 0\n" "/// \\endcode\n" "static __inline__ __m128bh __DEFAULT_FN_ATTRS256\n" "_mm256_cvtneps_avx_pbh(__m256 __A) {\n" " return (__m128bh)__builtin_ia32_vcvtneps2bf16256((__v8sf)__A);\n" "}\n" "\n" "#undef __DEFAULT_FN_ATTRS128\n" "#undef __DEFAULT_FN_ATTRS256\n" "\n" "#endif // __AVXNECONVERTINTRIN_H\n" "#endif // __SSE2__\n" "" } , { "/builtins/avxvnniint16intrin.h" , "/*===----------- avxvnniint16intrin.h - AVXVNNIINT16 intrinsics-------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __IMMINTRIN_H\n" "#error \\\n" " \"Never use directly; include instead.\"\n" "#endif // __IMMINTRIN_H\n" "\n" "#ifndef __AVXVNNIINT16INTRIN_H\n" "#define __AVXVNNIINT16INTRIN_H\n" "\n" "/* Define the default attributes for the functions in this file. 
*/\n" "#define __DEFAULT_FN_ATTRS128 \\\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"avxvnniint16\"), \\\n" " __min_vector_width__(128)))\n" "#define __DEFAULT_FN_ATTRS256 \\\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"avxvnniint16\"), \\\n" " __min_vector_width__(256)))\n" "\n" "/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \\a __A with\n" "/// corresponding unsigned 16-bit integers in \\a __B, producing 2 intermediate\n" "/// signed 16-bit results. Sum these 2 results with the corresponding\n" "/// 32-bit integer in \\a __W, and store the packed 32-bit results in \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm_dpwsud_epi32(__m128i __W, __m128i __A, __m128i __B)\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPDPWSUD instruction.\n" "///\n" "/// \\param __W\n" "/// A 128-bit vector of [4 x int].\n" "/// \\param __A\n" "/// A 128-bit vector of [8 x short].\n" "/// \\param __B\n" "/// A 128-bit vector of [8 x unsigned short].\n" "/// \\returns\n" "/// A 128-bit vector of [4 x int].\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 3\n" "/// tmp1.dword := SignExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j])\n" "/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1])\n" "/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2\n" "/// ENDFOR\n" "/// dst[MAX:128] := 0\n" "/// \\endcode\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwsud_epi32(__m128i __W,\n" " __m128i __A,\n" " __m128i __B) {\n" " return (__m128i)__builtin_ia32_vpdpwsud128((__v4si)__W, (__v4si)__A,\n" " (__v4si)__B);\n" "}\n" "\n" "/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \\a __A with\n" "/// corresponding unsigned 16-bit integers in \\a __B, producing 2 intermediate\n" "/// signed 16-bit results. 
Sum these 2 results with the corresponding\n" "/// 32-bit integer in \\a __W, and store the packed 32-bit results in \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256i _mm256_dpwsud_epi32(__m256i __W, __m256i __A, __m256i __B)\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPDPWSUD instruction.\n" "///\n" "/// \\param __W\n" "/// A 256-bit vector of [8 x int].\n" "/// \\param __A\n" "/// A 256-bit vector of [16 x short].\n" "/// \\param __B\n" "/// A 256-bit vector of [16 x unsigned short].\n" "/// \\returns\n" "/// A 256-bit vector of [8 x int].\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 7\n" "/// tmp1.dword := SignExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j])\n" "/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1])\n" "/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2\n" "/// ENDFOR\n" "/// dst[MAX:256] := 0\n" "/// \\endcode\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_dpwsud_epi32(__m256i __W, __m256i __A, __m256i __B) {\n" " return (__m256i)__builtin_ia32_vpdpwsud256((__v8si)__W, (__v8si)__A,\n" " (__v8si)__B);\n" "}\n" "\n" "/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \\a __A with\n" "/// corresponding unsigned 16-bit integers in \\a __B, producing 2 intermediate\n" "/// signed 16-bit results. 
Sum these 2 results with the corresponding\n" "/// 32-bit integer in \\a __W with signed saturation, and store the packed\n" "/// 32-bit results in \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm_dpwsuds_epi32(__m128i __W, __m128i __A, __m128i __B)\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPDPWSUDS instruction.\n" "///\n" "/// \\param __W\n" "/// A 128-bit vector of [4 x int].\n" "/// \\param __A\n" "/// A 128-bit vector of [8 x short].\n" "/// \\param __B\n" "/// A 128-bit vector of [8 x unsigned short].\n" "/// \\returns\n" "/// A 128-bit vector of [4 x int].\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 3\n" "/// tmp1.dword := SignExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j])\n" "/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1])\n" "/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2)\n" "/// ENDFOR\n" "/// dst[MAX:128] := 0\n" "/// \\endcode\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwsuds_epi32(__m128i __W,\n" " __m128i __A,\n" " __m128i __B) {\n" " return (__m128i)__builtin_ia32_vpdpwsuds128((__v4si)__W, (__v4si)__A,\n" " (__v4si)__B);\n" "}\n" "\n" "/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \\a __A with\n" "/// corresponding unsigned 16-bit integers in \\a __B, producing 2 intermediate\n" "/// signed 16-bit results. 
Sum these 2 results with the corresponding\n" "/// 32-bit integer in \\a __W with signed saturation, and store the packed\n" "/// 32-bit results in \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256i _mm256_dpwsuds_epi32(__m256i __W, __m256i __A, __m256i __B)\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPDPWSUDS instruction.\n" "///\n" "/// \\param __W\n" "/// A 256-bit vector of [8 x int].\n" "/// \\param __A\n" "/// A 256-bit vector of [16 x short].\n" "/// \\param __B\n" "/// A 256-bit vector of [16 x unsigned short].\n" "/// \\returns\n" "/// A 256-bit vector of [8 x int].\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 7\n" "/// tmp1.dword := SignExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j])\n" "/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1])\n" "/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2)\n" "/// ENDFOR\n" "/// dst[MAX:256] := 0\n" "/// \\endcode\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_dpwsuds_epi32(__m256i __W, __m256i __A, __m256i __B) {\n" " return (__m256i)__builtin_ia32_vpdpwsuds256((__v8si)__W, (__v8si)__A,\n" " (__v8si)__B);\n" "}\n" "\n" "/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \\a __A with\n" "/// corresponding signed 16-bit integers in \\a __B, producing 2 intermediate\n" "/// signed 16-bit results. 
Sum these 2 results with the corresponding\n" "/// 32-bit integer in \\a __W, and store the packed 32-bit results in \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm_dpbusd_epi32(__m128i __W, __m128i __A, __m128i __B)\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPDPWUSD instruction.\n" "///\n" "/// \\param __W\n" "/// A 128-bit vector of [4 x int].\n" "/// \\param __A\n" "/// A 128-bit vector of [8 x unsigned short].\n" "/// \\param __B\n" "/// A 128-bit vector of [8 x short].\n" "/// \\returns\n" "/// A 128-bit vector of [4 x int].\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 3\n" "/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])\n" "/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])\n" "/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2\n" "/// ENDFOR\n" "/// dst[MAX:128] := 0\n" "/// \\endcode\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwusd_epi32(__m128i __W,\n" " __m128i __A,\n" " __m128i __B) {\n" " return (__m128i)__builtin_ia32_vpdpwusd128((__v4si)__W, (__v4si)__A,\n" " (__v4si)__B);\n" "}\n" "\n" "/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \\a __A with\n" "/// corresponding signed 16-bit integers in \\a __B, producing 2 intermediate\n" "/// signed 16-bit results. 
Sum these 2 results with the corresponding\n" "/// 32-bit integer in \\a __W, and store the packed 32-bit results in \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256i _mm256_dpwusd_epi32(__m256i __W, __m256i __A, __m256i __B)\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPDPWUSD instruction.\n" "///\n" "/// \\param __W\n" "/// A 256-bit vector of [8 x int].\n" "/// \\param __A\n" "/// A 256-bit vector of [16 x unsigned short].\n" "/// \\param __B\n" "/// A 256-bit vector of [16 x short].\n" "/// \\returns\n" "/// A 256-bit vector of [8 x int].\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 7\n" "/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])\n" "/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])\n" "/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2\n" "/// ENDFOR\n" "/// dst[MAX:256] := 0\n" "/// \\endcode\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_dpwusd_epi32(__m256i __W, __m256i __A, __m256i __B) {\n" " return (__m256i)__builtin_ia32_vpdpwusd256((__v8si)__W, (__v8si)__A,\n" " (__v8si)__B);\n" "}\n" "\n" "/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \\a __A with\n" "/// corresponding signed 16-bit integers in \\a __B, producing 2 intermediate\n" "/// signed 16-bit results. 
Sum these 2 results with the corresponding\n" "/// 32-bit integer in \\a __W with signed saturation, and store the packed\n" "/// 32-bit results in \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm_dpwusds_epi32(__m128i __W, __m128i __A, __m128i __B)\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPDPWSUDS instruction.\n" "///\n" "/// \\param __W\n" "/// A 128-bit vector of [4 x int].\n" "/// \\param __A\n" "/// A 128-bit vector of [8 x unsigned short].\n" "/// \\param __B\n" "/// A 128-bit vector of [8 x short].\n" "/// \\returns\n" "/// A 128-bit vector of [4 x int].\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 3\n" "/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])\n" "/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])\n" "/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2)\n" "/// ENDFOR\n" "/// dst[MAX:128] := 0\n" "/// \\endcode\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwusds_epi32(__m128i __W,\n" " __m128i __A,\n" " __m128i __B) {\n" " return (__m128i)__builtin_ia32_vpdpwusds128((__v4si)__W, (__v4si)__A,\n" " (__v4si)__B);\n" "}\n" "\n" "/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \\a __A with\n" "/// corresponding signed 16-bit integers in \\a __B, producing 2 intermediate\n" "/// signed 16-bit results. 
Sum these 2 results with the corresponding\n" "/// 32-bit integer in \\a __W with signed saturation, and store the packed\n" "/// 32-bit results in \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256i _mm256_dpwsuds_epi32(__m256i __W, __m256i __A, __m256i __B)\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPDPWSUDS instruction.\n" "///\n" "/// \\param __W\n" "/// A 256-bit vector of [8 x int].\n" "/// \\param __A\n" "/// A 256-bit vector of [16 x unsigned short].\n" "/// \\param __B\n" "/// A 256-bit vector of [16 x short].\n" "/// \\returns\n" "/// A 256-bit vector of [8 x int].\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 7\n" "/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])\n" "/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])\n" "/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2)\n" "/// ENDFOR\n" "/// dst[MAX:256] := 0\n" "/// \\endcode\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_dpwusds_epi32(__m256i __W, __m256i __A, __m256i __B) {\n" " return (__m256i)__builtin_ia32_vpdpwusds256((__v8si)__W, (__v8si)__A,\n" " (__v8si)__B);\n" "}\n" "\n" "/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \\a __A with\n" "/// corresponding unsigned 16-bit integers in \\a __B, producing 2 intermediate\n" "/// signed 16-bit results. 
Sum these 2 results with the corresponding\n" "/// 32-bit integer in \\a __W, and store the packed 32-bit results in \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm_dpwuud_epi32(__m128i __W, __m128i __A, __m128i __B)\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPDPWUUD instruction.\n" "///\n" "/// \\param __W\n" "/// A 128-bit vector of [4 x unsigned int].\n" "/// \\param __A\n" "/// A 128-bit vector of [8 x unsigned short].\n" "/// \\param __B\n" "/// A 128-bit vector of [8 x unsigned short].\n" "/// \\returns\n" "/// A 128-bit vector of [4 x unsigned int].\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 3\n" "/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j])\n" "/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1])\n" "/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2\n" "/// ENDFOR\n" "/// dst[MAX:128] := 0\n" "/// \\endcode\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwuud_epi32(__m128i __W,\n" " __m128i __A,\n" " __m128i __B) {\n" " return (__m128i)__builtin_ia32_vpdpwuud128((__v4si)__W, (__v4si)__A,\n" " (__v4si)__B);\n" "}\n" "\n" "/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \\a __A with\n" "/// corresponding unsigned 16-bit integers in \\a __B, producing 2 intermediate\n" "/// signed 16-bit results. 
Sum these 2 results with the corresponding\n" "/// 32-bit integer in \\a __W, and store the packed 32-bit results in \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256i _mm256_dpwuud_epi32(__m256i __W, __m256i __A, __m256i __B)\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPDPWUUD instruction.\n" "///\n" "/// \\param __W\n" "/// A 256-bit vector of [8 x unsigned int].\n" "/// \\param __A\n" "/// A 256-bit vector of [16 x unsigned short].\n" "/// \\param __B\n" "/// A 256-bit vector of [16 x unsigned short].\n" "/// \\returns\n" "/// A 256-bit vector of [8 x unsigned int].\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 7\n" "/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j])\n" "/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1])\n" "/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2\n" "/// ENDFOR\n" "/// dst[MAX:256] := 0\n" "/// \\endcode\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_dpwuud_epi32(__m256i __W, __m256i __A, __m256i __B) {\n" " return (__m256i)__builtin_ia32_vpdpwuud256((__v8si)__W, (__v8si)__A,\n" " (__v8si)__B);\n" "}\n" "\n" "/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \\a __A with\n" "/// corresponding unsigned 16-bit integers in \\a __B, producing 2 intermediate\n" "/// signed 16-bit results. 
Sum these 2 results with the corresponding\n" "/// 32-bit integer in \\a __W with signed saturation, and store the packed\n" "/// 32-bit results in \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm_dpwsuds_epi32(__m128i __W, __m128i __A, __m128i __B)\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPDPWSUDS instruction.\n" "///\n" "/// \\param __W\n" "/// A 128-bit vector of [4 x unsigned int].\n" "/// \\param __A\n" "/// A 128-bit vector of [8 x unsigned short].\n" "/// \\param __B\n" "/// A 128-bit vector of [8 x unsigned short].\n" "/// \\returns\n" "/// A 128-bit vector of [4 x unsigned int].\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 3\n" "/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j])\n" "/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1])\n" "/// dst.dword[j] := UNSIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2)\n" "/// ENDFOR\n" "/// dst[MAX:128] := 0\n" "/// \\endcode\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwuuds_epi32(__m128i __W,\n" " __m128i __A,\n" " __m128i __B) {\n" " return (__m128i)__builtin_ia32_vpdpwuuds128((__v4si)__W, (__v4si)__A,\n" " (__v4si)__B);\n" "}\n" "\n" "/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \\a __A with\n" "/// corresponding unsigned 16-bit integers in \\a __B, producing 2 intermediate\n" "/// signed 16-bit results. 
Sum these 2 results with the corresponding\n" "/// 32-bit integer in \\a __W with signed saturation, and store the packed\n" "/// 32-bit results in \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256i _mm256_dpwuuds_epi32(__m256i __W, __m256i __A, __m256i __B)\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPDPWSUDS instruction.\n" "///\n" "/// \\param __W\n" "/// A 256-bit vector of [8 x unsigned int].\n" "/// \\param __A\n" "/// A 256-bit vector of [16 x unsigned short].\n" "/// \\param __B\n" "/// A 256-bit vector of [16 x unsigned short].\n" "/// \\returns\n" "/// A 256-bit vector of [8 x unsigned int].\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 7\n" "/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j])\n" "/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1])\n" "/// dst.dword[j] := UNSIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2)\n" "/// ENDFOR\n" "/// dst[MAX:256] := 0\n" "/// \\endcode\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_dpwuuds_epi32(__m256i __W, __m256i __A, __m256i __B) {\n" " return (__m256i)__builtin_ia32_vpdpwuuds256((__v8si)__W, (__v8si)__A,\n" " (__v8si)__B);\n" "}\n" "\n" "#undef __DEFAULT_FN_ATTRS128\n" "#undef __DEFAULT_FN_ATTRS256\n" "\n" "#endif // __AVXVNNIINT16INTRIN_H\n" "" } , { "/builtins/avxvnniint8intrin.h" , "/*===-------- avxvnniint8intrin.h - AVXVNNIINT8 intrinsics -----------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "#ifndef __IMMINTRIN_H\n" "#error \\\n" " \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __AVXVNNIINT8INTRIN_H\n" "#define __AVXVNNIINT8INTRIN_H\n" "\n" "/* Define the default 
attributes for the functions in this file. */\n" "#define __DEFAULT_FN_ATTRS256 \\\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"avxvnniint8\"), \\\n" " __min_vector_width__(256)))\n" "#define __DEFAULT_FN_ATTRS128 \\\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"avxvnniint8\"), \\\n" " __min_vector_width__(128)))\n" "\n" "/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \\a __A with\n" "/// corresponding signed 8-bit integers in \\a __B, producing 4 intermediate\n" "/// signed 16-bit results. Sum these 4 results with the corresponding\n" "/// 32-bit integer in \\a __W, and store the packed 32-bit results in \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// _mm_dpbssd_epi32(__m128i __W, __m128i __A, __m128i __B);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPDPBSSD instruction.\n" "///\n" "/// \\param __A\n" "/// A 128-bit vector of [16 x char].\n" "/// \\param __B\n" "/// A 128-bit vector of [16 x char].\n" "/// \\returns\n" "/// A 128-bit vector of [4 x int].\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 3\n" "/// tmp1.word := SignExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j])\n" "/// tmp2.word := SignExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1])\n" "/// tmp3.word := SignExtend16(__A.byte[4*j+2]) * SignExtend16(__B.byte[4*j+2])\n" "/// tmp4.word := SignExtend16(__A.byte[4*j+3]) * SignExtend16(__B.byte[4*j+3])\n" "/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4\n" "/// ENDFOR\n" "/// dst[MAX:128] := 0\n" "/// \\endcode\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpbssd_epi32(__m128i __W,\n" " __m128i __A,\n" " __m128i __B) {\n" " return (__m128i)__builtin_ia32_vpdpbssd128((__v4si)__W, (__v4si)__A,\n" " (__v4si)__B);\n" "}\n" "\n" "/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \\a __A with\n" "/// corresponding signed 8-bit integers in \\a __B, producing 4 intermediate\n" "/// signed 
16-bit results. Sum these 4 results with the corresponding\n" "/// 32-bit integer in \\a __W, and store the packed 32-bit results in \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// _mm256_dpbssd_epi32(__m256i __W, __m256i __A, __m256i __B);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPDPBSSD instruction.\n" "///\n" "/// \\param __A\n" "/// A 256-bit vector of [32 x char].\n" "/// \\param __B\n" "/// A 256-bit vector of [32 x char].\n" "/// \\returns\n" "/// A 256-bit vector of [8 x int].\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 7\n" "/// tmp1.word := SignExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j])\n" "/// tmp2.word := SignExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1])\n" "/// tmp3.word := SignExtend16(__A.byte[4*j+2]) * SignExtend16(__B.byte[4*j+2])\n" "/// tmp4.word := SignExtend16(__A.byte[4*j+3]) * SignExtend16(__B.byte[4*j+3])\n" "/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4\n" "/// ENDFOR\n" "/// dst[MAX:256] := 0\n" "/// \\endcode\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_dpbssd_epi32(__m256i __W, __m256i __A, __m256i __B) {\n" " return (__m256i)__builtin_ia32_vpdpbssd256((__v8si)__W, (__v8si)__A,\n" " (__v8si)__B);\n" "}\n" "\n" "/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \\a __A with\n" "/// corresponding signed 8-bit integers in \\a __B, producing 4 intermediate\n" "/// signed 16-bit results. 
Sum these 4 results with the corresponding\n" "/// 32-bit integer in \\a __W with signed saturation, and store the packed\n" "/// 32-bit results in \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// _mm_dpbssds_epi32( __m128i __W, __m128i __A, __m128i __B);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPDPBSSD instruction.\n" "///\n" "/// \\param __A\n" "/// A 128-bit vector of [16 x char].\n" "/// \\param __B\n" "/// A 128-bit vector of [16 x char].\n" "/// \\returns\n" "/// A 128-bit vector of [4 x int].\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 3\n" "/// tmp1.word := SignExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j])\n" "/// tmp2.word := SignExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1])\n" "/// tmp3.word := SignExtend16(__A.byte[4*j+2]) * SignExtend16(__B.byte[4*j+2])\n" "/// tmp4.word := SignExtend16(__A.byte[4*j+3]) * SignExtend16(__B.byte[4*j+3])\n" "/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)\n" "/// ENDFOR\n" "/// dst[MAX:128] := 0\n" "/// \\endcode\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpbssds_epi32(__m128i __W,\n" " __m128i __A,\n" " __m128i __B) {\n" " return (__m128i)__builtin_ia32_vpdpbssds128((__v4si)__W, (__v4si)__A,\n" " (__v4si)__B);\n" "}\n" "\n" "/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \\a __A with\n" "/// corresponding signed 8-bit integers in \\a __B, producing 4 intermediate\n" "/// signed 16-bit results. 
Sum these 4 results with the corresponding\n" "/// 32-bit integer in \\a __W with signed saturation, and store the packed\n" "/// 32-bit results in \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// _mm256_dpbssds_epi32(__m256i __W, __m256i __A, __m256i __B);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPDPBSSD instruction.\n" "///\n" "/// \\param __A\n" "/// A 256-bit vector of [32 x char].\n" "/// \\param __B\n" "/// A 256-bit vector of [32 x char].\n" "/// \\returns\n" "/// A 256-bit vector of [8 x int].\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 7\n" "/// tmp1.word := SignExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j])\n" "/// tmp2.word := SignExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1])\n" "/// tmp3.word := SignExtend16(__A.byte[4*j+2]) * SignExtend16(__B.byte[4*j+2])\n" "/// tmp4.word := SignExtend16(__A.byte[4*j+3]) * SignExtend16(__B.byte[4*j+3])\n" "/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)\n" "/// ENDFOR\n" "/// dst[MAX:256] := 0\n" "/// \\endcode\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_dpbssds_epi32(__m256i __W, __m256i __A, __m256i __B) {\n" " return (__m256i)__builtin_ia32_vpdpbssds256((__v8si)__W, (__v8si)__A,\n" " (__v8si)__B);\n" "}\n" "\n" "/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \\a __A with\n" "/// corresponding unsigned 8-bit integers in \\a __B, producing 4 intermediate\n" "/// signed 16-bit results. 
Sum these 4 results with the corresponding\n" "/// 32-bit integer in \\a __W, and store the packed 32-bit results in \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// _mm_dpbsud_epi32(__m128i __W, __m128i __A, __m128i __B);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPDPBSSD instruction.\n" "///\n" "/// \\param __A\n" "/// A 128-bit vector of [16 x char].\n" "/// \\param __B\n" "/// A 128-bit vector of [16 x unsigned char].\n" "/// \\returns\n" "/// A 128-bit vector of [4 x int].\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 3\n" "/// tmp1.word := Signed(SignExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j]))\n" "/// tmp2.word := Signed(SignExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1]))\n" "/// tmp3.word := Signed(SignExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2]))\n" "/// tmp4.word := Signed(SignExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3]))\n" "/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4\n" "/// ENDFOR\n" "/// dst[MAX:128] := 0\n" "/// \\endcode\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpbsud_epi32(__m128i __W,\n" " __m128i __A,\n" " __m128i __B) {\n" " return (__m128i)__builtin_ia32_vpdpbsud128((__v4si)__W, (__v4si)__A,\n" " (__v4si)__B);\n" "}\n" "\n" "/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \\a __A with\n" "/// corresponding unsigned 8-bit integers in \\a __B, producing 4 intermediate\n" "/// signed 16-bit results. 
Sum these 4 results with the corresponding\n" "/// 32-bit integer in \\a __W, and store the packed 32-bit results in \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// _mm256_dpbsud_epi32(__m256i __W, __m256i __A, __m256i __B);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPDPBSSD instruction.\n" "///\n" "/// \\param __A\n" "/// A 256-bit vector of [32 x char].\n" "/// \\param __B\n" "/// A 256-bit vector of [32 x unsigned char].\n" "/// \\returns\n" "/// A 256-bit vector of [8 x int].\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 7\n" "/// tmp1.word := Signed(SignExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j]))\n" "/// tmp2.word := Signed(SignExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1]))\n" "/// tmp3.word := Signed(SignExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2]))\n" "/// tmp4.word := Signed(SignExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3]))\n" "/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4\n" "/// ENDFOR\n" "/// dst[MAX:256] := 0\n" "/// \\endcode\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_dpbsud_epi32(__m256i __W, __m256i __A, __m256i __B) {\n" " return (__m256i)__builtin_ia32_vpdpbsud256((__v8si)__W, (__v8si)__A,\n" " (__v8si)__B);\n" "}\n" "\n" "/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \\a __A with\n" "/// corresponding unsigned 8-bit integers in \\a __B, producing 4 intermediate\n" "/// signed 16-bit results. 
Sum these 4 results with the corresponding\n" "/// 32-bit integer in \\a __W with signed saturation, and store the packed\n" "/// 32-bit results in \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// _mm_dpbsuds_epi32( __m128i __W, __m128i __A, __m128i __B);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPDPBSSD instruction.\n" "///\n" "/// \\param __A\n" "/// A 128-bit vector of [16 x char].\n" "/// \\param __B\n" "/// A 128-bit vector of [16 x unsigned char].\n" "/// \\returns\n" "/// A 128-bit vector of [4 x int].\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 3\n" "/// tmp1.word := Signed(SignExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j]))\n" "/// tmp2.word := Signed(SignExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1]))\n" "/// tmp3.word := Signed(SignExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2]))\n" "/// tmp4.word := Signed(SignExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3]))\n" "/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)\n" "/// ENDFOR\n" "/// dst[MAX:128] := 0\n" "/// \\endcode\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpbsuds_epi32(__m128i __W,\n" " __m128i __A,\n" " __m128i __B) {\n" " return (__m128i)__builtin_ia32_vpdpbsuds128((__v4si)__W, (__v4si)__A,\n" " (__v4si)__B);\n" "}\n" "\n" "/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \\a __A with\n" "/// corresponding unsigned 8-bit integers in \\a __B, producing 4 intermediate\n" "/// signed 16-bit results. 
Sum these 4 results with the corresponding\n" "/// 32-bit integer in \\a __W with signed saturation, and store the packed\n" "/// 32-bit results in \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// _mm256_dpbsuds_epi32(__m256i __W, __m256i __A, __m256i __B);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPDPBSSD instruction.\n" "///\n" "/// \\param __A\n" "/// A 256-bit vector of [32 x char].\n" "/// \\param __B\n" "/// A 256-bit vector of [32 x unsigned char].\n" "/// \\returns\n" "/// A 256-bit vector of [8 x int].\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 7\n" "/// tmp1.word := Signed(SignExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j]))\n" "/// tmp2.word := Signed(SignExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1]))\n" "/// tmp3.word := Signed(SignExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2]))\n" "/// tmp4.word := Signed(SignExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3]))\n" "/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)\n" "/// ENDFOR\n" "/// dst[MAX:256] := 0\n" "/// \\endcode\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_dpbsuds_epi32(__m256i __W, __m256i __A, __m256i __B) {\n" " return (__m256i)__builtin_ia32_vpdpbsuds256((__v8si)__W, (__v8si)__A,\n" " (__v8si)__B);\n" "}\n" "\n" "/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \\a __A with\n" "/// corresponding unsigned 8-bit integers in \\a __B, producing 4 intermediate\n" "/// signed 16-bit results. 
Sum these 4 results with the corresponding\n" "/// 32-bit integer in \\a __W, and store the packed 32-bit results in \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// _mm_dpbuud_epi32(__m128i __W, __m128i __A, __m128i __B);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPDPBSSD instruction.\n" "///\n" "/// \\param __A\n" "/// A 128-bit vector of [16 x unsigned char].\n" "/// \\param __B\n" "/// A 128-bit vector of [16 x unsigned char].\n" "/// \\returns\n" "/// A 128-bit vector of [4 x int].\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 3\n" "/// tmp1.word := ZeroExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j])\n" "/// tmp2.word := ZeroExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1])\n" "/// tmp3.word := ZeroExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2])\n" "/// tmp4.word := ZeroExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3])\n" "/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4\n" "/// ENDFOR\n" "/// dst[MAX:128] := 0\n" "/// \\endcode\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpbuud_epi32(__m128i __W,\n" " __m128i __A,\n" " __m128i __B) {\n" " return (__m128i)__builtin_ia32_vpdpbuud128((__v4si)__W, (__v4si)__A,\n" " (__v4si)__B);\n" "}\n" "\n" "/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \\a __A with\n" "/// corresponding unsigned 8-bit integers in \\a __B, producing 4 intermediate\n" "/// signed 16-bit results. 
Sum these 4 results with the corresponding\n" "/// 32-bit integer in \\a __W, and store the packed 32-bit results in \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// _mm256_dpbuud_epi32(__m256i __W, __m256i __A, __m256i __B);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPDPBSSD instruction.\n" "///\n" "/// \\param __A\n" "/// A 256-bit vector of [32 x unsigned char].\n" "/// \\param __B\n" "/// A 256-bit vector of [32 x unsigned char].\n" "/// \\returns\n" "/// A 256-bit vector of [8 x int].\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 7\n" "/// tmp1.word := ZeroExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j])\n" "/// tmp2.word := ZeroExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1])\n" "/// tmp3.word := ZeroExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2])\n" "/// tmp4.word := ZeroExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3])\n" "/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4\n" "/// ENDFOR\n" "/// dst[MAX:256] := 0\n" "/// \\endcode\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_dpbuud_epi32(__m256i __W, __m256i __A, __m256i __B) {\n" " return (__m256i)__builtin_ia32_vpdpbuud256((__v8si)__W, (__v8si)__A,\n" " (__v8si)__B);\n" "}\n" "\n" "/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \\a __A with\n" "/// corresponding unsigned 8-bit integers in \\a __B, producing 4 intermediate\n" "/// signed 16-bit results. 
Sum these 4 results with the corresponding\n" "/// 32-bit integer in \\a __W with signed saturation, and store the packed\n" "/// 32-bit results in \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// _mm_dpbuuds_epi32( __m128i __W, __m128i __A, __m128i __B);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPDPBUUDS instruction.\n" "///\n" "/// \\param __A\n" "/// A 128-bit vector of [16 x unsigned char].\n" "/// \\param __B\n" "/// A 128-bit vector of [16 x unsigned char].\n" "/// \\returns\n" "/// A 128-bit vector of [4 x int].\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 3\n" "/// tmp1.word := ZeroExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j])\n" "/// tmp2.word := ZeroExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1])\n" "/// tmp3.word := ZeroExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2])\n" "/// tmp4.word := ZeroExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3])\n" "/// dst.dword[j] := UNSIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)\n" "/// ENDFOR\n" "/// dst[MAX:128] := 0\n" "/// \\endcode\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpbuuds_epi32(__m128i __W,\n" " __m128i __A,\n" " __m128i __B) {\n" " return (__m128i)__builtin_ia32_vpdpbuuds128((__v4si)__W, (__v4si)__A,\n" " (__v4si)__B);\n" "}\n" "\n" "/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \\a __A with\n" "/// corresponding unsigned 8-bit integers in \\a __B, producing 4 intermediate\n" "/// signed 16-bit results. 
Sum these 4 results with the corresponding\n" "/// 32-bit integer in \\a __W with signed saturation, and store the packed\n" "/// 32-bit results in \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// _mm256_dpbuuds_epi32(__m256i __W, __m256i __A, __m256i __B);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VPDPBUUDS instruction.\n" "///\n" "/// \\param __A\n" "/// A 256-bit vector of [32 x unsigned char].\n" "/// \\param __B\n" "/// A 256-bit vector of [32 x unsigned char].\n" "/// \\returns\n" "/// A 256-bit vector of [8 x int].\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 7\n" "/// tmp1.word := ZeroExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j])\n" "/// tmp2.word := ZeroExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1])\n" "/// tmp3.word := ZeroExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2])\n" "/// tmp4.word := ZeroExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3])\n" "/// dst.dword[j] := UNSIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)\n" "/// ENDFOR\n" "/// dst[MAX:256] := 0\n" "/// \\endcode\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_dpbuuds_epi32(__m256i __W, __m256i __A, __m256i __B) {\n" " return (__m256i)__builtin_ia32_vpdpbuuds256((__v8si)__W, (__v8si)__A,\n" " (__v8si)__B);\n" "}\n" "#undef __DEFAULT_FN_ATTRS128\n" "#undef __DEFAULT_FN_ATTRS256\n" "\n" "#endif // __AVXVNNIINT8INTRIN_H\n" "" } , { "/builtins/avxvnniintrin.h" , "/*===--------------- avxvnniintrin.h - VNNI intrinsics --------------------===\n" " *\n" " *\n" " * Permission is hereby granted, free of charge, to any person obtaining a copy\n" " * of this software and associated documentation files (the \"Software\"), to deal\n" " * in the Software without restriction, including without limitation the rights\n" " * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n" " * copies of the Software, and to permit persons to whom the Software is\n" " * 
furnished to do so, subject to the following conditions:\n" " *\n" " * The above copyright notice and this permission notice shall be included in\n" " * all copies or substantial portions of the Software.\n" " *\n" " * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n" " * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n" " * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n" " * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n" " * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n" " * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n" " * THE SOFTWARE.\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "#ifndef __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __AVXVNNIINTRIN_H\n" "#define __AVXVNNIINTRIN_H\n" "\n" "/* Below intrinsics defined in avx512vlvnniintrin.h can be used for AVXVNNI */\n" "/// \\fn __m256i _mm256_dpbusd_epi32(__m256i __S, __m256i __A, __m256i __B)\n" "/// \\fn __m256i _mm256_dpbusds_epi32(__m256i __S, __m256i __A, __m256i __B)\n" "/// \\fn __m256i _mm256_dpwssd_epi32(__m256i __S, __m256i __A, __m256i __B)\n" "/// \\fn __m256i _mm256_dpwssds_epi32(__m256i __S, __m256i __A, __m256i __B)\n" "/// \\fn __m128i _mm_dpbusd_epi32(__m128i __S, __m128i __A, __m128i __B)\n" "/// \\fn __m128i _mm_dpbusds_epi32(__m128i __S, __m128i __A, __m128i __B)\n" "/// \\fn __m128i _mm_dpwssd_epi32(__m128i __S, __m128i __A, __m128i __B)\n" "/// \\fn __m128i _mm_dpwssds_epi32(__m128i __S, __m128i __A, __m128i __B)\n" "\n" "/* Intrinsics with _avx_ prefix are for compatibility with msvc. */\n" "/* Define the default attributes for the functions in this file. 
*/\n" "#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__(\"avxvnni\"), __min_vector_width__(256)))\n" "#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__(\"avxvnni\"), __min_vector_width__(128)))\n" "\n" "/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \\a __A with\n" "/// corresponding signed 8-bit integers in \\a __B, producing 4 intermediate signed\n" "/// 16-bit results. Sum these 4 results with the corresponding 32-bit integer\n" "/// in \\a __S, and store the packed 32-bit results in DST.\n" "///\n" "/// This intrinsic corresponds to the VPDPBUSD instructions.\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 7\n" "/// tmp1.word := Signed(ZeroExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]))\n" "/// tmp2.word := Signed(ZeroExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]))\n" "/// tmp3.word := Signed(ZeroExtend16(__A.byte[4*j+2]) * SignExtend16(__B.byte[4*j+2]))\n" "/// tmp4.word := Signed(ZeroExtend16(__A.byte[4*j+3]) * SignExtend16(__B.byte[4*j+3]))\n" "/// DST.dword[j] := __S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4\n" "/// ENDFOR\n" "/// DST[MAX:256] := 0\n" "/// \\endcode\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_dpbusd_avx_epi32(__m256i __S, __m256i __A, __m256i __B)\n" "{\n" " return (__m256i)__builtin_ia32_vpdpbusd256((__v8si)__S, (__v8si)__A, (__v8si)__B);\n" "}\n" "\n" "/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \\a __A with\n" "/// corresponding signed 8-bit integers in \\a __B, producing 4 intermediate signed\n" "/// 16-bit results. 
Sum these 4 results with the corresponding 32-bit integer\n" "/// in \\a __S using signed saturation, and store the packed 32-bit results in DST.\n" "///\n" "/// This intrinsic corresponds to the VPDPBUSDS instructions.\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 7\n" "/// tmp1.word := Signed(ZeroExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]))\n" "/// tmp2.word := Signed(ZeroExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]))\n" "/// tmp3.word := Signed(ZeroExtend16(__A.byte[4*j+2]) * SignExtend16(__B.byte[4*j+2]))\n" "/// tmp4.word := Signed(ZeroExtend16(__A.byte[4*j+3]) * SignExtend16(__B.byte[4*j+3]))\n" "/// DST.dword[j] := Saturate32(__S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)\n" "/// ENDFOR\n" "/// DST[MAX:256] := 0\n" "/// \\endcode\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_dpbusds_avx_epi32(__m256i __S, __m256i __A, __m256i __B)\n" "{\n" " return (__m256i)__builtin_ia32_vpdpbusds256((__v8si)__S, (__v8si)__A, (__v8si)__B);\n" "}\n" "\n" "/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \\a __A with\n" "/// corresponding 16-bit integers in \\a __B, producing 2 intermediate signed 32-bit\n" "/// results. 
Sum these 2 results with the corresponding 32-bit integer in \\a __S,\n" "/// and store the packed 32-bit results in DST.\n" "///\n" "/// This intrinsic corresponds to the VPDPWSSD instructions.\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 7\n" "/// tmp1.dword := SignExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])\n" "/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])\n" "/// DST.dword[j] := __S.dword[j] + tmp1 + tmp2\n" "/// ENDFOR\n" "/// DST[MAX:256] := 0\n" "/// \\endcode\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_dpwssd_avx_epi32(__m256i __S, __m256i __A, __m256i __B)\n" "{\n" " return (__m256i)__builtin_ia32_vpdpwssd256((__v8si)__S, (__v8si)__A, (__v8si)__B);\n" "}\n" "\n" "/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \\a __A with\n" "/// corresponding 16-bit integers in \\a __B, producing 2 intermediate signed 32-bit\n" "/// results. Sum these 2 results with the corresponding 32-bit integer in \\a __S\n" "/// using signed saturation, and store the packed 32-bit results in DST.\n" "///\n" "/// This intrinsic corresponds to the VPDPWSSDS instructions.\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 7\n" "/// tmp1.dword := SignExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])\n" "/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])\n" "/// DST.dword[j] := Saturate32(__S.dword[j] + tmp1 + tmp2)\n" "/// ENDFOR\n" "/// DST[MAX:256] := 0\n" "/// \\endcode\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_dpwssds_avx_epi32(__m256i __S, __m256i __A, __m256i __B)\n" "{\n" " return (__m256i)__builtin_ia32_vpdpwssds256((__v8si)__S, (__v8si)__A, (__v8si)__B);\n" "}\n" "\n" "/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \\a __A with\n" "/// corresponding signed 8-bit integers in \\a __B, producing 4 intermediate signed\n" "/// 16-bit results. 
Sum these 4 results with the corresponding 32-bit integer\n" "/// in \\a __S, and store the packed 32-bit results in DST.\n" "///\n" "/// This intrinsic corresponds to the VPDPBUSD instructions.\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 3\n" "/// tmp1.word := Signed(ZeroExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]))\n" "/// tmp2.word := Signed(ZeroExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]))\n" "/// tmp3.word := Signed(ZeroExtend16(__A.byte[4*j+2]) * SignExtend16(__B.byte[4*j+2]))\n" "/// tmp4.word := Signed(ZeroExtend16(__A.byte[4*j+3]) * SignExtend16(__B.byte[4*j+3]))\n" "/// DST.dword[j] := __S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4\n" "/// ENDFOR\n" "/// DST[MAX:128] := 0\n" "/// \\endcode\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS128\n" "_mm_dpbusd_avx_epi32(__m128i __S, __m128i __A, __m128i __B)\n" "{\n" " return (__m128i)__builtin_ia32_vpdpbusd128((__v4si)__S, (__v4si)__A, (__v4si)__B);\n" "}\n" "\n" "/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \\a __A with\n" "/// corresponding signed 8-bit integers in \\a __B, producing 4 intermediate signed\n" "/// 16-bit results. 
Sum these 4 results with the corresponding 32-bit integer\n" "/// in \\a __S using signed saturation, and store the packed 32-bit results in DST.\n" "///\n" "/// This intrinsic corresponds to the VPDPBUSDS instructions.\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 3\n" "/// tmp1.word := Signed(ZeroExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]))\n" "/// tmp2.word := Signed(ZeroExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]))\n" "/// tmp3.word := Signed(ZeroExtend16(__A.byte[4*j+2]) * SignExtend16(__B.byte[4*j+2]))\n" "/// tmp4.word := Signed(ZeroExtend16(__A.byte[4*j+3]) * SignExtend16(__B.byte[4*j+3]))\n" "/// DST.dword[j] := Saturate32(__S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)\n" "/// ENDFOR\n" "/// DST[MAX:128] := 0\n" "/// \\endcode\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS128\n" "_mm_dpbusds_avx_epi32(__m128i __S, __m128i __A, __m128i __B)\n" "{\n" " return (__m128i)__builtin_ia32_vpdpbusds128((__v4si)__S, (__v4si)__A, (__v4si)__B);\n" "}\n" "\n" "/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \\a __A with\n" "/// corresponding 16-bit integers in \\a __B, producing 2 intermediate signed 32-bit\n" "/// results. 
Sum these 2 results with the corresponding 32-bit integer in \\a __S,\n" "/// and store the packed 32-bit results in DST.\n" "///\n" "/// This intrinsic corresponds to the VPDPWSSD instructions.\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 3\n" "/// tmp1.dword := SignExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])\n" "/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])\n" "/// DST.dword[j] := __S.dword[j] + tmp1 + tmp2\n" "/// ENDFOR\n" "/// DST[MAX:128] := 0\n" "/// \\endcode\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS128\n" "_mm_dpwssd_avx_epi32(__m128i __S, __m128i __A, __m128i __B)\n" "{\n" " return (__m128i)__builtin_ia32_vpdpwssd128((__v4si)__S, (__v4si)__A, (__v4si)__B);\n" "}\n" "\n" "/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \\a __A with\n" "/// corresponding 16-bit integers in \\a __B, producing 2 intermediate signed 32-bit\n" "/// results. Sum these 2 results with the corresponding 32-bit integer in \\a __S\n" "/// using signed saturation, and store the packed 32-bit results in DST.\n" "///\n" "/// This intrinsic corresponds to the VPDPWSSDS instructions.\n" "///\n" "/// \\code{.operation}\n" "/// FOR j := 0 to 3\n" "/// tmp1.dword := SignExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])\n" "/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])\n" "/// DST.dword[j] := Saturate32(__S.dword[j] + tmp1 + tmp2)\n" "/// ENDFOR\n" "/// DST[MAX:128] := 0\n" "/// \\endcode\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS128\n" "_mm_dpwssds_avx_epi32(__m128i __S, __m128i __A, __m128i __B)\n" "{\n" " return (__m128i)__builtin_ia32_vpdpwssds128((__v4si)__S, (__v4si)__A, (__v4si)__B);\n" "}\n" "\n" "#undef __DEFAULT_FN_ATTRS128\n" "#undef __DEFAULT_FN_ATTRS256\n" "\n" "#endif // __AVXVNNIINTRIN_H\n" "" } , { "/builtins/bmi2intrin.h" , "/*===---- bmi2intrin.h - BMI2 intrinsics -----------------------------------===\n" " *\n" " * Part of the LLVM Project, under 
the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __BMI2INTRIN_H\n" "#define __BMI2INTRIN_H\n" "\n" "/* Define the default attributes for the functions in this file. */\n" "#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__(\"bmi2\")))\n" "\n" "/// Copies the unsigned 32-bit integer \\a __X and zeroes the upper bits\n" "/// starting at bit number \\a __Y.\n" "///\n" "/// \\code{.operation}\n" "/// i := __Y[7:0]\n" "/// result := __X\n" "/// IF i < 32\n" "/// result[31:i] := 0\n" "/// FI\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c BZHI instruction.\n" "///\n" "/// \\param __X\n" "/// The 32-bit source value to copy.\n" "/// \\param __Y\n" "/// The lower 8 bits specify the bit number of the lowest bit to zero.\n" "/// \\returns The partially zeroed 32-bit value.\n" "static __inline__ unsigned int __DEFAULT_FN_ATTRS\n" "_bzhi_u32(unsigned int __X, unsigned int __Y)\n" "{\n" " return __builtin_ia32_bzhi_si(__X, __Y);\n" "}\n" "\n" "/// Deposit (scatter) low-order bits from the unsigned 32-bit integer \\a __X\n" "/// into the 32-bit result, according to the mask in the unsigned 32-bit\n" "/// integer \\a __Y. 
All other bits of the result are zero.\n" "///\n" "/// \\code{.operation}\n" "/// i := 0\n" "/// result := 0\n" "/// FOR m := 0 TO 31\n" "/// IF __Y[m] == 1\n" "/// result[m] := __X[i]\n" "/// i := i + 1\n" "/// ENDIF\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c PDEP instruction.\n" "///\n" "/// \\param __X\n" "/// The 32-bit source value to copy.\n" "/// \\param __Y\n" "/// The 32-bit mask specifying where to deposit source bits.\n" "/// \\returns The 32-bit result.\n" "static __inline__ unsigned int __DEFAULT_FN_ATTRS\n" "_pdep_u32(unsigned int __X, unsigned int __Y)\n" "{\n" " return __builtin_ia32_pdep_si(__X, __Y);\n" "}\n" "\n" "/// Extract (gather) bits from the unsigned 32-bit integer \\a __X into the\n" "/// low-order bits of the 32-bit result, according to the mask in the\n" "/// unsigned 32-bit integer \\a __Y. All other bits of the result are zero.\n" "///\n" "/// \\code{.operation}\n" "/// i := 0\n" "/// result := 0\n" "/// FOR m := 0 TO 31\n" "/// IF __Y[m] == 1\n" "/// result[i] := __X[m]\n" "/// i := i + 1\n" "/// ENDIF\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c PEXT instruction.\n" "///\n" "/// \\param __X\n" "/// The 32-bit source value to copy.\n" "/// \\param __Y\n" "/// The 32-bit mask specifying which source bits to extract.\n" "/// \\returns The 32-bit result.\n" "static __inline__ unsigned int __DEFAULT_FN_ATTRS\n" "_pext_u32(unsigned int __X, unsigned int __Y)\n" "{\n" " return __builtin_ia32_pext_si(__X, __Y);\n" "}\n" "\n" "/// Multiplies the unsigned 32-bit integers \\a __X and \\a __Y to form a\n" "/// 64-bit product. 
Stores the upper 32 bits of the product in the\n" "/// memory at \\a __P and returns the lower 32 bits.\n" "///\n" "/// \\code{.operation}\n" "/// Store32(__P, (__X * __Y)[63:32])\n" "/// result := (__X * __Y)[31:0]\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c MULX instruction.\n" "///\n" "/// \\param __X\n" "/// An unsigned 32-bit multiplicand.\n" "/// \\param __Y\n" "/// An unsigned 32-bit multiplicand.\n" "/// \\param __P\n" "/// A pointer to memory for storing the upper half of the product.\n" "/// \\returns The lower half of the product.\n" "static __inline__ unsigned int __DEFAULT_FN_ATTRS\n" "_mulx_u32(unsigned int __X, unsigned int __Y, unsigned int *__P)\n" "{\n" " unsigned long long __res = (unsigned long long) __X * __Y;\n" " *__P = (unsigned int)(__res >> 32);\n" " return (unsigned int)__res;\n" "}\n" "\n" "#ifdef __x86_64__\n" "\n" "/// Copies the unsigned 64-bit integer \\a __X and zeroes the upper bits\n" "/// starting at bit number \\a __Y.\n" "///\n" "/// \\code{.operation}\n" "/// i := __Y[7:0]\n" "/// result := __X\n" "/// IF i < 64\n" "/// result[63:i] := 0\n" "/// FI\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c BZHI instruction.\n" "///\n" "/// \\param __X\n" "/// The 64-bit source value to copy.\n" "/// \\param __Y\n" "/// The lower 8 bits specify the bit number of the lowest bit to zero.\n" "/// \\returns The partially zeroed 64-bit value.\n" "static __inline__ unsigned long long __DEFAULT_FN_ATTRS\n" "_bzhi_u64(unsigned long long __X, unsigned long long __Y)\n" "{\n" " return __builtin_ia32_bzhi_di(__X, __Y);\n" "}\n" "\n" "/// Deposit (scatter) low-order bits from the unsigned 64-bit integer \\a __X\n" "/// into the 64-bit result, according to the mask in the unsigned 64-bit\n" "/// integer \\a __Y. 
All other bits of the result are zero.\n" "///\n" "/// \\code{.operation}\n" "/// i := 0\n" "/// result := 0\n" "/// FOR m := 0 TO 63\n" "/// IF __Y[m] == 1\n" "/// result[m] := __X[i]\n" "/// i := i + 1\n" "/// ENDIF\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c PDEP instruction.\n" "///\n" "/// \\param __X\n" "/// The 64-bit source value to copy.\n" "/// \\param __Y\n" "/// The 64-bit mask specifying where to deposit source bits.\n" "/// \\returns The 64-bit result.\n" "static __inline__ unsigned long long __DEFAULT_FN_ATTRS\n" "_pdep_u64(unsigned long long __X, unsigned long long __Y)\n" "{\n" " return __builtin_ia32_pdep_di(__X, __Y);\n" "}\n" "\n" "/// Extract (gather) bits from the unsigned 64-bit integer \\a __X into the\n" "/// low-order bits of the 64-bit result, according to the mask in the\n" "/// unsigned 64-bit integer \\a __Y. All other bits of the result are zero.\n" "///\n" "/// \\code{.operation}\n" "/// i := 0\n" "/// result := 0\n" "/// FOR m := 0 TO 63\n" "/// IF __Y[m] == 1\n" "/// result[i] := __X[m]\n" "/// i := i + 1\n" "/// ENDIF\n" "/// ENDFOR\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c PEXT instruction.\n" "///\n" "/// \\param __X\n" "/// The 64-bit source value to copy.\n" "/// \\param __Y\n" "/// The 64-bit mask specifying which source bits to extract.\n" "/// \\returns The 64-bit result.\n" "static __inline__ unsigned long long __DEFAULT_FN_ATTRS\n" "_pext_u64(unsigned long long __X, unsigned long long __Y)\n" "{\n" " return __builtin_ia32_pext_di(__X, __Y);\n" "}\n" "\n" "/// Multiplies the unsigned 64-bit integers \\a __X and \\a __Y to form a\n" "/// 128-bit product. 
Stores the upper 64 bits of the product to the\n" "/// memory addressed by \\a __P and returns the lower 64 bits.\n" "///\n" "/// \\code{.operation}\n" "/// Store64(__P, (__X * __Y)[127:64])\n" "/// result := (__X * __Y)[63:0]\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c MULX instruction.\n" "///\n" "/// \\param __X\n" "/// An unsigned 64-bit multiplicand.\n" "/// \\param __Y\n" "/// An unsigned 64-bit multiplicand.\n" "/// \\param __P\n" "/// A pointer to memory for storing the upper half of the product.\n" "/// \\returns The lower half of the product.\n" "static __inline__ unsigned long long __DEFAULT_FN_ATTRS\n" "_mulx_u64 (unsigned long long __X, unsigned long long __Y,\n" " unsigned long long *__P)\n" "{\n" " unsigned __int128 __res = (unsigned __int128) __X * __Y;\n" " *__P = (unsigned long long) (__res >> 64);\n" " return (unsigned long long) __res;\n" "}\n" "\n" "#endif /* __x86_64__ */\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "\n" "#endif /* __BMI2INTRIN_H */\n" "" } , { "/builtins/bmiintrin.h" , "/*===---- bmiintrin.h - BMI intrinsics -------------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __BMIINTRIN_H\n" "#define __BMIINTRIN_H\n" "\n" "/* Allow using the tzcnt intrinsics even for non-BMI targets. Since the TZCNT\n" " instruction behaves as BSF on non-BMI targets, there is code that expects\n" " to use it as a potentially faster version of BSF. 
*/\n" "#define __RELAXED_FN_ATTRS __attribute__((__always_inline__, __nodebug__))\n" "\n" "#define _tzcnt_u16(a) (__tzcnt_u16((a)))\n" "\n" "/// Counts the number of trailing zero bits in the operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the TZCNT instruction.\n" "///\n" "/// \\param __X\n" "/// An unsigned 16-bit integer whose trailing zeros are to be counted.\n" "/// \\returns An unsigned 16-bit integer containing the number of trailing zero\n" "/// bits in the operand.\n" "static __inline__ unsigned short __RELAXED_FN_ATTRS\n" "__tzcnt_u16(unsigned short __X)\n" "{\n" " return __builtin_ia32_tzcnt_u16(__X);\n" "}\n" "\n" "/// Counts the number of trailing zero bits in the operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the TZCNT instruction.\n" "///\n" "/// \\param __X\n" "/// An unsigned 32-bit integer whose trailing zeros are to be counted.\n" "/// \\returns An unsigned 32-bit integer containing the number of trailing zero\n" "/// bits in the operand.\n" "/// \\see _mm_tzcnt_32\n" "static __inline__ unsigned int __RELAXED_FN_ATTRS\n" "__tzcnt_u32(unsigned int __X)\n" "{\n" " return __builtin_ia32_tzcnt_u32(__X);\n" "}\n" "\n" "/// Counts the number of trailing zero bits in the operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the TZCNT instruction.\n" "///\n" "/// \\param __X\n" "/// An unsigned 32-bit integer whose trailing zeros are to be counted.\n" "/// \\returns An 32-bit integer containing the number of trailing zero bits in\n" "/// the operand.\n" "/// \\see __tzcnt_u32\n" "static __inline__ int __RELAXED_FN_ATTRS\n" "_mm_tzcnt_32(unsigned int __X)\n" "{\n" " return (int)__builtin_ia32_tzcnt_u32(__X);\n" "}\n" "\n" "#define _tzcnt_u32(a) (__tzcnt_u32((a)))\n" "\n" "#ifdef __x86_64__\n" "\n" "/// Counts the number of trailing zero bits in the operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the TZCNT 
instruction.\n" "///\n" "/// \\param __X\n" "/// An unsigned 64-bit integer whose trailing zeros are to be counted.\n" "/// \\returns An unsigned 64-bit integer containing the number of trailing zero\n" "/// bits in the operand.\n" "/// \\see _mm_tzcnt_64\n" "static __inline__ unsigned long long __RELAXED_FN_ATTRS\n" "__tzcnt_u64(unsigned long long __X)\n" "{\n" " return __builtin_ia32_tzcnt_u64(__X);\n" "}\n" "\n" "/// Counts the number of trailing zero bits in the operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the TZCNT instruction.\n" "///\n" "/// \\param __X\n" "/// An unsigned 64-bit integer whose trailing zeros are to be counted.\n" "/// \\returns An 64-bit integer containing the number of trailing zero bits in\n" "/// the operand.\n" "/// \\see __tzcnt_u64\n" "static __inline__ long long __RELAXED_FN_ATTRS\n" "_mm_tzcnt_64(unsigned long long __X)\n" "{\n" " return (long long)__builtin_ia32_tzcnt_u64(__X);\n" "}\n" "\n" "#define _tzcnt_u64(a) (__tzcnt_u64((a)))\n" "\n" "#endif /* __x86_64__ */\n" "\n" "#undef __RELAXED_FN_ATTRS\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__BMI__)\n" "\n" "/* Define the default attributes for the functions in this file. 
*/\n" "#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__(\"bmi\")))\n" "\n" "#define _andn_u32(a, b) (__andn_u32((a), (b)))\n" "\n" "/* _bextr_u32 != __bextr_u32 */\n" "#define _blsi_u32(a) (__blsi_u32((a)))\n" "\n" "#define _blsmsk_u32(a) (__blsmsk_u32((a)))\n" "\n" "#define _blsr_u32(a) (__blsr_u32((a)))\n" "\n" "/// Performs a bitwise AND of the second operand with the one's\n" "/// complement of the first operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the ANDN instruction.\n" "///\n" "/// \\param __X\n" "/// An unsigned integer containing one of the operands.\n" "/// \\param __Y\n" "/// An unsigned integer containing one of the operands.\n" "/// \\returns An unsigned integer containing the bitwise AND of the second\n" "/// operand with the one's complement of the first operand.\n" "static __inline__ unsigned int __DEFAULT_FN_ATTRS\n" "__andn_u32(unsigned int __X, unsigned int __Y)\n" "{\n" " return ~__X & __Y;\n" "}\n" "\n" "/* AMD-specified, double-leading-underscore version of BEXTR */\n" "/// Extracts the specified bits from the first operand and returns them\n" "/// in the least significant bits of the result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the BEXTR instruction.\n" "///\n" "/// \\param __X\n" "/// An unsigned integer whose bits are to be extracted.\n" "/// \\param __Y\n" "/// An unsigned integer used to specify which bits are extracted. Bits [7:0]\n" "/// specify the index of the least significant bit. 
Bits [15:8] specify the\n" "/// number of bits to be extracted.\n" "/// \\returns An unsigned integer whose least significant bits contain the\n" "/// extracted bits.\n" "/// \\see _bextr_u32\n" "static __inline__ unsigned int __DEFAULT_FN_ATTRS\n" "__bextr_u32(unsigned int __X, unsigned int __Y)\n" "{\n" " return __builtin_ia32_bextr_u32(__X, __Y);\n" "}\n" "\n" "/* Intel-specified, single-leading-underscore version of BEXTR */\n" "/// Extracts the specified bits from the first operand and returns them\n" "/// in the least significant bits of the result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the BEXTR instruction.\n" "///\n" "/// \\param __X\n" "/// An unsigned integer whose bits are to be extracted.\n" "/// \\param __Y\n" "/// An unsigned integer used to specify the index of the least significant\n" "/// bit for the bits to be extracted. Bits [7:0] specify the index.\n" "/// \\param __Z\n" "/// An unsigned integer used to specify the number of bits to be extracted.\n" "/// Bits [7:0] specify the number of bits.\n" "/// \\returns An unsigned integer whose least significant bits contain the\n" "/// extracted bits.\n" "/// \\see __bextr_u32\n" "static __inline__ unsigned int __DEFAULT_FN_ATTRS\n" "_bextr_u32(unsigned int __X, unsigned int __Y, unsigned int __Z)\n" "{\n" " return __builtin_ia32_bextr_u32 (__X, ((__Y & 0xff) | ((__Z & 0xff) << 8)));\n" "}\n" "\n" "/* Intel-specified, single-leading-underscore version of BEXTR2 */\n" "/// Extracts the specified bits from the first operand and returns them\n" "/// in the least significant bits of the result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the BEXTR instruction.\n" "///\n" "/// \\param __X\n" "/// An unsigned integer whose bits are to be extracted.\n" "/// \\param __Y\n" "/// An unsigned integer used to specify which bits are extracted. Bits [7:0]\n" "/// specify the index of the least significant bit. 
Bits [15:8] specify the\n" "/// number of bits to be extracted.\n" "/// \\returns An unsigned integer whose least significant bits contain the\n" "/// extracted bits.\n" "/// \\see __bextr_u32\n" "static __inline__ unsigned int __DEFAULT_FN_ATTRS\n" "_bextr2_u32(unsigned int __X, unsigned int __Y) {\n" " return __builtin_ia32_bextr_u32(__X, __Y);\n" "}\n" "\n" "/// Clears all bits in the source except for the least significant bit\n" "/// containing a value of 1 and returns the result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the BLSI instruction.\n" "///\n" "/// \\param __X\n" "/// An unsigned integer whose bits are to be cleared.\n" "/// \\returns An unsigned integer containing the result of clearing the bits from\n" "/// the source operand.\n" "static __inline__ unsigned int __DEFAULT_FN_ATTRS\n" "__blsi_u32(unsigned int __X)\n" "{\n" " return __X & -__X;\n" "}\n" "\n" "/// Creates a mask whose bits are set to 1, using bit 0 up to and\n" "/// including the least significant bit that is set to 1 in the source\n" "/// operand and returns the result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the BLSMSK instruction.\n" "///\n" "/// \\param __X\n" "/// An unsigned integer used to create the mask.\n" "/// \\returns An unsigned integer containing the newly created mask.\n" "static __inline__ unsigned int __DEFAULT_FN_ATTRS\n" "__blsmsk_u32(unsigned int __X)\n" "{\n" " return __X ^ (__X - 1);\n" "}\n" "\n" "/// Clears the least significant bit that is set to 1 in the source\n" "/// operand and returns the result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the BLSR instruction.\n" "///\n" "/// \\param __X\n" "/// An unsigned integer containing the operand to be cleared.\n" "/// \\returns An unsigned integer containing the result of clearing the source\n" "/// operand.\n" "static __inline__ unsigned int __DEFAULT_FN_ATTRS\n" "__blsr_u32(unsigned int __X)\n" "{\n" " return 
__X & (__X - 1);\n" "}\n" "\n" "#ifdef __x86_64__\n" "\n" "#define _andn_u64(a, b) (__andn_u64((a), (b)))\n" "\n" "/* _bextr_u64 != __bextr_u64 */\n" "#define _blsi_u64(a) (__blsi_u64((a)))\n" "\n" "#define _blsmsk_u64(a) (__blsmsk_u64((a)))\n" "\n" "#define _blsr_u64(a) (__blsr_u64((a)))\n" "\n" "/// Performs a bitwise AND of the second operand with the one's\n" "/// complement of the first operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the ANDN instruction.\n" "///\n" "/// \\param __X\n" "/// An unsigned 64-bit integer containing one of the operands.\n" "/// \\param __Y\n" "/// An unsigned 64-bit integer containing one of the operands.\n" "/// \\returns An unsigned 64-bit integer containing the bitwise AND of the second\n" "/// operand with the one's complement of the first operand.\n" "static __inline__ unsigned long long __DEFAULT_FN_ATTRS\n" "__andn_u64 (unsigned long long __X, unsigned long long __Y)\n" "{\n" " return ~__X & __Y;\n" "}\n" "\n" "/* AMD-specified, double-leading-underscore version of BEXTR */\n" "/// Extracts the specified bits from the first operand and returns them\n" "/// in the least significant bits of the result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the BEXTR instruction.\n" "///\n" "/// \\param __X\n" "/// An unsigned 64-bit integer whose bits are to be extracted.\n" "/// \\param __Y\n" "/// An unsigned 64-bit integer used to specify which bits are extracted. Bits\n" "/// [7:0] specify the index of the least significant bit. 
Bits [15:8] specify\n" "/// the number of bits to be extracted.\n" "/// \\returns An unsigned 64-bit integer whose least significant bits contain the\n" "/// extracted bits.\n" "/// \\see _bextr_u64\n" "static __inline__ unsigned long long __DEFAULT_FN_ATTRS\n" "__bextr_u64(unsigned long long __X, unsigned long long __Y)\n" "{\n" " return __builtin_ia32_bextr_u64(__X, __Y);\n" "}\n" "\n" "/* Intel-specified, single-leading-underscore version of BEXTR */\n" "/// Extracts the specified bits from the first operand and returns them\n" "/// in the least significant bits of the result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the BEXTR instruction.\n" "///\n" "/// \\param __X\n" "/// An unsigned 64-bit integer whose bits are to be extracted.\n" "/// \\param __Y\n" "/// An unsigned integer used to specify the index of the least significant\n" "/// bit for the bits to be extracted. Bits [7:0] specify the index.\n" "/// \\param __Z\n" "/// An unsigned integer used to specify the number of bits to be extracted.\n" "/// Bits [7:0] specify the number of bits.\n" "/// \\returns An unsigned 64-bit integer whose least significant bits contain the\n" "/// extracted bits.\n" "/// \\see __bextr_u64\n" "static __inline__ unsigned long long __DEFAULT_FN_ATTRS\n" "_bextr_u64(unsigned long long __X, unsigned int __Y, unsigned int __Z)\n" "{\n" " return __builtin_ia32_bextr_u64 (__X, ((__Y & 0xff) | ((__Z & 0xff) << 8)));\n" "}\n" "\n" "/* Intel-specified, single-leading-underscore version of BEXTR2 */\n" "/// Extracts the specified bits from the first operand and returns them\n" "/// in the least significant bits of the result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the BEXTR instruction.\n" "///\n" "/// \\param __X\n" "/// An unsigned 64-bit integer whose bits are to be extracted.\n" "/// \\param __Y\n" "/// An unsigned 64-bit integer used to specify which bits are extracted. 
Bits\n" "/// [7:0] specify the index of the least significant bit. Bits [15:8] specify\n" "/// the number of bits to be extracted.\n" "/// \\returns An unsigned 64-bit integer whose least significant bits contain the\n" "/// extracted bits.\n" "/// \\see __bextr_u64\n" "static __inline__ unsigned long long __DEFAULT_FN_ATTRS\n" "_bextr2_u64(unsigned long long __X, unsigned long long __Y) {\n" " return __builtin_ia32_bextr_u64(__X, __Y);\n" "}\n" "\n" "/// Clears all bits in the source except for the least significant bit\n" "/// containing a value of 1 and returns the result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the BLSI instruction.\n" "///\n" "/// \\param __X\n" "/// An unsigned 64-bit integer whose bits are to be cleared.\n" "/// \\returns An unsigned 64-bit integer containing the result of clearing the\n" "/// bits from the source operand.\n" "static __inline__ unsigned long long __DEFAULT_FN_ATTRS\n" "__blsi_u64(unsigned long long __X)\n" "{\n" " return __X & -__X;\n" "}\n" "\n" "/// Creates a mask whose bits are set to 1, using bit 0 up to and\n" "/// including the least significant bit that is set to 1 in the source\n" "/// operand and returns the result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the BLSMSK instruction.\n" "///\n" "/// \\param __X\n" "/// An unsigned 64-bit integer used to create the mask.\n" "/// \\returns An unsigned 64-bit integer containing the newly created mask.\n" "static __inline__ unsigned long long __DEFAULT_FN_ATTRS\n" "__blsmsk_u64(unsigned long long __X)\n" "{\n" " return __X ^ (__X - 1);\n" "}\n" "\n" "/// Clears the least significant bit that is set to 1 in the source\n" "/// operand and returns the result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the BLSR instruction.\n" "///\n" "/// \\param __X\n" "/// An unsigned 64-bit integer containing the operand to be cleared.\n" "/// \\returns An unsigned 64-bit integer 
containing the result of clearing the\n" "/// source operand.\n" "static __inline__ unsigned long long __DEFAULT_FN_ATTRS\n" "__blsr_u64(unsigned long long __X)\n" "{\n" " return __X & (__X - 1);\n" "}\n" "\n" "#endif /* __x86_64__ */\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "\n" "#endif /* !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) \\\n" " || defined(__BMI__) */\n" "\n" "#endif /* __BMIINTRIN_H */\n" "" } , { "/builtins/builtins.h" , "/*===---- builtins.h - Standard header for extra builtins -----------------===*\\\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" "\\*===----------------------------------------------------------------------===*/\n" "\n" "/// Some legacy compilers have builtin definitions in a file named builtins.h.\n" "/// This header file has been added to allow compatibility with code that was\n" "/// written for those compilers. Code may have an include line for this file\n" "/// and to avoid an error an empty file with this name is provided.\n" "#ifndef __BUILTINS_H\n" "#define __BUILTINS_H\n" "\n" "#endif /* __BUILTINS_H */\n" "" } , { "/builtins/cet.h" , "/*===------ cet.h -Control-flow Enforcement Technology feature ------------===\n" " * Add x86 feature with IBT and/or SHSTK bits to ELF program property if they\n" " * are enabled. Otherwise, contents in this header file are unused. 
This file\n" " * is mainly design for assembly source code which want to enable CET.\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "#ifndef __CET_H\n" "#define __CET_H\n" "\n" "#ifdef __ASSEMBLER__\n" "\n" "#ifndef __CET__\n" "# define _CET_ENDBR\n" "#endif\n" "\n" "#ifdef __CET__\n" "\n" "# ifdef __LP64__\n" "# if __CET__ & 0x1\n" "# define _CET_ENDBR endbr64\n" "# else\n" "# define _CET_ENDBR\n" "# endif\n" "# else\n" "# if __CET__ & 0x1\n" "# define _CET_ENDBR endbr32\n" "# else\n" "# define _CET_ENDBR\n" "# endif\n" "# endif\n" "\n" "\n" "# ifdef __LP64__\n" "# define __PROPERTY_ALIGN 3\n" "# else\n" "# define __PROPERTY_ALIGN 2\n" "# endif\n" "\n" " .pushsection \".note.gnu.property\", \"a\"\n" " .p2align __PROPERTY_ALIGN\n" " .long 1f - 0f /* name length. */\n" " .long 4f - 1f /* data length. */\n" " /* NT_GNU_PROPERTY_TYPE_0. */\n" " .long 5 /* note type. */\n" "0:\n" " .asciz \"GNU\" /* vendor name. */\n" "1:\n" " .p2align __PROPERTY_ALIGN\n" " /* GNU_PROPERTY_X86_FEATURE_1_AND. */\n" " .long 0xc0000002 /* pr_type. */\n" " .long 3f - 2f /* pr_datasz. */\n" "2:\n" " /* GNU_PROPERTY_X86_FEATURE_1_XXX. 
*/\n" " .long __CET__\n" "3:\n" " .p2align __PROPERTY_ALIGN\n" "4:\n" " .popsection\n" "#endif\n" "#endif\n" "#endif\n" "" } , { "/builtins/cetintrin.h" , "/*===---- cetintrin.h - CET intrinsic --------------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __CETINTRIN_H\n" "#define __CETINTRIN_H\n" "\n" "/* Define the default attributes for the functions in this file. */\n" "#define __DEFAULT_FN_ATTRS \\\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"shstk\")))\n" "\n" "static __inline__ void __DEFAULT_FN_ATTRS _incsspd(int __a) {\n" " __builtin_ia32_incsspd((unsigned int)__a);\n" "}\n" "\n" "#ifdef __x86_64__\n" "static __inline__ void __DEFAULT_FN_ATTRS _incsspq(unsigned long long __a) {\n" " __builtin_ia32_incsspq(__a);\n" "}\n" "#endif /* __x86_64__ */\n" "\n" "#ifdef __x86_64__\n" "static __inline__ void __DEFAULT_FN_ATTRS _inc_ssp(unsigned int __a) {\n" " __builtin_ia32_incsspq(__a);\n" "}\n" "#else /* __x86_64__ */\n" "static __inline__ void __DEFAULT_FN_ATTRS _inc_ssp(unsigned int __a) {\n" " __builtin_ia32_incsspd(__a);\n" "}\n" "#endif /* __x86_64__ */\n" "\n" "static __inline__ unsigned int __DEFAULT_FN_ATTRS _rdsspd(unsigned int __a) {\n" " return __builtin_ia32_rdsspd(__a);\n" "}\n" "\n" "static __inline__ unsigned int __DEFAULT_FN_ATTRS _rdsspd_i32(void) {\n" "#pragma clang diagnostic push\n" "#pragma clang diagnostic ignored \"-Wuninitialized\"\n" " unsigned int t;\n" " return __builtin_ia32_rdsspd(t);\n" "#pragma clang diagnostic pop\n" "}\n" "\n" "#ifdef __x86_64__\n" "static __inline__ unsigned long long __DEFAULT_FN_ATTRS 
_rdsspq(unsigned long long __a) {\n" " return __builtin_ia32_rdsspq(__a);\n" "}\n" "\n" "static __inline__ unsigned long long __DEFAULT_FN_ATTRS _rdsspq_i64(void) {\n" "#pragma clang diagnostic push\n" "#pragma clang diagnostic ignored \"-Wuninitialized\"\n" " unsigned long long t;\n" " return __builtin_ia32_rdsspq(t);\n" "#pragma clang diagnostic pop\n" "}\n" "#endif /* __x86_64__ */\n" "\n" "#ifdef __x86_64__\n" "static __inline__ unsigned long long __DEFAULT_FN_ATTRS _get_ssp(void) {\n" " return __builtin_ia32_rdsspq(0);\n" "}\n" "#else /* __x86_64__ */\n" "static __inline__ unsigned int __DEFAULT_FN_ATTRS _get_ssp(void) {\n" " return __builtin_ia32_rdsspd(0);\n" "}\n" "#endif /* __x86_64__ */\n" "\n" "static __inline__ void __DEFAULT_FN_ATTRS _saveprevssp(void) {\n" " __builtin_ia32_saveprevssp();\n" "}\n" "\n" "static __inline__ void __DEFAULT_FN_ATTRS _rstorssp(void * __p) {\n" " __builtin_ia32_rstorssp(__p);\n" "}\n" "\n" "static __inline__ void __DEFAULT_FN_ATTRS _wrssd(unsigned int __a, void * __p) {\n" " __builtin_ia32_wrssd(__a, __p);\n" "}\n" "\n" "#ifdef __x86_64__\n" "static __inline__ void __DEFAULT_FN_ATTRS _wrssq(unsigned long long __a, void * __p) {\n" " __builtin_ia32_wrssq(__a, __p);\n" "}\n" "#endif /* __x86_64__ */\n" "\n" "static __inline__ void __DEFAULT_FN_ATTRS _wrussd(unsigned int __a, void * __p) {\n" " __builtin_ia32_wrussd(__a, __p);\n" "}\n" "\n" "#ifdef __x86_64__\n" "static __inline__ void __DEFAULT_FN_ATTRS _wrussq(unsigned long long __a, void * __p) {\n" " __builtin_ia32_wrussq(__a, __p);\n" "}\n" "#endif /* __x86_64__ */\n" "\n" "static __inline__ void __DEFAULT_FN_ATTRS _setssbsy(void) {\n" " __builtin_ia32_setssbsy();\n" "}\n" "\n" "static __inline__ void __DEFAULT_FN_ATTRS _clrssbsy(void * __p) {\n" " __builtin_ia32_clrssbsy(__p);\n" "}\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "\n" "#endif /* __CETINTRIN_H */\n" "" } , { "/builtins/cldemoteintrin.h" , "/*===---- cldemoteintrin.h - CLDEMOTE intrinsic 
----------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __CLDEMOTEINTRIN_H\n" "#define __CLDEMOTEINTRIN_H\n" "\n" "/* Define the default attributes for the functions in this file. */\n" "#define __DEFAULT_FN_ATTRS \\\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"cldemote\")))\n" "\n" "/// Hint to hardware that the cache line that contains \\p __P should be demoted\n" "/// from the cache closest to the processor core to a level more distant from\n" "/// the processor core.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the CLDEMOTE instruction.\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_cldemote(const void * __P) {\n" " __builtin_ia32_cldemote(__P);\n" "}\n" "\n" "#define _mm_cldemote(p) _cldemote(p)\n" "#undef __DEFAULT_FN_ATTRS\n" "\n" "#endif\n" "" } , { "/builtins/clflushoptintrin.h" , "/*===---- clflushoptintrin.h - CLFLUSHOPT intrinsic ------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __CLFLUSHOPTINTRIN_H\n" "#define __CLFLUSHOPTINTRIN_H\n" "\n" "/* Define the default attributes for the functions in this file. 
*/\n" "#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__(\"clflushopt\")))\n" "\n" "/// Invalidates all levels of the cache hierarchy and flushes modified data to\n" "/// memory for the cache line specified by the address \\a __m.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c CLFLUSHOPT instruction.\n" "///\n" "/// \\param __m\n" "/// An address within the cache line to flush and invalidate.\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_mm_clflushopt(void const * __m) {\n" " __builtin_ia32_clflushopt(__m);\n" "}\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "\n" "#endif\n" "" } , { "/builtins/clwbintrin.h" , "/*===---- clwbintrin.h - CLWB intrinsic ------------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __CLWBINTRIN_H\n" "#define __CLWBINTRIN_H\n" "\n" "/* Define the default attributes for the functions in this file. 
*/\n" "#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__(\"clwb\")))\n" "\n" "/// Writes back to memory the cache line (if modified) that contains the\n" "/// linear address specified in \\a __p from any level of the cache hierarchy in\n" "/// the cache coherence domain\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the CLWB instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to the memory location used to identify the cache line to be\n" "/// written back.\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_mm_clwb(void const *__p) {\n" " __builtin_ia32_clwb(__p);\n" "}\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "\n" "#endif\n" "" } , { "/builtins/clzerointrin.h" , "/*===----------------------- clzerointrin.h - CLZERO ----------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "#ifndef __X86INTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __CLZEROINTRIN_H\n" "#define __CLZEROINTRIN_H\n" "\n" "/* Define the default attributes for the functions in this file. */\n" "#define __DEFAULT_FN_ATTRS \\\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"clzero\")))\n" "\n" "/// Zeroes out the cache line for the address \\a __line. This uses a\n" "/// non-temporal store. 
Calling \\c _mm_sfence() afterward might be needed\n" "/// to enforce ordering.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c CLZERO instruction.\n" "///\n" "/// \\param __line\n" "/// An address within the cache line to zero out.\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_mm_clzero (void * __line)\n" "{\n" " __builtin_ia32_clzero ((void *)__line);\n" "}\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "\n" "#endif /* __CLZEROINTRIN_H */\n" "" } , { "/builtins/cmpccxaddintrin.h" , "/*===--------------- cmpccxaddintrin.h - CMPCCXADD intrinsics--------------===\n" " *\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "#ifndef __X86GPRINTRIN_H\n" "#error \\\n" " \"Never use directly; include instead.\"\n" "#endif // __X86GPRINTRIN_H\n" "\n" "#ifndef __CMPCCXADDINTRIN_H\n" "#define __CMPCCXADDINTRIN_H\n" "#ifdef __x86_64__\n" "\n" "typedef enum {\n" " _CMPCCX_O, /* Overflow. */\n" " _CMPCCX_NO, /* No overflow. */\n" " _CMPCCX_B, /* Below. */\n" " _CMPCCX_NB, /* Not below. */\n" " _CMPCCX_Z, /* Zero. */\n" " _CMPCCX_NZ, /* Not zero. */\n" " _CMPCCX_BE, /* Below or equal. */\n" " _CMPCCX_NBE, /* Neither below nor equal. */\n" " _CMPCCX_S, /* Sign. */\n" " _CMPCCX_NS, /* No sign. */\n" " _CMPCCX_P, /* Parity. */\n" " _CMPCCX_NP, /* No parity. */\n" " _CMPCCX_L, /* Less. */\n" " _CMPCCX_NL, /* Not less. */\n" " _CMPCCX_LE, /* Less or equal. */\n" " _CMPCCX_NLE, /* Neither less nor equal. */\n" "} _CMPCCX_ENUM;\n" "\n" "/// Compares the value from the memory __A with the value of __B. If the\n" "/// specified condition __D is met, then add the third operand __C to the\n" "/// __A and write it into __A, else the value of __A is unchanged. 
The return\n" "/// value is the original value of __A.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c CMPCCXADD instructions.\n" "///\n" "/// \\param __A\n" "/// __A pointer specifying the memory address.\n" "///\n" "/// \\param __B\n" "/// A integer operand.\n" "///\n" "/// \\param __C\n" "/// A integer operand.\n" "///\n" "/// \\param __D\n" "/// The specified condition.\n" "///\n" "/// \\returns a integer which is the original value of first operand.\n" "\n" "#define _cmpccxadd_epi32(__A, __B, __C, __D) \\\n" " ((int)(__builtin_ia32_cmpccxadd32((void *)(__A), (int)(__B), (int)(__C), \\\n" " (int)(__D))))\n" "\n" "#define _cmpccxadd_epi64(__A, __B, __C, __D) \\\n" " ((long long)(__builtin_ia32_cmpccxadd64((void *)(__A), (long long)(__B), \\\n" " (long long)(__C), (int)(__D))))\n" "\n" "#endif // __x86_64__\n" "#endif // __CMPCCXADDINTRIN_H\n" "" } , { "/builtins/cpuid.h" , "/*===---- cpuid.h - X86 cpu model detection --------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __CPUID_H\n" "#define __CPUID_H\n" "\n" "#if !(__x86_64__ || __i386__)\n" "#error this header is for x86 only\n" "#endif\n" "\n" "/* Responses identification request with %eax 0 */\n" "/* AMD: \"AuthenticAMD\" */\n" "#define signature_AMD_ebx 0x68747541\n" "#define signature_AMD_edx 0x69746e65\n" "#define signature_AMD_ecx 0x444d4163\n" "/* CENTAUR: \"CentaurHauls\" */\n" "#define signature_CENTAUR_ebx 0x746e6543\n" "#define signature_CENTAUR_edx 0x48727561\n" "#define signature_CENTAUR_ecx 0x736c7561\n" "/* CYRIX: \"CyrixInstead\" */\n" "#define signature_CYRIX_ebx 0x69727943\n" "#define signature_CYRIX_edx 0x736e4978\n" "#define 
signature_CYRIX_ecx 0x64616574\n" "/* HYGON: \"HygonGenuine\" */\n" "#define signature_HYGON_ebx 0x6f677948\n" "#define signature_HYGON_edx 0x6e65476e\n" "#define signature_HYGON_ecx 0x656e6975\n" "/* INTEL: \"GenuineIntel\" */\n" "#define signature_INTEL_ebx 0x756e6547\n" "#define signature_INTEL_edx 0x49656e69\n" "#define signature_INTEL_ecx 0x6c65746e\n" "/* TM1: \"TransmetaCPU\" */\n" "#define signature_TM1_ebx 0x6e617254\n" "#define signature_TM1_edx 0x74656d73\n" "#define signature_TM1_ecx 0x55504361\n" "/* TM2: \"GenuineTMx86\" */\n" "#define signature_TM2_ebx 0x756e6547\n" "#define signature_TM2_edx 0x54656e69\n" "#define signature_TM2_ecx 0x3638784d\n" "/* NSC: \"Geode by NSC\" */\n" "#define signature_NSC_ebx 0x646f6547\n" "#define signature_NSC_edx 0x79622065\n" "#define signature_NSC_ecx 0x43534e20\n" "/* NEXGEN: \"NexGenDriven\" */\n" "#define signature_NEXGEN_ebx 0x4778654e\n" "#define signature_NEXGEN_edx 0x72446e65\n" "#define signature_NEXGEN_ecx 0x6e657669\n" "/* RISE: \"RiseRiseRise\" */\n" "#define signature_RISE_ebx 0x65736952\n" "#define signature_RISE_edx 0x65736952\n" "#define signature_RISE_ecx 0x65736952\n" "/* SIS: \"SiS SiS SiS \" */\n" "#define signature_SIS_ebx 0x20536953\n" "#define signature_SIS_edx 0x20536953\n" "#define signature_SIS_ecx 0x20536953\n" "/* UMC: \"UMC UMC UMC \" */\n" "#define signature_UMC_ebx 0x20434d55\n" "#define signature_UMC_edx 0x20434d55\n" "#define signature_UMC_ecx 0x20434d55\n" "/* VIA: \"VIA VIA VIA \" */\n" "#define signature_VIA_ebx 0x20414956\n" "#define signature_VIA_edx 0x20414956\n" "#define signature_VIA_ecx 0x20414956\n" "/* VORTEX: \"Vortex86 SoC\" */\n" "#define signature_VORTEX_ebx 0x74726f56\n" "#define signature_VORTEX_edx 0x36387865\n" "#define signature_VORTEX_ecx 0x436f5320\n" "\n" "/* Features in %ecx for leaf 1 */\n" "#define bit_SSE3 0x00000001\n" "#define bit_PCLMULQDQ 0x00000002\n" "#define bit_PCLMUL bit_PCLMULQDQ /* for gcc compat */\n" "#define bit_DTES64 0x00000004\n" "#define 
bit_MONITOR 0x00000008\n" "#define bit_DSCPL 0x00000010\n" "#define bit_VMX 0x00000020\n" "#define bit_SMX 0x00000040\n" "#define bit_EIST 0x00000080\n" "#define bit_TM2 0x00000100\n" "#define bit_SSSE3 0x00000200\n" "#define bit_CNXTID 0x00000400\n" "#define bit_FMA 0x00001000\n" "#define bit_CMPXCHG16B 0x00002000\n" "#define bit_xTPR 0x00004000\n" "#define bit_PDCM 0x00008000\n" "#define bit_PCID 0x00020000\n" "#define bit_DCA 0x00040000\n" "#define bit_SSE41 0x00080000\n" "#define bit_SSE4_1 bit_SSE41 /* for gcc compat */\n" "#define bit_SSE42 0x00100000\n" "#define bit_SSE4_2 bit_SSE42 /* for gcc compat */\n" "#define bit_x2APIC 0x00200000\n" "#define bit_MOVBE 0x00400000\n" "#define bit_POPCNT 0x00800000\n" "#define bit_TSCDeadline 0x01000000\n" "#define bit_AESNI 0x02000000\n" "#define bit_AES bit_AESNI /* for gcc compat */\n" "#define bit_XSAVE 0x04000000\n" "#define bit_OSXSAVE 0x08000000\n" "#define bit_AVX 0x10000000\n" "#define bit_F16C 0x20000000\n" "#define bit_RDRND 0x40000000\n" "\n" "/* Features in %edx for leaf 1 */\n" "#define bit_FPU 0x00000001\n" "#define bit_VME 0x00000002\n" "#define bit_DE 0x00000004\n" "#define bit_PSE 0x00000008\n" "#define bit_TSC 0x00000010\n" "#define bit_MSR 0x00000020\n" "#define bit_PAE 0x00000040\n" "#define bit_MCE 0x00000080\n" "#define bit_CX8 0x00000100\n" "#define bit_CMPXCHG8B bit_CX8 /* for gcc compat */\n" "#define bit_APIC 0x00000200\n" "#define bit_SEP 0x00000800\n" "#define bit_MTRR 0x00001000\n" "#define bit_PGE 0x00002000\n" "#define bit_MCA 0x00004000\n" "#define bit_CMOV 0x00008000\n" "#define bit_PAT 0x00010000\n" "#define bit_PSE36 0x00020000\n" "#define bit_PSN 0x00040000\n" "#define bit_CLFSH 0x00080000\n" "#define bit_DS 0x00200000\n" "#define bit_ACPI 0x00400000\n" "#define bit_MMX 0x00800000\n" "#define bit_FXSR 0x01000000\n" "#define bit_FXSAVE bit_FXSR /* for gcc compat */\n" "#define bit_SSE 0x02000000\n" "#define bit_SSE2 0x04000000\n" "#define bit_SS 0x08000000\n" "#define bit_HTT 
0x10000000\n" "#define bit_TM 0x20000000\n" "#define bit_PBE 0x80000000\n" "\n" "/* Features in %ebx for leaf 7 sub-leaf 0 */\n" "#define bit_FSGSBASE 0x00000001\n" "#define bit_SGX 0x00000004\n" "#define bit_BMI 0x00000008\n" "#define bit_HLE 0x00000010\n" "#define bit_AVX2 0x00000020\n" "#define bit_SMEP 0x00000080\n" "#define bit_BMI2 0x00000100\n" "#define bit_ENH_MOVSB 0x00000200\n" "#define bit_INVPCID 0x00000400\n" "#define bit_RTM 0x00000800\n" "#define bit_MPX 0x00004000\n" "#define bit_AVX512F 0x00010000\n" "#define bit_AVX512DQ 0x00020000\n" "#define bit_RDSEED 0x00040000\n" "#define bit_ADX 0x00080000\n" "#define bit_AVX512IFMA 0x00200000\n" "#define bit_CLFLUSHOPT 0x00800000\n" "#define bit_CLWB 0x01000000\n" "#define bit_AVX512PF 0x04000000\n" "#define bit_AVX512ER 0x08000000\n" "#define bit_AVX512CD 0x10000000\n" "#define bit_SHA 0x20000000\n" "#define bit_AVX512BW 0x40000000\n" "#define bit_AVX512VL 0x80000000\n" "\n" "/* Features in %ecx for leaf 7 sub-leaf 0 */\n" "#define bit_PREFTCHWT1 0x00000001\n" "#define bit_AVX512VBMI 0x00000002\n" "#define bit_PKU 0x00000004\n" "#define bit_OSPKE 0x00000010\n" "#define bit_WAITPKG 0x00000020\n" "#define bit_AVX512VBMI2 0x00000040\n" "#define bit_SHSTK 0x00000080\n" "#define bit_GFNI 0x00000100\n" "#define bit_VAES 0x00000200\n" "#define bit_VPCLMULQDQ 0x00000400\n" "#define bit_AVX512VNNI 0x00000800\n" "#define bit_AVX512BITALG 0x00001000\n" "#define bit_AVX512VPOPCNTDQ 0x00004000\n" "#define bit_RDPID 0x00400000\n" "#define bit_CLDEMOTE 0x02000000\n" "#define bit_MOVDIRI 0x08000000\n" "#define bit_MOVDIR64B 0x10000000\n" "#define bit_ENQCMD 0x20000000\n" "\n" "/* Features in %edx for leaf 7 sub-leaf 0 */\n" "#define bit_AVX5124VNNIW 0x00000004\n" "#define bit_AVX5124FMAPS 0x00000008\n" "#define bit_UINTR 0x00000020\n" "#define bit_SERIALIZE 0x00004000\n" "#define bit_TSXLDTRK 0x00010000\n" "#define bit_PCONFIG 0x00040000\n" "#define bit_IBT 0x00100000\n" "#define bit_AMXBF16 0x00400000\n" "#define 
bit_AVX512FP16 0x00800000\n" "#define bit_AMXTILE 0x01000000\n" "#define bit_AMXINT8 0x02000000\n" "\n" "/* Features in %eax for leaf 7 sub-leaf 1 */\n" "#define bit_RAOINT 0x00000008\n" "#define bit_AVXVNNI 0x00000010\n" "#define bit_AVX512BF16 0x00000020\n" "#define bit_CMPCCXADD 0x00000080\n" "#define bit_AMXFP16 0x00200000\n" "#define bit_HRESET 0x00400000\n" "#define bit_AVXIFMA 0x00800000\n" "\n" "/* Features in %edx for leaf 7 sub-leaf 1 */\n" "#define bit_AVXVNNIINT8 0x00000010\n" "#define bit_AVXNECONVERT 0x00000020\n" "#define bit_PREFETCHI 0x00004000\n" "\n" "/* Features in %eax for leaf 13 sub-leaf 1 */\n" "#define bit_XSAVEOPT 0x00000001\n" "#define bit_XSAVEC 0x00000002\n" "#define bit_XSAVES 0x00000008\n" "\n" "/* Features in %eax for leaf 0x14 sub-leaf 0 */\n" "#define bit_PTWRITE 0x00000010\n" "\n" "/* Features in %ecx for leaf 0x80000001 */\n" "#define bit_LAHF_LM 0x00000001\n" "#define bit_ABM 0x00000020\n" "#define bit_LZCNT bit_ABM /* for gcc compat */\n" "#define bit_SSE4a 0x00000040\n" "#define bit_PRFCHW 0x00000100\n" "#define bit_XOP 0x00000800\n" "#define bit_LWP 0x00008000\n" "#define bit_FMA4 0x00010000\n" "#define bit_TBM 0x00200000\n" "#define bit_MWAITX 0x20000000\n" "\n" "/* Features in %edx for leaf 0x80000001 */\n" "#define bit_MMXEXT 0x00400000\n" "#define bit_LM 0x20000000\n" "#define bit_3DNOWP 0x40000000\n" "#define bit_3DNOW 0x80000000\n" "\n" "/* Features in %ebx for leaf 0x80000008 */\n" "#define bit_CLZERO 0x00000001\n" "#define bit_RDPRU 0x00000010\n" "#define bit_WBNOINVD 0x00000200\n" "\n" "\n" "#if __i386__\n" "#define __cpuid(__leaf, __eax, __ebx, __ecx, __edx) \\\n" " __asm(\"cpuid\" : \"=a\"(__eax), \"=b\" (__ebx), \"=c\"(__ecx), \"=d\"(__edx) \\\n" " : \"0\"(__leaf))\n" "\n" "#define __cpuid_count(__leaf, __count, __eax, __ebx, __ecx, __edx) \\\n" " __asm(\"cpuid\" : \"=a\"(__eax), \"=b\" (__ebx), \"=c\"(__ecx), \"=d\"(__edx) \\\n" " : \"0\"(__leaf), \"2\"(__count))\n" "#else\n" "/* x86-64 uses %rbx as the base 
register, so preserve it. */\n" "#define __cpuid(__leaf, __eax, __ebx, __ecx, __edx) \\\n" " __asm(\" xchgq %%rbx,%q1\\n\" \\\n" " \" cpuid\\n\" \\\n" " \" xchgq %%rbx,%q1\" \\\n" " : \"=a\"(__eax), \"=r\" (__ebx), \"=c\"(__ecx), \"=d\"(__edx) \\\n" " : \"0\"(__leaf))\n" "\n" "#define __cpuid_count(__leaf, __count, __eax, __ebx, __ecx, __edx) \\\n" " __asm(\" xchgq %%rbx,%q1\\n\" \\\n" " \" cpuid\\n\" \\\n" " \" xchgq %%rbx,%q1\" \\\n" " : \"=a\"(__eax), \"=r\" (__ebx), \"=c\"(__ecx), \"=d\"(__edx) \\\n" " : \"0\"(__leaf), \"2\"(__count))\n" "#endif\n" "\n" "static __inline unsigned int __get_cpuid_max (unsigned int __leaf,\n" " unsigned int *__sig)\n" "{\n" " unsigned int __eax, __ebx, __ecx, __edx;\n" "#if __i386__\n" " int __cpuid_supported;\n" "\n" " __asm(\" pushfl\\n\"\n" " \" popl %%eax\\n\"\n" " \" movl %%eax,%%ecx\\n\"\n" " \" xorl $0x00200000,%%eax\\n\"\n" " \" pushl %%eax\\n\"\n" " \" popfl\\n\"\n" " \" pushfl\\n\"\n" " \" popl %%eax\\n\"\n" " \" movl $0,%0\\n\"\n" " \" cmpl %%eax,%%ecx\\n\"\n" " \" je 1f\\n\"\n" " \" movl $1,%0\\n\"\n" " \"1:\"\n" " : \"=r\" (__cpuid_supported) : : \"eax\", \"ecx\");\n" " if (!__cpuid_supported)\n" " return 0;\n" "#endif\n" "\n" " __cpuid(__leaf, __eax, __ebx, __ecx, __edx);\n" " if (__sig)\n" " *__sig = __ebx;\n" " return __eax;\n" "}\n" "\n" "static __inline int __get_cpuid (unsigned int __leaf, unsigned int *__eax,\n" " unsigned int *__ebx, unsigned int *__ecx,\n" " unsigned int *__edx)\n" "{\n" " unsigned int __max_leaf = __get_cpuid_max(__leaf & 0x80000000, 0);\n" "\n" " if (__max_leaf == 0 || __max_leaf < __leaf)\n" " return 0;\n" "\n" " __cpuid(__leaf, *__eax, *__ebx, *__ecx, *__edx);\n" " return 1;\n" "}\n" "\n" "static __inline int __get_cpuid_count (unsigned int __leaf,\n" " unsigned int __subleaf,\n" " unsigned int *__eax, unsigned int *__ebx,\n" " unsigned int *__ecx, unsigned int *__edx)\n" "{\n" " unsigned int __max_leaf = __get_cpuid_max(__leaf & 0x80000000, 0);\n" "\n" " if (__max_leaf == 0 || __max_leaf 
< __leaf)\n" " return 0;\n" "\n" " __cpuid_count(__leaf, __subleaf, *__eax, *__ebx, *__ecx, *__edx);\n" " return 1;\n" "}\n" "\n" "#endif /* __CPUID_H */\n" "" } , { "/builtins/crc32intrin.h" , "/*===---- crc32intrin.h - SSE4.2 Accumulate CRC32 intrinsics ---------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __CRC32INTRIN_H\n" "#define __CRC32INTRIN_H\n" "\n" "#define __DEFAULT_FN_ATTRS \\\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"crc32\")))\n" "\n" "/// Adds the unsigned integer operand to the CRC-32C checksum of the\n" "/// unsigned char operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the CRC32B instruction.\n" "///\n" "/// \\param __C\n" "/// An unsigned integer operand to add to the CRC-32C checksum of operand\n" "/// \\a __D.\n" "/// \\param __D\n" "/// An unsigned 8-bit integer operand used to compute the CRC-32C checksum.\n" "/// \\returns The result of adding operand \\a __C to the CRC-32C checksum of\n" "/// operand \\a __D.\n" "static __inline__ unsigned int __DEFAULT_FN_ATTRS\n" "_mm_crc32_u8(unsigned int __C, unsigned char __D)\n" "{\n" " return __builtin_ia32_crc32qi(__C, __D);\n" "}\n" "\n" "/// Adds the unsigned integer operand to the CRC-32C checksum of the\n" "/// unsigned short operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the CRC32W instruction.\n" "///\n" "/// \\param __C\n" "/// An unsigned integer operand to add to the CRC-32C checksum of operand\n" "/// \\a __D.\n" "/// \\param __D\n" "/// An unsigned 16-bit integer operand used to compute the CRC-32C checksum.\n" "/// \\returns The result of adding operand \\a __C to the CRC-32C checksum of\n" "/// 
operand \\a __D.\n" "static __inline__ unsigned int __DEFAULT_FN_ATTRS\n" "_mm_crc32_u16(unsigned int __C, unsigned short __D)\n" "{\n" " return __builtin_ia32_crc32hi(__C, __D);\n" "}\n" "\n" "/// Adds the first unsigned integer operand to the CRC-32C checksum of\n" "/// the second unsigned integer operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the CRC32L instruction.\n" "///\n" "/// \\param __C\n" "/// An unsigned integer operand to add to the CRC-32C checksum of operand\n" "/// \\a __D.\n" "/// \\param __D\n" "/// An unsigned 32-bit integer operand used to compute the CRC-32C checksum.\n" "/// \\returns The result of adding operand \\a __C to the CRC-32C checksum of\n" "/// operand \\a __D.\n" "static __inline__ unsigned int __DEFAULT_FN_ATTRS\n" "_mm_crc32_u32(unsigned int __C, unsigned int __D)\n" "{\n" " return __builtin_ia32_crc32si(__C, __D);\n" "}\n" "\n" "#ifdef __x86_64__\n" "/// Adds the unsigned integer operand to the CRC-32C checksum of the\n" "/// unsigned 64-bit integer operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the CRC32Q instruction.\n" "///\n" "/// \\param __C\n" "/// An unsigned integer operand to add to the CRC-32C checksum of operand\n" "/// \\a __D.\n" "/// \\param __D\n" "/// An unsigned 64-bit integer operand used to compute the CRC-32C checksum.\n" "/// \\returns The result of adding operand \\a __C to the CRC-32C checksum of\n" "/// operand \\a __D.\n" "static __inline__ unsigned long long __DEFAULT_FN_ATTRS\n" "_mm_crc32_u64(unsigned long long __C, unsigned long long __D)\n" "{\n" " return __builtin_ia32_crc32di(__C, __D);\n" "}\n" "#endif /* __x86_64__ */\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "\n" "#endif /* __CRC32INTRIN_H */\n" "" } , { "/builtins/emmintrin.h" , "/*===---- emmintrin.h - SSE2 intrinsics ------------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See 
https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __EMMINTRIN_H\n" "#define __EMMINTRIN_H\n" "\n" "#if !defined(__i386__) && !defined(__x86_64__)\n" "#error \"This header is only meant to be used on x86 and x64 architecture\"\n" "#endif\n" "\n" "#include \n" "\n" "typedef double __m128d __attribute__((__vector_size__(16), __aligned__(16)));\n" "typedef long long __m128i __attribute__((__vector_size__(16), __aligned__(16)));\n" "\n" "typedef double __m128d_u __attribute__((__vector_size__(16), __aligned__(1)));\n" "typedef long long __m128i_u\n" " __attribute__((__vector_size__(16), __aligned__(1)));\n" "\n" "/* Type defines. */\n" "typedef double __v2df __attribute__((__vector_size__(16)));\n" "typedef long long __v2di __attribute__((__vector_size__(16)));\n" "typedef short __v8hi __attribute__((__vector_size__(16)));\n" "typedef char __v16qi __attribute__((__vector_size__(16)));\n" "\n" "/* Unsigned types */\n" "typedef unsigned long long __v2du __attribute__((__vector_size__(16)));\n" "typedef unsigned short __v8hu __attribute__((__vector_size__(16)));\n" "typedef unsigned char __v16qu __attribute__((__vector_size__(16)));\n" "\n" "/* We need an explicitly signed variant for char. Note that this shouldn't\n" " * appear in the interface though. */\n" "typedef signed char __v16qs __attribute__((__vector_size__(16)));\n" "\n" "#ifdef __SSE2__\n" "/* Both _Float16 and __bf16 require SSE2 being enabled. 
*/\n" "typedef _Float16 __v8hf __attribute__((__vector_size__(16), __aligned__(16)));\n" "typedef _Float16 __m128h __attribute__((__vector_size__(16), __aligned__(16)));\n" "typedef _Float16 __m128h_u __attribute__((__vector_size__(16), __aligned__(1)));\n" "\n" "typedef __bf16 __v8bf __attribute__((__vector_size__(16), __aligned__(16)));\n" "typedef __bf16 __m128bh __attribute__((__vector_size__(16), __aligned__(16)));\n" "#endif\n" "\n" "/* Define the default attributes for the functions in this file. */\n" "#define __DEFAULT_FN_ATTRS \\\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"sse2\"), \\\n" " __min_vector_width__(128)))\n" "#define __DEFAULT_FN_ATTRS_MMX \\\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"mmx,sse2\"), \\\n" " __min_vector_width__(64)))\n" "\n" "/// Adds lower double-precision values in both operands and returns the\n" "/// sum in the lower 64 bits of the result. The upper 64 bits of the result\n" "/// are copied from the upper double-precision value of the first operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VADDSD / ADDSD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double] containing one of the source operands.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double] containing one of the source operands.\n" "/// \\returns A 128-bit vector of [2 x double] whose lower 64 bits contain the\n" "/// sum of the lower 64 bits of both operands. 
The upper 64 bits are copied\n" "/// from the upper 64 bits of the first source operand.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_add_sd(__m128d __a,\n" " __m128d __b) {\n" " __a[0] += __b[0];\n" " return __a;\n" "}\n" "\n" "/// Adds two 128-bit vectors of [2 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VADDPD / ADDPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double] containing one of the source operands.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double] containing one of the source operands.\n" "/// \\returns A 128-bit vector of [2 x double] containing the sums of both\n" "/// operands.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_add_pd(__m128d __a,\n" " __m128d __b) {\n" " return (__m128d)((__v2df)__a + (__v2df)__b);\n" "}\n" "\n" "/// Subtracts the lower double-precision value of the second operand\n" "/// from the lower double-precision value of the first operand and returns\n" "/// the difference in the lower 64 bits of the result. The upper 64 bits of\n" "/// the result are copied from the upper double-precision value of the first\n" "/// operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VSUBSD / SUBSD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double] containing the minuend.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double] containing the subtrahend.\n" "/// \\returns A 128-bit vector of [2 x double] whose lower 64 bits contain the\n" "/// difference of the lower 64 bits of both operands. 
The upper 64 bits are\n" "/// copied from the upper 64 bits of the first source operand.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sub_sd(__m128d __a,\n" " __m128d __b) {\n" " __a[0] -= __b[0];\n" " return __a;\n" "}\n" "\n" "/// Subtracts two 128-bit vectors of [2 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VSUBPD / SUBPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double] containing the minuend.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double] containing the subtrahend.\n" "/// \\returns A 128-bit vector of [2 x double] containing the differences between\n" "/// both operands.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sub_pd(__m128d __a,\n" " __m128d __b) {\n" " return (__m128d)((__v2df)__a - (__v2df)__b);\n" "}\n" "\n" "/// Multiplies lower double-precision values in both operands and returns\n" "/// the product in the lower 64 bits of the result. The upper 64 bits of the\n" "/// result are copied from the upper double-precision value of the first\n" "/// operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMULSD / MULSD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double] containing one of the source operands.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double] containing one of the source operands.\n" "/// \\returns A 128-bit vector of [2 x double] whose lower 64 bits contain the\n" "/// product of the lower 64 bits of both operands. 
The upper 64 bits are\n" "/// copied from the upper 64 bits of the first source operand.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mul_sd(__m128d __a,\n" " __m128d __b) {\n" " __a[0] *= __b[0];\n" " return __a;\n" "}\n" "\n" "/// Multiplies two 128-bit vectors of [2 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMULPD / MULPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double] containing one of the operands.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double] containing one of the operands.\n" "/// \\returns A 128-bit vector of [2 x double] containing the products of both\n" "/// operands.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mul_pd(__m128d __a,\n" " __m128d __b) {\n" " return (__m128d)((__v2df)__a * (__v2df)__b);\n" "}\n" "\n" "/// Divides the lower double-precision value of the first operand by the\n" "/// lower double-precision value of the second operand and returns the\n" "/// quotient in the lower 64 bits of the result. The upper 64 bits of the\n" "/// result are copied from the upper double-precision value of the first\n" "/// operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VDIVSD / DIVSD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double] containing the dividend.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double] containing divisor.\n" "/// \\returns A 128-bit vector of [2 x double] whose lower 64 bits contain the\n" "/// quotient of the lower 64 bits of both operands. 
The upper 64 bits are\n" "/// copied from the upper 64 bits of the first source operand.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_div_sd(__m128d __a,\n" " __m128d __b) {\n" " __a[0] /= __b[0];\n" " return __a;\n" "}\n" "\n" "/// Performs an element-by-element division of two 128-bit vectors of\n" "/// [2 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VDIVPD / DIVPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double] containing the dividend.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double] containing the divisor.\n" "/// \\returns A 128-bit vector of [2 x double] containing the quotients of both\n" "/// operands.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_div_pd(__m128d __a,\n" " __m128d __b) {\n" " return (__m128d)((__v2df)__a / (__v2df)__b);\n" "}\n" "\n" "/// Calculates the square root of the lower double-precision value of\n" "/// the second operand and returns it in the lower 64 bits of the result.\n" "/// The upper 64 bits of the result are copied from the upper\n" "/// double-precision value of the first operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VSQRTSD / SQRTSD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double] containing one of the operands. The\n" "/// upper 64 bits of this operand are copied to the upper 64 bits of the\n" "/// result.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double] containing one of the operands. 
The\n" "/// square root is calculated using the lower 64 bits of this operand.\n" "/// \\returns A 128-bit vector of [2 x double] whose lower 64 bits contain the\n" "/// square root of the lower 64 bits of operand \\a __b, and whose upper 64\n" "/// bits are copied from the upper 64 bits of operand \\a __a.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sqrt_sd(__m128d __a,\n" " __m128d __b) {\n" " __m128d __c = __builtin_ia32_sqrtsd((__v2df)__b);\n" " return __extension__(__m128d){__c[0], __a[1]};\n" "}\n" "\n" "/// Calculates the square root of the each of two values stored in a\n" "/// 128-bit vector of [2 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VSQRTPD / SQRTPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double].\n" "/// \\returns A 128-bit vector of [2 x double] containing the square roots of the\n" "/// values in the operand.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sqrt_pd(__m128d __a) {\n" " return __builtin_ia32_sqrtpd((__v2df)__a);\n" "}\n" "\n" "/// Compares lower 64-bit double-precision values of both operands, and\n" "/// returns the lesser of the pair of values in the lower 64-bits of the\n" "/// result. The upper 64 bits of the result are copied from the upper\n" "/// double-precision value of the first operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMINSD / MINSD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double] containing one of the operands. The\n" "/// lower 64 bits of this operand are used in the comparison.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double] containing one of the operands. The\n" "/// lower 64 bits of this operand are used in the comparison.\n" "/// \\returns A 128-bit vector of [2 x double] whose lower 64 bits contain the\n" "/// minimum value between both operands. 
The upper 64 bits are copied from\n" "/// the upper 64 bits of the first source operand.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_min_sd(__m128d __a,\n" " __m128d __b) {\n" " return __builtin_ia32_minsd((__v2df)__a, (__v2df)__b);\n" "}\n" "\n" "/// Performs element-by-element comparison of the two 128-bit vectors of\n" "/// [2 x double] and returns the vector containing the lesser of each pair of\n" "/// values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMINPD / MINPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double] containing one of the operands.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double] containing one of the operands.\n" "/// \\returns A 128-bit vector of [2 x double] containing the minimum values\n" "/// between both operands.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_min_pd(__m128d __a,\n" " __m128d __b) {\n" " return __builtin_ia32_minpd((__v2df)__a, (__v2df)__b);\n" "}\n" "\n" "/// Compares lower 64-bit double-precision values of both operands, and\n" "/// returns the greater of the pair of values in the lower 64-bits of the\n" "/// result. The upper 64 bits of the result are copied from the upper\n" "/// double-precision value of the first operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMAXSD / MAXSD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double] containing one of the operands. The\n" "/// lower 64 bits of this operand are used in the comparison.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double] containing one of the operands. The\n" "/// lower 64 bits of this operand are used in the comparison.\n" "/// \\returns A 128-bit vector of [2 x double] whose lower 64 bits contain the\n" "/// maximum value between both operands. 
The upper 64 bits are copied from\n" "/// the upper 64 bits of the first source operand.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_max_sd(__m128d __a,\n" " __m128d __b) {\n" " return __builtin_ia32_maxsd((__v2df)__a, (__v2df)__b);\n" "}\n" "\n" "/// Performs element-by-element comparison of the two 128-bit vectors of\n" "/// [2 x double] and returns the vector containing the greater of each pair\n" "/// of values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMAXPD / MAXPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double] containing one of the operands.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double] containing one of the operands.\n" "/// \\returns A 128-bit vector of [2 x double] containing the maximum values\n" "/// between both operands.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_max_pd(__m128d __a,\n" " __m128d __b) {\n" " return __builtin_ia32_maxpd((__v2df)__a, (__v2df)__b);\n" "}\n" "\n" "/// Performs a bitwise AND of two 128-bit vectors of [2 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPAND / PAND instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double] containing one of the source operands.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double] containing one of the source operands.\n" "/// \\returns A 128-bit vector of [2 x double] containing the bitwise AND of the\n" "/// values between both operands.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_and_pd(__m128d __a,\n" " __m128d __b) {\n" " return (__m128d)((__v2du)__a & (__v2du)__b);\n" "}\n" "\n" "/// Performs a bitwise AND of two 128-bit vectors of [2 x double], using\n" "/// the one's complement of the values contained in the first source operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPANDN / PANDN instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x 
double] containing the left source operand. The\n" "/// one's complement of this value is used in the bitwise AND.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double] containing the right source operand.\n" "/// \\returns A 128-bit vector of [2 x double] containing the bitwise AND of the\n" "/// values in the second operand and the one's complement of the first\n" "/// operand.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_andnot_pd(__m128d __a,\n" " __m128d __b) {\n" " return (__m128d)(~(__v2du)__a & (__v2du)__b);\n" "}\n" "\n" "/// Performs a bitwise OR of two 128-bit vectors of [2 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPOR / POR instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double] containing one of the source operands.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double] containing one of the source operands.\n" "/// \\returns A 128-bit vector of [2 x double] containing the bitwise OR of the\n" "/// values between both operands.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_or_pd(__m128d __a,\n" " __m128d __b) {\n" " return (__m128d)((__v2du)__a | (__v2du)__b);\n" "}\n" "\n" "/// Performs a bitwise XOR of two 128-bit vectors of [2 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPXOR / PXOR instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double] containing one of the source operands.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double] containing one of the source operands.\n" "/// \\returns A 128-bit vector of [2 x double] containing the bitwise XOR of the\n" "/// values between both operands.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_xor_pd(__m128d __a,\n" " __m128d __b) {\n" " return (__m128d)((__v2du)__a ^ (__v2du)__b);\n" "}\n" "\n" "/// Compares each of the corresponding double-precision values of the\n" "/// 128-bit vectors of [2 x double] for equality. 
Each comparison yields 0x0\n" "/// for false, 0xFFFFFFFFFFFFFFFF for true.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPEQPD / CMPEQPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double].\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double].\n" "/// \\returns A 128-bit vector containing the comparison results.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpeq_pd(__m128d __a,\n" " __m128d __b) {\n" " return (__m128d)__builtin_ia32_cmpeqpd((__v2df)__a, (__v2df)__b);\n" "}\n" "\n" "/// Compares each of the corresponding double-precision values of the\n" "/// 128-bit vectors of [2 x double] to determine if the values in the first\n" "/// operand are less than those in the second operand. Each comparison\n" "/// yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPLTPD / CMPLTPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double].\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double].\n" "/// \\returns A 128-bit vector containing the comparison results.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmplt_pd(__m128d __a,\n" " __m128d __b) {\n" " return (__m128d)__builtin_ia32_cmpltpd((__v2df)__a, (__v2df)__b);\n" "}\n" "\n" "/// Compares each of the corresponding double-precision values of the\n" "/// 128-bit vectors of [2 x double] to determine if the values in the first\n" "/// operand are less than or equal to those in the second operand.\n" "///\n" "/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPLEPD / CMPLEPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double].\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double].\n" "/// \\returns A 128-bit vector containing the comparison results.\n" "static __inline__ __m128d 
__DEFAULT_FN_ATTRS _mm_cmple_pd(__m128d __a,\n" " __m128d __b) {\n" " return (__m128d)__builtin_ia32_cmplepd((__v2df)__a, (__v2df)__b);\n" "}\n" "\n" "/// Compares each of the corresponding double-precision values of the\n" "/// 128-bit vectors of [2 x double] to determine if the values in the first\n" "/// operand are greater than those in the second operand.\n" "///\n" "/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPLTPD / CMPLTPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double].\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double].\n" "/// \\returns A 128-bit vector containing the comparison results.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpgt_pd(__m128d __a,\n" " __m128d __b) {\n" " return (__m128d)__builtin_ia32_cmpltpd((__v2df)__b, (__v2df)__a);\n" "}\n" "\n" "/// Compares each of the corresponding double-precision values of the\n" "/// 128-bit vectors of [2 x double] to determine if the values in the first\n" "/// operand are greater than or equal to those in the second operand.\n" "///\n" "/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPLEPD / CMPLEPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double].\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double].\n" "/// \\returns A 128-bit vector containing the comparison results.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpge_pd(__m128d __a,\n" " __m128d __b) {\n" " return (__m128d)__builtin_ia32_cmplepd((__v2df)__b, (__v2df)__a);\n" "}\n" "\n" "/// Compares each of the corresponding double-precision values of the\n" "/// 128-bit vectors of [2 x double] to determine if the values in the first\n" "/// operand are ordered with respect to those in the second operand.\n" "///\n" "/// A pair of 
double-precision values are \"ordered\" with respect to each\n" "/// other if neither value is a NaN. Each comparison yields 0x0 for false,\n" "/// 0xFFFFFFFFFFFFFFFF for true.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPORDPD / CMPORDPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double].\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double].\n" "/// \\returns A 128-bit vector containing the comparison results.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpord_pd(__m128d __a,\n" " __m128d __b) {\n" " return (__m128d)__builtin_ia32_cmpordpd((__v2df)__a, (__v2df)__b);\n" "}\n" "\n" "/// Compares each of the corresponding double-precision values of the\n" "/// 128-bit vectors of [2 x double] to determine if the values in the first\n" "/// operand are unordered with respect to those in the second operand.\n" "///\n" "/// A pair of double-precision values are \"unordered\" with respect to each\n" "/// other if one or both values are NaN. 
Each comparison yields 0x0 for\n" "/// false, 0xFFFFFFFFFFFFFFFF for true.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPUNORDPD / CMPUNORDPD \n" "/// instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double].\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double].\n" "/// \\returns A 128-bit vector containing the comparison results.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpunord_pd(__m128d __a,\n" " __m128d __b) {\n" " return (__m128d)__builtin_ia32_cmpunordpd((__v2df)__a, (__v2df)__b);\n" "}\n" "\n" "/// Compares each of the corresponding double-precision values of the\n" "/// 128-bit vectors of [2 x double] to determine if the values in the first\n" "/// operand are unequal to those in the second operand.\n" "///\n" "/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPNEQPD / CMPNEQPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double].\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double].\n" "/// \\returns A 128-bit vector containing the comparison results.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpneq_pd(__m128d __a,\n" " __m128d __b) {\n" " return (__m128d)__builtin_ia32_cmpneqpd((__v2df)__a, (__v2df)__b);\n" "}\n" "\n" "/// Compares each of the corresponding double-precision values of the\n" "/// 128-bit vectors of [2 x double] to determine if the values in the first\n" "/// operand are not less than those in the second operand.\n" "///\n" "/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPNLTPD / CMPNLTPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double].\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double].\n" "/// \\returns A 128-bit vector containing the comparison results.\n" 
"static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnlt_pd(__m128d __a,\n" " __m128d __b) {\n" " return (__m128d)__builtin_ia32_cmpnltpd((__v2df)__a, (__v2df)__b);\n" "}\n" "\n" "/// Compares each of the corresponding double-precision values of the\n" "/// 128-bit vectors of [2 x double] to determine if the values in the first\n" "/// operand are not less than or equal to those in the second operand.\n" "///\n" "/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPNLEPD / CMPNLEPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double].\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double].\n" "/// \\returns A 128-bit vector containing the comparison results.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnle_pd(__m128d __a,\n" " __m128d __b) {\n" " return (__m128d)__builtin_ia32_cmpnlepd((__v2df)__a, (__v2df)__b);\n" "}\n" "\n" "/// Compares each of the corresponding double-precision values of the\n" "/// 128-bit vectors of [2 x double] to determine if the values in the first\n" "/// operand are not greater than those in the second operand.\n" "///\n" "/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPNLTPD / CMPNLTPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double].\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double].\n" "/// \\returns A 128-bit vector containing the comparison results.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpngt_pd(__m128d __a,\n" " __m128d __b) {\n" " return (__m128d)__builtin_ia32_cmpnltpd((__v2df)__b, (__v2df)__a);\n" "}\n" "\n" "/// Compares each of the corresponding double-precision values of the\n" "/// 128-bit vectors of [2 x double] to determine if the values in the first\n" "/// operand are not greater than or equal to those in the 
second operand.\n" "///\n" "/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPNLEPD / CMPNLEPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double].\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double].\n" "/// \\returns A 128-bit vector containing the comparison results.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnge_pd(__m128d __a,\n" " __m128d __b) {\n" " return (__m128d)__builtin_ia32_cmpnlepd((__v2df)__b, (__v2df)__a);\n" "}\n" "\n" "/// Compares the lower double-precision floating-point values in each of\n" "/// the two 128-bit floating-point vectors of [2 x double] for equality.\n" "///\n" "/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPEQSD / CMPEQSD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __b.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __a.\n" "/// \\returns A 128-bit vector. The lower 64 bits contains the comparison\n" "/// results. 
The upper 64 bits are copied from the upper 64 bits of \\a __a.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpeq_sd(__m128d __a,\n" " __m128d __b) {\n" " return (__m128d)__builtin_ia32_cmpeqsd((__v2df)__a, (__v2df)__b);\n" "}\n" "\n" "/// Compares the lower double-precision floating-point values in each of\n" "/// the two 128-bit floating-point vectors of [2 x double] to determine if\n" "/// the value in the first parameter is less than the corresponding value in\n" "/// the second parameter.\n" "///\n" "/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPLTSD / CMPLTSD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __b.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __a.\n" "/// \\returns A 128-bit vector. The lower 64 bits contains the comparison\n" "/// results. The upper 64 bits are copied from the upper 64 bits of \\a __a.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmplt_sd(__m128d __a,\n" " __m128d __b) {\n" " return (__m128d)__builtin_ia32_cmpltsd((__v2df)__a, (__v2df)__b);\n" "}\n" "\n" "/// Compares the lower double-precision floating-point values in each of\n" "/// the two 128-bit floating-point vectors of [2 x double] to determine if\n" "/// the value in the first parameter is less than or equal to the\n" "/// corresponding value in the second parameter.\n" "///\n" "/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPLESD / CMPLESD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double]. 
The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __b.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __a.\n" "/// \\returns A 128-bit vector. The lower 64 bits contains the comparison\n" "/// results. The upper 64 bits are copied from the upper 64 bits of \\a __a.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmple_sd(__m128d __a,\n" " __m128d __b) {\n" " return (__m128d)__builtin_ia32_cmplesd((__v2df)__a, (__v2df)__b);\n" "}\n" "\n" "/// Compares the lower double-precision floating-point values in each of\n" "/// the two 128-bit floating-point vectors of [2 x double] to determine if\n" "/// the value in the first parameter is greater than the corresponding value\n" "/// in the second parameter.\n" "///\n" "/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPLTSD / CMPLTSD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __b.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __a.\n" "/// \\returns A 128-bit vector. The lower 64 bits contains the comparison\n" "/// results. 
The upper 64 bits are copied from the upper 64 bits of \\a __a.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpgt_sd(__m128d __a,\n" " __m128d __b) {\n" " __m128d __c = __builtin_ia32_cmpltsd((__v2df)__b, (__v2df)__a);\n" " return __extension__(__m128d){__c[0], __a[1]};\n" "}\n" "\n" "/// Compares the lower double-precision floating-point values in each of\n" "/// the two 128-bit floating-point vectors of [2 x double] to determine if\n" "/// the value in the first parameter is greater than or equal to the\n" "/// corresponding value in the second parameter.\n" "///\n" "/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPLESD / CMPLESD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __b.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __a.\n" "/// \\returns A 128-bit vector. The lower 64 bits contains the comparison\n" "/// results. The upper 64 bits are copied from the upper 64 bits of \\a __a.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpge_sd(__m128d __a,\n" " __m128d __b) {\n" " __m128d __c = __builtin_ia32_cmplesd((__v2df)__b, (__v2df)__a);\n" " return __extension__(__m128d){__c[0], __a[1]};\n" "}\n" "\n" "/// Compares the lower double-precision floating-point values in each of\n" "/// the two 128-bit floating-point vectors of [2 x double] to determine if\n" "/// the value in the first parameter is \"ordered\" with respect to the\n" "/// corresponding value in the second parameter.\n" "///\n" "/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. 
A pair\n" "/// of double-precision values are \"ordered\" with respect to each other if\n" "/// neither value is a NaN.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPORDSD / CMPORDSD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __b.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __a.\n" "/// \\returns A 128-bit vector. The lower 64 bits contains the comparison\n" "/// results. The upper 64 bits are copied from the upper 64 bits of \\a __a.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpord_sd(__m128d __a,\n" " __m128d __b) {\n" " return (__m128d)__builtin_ia32_cmpordsd((__v2df)__a, (__v2df)__b);\n" "}\n" "\n" "/// Compares the lower double-precision floating-point values in each of\n" "/// the two 128-bit floating-point vectors of [2 x double] to determine if\n" "/// the value in the first parameter is \"unordered\" with respect to the\n" "/// corresponding value in the second parameter.\n" "///\n" "/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. A pair\n" "/// of double-precision values are \"unordered\" with respect to each other if\n" "/// one or both values are NaN.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPUNORDSD / CMPUNORDSD \n" "/// instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __b.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __a.\n" "/// \\returns A 128-bit vector. The lower 64 bits contains the comparison\n" "/// results. 
The upper 64 bits are copied from the upper 64 bits of \\a __a.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpunord_sd(__m128d __a,\n" " __m128d __b) {\n" " return (__m128d)__builtin_ia32_cmpunordsd((__v2df)__a, (__v2df)__b);\n" "}\n" "\n" "/// Compares the lower double-precision floating-point values in each of\n" "/// the two 128-bit floating-point vectors of [2 x double] to determine if\n" "/// the value in the first parameter is unequal to the corresponding value in\n" "/// the second parameter.\n" "///\n" "/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPNEQSD / CMPNEQSD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __b.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __a.\n" "/// \\returns A 128-bit vector. The lower 64 bits contains the comparison\n" "/// results. The upper 64 bits are copied from the upper 64 bits of \\a __a.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpneq_sd(__m128d __a,\n" " __m128d __b) {\n" " return (__m128d)__builtin_ia32_cmpneqsd((__v2df)__a, (__v2df)__b);\n" "}\n" "\n" "/// Compares the lower double-precision floating-point values in each of\n" "/// the two 128-bit floating-point vectors of [2 x double] to determine if\n" "/// the value in the first parameter is not less than the corresponding\n" "/// value in the second parameter.\n" "///\n" "/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPNLTSD / CMPNLTSD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double]. 
The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __b.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __a.\n" "/// \\returns A 128-bit vector. The lower 64 bits contains the comparison\n" "/// results. The upper 64 bits are copied from the upper 64 bits of \\a __a.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnlt_sd(__m128d __a,\n" " __m128d __b) {\n" " return (__m128d)__builtin_ia32_cmpnltsd((__v2df)__a, (__v2df)__b);\n" "}\n" "\n" "/// Compares the lower double-precision floating-point values in each of\n" "/// the two 128-bit floating-point vectors of [2 x double] to determine if\n" "/// the value in the first parameter is not less than or equal to the\n" "/// corresponding value in the second parameter.\n" "///\n" "/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPNLESD / CMPNLESD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __b.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __a.\n" "/// \\returns A 128-bit vector. The lower 64 bits contains the comparison\n" "/// results. 
The upper 64 bits are copied from the upper 64 bits of \\a __a.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnle_sd(__m128d __a,\n" " __m128d __b) {\n" " return (__m128d)__builtin_ia32_cmpnlesd((__v2df)__a, (__v2df)__b);\n" "}\n" "\n" "/// Compares the lower double-precision floating-point values in each of\n" "/// the two 128-bit floating-point vectors of [2 x double] to determine if\n" "/// the value in the first parameter is not greater than the corresponding\n" "/// value in the second parameter.\n" "///\n" "/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPNLTSD / CMPNLTSD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __b.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __a.\n" "/// \\returns A 128-bit vector. The lower 64 bits contains the comparison\n" "/// results. The upper 64 bits are copied from the upper 64 bits of \\a __a.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpngt_sd(__m128d __a,\n" " __m128d __b) {\n" " __m128d __c = __builtin_ia32_cmpnltsd((__v2df)__b, (__v2df)__a);\n" " return __extension__(__m128d){__c[0], __a[1]};\n" "}\n" "\n" "/// Compares the lower double-precision floating-point values in each of\n" "/// the two 128-bit floating-point vectors of [2 x double] to determine if\n" "/// the value in the first parameter is not greater than or equal to the\n" "/// corresponding value in the second parameter.\n" "///\n" "/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPNLESD / CMPNLESD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double]. 
The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __b.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __a.\n" "/// \\returns A 128-bit vector. The lower 64 bits contains the comparison\n" "/// results. The upper 64 bits are copied from the upper 64 bits of \\a __a.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnge_sd(__m128d __a,\n" " __m128d __b) {\n" " __m128d __c = __builtin_ia32_cmpnlesd((__v2df)__b, (__v2df)__a);\n" " return __extension__(__m128d){__c[0], __a[1]};\n" "}\n" "\n" "/// Compares the lower double-precision floating-point values in each of\n" "/// the two 128-bit floating-point vectors of [2 x double] for equality.\n" "///\n" "/// The comparison yields 0 for false, 1 for true. If either of the two\n" "/// lower double-precision values is NaN, 0 is returned.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCOMISD / COMISD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __b.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __a.\n" "/// \\returns An integer containing the comparison results. 
If either of the two\n" "/// lower double-precision values is NaN, 0 is returned.\n" "static __inline__ int __DEFAULT_FN_ATTRS _mm_comieq_sd(__m128d __a,\n" " __m128d __b) {\n" " return __builtin_ia32_comisdeq((__v2df)__a, (__v2df)__b);\n" "}\n" "\n" "/// Compares the lower double-precision floating-point values in each of\n" "/// the two 128-bit floating-point vectors of [2 x double] to determine if\n" "/// the value in the first parameter is less than the corresponding value in\n" "/// the second parameter.\n" "///\n" "/// The comparison yields 0 for false, 1 for true. If either of the two\n" "/// lower double-precision values is NaN, 0 is returned.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCOMISD / COMISD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __b.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __a.\n" "/// \\returns An integer containing the comparison results. If either of the two\n" "/// lower double-precision values is NaN, 0 is returned.\n" "static __inline__ int __DEFAULT_FN_ATTRS _mm_comilt_sd(__m128d __a,\n" " __m128d __b) {\n" " return __builtin_ia32_comisdlt((__v2df)__a, (__v2df)__b);\n" "}\n" "\n" "/// Compares the lower double-precision floating-point values in each of\n" "/// the two 128-bit floating-point vectors of [2 x double] to determine if\n" "/// the value in the first parameter is less than or equal to the\n" "/// corresponding value in the second parameter.\n" "///\n" "/// The comparison yields 0 for false, 1 for true. 
If either of the two\n" "/// lower double-precision values is NaN, 0 is returned.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCOMISD / COMISD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __b.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __a.\n" "/// \\returns An integer containing the comparison results. If either of the two\n" "/// lower double-precision values is NaN, 0 is returned.\n" "static __inline__ int __DEFAULT_FN_ATTRS _mm_comile_sd(__m128d __a,\n" " __m128d __b) {\n" " return __builtin_ia32_comisdle((__v2df)__a, (__v2df)__b);\n" "}\n" "\n" "/// Compares the lower double-precision floating-point values in each of\n" "/// the two 128-bit floating-point vectors of [2 x double] to determine if\n" "/// the value in the first parameter is greater than the corresponding value\n" "/// in the second parameter.\n" "///\n" "/// The comparison yields 0 for false, 1 for true. If either of the two\n" "/// lower double-precision values is NaN, 0 is returned.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCOMISD / COMISD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __b.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __a.\n" "/// \\returns An integer containing the comparison results. 
If either of the two\n" "/// lower double-precision values is NaN, 0 is returned.\n" "static __inline__ int __DEFAULT_FN_ATTRS _mm_comigt_sd(__m128d __a,\n" " __m128d __b) {\n" " return __builtin_ia32_comisdgt((__v2df)__a, (__v2df)__b);\n" "}\n" "\n" "/// Compares the lower double-precision floating-point values in each of\n" "/// the two 128-bit floating-point vectors of [2 x double] to determine if\n" "/// the value in the first parameter is greater than or equal to the\n" "/// corresponding value in the second parameter.\n" "///\n" "/// The comparison yields 0 for false, 1 for true. If either of the two\n" "/// lower double-precision values is NaN, 0 is returned.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCOMISD / COMISD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __b.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __a.\n" "/// \\returns An integer containing the comparison results. If either of the two\n" "/// lower double-precision values is NaN, 0 is returned.\n" "static __inline__ int __DEFAULT_FN_ATTRS _mm_comige_sd(__m128d __a,\n" " __m128d __b) {\n" " return __builtin_ia32_comisdge((__v2df)__a, (__v2df)__b);\n" "}\n" "\n" "/// Compares the lower double-precision floating-point values in each of\n" "/// the two 128-bit floating-point vectors of [2 x double] to determine if\n" "/// the value in the first parameter is unequal to the corresponding value in\n" "/// the second parameter.\n" "///\n" "/// The comparison yields 0 for false, 1 for true. 
If either of the two\n" "/// lower double-precision values is NaN, 1 is returned.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCOMISD / COMISD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __b.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __a.\n" "/// \\returns An integer containing the comparison results. If either of the two\n" "/// lower double-precision values is NaN, 1 is returned.\n" "static __inline__ int __DEFAULT_FN_ATTRS _mm_comineq_sd(__m128d __a,\n" " __m128d __b) {\n" " return __builtin_ia32_comisdneq((__v2df)__a, (__v2df)__b);\n" "}\n" "\n" "/// Compares the lower double-precision floating-point values in each of\n" "/// the two 128-bit floating-point vectors of [2 x double] for equality. The\n" "/// comparison yields 0 for false, 1 for true.\n" "///\n" "/// If either of the two lower double-precision values is NaN, 0 is returned.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VUCOMISD / UCOMISD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __b.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __a.\n" "/// \\returns An integer containing the comparison results. 
If either of the two\n" "/// lower double-precision values is NaN, 0 is returned.\n" "static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomieq_sd(__m128d __a,\n" " __m128d __b) {\n" " return __builtin_ia32_ucomisdeq((__v2df)__a, (__v2df)__b);\n" "}\n" "\n" "/// Compares the lower double-precision floating-point values in each of\n" "/// the two 128-bit floating-point vectors of [2 x double] to determine if\n" "/// the value in the first parameter is less than the corresponding value in\n" "/// the second parameter.\n" "///\n" "/// The comparison yields 0 for false, 1 for true. If either of the two lower\n" "/// double-precision values is NaN, 0 is returned.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VUCOMISD / UCOMISD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __b.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __a.\n" "/// \\returns An integer containing the comparison results. If either of the two\n" "/// lower double-precision values is NaN, 0 is returned.\n" "static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomilt_sd(__m128d __a,\n" " __m128d __b) {\n" " return __builtin_ia32_ucomisdlt((__v2df)__a, (__v2df)__b);\n" "}\n" "\n" "/// Compares the lower double-precision floating-point values in each of\n" "/// the two 128-bit floating-point vectors of [2 x double] to determine if\n" "/// the value in the first parameter is less than or equal to the\n" "/// corresponding value in the second parameter.\n" "///\n" "/// The comparison yields 0 for false, 1 for true. 
If either of the two lower\n" "/// double-precision values is NaN, 0 is returned.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VUCOMISD / UCOMISD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __b.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __a.\n" "/// \\returns An integer containing the comparison results. If either of the two\n" "/// lower double-precision values is NaN, 0 is returned.\n" "static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomile_sd(__m128d __a,\n" " __m128d __b) {\n" " return __builtin_ia32_ucomisdle((__v2df)__a, (__v2df)__b);\n" "}\n" "\n" "/// Compares the lower double-precision floating-point values in each of\n" "/// the two 128-bit floating-point vectors of [2 x double] to determine if\n" "/// the value in the first parameter is greater than the corresponding value\n" "/// in the second parameter.\n" "///\n" "/// The comparison yields 0 for false, 1 for true. If either of the two lower\n" "/// double-precision values is NaN, 0 is returned.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VUCOMISD / UCOMISD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __b.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __a.\n" "/// \\returns An integer containing the comparison results. 
If either of the two\n" "/// lower double-precision values is NaN, 0 is returned.\n" "static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomigt_sd(__m128d __a,\n" " __m128d __b) {\n" " return __builtin_ia32_ucomisdgt((__v2df)__a, (__v2df)__b);\n" "}\n" "\n" "/// Compares the lower double-precision floating-point values in each of\n" "/// the two 128-bit floating-point vectors of [2 x double] to determine if\n" "/// the value in the first parameter is greater than or equal to the\n" "/// corresponding value in the second parameter.\n" "///\n" "/// The comparison yields 0 for false, 1 for true. If either of the two\n" "/// lower double-precision values is NaN, 0 is returned.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VUCOMISD / UCOMISD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __b.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __a.\n" "/// \\returns An integer containing the comparison results. If either of the two\n" "/// lower double-precision values is NaN, 0 is returned.\n" "static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomige_sd(__m128d __a,\n" " __m128d __b) {\n" " return __builtin_ia32_ucomisdge((__v2df)__a, (__v2df)__b);\n" "}\n" "\n" "/// Compares the lower double-precision floating-point values in each of\n" "/// the two 128-bit floating-point vectors of [2 x double] to determine if\n" "/// the value in the first parameter is unequal to the corresponding value in\n" "/// the second parameter.\n" "///\n" "/// The comparison yields 0 for false, 1 for true. 
If either of the two lower\n" "/// double-precision values is NaN, 1 is returned.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VUCOMISD / UCOMISD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __b.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double]. The lower double-precision value is\n" "/// compared to the lower double-precision value of \\a __a.\n" "/// \\returns An integer containing the comparison result. If either of the two\n" "/// lower double-precision values is NaN, 1 is returned.\n" "static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomineq_sd(__m128d __a,\n" " __m128d __b) {\n" " return __builtin_ia32_ucomisdneq((__v2df)__a, (__v2df)__b);\n" "}\n" "\n" "/// Converts the two double-precision floating-point elements of a\n" "/// 128-bit vector of [2 x double] into two single-precision floating-point\n" "/// values, returned in the lower 64 bits of a 128-bit vector of [4 x float].\n" "/// The upper 64 bits of the result vector are set to zero.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCVTPD2PS / CVTPD2PS instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double].\n" "/// \\returns A 128-bit vector of [4 x float] whose lower 64 bits contain the\n" "/// converted values. The upper 64 bits are set to zero.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtpd_ps(__m128d __a) {\n" " return __builtin_ia32_cvtpd2ps((__v2df)__a);\n" "}\n" "\n" "/// Converts the lower two single-precision floating-point elements of a\n" "/// 128-bit vector of [4 x float] into two double-precision floating-point\n" "/// values, returned in a 128-bit vector of [2 x double]. 
The upper two\n" "/// elements of the input vector are unused.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCVTPS2PD / CVTPS2PD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float]. The lower two single-precision\n" "/// floating-point elements are converted to double-precision values. The\n" "/// upper two elements are unused.\n" "/// \\returns A 128-bit vector of [2 x double] containing the converted values.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtps_pd(__m128 __a) {\n" " return (__m128d) __builtin_convertvector(\n" " __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 1), __v2df);\n" "}\n" "\n" "/// Converts the lower two integer elements of a 128-bit vector of\n" "/// [4 x i32] into two double-precision floating-point values, returned in a\n" "/// 128-bit vector of [2 x double].\n" "///\n" "/// The upper two elements of the input vector are unused.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCVTDQ2PD / CVTDQ2PD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector of [4 x i32]. The lower two integer elements are\n" "/// converted to double-precision values.\n" "///\n" "/// The upper two elements are unused.\n" "/// \\returns A 128-bit vector of [2 x double] containing the converted values.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtepi32_pd(__m128i __a) {\n" " return (__m128d) __builtin_convertvector(\n" " __builtin_shufflevector((__v4si)__a, (__v4si)__a, 0, 1), __v2df);\n" "}\n" "\n" "/// Converts the two double-precision floating-point elements of a\n" "/// 128-bit vector of [2 x double] into two signed 32-bit integer values,\n" "/// returned in the lower 64 bits of a 128-bit vector of [4 x i32]. 
The upper\n" "/// 64 bits of the result vector are set to zero.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCVTPD2DQ / CVTPD2DQ instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double].\n" "/// \\returns A 128-bit vector of [4 x i32] whose lower 64 bits contain the\n" "/// converted values. The upper 64 bits are set to zero.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtpd_epi32(__m128d __a) {\n" " return __builtin_ia32_cvtpd2dq((__v2df)__a);\n" "}\n" "\n" "/// Converts the low-order element of a 128-bit vector of [2 x double]\n" "/// into a 32-bit signed integer value.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCVTSD2SI / CVTSD2SI instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double]. The lower 64 bits are used in the\n" "/// conversion.\n" "/// \\returns A 32-bit signed integer containing the converted value.\n" "static __inline__ int __DEFAULT_FN_ATTRS _mm_cvtsd_si32(__m128d __a) {\n" " return __builtin_ia32_cvtsd2si((__v2df)__a);\n" "}\n" "\n" "/// Converts the lower double-precision floating-point element of a\n" "/// 128-bit vector of [2 x double], in the second parameter, into a\n" "/// single-precision floating-point value, returned in the lower 32 bits of a\n" "/// 128-bit vector of [4 x float]. The upper 96 bits of the result vector are\n" "/// copied from the upper 96 bits of the first parameter.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCVTSD2SS / CVTSD2SS instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float]. The upper 96 bits of this parameter are\n" "/// copied to the upper 96 bits of the result.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double]. The lower double-precision\n" "/// floating-point element is used in the conversion.\n" "/// \\returns A 128-bit vector of [4 x float]. 
The lower 32 bits contain the\n" "/// converted value from the second parameter. The upper 96 bits are copied\n" "/// from the upper 96 bits of the first parameter.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtsd_ss(__m128 __a,\n" " __m128d __b) {\n" " return (__m128)__builtin_ia32_cvtsd2ss((__v4sf)__a, (__v2df)__b);\n" "}\n" "\n" "/// Converts a 32-bit signed integer value, in the second parameter, into\n" "/// a double-precision floating-point value, returned in the lower 64 bits of\n" "/// a 128-bit vector of [2 x double]. The upper 64 bits of the result vector\n" "/// are copied from the upper 64 bits of the first parameter.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCVTSI2SD / CVTSI2SD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double]. The upper 64 bits of this parameter are\n" "/// copied to the upper 64 bits of the result.\n" "/// \\param __b\n" "/// A 32-bit signed integer containing the value to be converted.\n" "/// \\returns A 128-bit vector of [2 x double]. The lower 64 bits contain the\n" "/// converted value from the second parameter. The upper 64 bits are copied\n" "/// from the upper 64 bits of the first parameter.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtsi32_sd(__m128d __a,\n" " int __b) {\n" " __a[0] = __b;\n" " return __a;\n" "}\n" "\n" "/// Converts the lower single-precision floating-point element of a\n" "/// 128-bit vector of [4 x float], in the second parameter, into a\n" "/// double-precision floating-point value, returned in the lower 64 bits of\n" "/// a 128-bit vector of [2 x double]. The upper 64 bits of the result vector\n" "/// are copied from the upper 64 bits of the first parameter.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCVTSS2SD / CVTSS2SD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double]. 
The upper 64 bits of this parameter are\n" "/// copied to the upper 64 bits of the result.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float]. The lower single-precision\n" "/// floating-point element is used in the conversion.\n" "/// \\returns A 128-bit vector of [2 x double]. The lower 64 bits contain the\n" "/// converted value from the second parameter. The upper 64 bits are copied\n" "/// from the upper 64 bits of the first parameter.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtss_sd(__m128d __a,\n" " __m128 __b) {\n" " __a[0] = __b[0];\n" " return __a;\n" "}\n" "\n" "/// Converts the two double-precision floating-point elements of a\n" "/// 128-bit vector of [2 x double] into two signed 32-bit integer values,\n" "/// returned in the lower 64 bits of a 128-bit vector of [4 x i32].\n" "///\n" "/// If the result of either conversion is inexact, the result is truncated\n" "/// (rounded towards zero) regardless of the current MXCSR setting. The upper\n" "/// 64 bits of the result vector are set to zero.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCVTTPD2DQ / CVTTPD2DQ \n" "/// instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double].\n" "/// \\returns A 128-bit vector of [4 x i32] whose lower 64 bits contain the\n" "/// converted values. The upper 64 bits are set to zero.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvttpd_epi32(__m128d __a) {\n" " return (__m128i)__builtin_ia32_cvttpd2dq((__v2df)__a);\n" "}\n" "\n" "/// Converts the low-order element of a [2 x double] vector into a 32-bit\n" "/// signed integer value, truncating the result when it is inexact.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCVTTSD2SI / CVTTSD2SI \n" "/// instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double]. 
The lower 64 bits are used in the\n" "/// conversion.\n" "/// \\returns A 32-bit signed integer containing the converted value.\n" "static __inline__ int __DEFAULT_FN_ATTRS _mm_cvttsd_si32(__m128d __a) {\n" " return __builtin_ia32_cvttsd2si((__v2df)__a);\n" "}\n" "\n" "/// Converts the two double-precision floating-point elements of a\n" "/// 128-bit vector of [2 x double] into two signed 32-bit integer values,\n" "/// returned in a 64-bit vector of [2 x i32].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the CVTPD2PI instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double].\n" "/// \\returns A 64-bit vector of [2 x i32] containing the converted values.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX _mm_cvtpd_pi32(__m128d __a) {\n" " return (__m64)__builtin_ia32_cvtpd2pi((__v2df)__a);\n" "}\n" "\n" "/// Converts the two double-precision floating-point elements of a\n" "/// 128-bit vector of [2 x double] into two signed 32-bit integer values,\n" "/// returned in a 64-bit vector of [2 x i32].\n" "///\n" "/// If the result of either conversion is inexact, the result is truncated\n" "/// (rounded towards zero) regardless of the current MXCSR setting.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the CVTTPD2PI instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double].\n" "/// \\returns A 64-bit vector of [2 x i32] containing the converted values.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX _mm_cvttpd_pi32(__m128d __a) {\n" " return (__m64)__builtin_ia32_cvttpd2pi((__v2df)__a);\n" "}\n" "\n" "/// Converts the two signed 32-bit integer elements of a 64-bit vector of\n" "/// [2 x i32] into two double-precision floating-point values, returned in a\n" "/// 128-bit vector of [2 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the CVTPI2PD instruction.\n" "///\n" "/// \\param __a\n" "/// A 64-bit vector of [2 x i32].\n" 
"/// \\returns A 128-bit vector of [2 x double] containing the converted values.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS_MMX _mm_cvtpi32_pd(__m64 __a) {\n" " return __builtin_ia32_cvtpi2pd((__v2si)__a);\n" "}\n" "\n" "/// Returns the low-order element of a 128-bit vector of [2 x double] as\n" "/// a double-precision floating-point value.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic has no corresponding instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double]. The lower 64 bits are returned.\n" "/// \\returns A double-precision floating-point value copied from the lower 64\n" "/// bits of \\a __a.\n" "static __inline__ double __DEFAULT_FN_ATTRS _mm_cvtsd_f64(__m128d __a) {\n" " return __a[0];\n" "}\n" "\n" "/// Loads a 128-bit floating-point vector of [2 x double] from an aligned\n" "/// memory location.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVAPD / MOVAPD instruction.\n" "///\n" "/// \\param __dp\n" "/// A pointer to a 128-bit memory location. 
The address of the memory\n" "/// location has to be 16-byte aligned.\n" "/// \\returns A 128-bit vector of [2 x double] containing the loaded values.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_load_pd(double const *__dp) {\n" " return *(const __m128d *)__dp;\n" "}\n" "\n" "/// Loads a double-precision floating-point value from a specified memory\n" "/// location and duplicates it to both vector elements of a 128-bit vector of\n" "/// [2 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVDDUP / MOVDDUP instruction.\n" "///\n" "/// \\param __dp\n" "/// A pointer to a memory location containing a double-precision value.\n" "/// \\returns A 128-bit vector of [2 x double] containing the loaded and\n" "/// duplicated values.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_load1_pd(double const *__dp) {\n" " struct __mm_load1_pd_struct {\n" " double __u;\n" " } __attribute__((__packed__, __may_alias__));\n" " double __u = ((const struct __mm_load1_pd_struct *)__dp)->__u;\n" " return __extension__(__m128d){__u, __u};\n" "}\n" "\n" "#define _mm_load_pd1(dp) _mm_load1_pd(dp)\n" "\n" "/// Loads two double-precision values, in reverse order, from an aligned\n" "/// memory location into a 128-bit vector of [2 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVAPD / MOVAPD instruction +\n" "/// needed shuffling instructions. 
In AVX mode, the shuffling may be combined\n" "/// with the \\c VMOVAPD, resulting in only a \\c VPERMILPD instruction.\n" "///\n" "/// \\param __dp\n" "/// A 16-byte aligned pointer to an array of double-precision values to be\n" "/// loaded in reverse order.\n" "/// \\returns A 128-bit vector of [2 x double] containing the reversed loaded\n" "/// values.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadr_pd(double const *__dp) {\n" " __m128d __u = *(const __m128d *)__dp;\n" " return __builtin_shufflevector((__v2df)__u, (__v2df)__u, 1, 0);\n" "}\n" "\n" "/// Loads a 128-bit floating-point vector of [2 x double] from an\n" "/// unaligned memory location.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVUPD / MOVUPD instruction.\n" "///\n" "/// \\param __dp\n" "/// A pointer to a 128-bit memory location. The address of the memory\n" "/// location does not have to be aligned.\n" "/// \\returns A 128-bit vector of [2 x double] containing the loaded values.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadu_pd(double const *__dp) {\n" " struct __loadu_pd {\n" " __m128d_u __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " return ((const struct __loadu_pd *)__dp)->__v;\n" "}\n" "\n" "/// Loads a 64-bit integer value to the low element of a 128-bit integer\n" "/// vector and clears the upper element.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVQ / MOVQ instruction.\n" "///\n" "/// \\param __a\n" "/// A pointer to a 64-bit memory location. 
The address of the memory\n" "/// location does not have to be aligned.\n" "/// \\returns A 128-bit vector of [2 x i64] containing the loaded value.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadu_si64(void const *__a) {\n" " struct __loadu_si64 {\n" " long long __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " long long __u = ((const struct __loadu_si64 *)__a)->__v;\n" " return __extension__(__m128i)(__v2di){__u, 0LL};\n" "}\n" "\n" "/// Loads a 32-bit integer value to the low element of a 128-bit integer\n" "/// vector and clears the upper element.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVD / MOVD instruction.\n" "///\n" "/// \\param __a\n" "/// A pointer to a 32-bit memory location. The address of the memory\n" "/// location does not have to be aligned.\n" "/// \\returns A 128-bit vector of [4 x i32] containing the loaded value.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadu_si32(void const *__a) {\n" " struct __loadu_si32 {\n" " int __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " int __u = ((const struct __loadu_si32 *)__a)->__v;\n" " return __extension__(__m128i)(__v4si){__u, 0, 0, 0};\n" "}\n" "\n" "/// Loads a 16-bit integer value to the low element of a 128-bit integer\n" "/// vector and clears the upper element.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic does not correspond to a specific instruction.\n" "///\n" "/// \\param __a\n" "/// A pointer to a 16-bit memory location. 
The address of the memory\n" "/// location does not have to be aligned.\n" "/// \\returns A 128-bit vector of [8 x i16] containing the loaded value.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadu_si16(void const *__a) {\n" " struct __loadu_si16 {\n" " short __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " short __u = ((const struct __loadu_si16 *)__a)->__v;\n" " return __extension__(__m128i)(__v8hi){__u, 0, 0, 0, 0, 0, 0, 0};\n" "}\n" "\n" "/// Loads a 64-bit double-precision value to the low element of a\n" "/// 128-bit integer vector and clears the upper element.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVSD / MOVSD instruction.\n" "///\n" "/// \\param __dp\n" "/// A pointer to a memory location containing a double-precision value.\n" "/// The address of the memory location does not have to be aligned.\n" "/// \\returns A 128-bit vector of [2 x double] containing the loaded value.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_load_sd(double const *__dp) {\n" " struct __mm_load_sd_struct {\n" " double __u;\n" " } __attribute__((__packed__, __may_alias__));\n" " double __u = ((const struct __mm_load_sd_struct *)__dp)->__u;\n" " return __extension__(__m128d){__u, 0};\n" "}\n" "\n" "/// Loads a double-precision value into the high-order bits of a 128-bit\n" "/// vector of [2 x double]. The low-order bits are copied from the low-order\n" "/// bits of the first operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVHPD / MOVHPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double]. \\n\n" "/// Bits [63:0] are written to bits [63:0] of the result.\n" "/// \\param __dp\n" "/// A pointer to a 64-bit memory location containing a double-precision\n" "/// floating-point value that is loaded. The loaded value is written to bits\n" "/// [127:64] of the result. 
The address of the memory location does not have\n" "/// to be aligned.\n" "/// \\returns A 128-bit vector of [2 x double] containing the moved values.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadh_pd(__m128d __a,\n" " double const *__dp) {\n" " struct __mm_loadh_pd_struct {\n" " double __u;\n" " } __attribute__((__packed__, __may_alias__));\n" " double __u = ((const struct __mm_loadh_pd_struct *)__dp)->__u;\n" " return __extension__(__m128d){__a[0], __u};\n" "}\n" "\n" "/// Loads a double-precision value into the low-order bits of a 128-bit\n" "/// vector of [2 x double]. The high-order bits are copied from the\n" "/// high-order bits of the first operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVLPD / MOVLPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double]. \\n\n" "/// Bits [127:64] are written to bits [127:64] of the result.\n" "/// \\param __dp\n" "/// A pointer to a 64-bit memory location containing a double-precision\n" "/// floating-point value that is loaded. The loaded value is written to bits\n" "/// [63:0] of the result. The address of the memory location does not have to\n" "/// be aligned.\n" "/// \\returns A 128-bit vector of [2 x double] containing the moved values.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadl_pd(__m128d __a,\n" " double const *__dp) {\n" " struct __mm_loadl_pd_struct {\n" " double __u;\n" " } __attribute__((__packed__, __may_alias__));\n" " double __u = ((const struct __mm_loadl_pd_struct *)__dp)->__u;\n" " return __extension__(__m128d){__u, __a[1]};\n" "}\n" "\n" "/// Constructs a 128-bit floating-point vector of [2 x double] with\n" "/// unspecified content. 
This could be used as an argument to another\n" "/// intrinsic function where the argument is required but the value is not\n" "/// actually used.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic has no corresponding instruction.\n" "///\n" "/// \\returns A 128-bit floating-point vector of [2 x double] with unspecified\n" "/// content.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_undefined_pd(void) {\n" " return (__m128d)__builtin_ia32_undef128();\n" "}\n" "\n" "/// Constructs a 128-bit floating-point vector of [2 x double]. The lower\n" "/// 64 bits of the vector are initialized with the specified double-precision\n" "/// floating-point value. The upper 64 bits are set to zero.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVQ / MOVQ instruction.\n" "///\n" "/// \\param __w\n" "/// A double-precision floating-point value used to initialize the lower 64\n" "/// bits of the result.\n" "/// \\returns An initialized 128-bit floating-point vector of [2 x double]. The\n" "/// lower 64 bits contain the value of the parameter. 
The upper 64 bits are\n" "/// set to zero.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_set_sd(double __w) {\n" " return __extension__(__m128d){__w, 0};\n" "}\n" "\n" "/// Constructs a 128-bit floating-point vector of [2 x double], with each\n" "/// of the two double-precision floating-point vector elements set to the\n" "/// specified double-precision floating-point value.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVDDUP / MOVLHPS instruction.\n" "///\n" "/// \\param __w\n" "/// A double-precision floating-point value used to initialize each vector\n" "/// element of the result.\n" "/// \\returns An initialized 128-bit floating-point vector of [2 x double].\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_set1_pd(double __w) {\n" " return __extension__(__m128d){__w, __w};\n" "}\n" "\n" "/// Constructs a 128-bit floating-point vector of [2 x double], with each\n" "/// of the two double-precision floating-point vector elements set to the\n" "/// specified double-precision floating-point value.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVDDUP / MOVLHPS instruction.\n" "///\n" "/// \\param __w\n" "/// A double-precision floating-point value used to initialize each vector\n" "/// element of the result.\n" "/// \\returns An initialized 128-bit floating-point vector of [2 x double].\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_set_pd1(double __w) {\n" " return _mm_set1_pd(__w);\n" "}\n" "\n" "/// Constructs a 128-bit floating-point vector of [2 x double]\n" "/// initialized with the specified double-precision floating-point values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VUNPCKLPD / UNPCKLPD instruction.\n" "///\n" "/// \\param __w\n" "/// A double-precision floating-point value used to initialize the upper 64\n" "/// bits of the result.\n" "/// \\param __x\n" "/// A double-precision floating-point value used to initialize the lower 
64\n" "/// bits of the result.\n" "/// \\returns An initialized 128-bit floating-point vector of [2 x double].\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_set_pd(double __w,\n" " double __x) {\n" " return __extension__(__m128d){__x, __w};\n" "}\n" "\n" "/// Constructs a 128-bit floating-point vector of [2 x double],\n" "/// initialized in reverse order with the specified double-precision\n" "/// floating-point values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VUNPCKLPD / UNPCKLPD instruction.\n" "///\n" "/// \\param __w\n" "/// A double-precision floating-point value used to initialize the lower 64\n" "/// bits of the result.\n" "/// \\param __x\n" "/// A double-precision floating-point value used to initialize the upper 64\n" "/// bits of the result.\n" "/// \\returns An initialized 128-bit floating-point vector of [2 x double].\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_setr_pd(double __w,\n" " double __x) {\n" " return __extension__(__m128d){__w, __x};\n" "}\n" "\n" "/// Constructs a 128-bit floating-point vector of [2 x double]\n" "/// initialized to zero.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VXORPS / XORPS instruction.\n" "///\n" "/// \\returns An initialized 128-bit floating-point vector of [2 x double] with\n" "/// all elements set to zero.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_setzero_pd(void) {\n" " return __extension__(__m128d){0.0, 0.0};\n" "}\n" "\n" "/// Constructs a 128-bit floating-point vector of [2 x double]. The lower\n" "/// 64 bits are set to the lower 64 bits of the second parameter. The upper\n" "/// 64 bits are set to the upper 64 bits of the first parameter.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VBLENDPD / BLENDPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double]. 
The upper 64 bits are written to the\n" "/// upper 64 bits of the result.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double]. The lower 64 bits are written to the\n" "/// lower 64 bits of the result.\n" "/// \\returns A 128-bit vector of [2 x double] containing the moved values.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_move_sd(__m128d __a,\n" " __m128d __b) {\n" " __a[0] = __b[0];\n" " return __a;\n" "}\n" "\n" "/// Stores the lower 64 bits of a 128-bit vector of [2 x double] to a\n" "/// memory location.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVSD / MOVSD instruction.\n" "///\n" "/// \\param __dp\n" "/// A pointer to a 64-bit memory location.\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double] containing the value to be stored.\n" "static __inline__ void __DEFAULT_FN_ATTRS _mm_store_sd(double *__dp,\n" " __m128d __a) {\n" " struct __mm_store_sd_struct {\n" " double __u;\n" " } __attribute__((__packed__, __may_alias__));\n" " ((struct __mm_store_sd_struct *)__dp)->__u = __a[0];\n" "}\n" "\n" "/// Moves packed double-precision values from a 128-bit vector of\n" "/// [2 x double] to a memory location.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVAPD / MOVAPS instruction.\n" "///\n" "/// \\param __dp\n" "/// A pointer to an aligned memory location that can store two\n" "/// double-precision values.\n" "/// \\param __a\n" "/// A packed 128-bit vector of [2 x double] containing the values to be\n" "/// moved.\n" "static __inline__ void __DEFAULT_FN_ATTRS _mm_store_pd(double *__dp,\n" " __m128d __a) {\n" " *(__m128d *)__dp = __a;\n" "}\n" "\n" "/// Moves the lower 64 bits of a 128-bit vector of [2 x double] twice to\n" "/// the upper and lower 64 bits of a memory location.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the\n" "/// VMOVDDUP + VMOVAPD / MOVLHPS + MOVAPS instruction.\n" "///\n" "/// \\param __dp\n" "/// A pointer to a 
memory location that can store two double-precision\n" "/// values.\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double] whose lower 64 bits are copied to each\n" "/// of the values in \\a __dp.\n" "static __inline__ void __DEFAULT_FN_ATTRS _mm_store1_pd(double *__dp,\n" " __m128d __a) {\n" " __a = __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0);\n" " _mm_store_pd(__dp, __a);\n" "}\n" "\n" "/// Moves the lower 64 bits of a 128-bit vector of [2 x double] twice to\n" "/// the upper and lower 64 bits of a memory location.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the\n" "/// VMOVDDUP + VMOVAPD / MOVLHPS + MOVAPS instruction.\n" "///\n" "/// \\param __dp\n" "/// A pointer to a memory location that can store two double-precision\n" "/// values.\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double] whose lower 64 bits are copied to each\n" "/// of the values in \\a __dp.\n" "static __inline__ void __DEFAULT_FN_ATTRS _mm_store_pd1(double *__dp,\n" " __m128d __a) {\n" " _mm_store1_pd(__dp, __a);\n" "}\n" "\n" "/// Stores a 128-bit vector of [2 x double] into an unaligned memory\n" "/// location.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVUPD / MOVUPD instruction.\n" "///\n" "/// \\param __dp\n" "/// A pointer to a 128-bit memory location. 
The address of the memory\n" "/// location does not have to be aligned.\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double] containing the values to be stored.\n" "static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_pd(double *__dp,\n" " __m128d __a) {\n" " struct __storeu_pd {\n" " __m128d_u __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " ((struct __storeu_pd *)__dp)->__v = __a;\n" "}\n" "\n" "/// Stores two double-precision values, in reverse order, from a 128-bit\n" "/// vector of [2 x double] to a 16-byte aligned memory location.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to a shuffling instruction followed by a\n" "/// VMOVAPD / MOVAPD instruction.\n" "///\n" "/// \\param __dp\n" "/// A pointer to a 16-byte aligned memory location that can store two\n" "/// double-precision values.\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double] containing the values to be reversed and\n" "/// stored.\n" "static __inline__ void __DEFAULT_FN_ATTRS _mm_storer_pd(double *__dp,\n" " __m128d __a) {\n" " __a = __builtin_shufflevector((__v2df)__a, (__v2df)__a, 1, 0);\n" " *(__m128d *)__dp = __a;\n" "}\n" "\n" "/// Stores the upper 64 bits of a 128-bit vector of [2 x double] to a\n" "/// memory location.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVHPD / MOVHPD instruction.\n" "///\n" "/// \\param __dp\n" "/// A pointer to a 64-bit memory location.\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double] containing the value to be stored.\n" "static __inline__ void __DEFAULT_FN_ATTRS _mm_storeh_pd(double *__dp,\n" " __m128d __a) {\n" " struct __mm_storeh_pd_struct {\n" " double __u;\n" " } __attribute__((__packed__, __may_alias__));\n" " ((struct __mm_storeh_pd_struct *)__dp)->__u = __a[1];\n" "}\n" "\n" "/// Stores the lower 64 bits of a 128-bit vector of [2 x double] to a\n" "/// memory location.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the 
VMOVLPD / MOVLPD instruction.\n" "///\n" "/// \\param __dp\n" "/// A pointer to a 64-bit memory location.\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double] containing the value to be stored.\n" "static __inline__ void __DEFAULT_FN_ATTRS _mm_storel_pd(double *__dp,\n" " __m128d __a) {\n" " struct __mm_storeh_pd_struct {\n" " double __u;\n" " } __attribute__((__packed__, __may_alias__));\n" " ((struct __mm_storeh_pd_struct *)__dp)->__u = __a[0];\n" "}\n" "\n" "/// Adds the corresponding elements of two 128-bit vectors of [16 x i8],\n" "/// saving the lower 8 bits of each sum in the corresponding element of a\n" "/// 128-bit result vector of [16 x i8].\n" "///\n" "/// The integer elements of both parameters can be either signed or unsigned.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPADDB / PADDB instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [16 x i8].\n" "/// \\param __b\n" "/// A 128-bit vector of [16 x i8].\n" "/// \\returns A 128-bit vector of [16 x i8] containing the sums of both\n" "/// parameters.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi8(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)((__v16qu)__a + (__v16qu)__b);\n" "}\n" "\n" "/// Adds the corresponding elements of two 128-bit vectors of [8 x i16],\n" "/// saving the lower 16 bits of each sum in the corresponding element of a\n" "/// 128-bit result vector of [8 x i16].\n" "///\n" "/// The integer elements of both parameters can be either signed or unsigned.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPADDW / PADDW instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [8 x i16].\n" "/// \\param __b\n" "/// A 128-bit vector of [8 x i16].\n" "/// \\returns A 128-bit vector of [8 x i16] containing the sums of both\n" "/// parameters.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi16(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)((__v8hu)__a + 
(__v8hu)__b);\n" "}\n" "\n" "/// Adds the corresponding elements of two 128-bit vectors of [4 x i32],\n" "/// saving the lower 32 bits of each sum in the corresponding element of a\n" "/// 128-bit result vector of [4 x i32].\n" "///\n" "/// The integer elements of both parameters can be either signed or unsigned.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPADDD / PADDD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x i32].\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x i32].\n" "/// \\returns A 128-bit vector of [4 x i32] containing the sums of both\n" "/// parameters.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi32(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)((__v4su)__a + (__v4su)__b);\n" "}\n" "\n" "/// Adds two signed or unsigned 64-bit integer values, returning the\n" "/// lower 64 bits of the sum.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PADDQ instruction.\n" "///\n" "/// \\param __a\n" "/// A 64-bit integer.\n" "/// \\param __b\n" "/// A 64-bit integer.\n" "/// \\returns A 64-bit integer containing the sum of both parameters.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX _mm_add_si64(__m64 __a,\n" " __m64 __b) {\n" " return (__m64)__builtin_ia32_paddq((__v1di)__a, (__v1di)__b);\n" "}\n" "\n" "/// Adds the corresponding elements of two 128-bit vectors of [2 x i64],\n" "/// saving the lower 64 bits of each sum in the corresponding element of a\n" "/// 128-bit result vector of [2 x i64].\n" "///\n" "/// The integer elements of both parameters can be either signed or unsigned.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPADDQ / PADDQ instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x i64].\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x i64].\n" "/// \\returns A 128-bit vector of [2 x i64] containing the sums of both\n" "/// parameters.\n" "static __inline__ 
__m128i __DEFAULT_FN_ATTRS _mm_add_epi64(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)((__v2du)__a + (__v2du)__b);\n" "}\n" "\n" "/// Adds, with saturation, the corresponding elements of two 128-bit\n" "/// signed [16 x i8] vectors, saving each sum in the corresponding element of\n" "/// a 128-bit result vector of [16 x i8]. Positive sums greater than 0x7F are\n" "/// saturated to 0x7F. Negative sums less than 0x80 are saturated to 0x80.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPADDSB / PADDSB instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit signed [16 x i8] vector.\n" "/// \\param __b\n" "/// A 128-bit signed [16 x i8] vector.\n" "/// \\returns A 128-bit signed [16 x i8] vector containing the saturated sums of\n" "/// both parameters.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epi8(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)__builtin_elementwise_add_sat((__v16qs)__a, (__v16qs)__b);\n" "}\n" "\n" "/// Adds, with saturation, the corresponding elements of two 128-bit\n" "/// signed [8 x i16] vectors, saving each sum in the corresponding element of\n" "/// a 128-bit result vector of [8 x i16]. Positive sums greater than 0x7FFF\n" "/// are saturated to 0x7FFF. 
Negative sums less than 0x8000 are saturated to\n" "/// 0x8000.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPADDSW / PADDSW instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit signed [8 x i16] vector.\n" "/// \\param __b\n" "/// A 128-bit signed [8 x i16] vector.\n" "/// \\returns A 128-bit signed [8 x i16] vector containing the saturated sums of\n" "/// both parameters.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epi16(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)__builtin_elementwise_add_sat((__v8hi)__a, (__v8hi)__b);\n" "}\n" "\n" "/// Adds, with saturation, the corresponding elements of two 128-bit\n" "/// unsigned [16 x i8] vectors, saving each sum in the corresponding element\n" "/// of a 128-bit result vector of [16 x i8]. Positive sums greater than 0xFF\n" "/// are saturated to 0xFF. Negative sums are saturated to 0x00.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPADDUSB / PADDUSB instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit unsigned [16 x i8] vector.\n" "/// \\param __b\n" "/// A 128-bit unsigned [16 x i8] vector.\n" "/// \\returns A 128-bit unsigned [16 x i8] vector containing the saturated sums\n" "/// of both parameters.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epu8(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)__builtin_elementwise_add_sat((__v16qu)__a, (__v16qu)__b);\n" "}\n" "\n" "/// Adds, with saturation, the corresponding elements of two 128-bit\n" "/// unsigned [8 x i16] vectors, saving each sum in the corresponding element\n" "/// of a 128-bit result vector of [8 x i16]. Positive sums greater than\n" "/// 0xFFFF are saturated to 0xFFFF. 
Negative sums are saturated to 0x0000.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPADDUSB / PADDUSB instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit unsigned [8 x i16] vector.\n" "/// \\param __b\n" "/// A 128-bit unsigned [8 x i16] vector.\n" "/// \\returns A 128-bit unsigned [8 x i16] vector containing the saturated sums\n" "/// of both parameters.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epu16(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)__builtin_elementwise_add_sat((__v8hu)__a, (__v8hu)__b);\n" "}\n" "\n" "/// Computes the rounded averages of corresponding elements of two\n" "/// 128-bit unsigned [16 x i8] vectors, saving each result in the\n" "/// corresponding element of a 128-bit result vector of [16 x i8].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPAVGB / PAVGB instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit unsigned [16 x i8] vector.\n" "/// \\param __b\n" "/// A 128-bit unsigned [16 x i8] vector.\n" "/// \\returns A 128-bit unsigned [16 x i8] vector containing the rounded\n" "/// averages of both parameters.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_avg_epu8(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)__builtin_ia32_pavgb128((__v16qi)__a, (__v16qi)__b);\n" "}\n" "\n" "/// Computes the rounded averages of corresponding elements of two\n" "/// 128-bit unsigned [8 x i16] vectors, saving each result in the\n" "/// corresponding element of a 128-bit result vector of [8 x i16].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPAVGW / PAVGW instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit unsigned [8 x i16] vector.\n" "/// \\param __b\n" "/// A 128-bit unsigned [8 x i16] vector.\n" "/// \\returns A 128-bit unsigned [8 x i16] vector containing the rounded\n" "/// averages of both parameters.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_avg_epu16(__m128i __a,\n" " __m128i 
__b) {\n" " return (__m128i)__builtin_ia32_pavgw128((__v8hi)__a, (__v8hi)__b);\n" "}\n" "\n" "/// Multiplies the corresponding elements of two 128-bit signed [8 x i16]\n" "/// vectors, producing eight intermediate 32-bit signed integer products, and\n" "/// adds the consecutive pairs of 32-bit products to form a 128-bit signed\n" "/// [4 x i32] vector.\n" "///\n" "/// For example, bits [15:0] of both parameters are multiplied producing a\n" "/// 32-bit product, bits [31:16] of both parameters are multiplied producing\n" "/// a 32-bit product, and the sum of those two products becomes bits [31:0]\n" "/// of the result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPMADDWD / PMADDWD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit signed [8 x i16] vector.\n" "/// \\param __b\n" "/// A 128-bit signed [8 x i16] vector.\n" "/// \\returns A 128-bit signed [4 x i32] vector containing the sums of products\n" "/// of both parameters.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_madd_epi16(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)__builtin_ia32_pmaddwd128((__v8hi)__a, (__v8hi)__b);\n" "}\n" "\n" "/// Compares corresponding elements of two 128-bit signed [8 x i16]\n" "/// vectors, saving the greater value from each comparison in the\n" "/// corresponding element of a 128-bit result vector of [8 x i16].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPMAXSW / PMAXSW instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit signed [8 x i16] vector.\n" "/// \\param __b\n" "/// A 128-bit signed [8 x i16] vector.\n" "/// \\returns A 128-bit signed [8 x i16] vector containing the greater value of\n" "/// each comparison.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epi16(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)__builtin_elementwise_max((__v8hi)__a, (__v8hi)__b);\n" "}\n" "\n" "/// Compares corresponding elements of two 128-bit unsigned [16 x i8]\n" "/// 
vectors, saving the greater value from each comparison in the\n" "/// corresponding element of a 128-bit result vector of [16 x i8].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPMAXUB / PMAXUB instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit unsigned [16 x i8] vector.\n" "/// \\param __b\n" "/// A 128-bit unsigned [16 x i8] vector.\n" "/// \\returns A 128-bit unsigned [16 x i8] vector containing the greater value of\n" "/// each comparison.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epu8(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)__builtin_elementwise_max((__v16qu)__a, (__v16qu)__b);\n" "}\n" "\n" "/// Compares corresponding elements of two 128-bit signed [8 x i16]\n" "/// vectors, saving the smaller value from each comparison in the\n" "/// corresponding element of a 128-bit result vector of [8 x i16].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPMINSW / PMINSW instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit signed [8 x i16] vector.\n" "/// \\param __b\n" "/// A 128-bit signed [8 x i16] vector.\n" "/// \\returns A 128-bit signed [8 x i16] vector containing the smaller value of\n" "/// each comparison.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epi16(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)__builtin_elementwise_min((__v8hi)__a, (__v8hi)__b);\n" "}\n" "\n" "/// Compares corresponding elements of two 128-bit unsigned [16 x i8]\n" "/// vectors, saving the smaller value from each comparison in the\n" "/// corresponding element of a 128-bit result vector of [16 x i8].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPMINUB / PMINUB instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit unsigned [16 x i8] vector.\n" "/// \\param __b\n" "/// A 128-bit unsigned [16 x i8] vector.\n" "/// \\returns A 128-bit unsigned [16 x i8] vector containing the smaller value of\n" "/// each comparison.\n" 
"static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epu8(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)__builtin_elementwise_min((__v16qu)__a, (__v16qu)__b);\n" "}\n" "\n" "/// Multiplies the corresponding elements of two signed [8 x i16]\n" "/// vectors, saving the upper 16 bits of each 32-bit product in the\n" "/// corresponding element of a 128-bit signed [8 x i16] result vector.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPMULHW / PMULHW instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit signed [8 x i16] vector.\n" "/// \\param __b\n" "/// A 128-bit signed [8 x i16] vector.\n" "/// \\returns A 128-bit signed [8 x i16] vector containing the upper 16 bits of\n" "/// each of the eight 32-bit products.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mulhi_epi16(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)__builtin_ia32_pmulhw128((__v8hi)__a, (__v8hi)__b);\n" "}\n" "\n" "/// Multiplies the corresponding elements of two unsigned [8 x i16]\n" "/// vectors, saving the upper 16 bits of each 32-bit product in the\n" "/// corresponding element of a 128-bit unsigned [8 x i16] result vector.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPMULHUW / PMULHUW instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit unsigned [8 x i16] vector.\n" "/// \\param __b\n" "/// A 128-bit unsigned [8 x i16] vector.\n" "/// \\returns A 128-bit unsigned [8 x i16] vector containing the upper 16 bits\n" "/// of each of the eight 32-bit products.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mulhi_epu16(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)__builtin_ia32_pmulhuw128((__v8hi)__a, (__v8hi)__b);\n" "}\n" "\n" "/// Multiplies the corresponding elements of two signed [8 x i16]\n" "/// vectors, saving the lower 16 bits of each 32-bit product in the\n" "/// corresponding element of a 128-bit signed [8 x i16] result vector.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This 
intrinsic corresponds to the VPMULLW / PMULLW instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit signed [8 x i16] vector.\n" "/// \\param __b\n" "/// A 128-bit signed [8 x i16] vector.\n" "/// \\returns A 128-bit signed [8 x i16] vector containing the lower 16 bits of\n" "/// each of the eight 32-bit products.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mullo_epi16(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)((__v8hu)__a * (__v8hu)__b);\n" "}\n" "\n" "/// Multiplies 32-bit unsigned integer values contained in the lower bits\n" "/// of the two 64-bit integer vectors and returns the 64-bit unsigned\n" "/// product.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PMULUDQ instruction.\n" "///\n" "/// \\param __a\n" "/// A 64-bit integer containing one of the source operands.\n" "/// \\param __b\n" "/// A 64-bit integer containing one of the source operands.\n" "/// \\returns A 64-bit integer vector containing the product of both operands.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX _mm_mul_su32(__m64 __a,\n" " __m64 __b) {\n" " return __builtin_ia32_pmuludq((__v2si)__a, (__v2si)__b);\n" "}\n" "\n" "/// Multiplies 32-bit unsigned integer values contained in the lower\n" "/// bits of the corresponding elements of two [2 x i64] vectors, and returns\n" "/// the 64-bit products in the corresponding elements of a [2 x i64] vector.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPMULUDQ / PMULUDQ instruction.\n" "///\n" "/// \\param __a\n" "/// A [2 x i64] vector containing one of the source operands.\n" "/// \\param __b\n" "/// A [2 x i64] vector containing one of the source operands.\n" "/// \\returns A [2 x i64] vector containing the product of both operands.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mul_epu32(__m128i __a,\n" " __m128i __b) {\n" " return __builtin_ia32_pmuludq128((__v4si)__a, (__v4si)__b);\n" "}\n" "\n" "/// Computes the absolute differences of 
corresponding 8-bit integer\n" "/// values in two 128-bit vectors. Sums the first 8 absolute differences, and\n" "/// separately sums the second 8 absolute differences. Packs these two\n" "/// unsigned 16-bit integer sums into the upper and lower elements of a\n" "/// [2 x i64] vector.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPSADBW / PSADBW instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector containing one of the source operands.\n" "/// \\param __b\n" "/// A 128-bit integer vector containing one of the source operands.\n" "/// \\returns A [2 x i64] vector containing the sums of the sets of absolute\n" "/// differences between both operands.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sad_epu8(__m128i __a,\n" " __m128i __b) {\n" " return __builtin_ia32_psadbw128((__v16qi)__a, (__v16qi)__b);\n" "}\n" "\n" "/// Subtracts the corresponding 8-bit integer values in the operands.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPSUBB / PSUBB instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector containing the minuends.\n" "/// \\param __b\n" "/// A 128-bit integer vector containing the subtrahends.\n" "/// \\returns A 128-bit integer vector containing the differences of the values\n" "/// in the operands.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi8(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)((__v16qu)__a - (__v16qu)__b);\n" "}\n" "\n" "/// Subtracts the corresponding 16-bit integer values in the operands.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPSUBW / PSUBW instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector containing the minuends.\n" "/// \\param __b\n" "/// A 128-bit integer vector containing the subtrahends.\n" "/// \\returns A 128-bit integer vector containing the differences of the values\n" "/// in the operands.\n" "static __inline__ __m128i 
__DEFAULT_FN_ATTRS _mm_sub_epi16(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)((__v8hu)__a - (__v8hu)__b);\n" "}\n" "\n" "/// Subtracts the corresponding 32-bit integer values in the operands.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPSUBD / PSUBD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector containing the minuends.\n" "/// \\param __b\n" "/// A 128-bit integer vector containing the subtrahends.\n" "/// \\returns A 128-bit integer vector containing the differences of the values\n" "/// in the operands.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi32(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)((__v4su)__a - (__v4su)__b);\n" "}\n" "\n" "/// Subtracts signed or unsigned 64-bit integer values and writes the\n" "/// difference to the corresponding bits in the destination.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PSUBQ instruction.\n" "///\n" "/// \\param __a\n" "/// A 64-bit integer vector containing the minuend.\n" "/// \\param __b\n" "/// A 64-bit integer vector containing the subtrahend.\n" "/// \\returns A 64-bit integer vector containing the difference of the values in\n" "/// the operands.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX _mm_sub_si64(__m64 __a,\n" " __m64 __b) {\n" " return (__m64)__builtin_ia32_psubq((__v1di)__a, (__v1di)__b);\n" "}\n" "\n" "/// Subtracts the corresponding elements of two [2 x i64] vectors.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPSUBQ / PSUBQ instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector containing the minuends.\n" "/// \\param __b\n" "/// A 128-bit integer vector containing the subtrahends.\n" "/// \\returns A 128-bit integer vector containing the differences of the values\n" "/// in the operands.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi64(__m128i __a,\n" " __m128i __b) {\n" " return 
(__m128i)((__v2du)__a - (__v2du)__b);\n" "}\n" "\n" "/// Subtracts corresponding 8-bit signed integer values in the input and\n" "/// returns the differences in the corresponding bytes in the destination.\n" "/// Differences greater than 0x7F are saturated to 0x7F, and differences less\n" "/// than 0x80 are saturated to 0x80.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPSUBSB / PSUBSB instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector containing the minuends.\n" "/// \\param __b\n" "/// A 128-bit integer vector containing the subtrahends.\n" "/// \\returns A 128-bit integer vector containing the differences of the values\n" "/// in the operands.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epi8(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)__builtin_elementwise_sub_sat((__v16qs)__a, (__v16qs)__b);\n" "}\n" "\n" "/// Subtracts corresponding 16-bit signed integer values in the input and\n" "/// returns the differences in the corresponding bytes in the destination.\n" "/// Differences greater than 0x7FFF are saturated to 0x7FFF, and values less\n" "/// than 0x8000 are saturated to 0x8000.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPSUBSW / PSUBSW instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector containing the minuends.\n" "/// \\param __b\n" "/// A 128-bit integer vector containing the subtrahends.\n" "/// \\returns A 128-bit integer vector containing the differences of the values\n" "/// in the operands.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epi16(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)__builtin_elementwise_sub_sat((__v8hi)__a, (__v8hi)__b);\n" "}\n" "\n" "/// Subtracts corresponding 8-bit unsigned integer values in the input\n" "/// and returns the differences in the corresponding bytes in the\n" "/// destination. 
Differences less than 0x00 are saturated to 0x00.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPSUBUSB / PSUBUSB instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector containing the minuends.\n" "/// \\param __b\n" "/// A 128-bit integer vector containing the subtrahends.\n" "/// \\returns A 128-bit integer vector containing the unsigned integer\n" "/// differences of the values in the operands.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epu8(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)__builtin_elementwise_sub_sat((__v16qu)__a, (__v16qu)__b);\n" "}\n" "\n" "/// Subtracts corresponding 16-bit unsigned integer values in the input\n" "/// and returns the differences in the corresponding bytes in the\n" "/// destination. Differences less than 0x0000 are saturated to 0x0000.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPSUBUSW / PSUBUSW instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector containing the minuends.\n" "/// \\param __b\n" "/// A 128-bit integer vector containing the subtrahends.\n" "/// \\returns A 128-bit integer vector containing the unsigned integer\n" "/// differences of the values in the operands.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epu16(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)__builtin_elementwise_sub_sat((__v8hu)__a, (__v8hu)__b);\n" "}\n" "\n" "/// Performs a bitwise AND of two 128-bit integer vectors.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPAND / PAND instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector containing one of the source operands.\n" "/// \\param __b\n" "/// A 128-bit integer vector containing one of the source operands.\n" "/// \\returns A 128-bit integer vector containing the bitwise AND of the values\n" "/// in both operands.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_and_si128(__m128i 
__a,\n" " __m128i __b) {\n" " return (__m128i)((__v2du)__a & (__v2du)__b);\n" "}\n" "\n" "/// Performs a bitwise AND of two 128-bit integer vectors, using the\n" "/// one's complement of the values contained in the first source operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPANDN / PANDN instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector containing the left source operand. The one's complement\n" "/// of this value is used in the bitwise AND.\n" "/// \\param __b\n" "/// A 128-bit vector containing the right source operand.\n" "/// \\returns A 128-bit integer vector containing the bitwise AND of the one's\n" "/// complement of the first operand and the values in the second operand.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_andnot_si128(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)(~(__v2du)__a & (__v2du)__b);\n" "}\n" "/// Performs a bitwise OR of two 128-bit integer vectors.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPOR / POR instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector containing one of the source operands.\n" "/// \\param __b\n" "/// A 128-bit integer vector containing one of the source operands.\n" "/// \\returns A 128-bit integer vector containing the bitwise OR of the values\n" "/// in both operands.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_or_si128(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)((__v2du)__a | (__v2du)__b);\n" "}\n" "\n" "/// Performs a bitwise exclusive OR of two 128-bit integer vectors.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPXOR / PXOR instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector containing one of the source operands.\n" "/// \\param __b\n" "/// A 128-bit integer vector containing one of the source operands.\n" "/// \\returns A 128-bit integer vector containing the bitwise exclusive OR of the\n" "/// values 
in both operands.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_xor_si128(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)((__v2du)__a ^ (__v2du)__b);\n" "}\n" "\n" "/// Left-shifts the 128-bit integer vector operand by the specified\n" "/// number of bytes. Low-order bits are cleared.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm_slli_si128(__m128i a, const int imm);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VPSLLDQ / PSLLDQ instruction.\n" "///\n" "/// \\param a\n" "/// A 128-bit integer vector containing the source operand.\n" "/// \\param imm\n" "/// An immediate value specifying the number of bytes to left-shift operand\n" "/// \\a a.\n" "/// \\returns A 128-bit integer vector containing the left-shifted value.\n" "#define _mm_slli_si128(a, imm) \\\n" " ((__m128i)__builtin_ia32_pslldqi128_byteshift((__v2di)(__m128i)(a), \\\n" " (int)(imm)))\n" "\n" "#define _mm_bslli_si128(a, imm) \\\n" " ((__m128i)__builtin_ia32_pslldqi128_byteshift((__v2di)(__m128i)(a), \\\n" " (int)(imm)))\n" "\n" "/// Left-shifts each 16-bit value in the 128-bit integer vector operand\n" "/// by the specified number of bits. Low-order bits are cleared.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPSLLW / PSLLW instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector containing the source operand.\n" "/// \\param __count\n" "/// An integer value specifying the number of bits to left-shift each value\n" "/// in operand \\a __a.\n" "/// \\returns A 128-bit integer vector containing the left-shifted values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_slli_epi16(__m128i __a,\n" " int __count) {\n" " return (__m128i)__builtin_ia32_psllwi128((__v8hi)__a, __count);\n" "}\n" "\n" "/// Left-shifts each 16-bit value in the 128-bit integer vector operand\n" "/// by the specified number of bits. 
Low-order bits are cleared.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPSLLW / PSLLW instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector containing the source operand.\n" "/// \\param __count\n" "/// A 128-bit integer vector in which bits [63:0] specify the number of bits\n" "/// to left-shift each value in operand \\a __a.\n" "/// \\returns A 128-bit integer vector containing the left-shifted values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi16(__m128i __a,\n" " __m128i __count) {\n" " return (__m128i)__builtin_ia32_psllw128((__v8hi)__a, (__v8hi)__count);\n" "}\n" "\n" "/// Left-shifts each 32-bit value in the 128-bit integer vector operand\n" "/// by the specified number of bits. Low-order bits are cleared.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPSLLD / PSLLD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector containing the source operand.\n" "/// \\param __count\n" "/// An integer value specifying the number of bits to left-shift each value\n" "/// in operand \\a __a.\n" "/// \\returns A 128-bit integer vector containing the left-shifted values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_slli_epi32(__m128i __a,\n" " int __count) {\n" " return (__m128i)__builtin_ia32_pslldi128((__v4si)__a, __count);\n" "}\n" "\n" "/// Left-shifts each 32-bit value in the 128-bit integer vector operand\n" "/// by the specified number of bits. 
Low-order bits are cleared.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPSLLD / PSLLD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector containing the source operand.\n" "/// \\param __count\n" "/// A 128-bit integer vector in which bits [63:0] specify the number of bits\n" "/// to left-shift each value in operand \\a __a.\n" "/// \\returns A 128-bit integer vector containing the left-shifted values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi32(__m128i __a,\n" " __m128i __count) {\n" " return (__m128i)__builtin_ia32_pslld128((__v4si)__a, (__v4si)__count);\n" "}\n" "\n" "/// Left-shifts each 64-bit value in the 128-bit integer vector operand\n" "/// by the specified number of bits. Low-order bits are cleared.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPSLLQ / PSLLQ instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector containing the source operand.\n" "/// \\param __count\n" "/// An integer value specifying the number of bits to left-shift each value\n" "/// in operand \\a __a.\n" "/// \\returns A 128-bit integer vector containing the left-shifted values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_slli_epi64(__m128i __a,\n" " int __count) {\n" " return __builtin_ia32_psllqi128((__v2di)__a, __count);\n" "}\n" "\n" "/// Left-shifts each 64-bit value in the 128-bit integer vector operand\n" "/// by the specified number of bits. 
Low-order bits are cleared.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPSLLQ / PSLLQ instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector containing the source operand.\n" "/// \\param __count\n" "/// A 128-bit integer vector in which bits [63:0] specify the number of bits\n" "/// to left-shift each value in operand \\a __a.\n" "/// \\returns A 128-bit integer vector containing the left-shifted values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi64(__m128i __a,\n" " __m128i __count) {\n" " return __builtin_ia32_psllq128((__v2di)__a, (__v2di)__count);\n" "}\n" "\n" "/// Right-shifts each 16-bit value in the 128-bit integer vector operand\n" "/// by the specified number of bits. High-order bits are filled with the sign\n" "/// bit of the initial value.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPSRAW / PSRAW instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector containing the source operand.\n" "/// \\param __count\n" "/// An integer value specifying the number of bits to right-shift each value\n" "/// in operand \\a __a.\n" "/// \\returns A 128-bit integer vector containing the right-shifted values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srai_epi16(__m128i __a,\n" " int __count) {\n" " return (__m128i)__builtin_ia32_psrawi128((__v8hi)__a, __count);\n" "}\n" "\n" "/// Right-shifts each 16-bit value in the 128-bit integer vector operand\n" "/// by the specified number of bits. 
High-order bits are filled with the sign\n" "/// bit of the initial value.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPSRAW / PSRAW instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector containing the source operand.\n" "/// \\param __count\n" "/// A 128-bit integer vector in which bits [63:0] specify the number of bits\n" "/// to right-shift each value in operand \\a __a.\n" "/// \\returns A 128-bit integer vector containing the right-shifted values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sra_epi16(__m128i __a,\n" " __m128i __count) {\n" " return (__m128i)__builtin_ia32_psraw128((__v8hi)__a, (__v8hi)__count);\n" "}\n" "\n" "/// Right-shifts each 32-bit value in the 128-bit integer vector operand\n" "/// by the specified number of bits. High-order bits are filled with the sign\n" "/// bit of the initial value.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPSRAD / PSRAD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector containing the source operand.\n" "/// \\param __count\n" "/// An integer value specifying the number of bits to right-shift each value\n" "/// in operand \\a __a.\n" "/// \\returns A 128-bit integer vector containing the right-shifted values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srai_epi32(__m128i __a,\n" " int __count) {\n" " return (__m128i)__builtin_ia32_psradi128((__v4si)__a, __count);\n" "}\n" "\n" "/// Right-shifts each 32-bit value in the 128-bit integer vector operand\n" "/// by the specified number of bits. 
High-order bits are filled with the sign\n" "/// bit of the initial value.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPSRAD / PSRAD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector containing the source operand.\n" "/// \\param __count\n" "/// A 128-bit integer vector in which bits [63:0] specify the number of bits\n" "/// to right-shift each value in operand \\a __a.\n" "/// \\returns A 128-bit integer vector containing the right-shifted values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sra_epi32(__m128i __a,\n" " __m128i __count) {\n" " return (__m128i)__builtin_ia32_psrad128((__v4si)__a, (__v4si)__count);\n" "}\n" "\n" "/// Right-shifts the 128-bit integer vector operand by the specified\n" "/// number of bytes. High-order bits are cleared.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm_srli_si128(__m128i a, const int imm);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VPSRLDQ / PSRLDQ instruction.\n" "///\n" "/// \\param a\n" "/// A 128-bit integer vector containing the source operand.\n" "/// \\param imm\n" "/// An immediate value specifying the number of bytes to right-shift operand\n" "/// \\a a.\n" "/// \\returns A 128-bit integer vector containing the right-shifted value.\n" "#define _mm_srli_si128(a, imm) \\\n" " ((__m128i)__builtin_ia32_psrldqi128_byteshift((__v2di)(__m128i)(a), \\\n" " (int)(imm)))\n" "\n" "#define _mm_bsrli_si128(a, imm) \\\n" " ((__m128i)__builtin_ia32_psrldqi128_byteshift((__v2di)(__m128i)(a), \\\n" " (int)(imm)))\n" "\n" "/// Right-shifts each of 16-bit values in the 128-bit integer vector\n" "/// operand by the specified number of bits. 
High-order bits are cleared.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPSRLW / PSRLW instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector containing the source operand.\n" "/// \\param __count\n" "/// An integer value specifying the number of bits to right-shift each value\n" "/// in operand \\a __a.\n" "/// \\returns A 128-bit integer vector containing the right-shifted values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srli_epi16(__m128i __a,\n" " int __count) {\n" " return (__m128i)__builtin_ia32_psrlwi128((__v8hi)__a, __count);\n" "}\n" "\n" "/// Right-shifts each of 16-bit values in the 128-bit integer vector\n" "/// operand by the specified number of bits. High-order bits are cleared.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPSRLW / PSRLW instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector containing the source operand.\n" "/// \\param __count\n" "/// A 128-bit integer vector in which bits [63:0] specify the number of bits\n" "/// to right-shift each value in operand \\a __a.\n" "/// \\returns A 128-bit integer vector containing the right-shifted values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi16(__m128i __a,\n" " __m128i __count) {\n" " return (__m128i)__builtin_ia32_psrlw128((__v8hi)__a, (__v8hi)__count);\n" "}\n" "\n" "/// Right-shifts each of 32-bit values in the 128-bit integer vector\n" "/// operand by the specified number of bits. 
High-order bits are cleared.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPSRLD / PSRLD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector containing the source operand.\n" "/// \\param __count\n" "/// An integer value specifying the number of bits to right-shift each value\n" "/// in operand \\a __a.\n" "/// \\returns A 128-bit integer vector containing the right-shifted values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srli_epi32(__m128i __a,\n" " int __count) {\n" " return (__m128i)__builtin_ia32_psrldi128((__v4si)__a, __count);\n" "}\n" "\n" "/// Right-shifts each of 32-bit values in the 128-bit integer vector\n" "/// operand by the specified number of bits. High-order bits are cleared.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPSRLD / PSRLD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector containing the source operand.\n" "/// \\param __count\n" "/// A 128-bit integer vector in which bits [63:0] specify the number of bits\n" "/// to right-shift each value in operand \\a __a.\n" "/// \\returns A 128-bit integer vector containing the right-shifted values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi32(__m128i __a,\n" " __m128i __count) {\n" " return (__m128i)__builtin_ia32_psrld128((__v4si)__a, (__v4si)__count);\n" "}\n" "\n" "/// Right-shifts each of 64-bit values in the 128-bit integer vector\n" "/// operand by the specified number of bits. 
High-order bits are cleared.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPSRLQ / PSRLQ instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector containing the source operand.\n" "/// \\param __count\n" "/// An integer value specifying the number of bits to right-shift each value\n" "/// in operand \\a __a.\n" "/// \\returns A 128-bit integer vector containing the right-shifted values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srli_epi64(__m128i __a,\n" " int __count) {\n" " return __builtin_ia32_psrlqi128((__v2di)__a, __count);\n" "}\n" "\n" "/// Right-shifts each of 64-bit values in the 128-bit integer vector\n" "/// operand by the specified number of bits. High-order bits are cleared.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPSRLQ / PSRLQ instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector containing the source operand.\n" "/// \\param __count\n" "/// A 128-bit integer vector in which bits [63:0] specify the number of bits\n" "/// to right-shift each value in operand \\a __a.\n" "/// \\returns A 128-bit integer vector containing the right-shifted values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi64(__m128i __a,\n" " __m128i __count) {\n" " return __builtin_ia32_psrlq128((__v2di)__a, (__v2di)__count);\n" "}\n" "\n" "/// Compares each of the corresponding 8-bit values of the 128-bit\n" "/// integer vectors for equality. 
Each comparison yields 0x0 for false, 0xFF\n" "/// for true.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPCMPEQB / PCMPEQB instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector.\n" "/// \\param __b\n" "/// A 128-bit integer vector.\n" "/// \\returns A 128-bit integer vector containing the comparison results.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi8(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)((__v16qi)__a == (__v16qi)__b);\n" "}\n" "\n" "/// Compares each of the corresponding 16-bit values of the 128-bit\n" "/// integer vectors for equality. Each comparison yields 0x0 for false,\n" "/// 0xFFFF for true.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPCMPEQW / PCMPEQW instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector.\n" "/// \\param __b\n" "/// A 128-bit integer vector.\n" "/// \\returns A 128-bit integer vector containing the comparison results.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi16(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)((__v8hi)__a == (__v8hi)__b);\n" "}\n" "\n" "/// Compares each of the corresponding 32-bit values of the 128-bit\n" "/// integer vectors for equality. 
Each comparison yields 0x0 for false,\n" "/// 0xFFFFFFFF for true.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPCMPEQD / PCMPEQD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector.\n" "/// \\param __b\n" "/// A 128-bit integer vector.\n" "/// \\returns A 128-bit integer vector containing the comparison results.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi32(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)((__v4si)__a == (__v4si)__b);\n" "}\n" "\n" "/// Compares each of the corresponding signed 8-bit values of the 128-bit\n" "/// integer vectors to determine if the values in the first operand are\n" "/// greater than those in the second operand. Each comparison yields 0x0 for\n" "/// false, 0xFF for true.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPCMPGTB / PCMPGTB instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector.\n" "/// \\param __b\n" "/// A 128-bit integer vector.\n" "/// \\returns A 128-bit integer vector containing the comparison results.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi8(__m128i __a,\n" " __m128i __b) {\n" " /* This function always performs a signed comparison, but __v16qi is a char\n" " which may be signed or unsigned, so use __v16qs. 
*/\n" " return (__m128i)((__v16qs)__a > (__v16qs)__b);\n" "}\n" "\n" "/// Compares each of the corresponding signed 16-bit values of the\n" "/// 128-bit integer vectors to determine if the values in the first operand\n" "/// are greater than those in the second operand.\n" "///\n" "/// Each comparison yields 0x0 for false, 0xFFFF for true.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPCMPGTW / PCMPGTW instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector.\n" "/// \\param __b\n" "/// A 128-bit integer vector.\n" "/// \\returns A 128-bit integer vector containing the comparison results.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi16(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)((__v8hi)__a > (__v8hi)__b);\n" "}\n" "\n" "/// Compares each of the corresponding signed 32-bit values of the\n" "/// 128-bit integer vectors to determine if the values in the first operand\n" "/// are greater than those in the second operand.\n" "///\n" "/// Each comparison yields 0x0 for false, 0xFFFFFFFF for true.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPCMPGTD / PCMPGTD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector.\n" "/// \\param __b\n" "/// A 128-bit integer vector.\n" "/// \\returns A 128-bit integer vector containing the comparison results.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi32(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)((__v4si)__a > (__v4si)__b);\n" "}\n" "\n" "/// Compares each of the corresponding signed 8-bit values of the 128-bit\n" "/// integer vectors to determine if the values in the first operand are less\n" "/// than those in the second operand.\n" "///\n" "/// Each comparison yields 0x0 for false, 0xFF for true.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPCMPGTB / PCMPGTB instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector.\n" 
"/// \\param __b\n" "/// A 128-bit integer vector.\n" "/// \\returns A 128-bit integer vector containing the comparison results.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi8(__m128i __a,\n" " __m128i __b) {\n" " return _mm_cmpgt_epi8(__b, __a);\n" "}\n" "\n" "/// Compares each of the corresponding signed 16-bit values of the\n" "/// 128-bit integer vectors to determine if the values in the first operand\n" "/// are less than those in the second operand.\n" "///\n" "/// Each comparison yields 0x0 for false, 0xFFFF for true.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPCMPGTW / PCMPGTW instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector.\n" "/// \\param __b\n" "/// A 128-bit integer vector.\n" "/// \\returns A 128-bit integer vector containing the comparison results.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi16(__m128i __a,\n" " __m128i __b) {\n" " return _mm_cmpgt_epi16(__b, __a);\n" "}\n" "\n" "/// Compares each of the corresponding signed 32-bit values of the\n" "/// 128-bit integer vectors to determine if the values in the first operand\n" "/// are less than those in the second operand.\n" "///\n" "/// Each comparison yields 0x0 for false, 0xFFFFFFFF for true.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPCMPGTD / PCMPGTD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector.\n" "/// \\param __b\n" "/// A 128-bit integer vector.\n" "/// \\returns A 128-bit integer vector containing the comparison results.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi32(__m128i __a,\n" " __m128i __b) {\n" " return _mm_cmpgt_epi32(__b, __a);\n" "}\n" "\n" "#ifdef __x86_64__\n" "/// Converts a 64-bit signed integer value from the second operand into a\n" "/// double-precision value and returns it in the lower element of a [2 x\n" "/// double] vector; the upper element of the returned vector is copied from\n" "/// 
the upper element of the first operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCVTSI2SD / CVTSI2SD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double]. The upper 64 bits of this operand are\n" "/// copied to the upper 64 bits of the destination.\n" "/// \\param __b\n" "/// A 64-bit signed integer operand containing the value to be converted.\n" "/// \\returns A 128-bit vector of [2 x double] whose lower 64 bits contain the\n" "/// converted value of the second operand. The upper 64 bits are copied from\n" "/// the upper 64 bits of the first operand.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtsi64_sd(__m128d __a,\n" " long long __b) {\n" " __a[0] = __b;\n" " return __a;\n" "}\n" "\n" "/// Converts the first (lower) element of a vector of [2 x double] into a\n" "/// 64-bit signed integer value, according to the current rounding mode.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCVTSD2SI / CVTSD2SI instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double]. The lower 64 bits are used in the\n" "/// conversion.\n" "/// \\returns A 64-bit signed integer containing the converted value.\n" "static __inline__ long long __DEFAULT_FN_ATTRS _mm_cvtsd_si64(__m128d __a) {\n" " return __builtin_ia32_cvtsd2si64((__v2df)__a);\n" "}\n" "\n" "/// Converts the first (lower) element of a vector of [2 x double] into a\n" "/// 64-bit signed integer value, truncating the result when it is inexact.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCVTTSD2SI / CVTTSD2SI \n" "/// instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double]. 
The lower 64 bits are used in the\n" "/// conversion.\n" "/// \\returns A 64-bit signed integer containing the converted value.\n" "static __inline__ long long __DEFAULT_FN_ATTRS _mm_cvttsd_si64(__m128d __a) {\n" " return __builtin_ia32_cvttsd2si64((__v2df)__a);\n" "}\n" "#endif\n" "\n" "/// Converts a vector of [4 x i32] into a vector of [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCVTDQ2PS / CVTDQ2PS instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector.\n" "/// \\returns A 128-bit vector of [4 x float] containing the converted values.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtepi32_ps(__m128i __a) {\n" " return (__m128) __builtin_convertvector((__v4si)__a, __v4sf);\n" "}\n" "\n" "/// Converts a vector of [4 x float] into a vector of [4 x i32].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCVTPS2DQ / CVTPS2DQ instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float].\n" "/// \\returns A 128-bit integer vector of [4 x i32] containing the converted\n" "/// values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtps_epi32(__m128 __a) {\n" " return (__m128i)__builtin_ia32_cvtps2dq((__v4sf)__a);\n" "}\n" "\n" "/// Converts a vector of [4 x float] into a vector of [4 x i32],\n" "/// truncating the result when it is inexact.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCVTTPS2DQ / CVTTPS2DQ \n" "/// instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float].\n" "/// \\returns A 128-bit vector of [4 x i32] containing the converted values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvttps_epi32(__m128 __a) {\n" " return (__m128i)__builtin_ia32_cvttps2dq((__v4sf)__a);\n" "}\n" "\n" "/// Returns a vector of [4 x i32] where the lowest element is the input\n" "/// operand and the remaining elements are zero.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This 
intrinsic corresponds to the VMOVD / MOVD instruction.\n" "///\n" "/// \\param __a\n" "/// A 32-bit signed integer operand.\n" "/// \\returns A 128-bit vector of [4 x i32].\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtsi32_si128(int __a) {\n" " return __extension__(__m128i)(__v4si){__a, 0, 0, 0};\n" "}\n" "\n" "/// Returns a vector of [2 x i64] where the lower element is the input\n" "/// operand and the upper element is zero.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVQ / MOVQ instruction\n" "/// in 64-bit mode.\n" "///\n" "/// \\param __a\n" "/// A 64-bit signed integer operand containing the value to be converted.\n" "/// \\returns A 128-bit vector of [2 x i64] containing the converted value.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtsi64_si128(long long __a) {\n" " return __extension__(__m128i)(__v2di){__a, 0};\n" "}\n" "\n" "/// Moves the least significant 32 bits of a vector of [4 x i32] to a\n" "/// 32-bit signed integer value.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVD / MOVD instruction.\n" "///\n" "/// \\param __a\n" "/// A vector of [4 x i32]. The least significant 32 bits are moved to the\n" "/// destination.\n" "/// \\returns A 32-bit signed integer containing the moved value.\n" "static __inline__ int __DEFAULT_FN_ATTRS _mm_cvtsi128_si32(__m128i __a) {\n" " __v4si __b = (__v4si)__a;\n" " return __b[0];\n" "}\n" "\n" "/// Moves the least significant 64 bits of a vector of [2 x i64] to a\n" "/// 64-bit signed integer value.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVQ / MOVQ instruction.\n" "///\n" "/// \\param __a\n" "/// A vector of [2 x i64]. 
The least significant 64 bits are moved to the\n" "/// destination.\n" "/// \\returns A 64-bit signed integer containing the moved value.\n" "static __inline__ long long __DEFAULT_FN_ATTRS _mm_cvtsi128_si64(__m128i __a) {\n" " return __a[0];\n" "}\n" "\n" "/// Moves packed integer values from an aligned 128-bit memory location\n" "/// to elements in a 128-bit integer vector.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVDQA / MOVDQA instruction.\n" "///\n" "/// \\param __p\n" "/// An aligned pointer to a memory location containing integer values.\n" "/// \\returns A 128-bit integer vector containing the moved values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_load_si128(__m128i const *__p) {\n" " return *__p;\n" "}\n" "\n" "/// Moves packed integer values from an unaligned 128-bit memory location\n" "/// to elements in a 128-bit integer vector.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVDQU / MOVDQU instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a memory location containing integer values.\n" "/// \\returns A 128-bit integer vector containing the moved values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_loadu_si128(__m128i_u const *__p) {\n" " struct __loadu_si128 {\n" " __m128i_u __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " return ((const struct __loadu_si128 *)__p)->__v;\n" "}\n" "\n" "/// Returns a vector of [2 x i64] where the lower element is taken from\n" "/// the lower element of the operand, and the upper element is zero.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVQ / MOVQ instruction.\n" "///\n" "/// \\param __p\n" "/// A 128-bit vector of [2 x i64]. Bits [63:0] are written to bits [63:0] of\n" "/// the destination.\n" "/// \\returns A 128-bit vector of [2 x i64]. The lower order bits contain the\n" "/// moved value. 
The higher order bits are cleared.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_loadl_epi64(__m128i_u const *__p) {\n" " struct __mm_loadl_epi64_struct {\n" " long long __u;\n" " } __attribute__((__packed__, __may_alias__));\n" " return __extension__(__m128i){\n" " ((const struct __mm_loadl_epi64_struct *)__p)->__u, 0};\n" "}\n" "\n" "/// Generates a 128-bit vector of [4 x i32] with unspecified content.\n" "/// This could be used as an argument to another intrinsic function where the\n" "/// argument is required but the value is not actually used.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic has no corresponding instruction.\n" "///\n" "/// \\returns A 128-bit vector of [4 x i32] with unspecified content.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_undefined_si128(void) {\n" " return (__m128i)__builtin_ia32_undef128();\n" "}\n" "\n" "/// Initializes both 64-bit values in a 128-bit vector of [2 x i64] with\n" "/// the specified 64-bit integer values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic is a utility function and does not correspond to a specific\n" "/// instruction.\n" "///\n" "/// \\param __q1\n" "/// A 64-bit integer value used to initialize the upper 64 bits of the\n" "/// destination vector of [2 x i64].\n" "/// \\param __q0\n" "/// A 64-bit integer value used to initialize the lower 64 bits of the\n" "/// destination vector of [2 x i64].\n" "/// \\returns An initialized 128-bit vector of [2 x i64] containing the values\n" "/// provided in the operands.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set_epi64x(long long __q1,\n" " long long __q0) {\n" " return __extension__(__m128i)(__v2di){__q0, __q1};\n" "}\n" "\n" "/// Initializes both 64-bit values in a 128-bit vector of [2 x i64] with\n" "/// the specified 64-bit integer values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic is a utility function and does not correspond to a specific\n" "/// instruction.\n" "///\n" "/// \\param 
__q1\n" "/// A 64-bit integer value used to initialize the upper 64 bits of the\n" "/// destination vector of [2 x i64].\n" "/// \\param __q0\n" "/// A 64-bit integer value used to initialize the lower 64 bits of the\n" "/// destination vector of [2 x i64].\n" "/// \\returns An initialized 128-bit vector of [2 x i64] containing the values\n" "/// provided in the operands.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set_epi64(__m64 __q1,\n" " __m64 __q0) {\n" " return _mm_set_epi64x((long long)__q1, (long long)__q0);\n" "}\n" "\n" "/// Initializes the 32-bit values in a 128-bit vector of [4 x i32] with\n" "/// the specified 32-bit integer values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic is a utility function and does not correspond to a specific\n" "/// instruction.\n" "///\n" "/// \\param __i3\n" "/// A 32-bit integer value used to initialize bits [127:96] of the\n" "/// destination vector.\n" "/// \\param __i2\n" "/// A 32-bit integer value used to initialize bits [95:64] of the destination\n" "/// vector.\n" "/// \\param __i1\n" "/// A 32-bit integer value used to initialize bits [63:32] of the destination\n" "/// vector.\n" "/// \\param __i0\n" "/// A 32-bit integer value used to initialize bits [31:0] of the destination\n" "/// vector.\n" "/// \\returns An initialized 128-bit vector of [4 x i32] containing the values\n" "/// provided in the operands.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set_epi32(int __i3, int __i2,\n" " int __i1, int __i0) {\n" " return __extension__(__m128i)(__v4si){__i0, __i1, __i2, __i3};\n" "}\n" "\n" "/// Initializes the 16-bit values in a 128-bit vector of [8 x i16] with\n" "/// the specified 16-bit integer values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic is a utility function and does not correspond to a specific\n" "/// instruction.\n" "///\n" "/// \\param __w7\n" "/// A 16-bit integer value used to initialize bits [127:112] of the\n" "/// destination vector.\n" "/// 
\\param __w6\n" "/// A 16-bit integer value used to initialize bits [111:96] of the\n" "/// destination vector.\n" "/// \\param __w5\n" "/// A 16-bit integer value used to initialize bits [95:80] of the destination\n" "/// vector.\n" "/// \\param __w4\n" "/// A 16-bit integer value used to initialize bits [79:64] of the destination\n" "/// vector.\n" "/// \\param __w3\n" "/// A 16-bit integer value used to initialize bits [63:48] of the destination\n" "/// vector.\n" "/// \\param __w2\n" "/// A 16-bit integer value used to initialize bits [47:32] of the destination\n" "/// vector.\n" "/// \\param __w1\n" "/// A 16-bit integer value used to initialize bits [31:16] of the destination\n" "/// vector.\n" "/// \\param __w0\n" "/// A 16-bit integer value used to initialize bits [15:0] of the destination\n" "/// vector.\n" "/// \\returns An initialized 128-bit vector of [8 x i16] containing the values\n" "/// provided in the operands.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_set_epi16(short __w7, short __w6, short __w5, short __w4, short __w3,\n" " short __w2, short __w1, short __w0) {\n" " return __extension__(__m128i)(__v8hi){__w0, __w1, __w2, __w3,\n" " __w4, __w5, __w6, __w7};\n" "}\n" "\n" "/// Initializes the 8-bit values in a 128-bit vector of [16 x i8] with\n" "/// the specified 8-bit integer values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic is a utility function and does not correspond to a specific\n" "/// instruction.\n" "///\n" "/// \\param __b15\n" "/// Initializes bits [127:120] of the destination vector.\n" "/// \\param __b14\n" "/// Initializes bits [119:112] of the destination vector.\n" "/// \\param __b13\n" "/// Initializes bits [111:104] of the destination vector.\n" "/// \\param __b12\n" "/// Initializes bits [103:96] of the destination vector.\n" "/// \\param __b11\n" "/// Initializes bits [95:88] of the destination vector.\n" "/// \\param __b10\n" "/// Initializes bits [87:80] of the destination vector.\n" "/// 
\\param __b9\n" "/// Initializes bits [79:72] of the destination vector.\n" "/// \\param __b8\n" "/// Initializes bits [71:64] of the destination vector.\n" "/// \\param __b7\n" "/// Initializes bits [63:56] of the destination vector.\n" "/// \\param __b6\n" "/// Initializes bits [55:48] of the destination vector.\n" "/// \\param __b5\n" "/// Initializes bits [47:40] of the destination vector.\n" "/// \\param __b4\n" "/// Initializes bits [39:32] of the destination vector.\n" "/// \\param __b3\n" "/// Initializes bits [31:24] of the destination vector.\n" "/// \\param __b2\n" "/// Initializes bits [23:16] of the destination vector.\n" "/// \\param __b1\n" "/// Initializes bits [15:8] of the destination vector.\n" "/// \\param __b0\n" "/// Initializes bits [7:0] of the destination vector.\n" "/// \\returns An initialized 128-bit vector of [16 x i8] containing the values\n" "/// provided in the operands.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_set_epi8(char __b15, char __b14, char __b13, char __b12, char __b11,\n" " char __b10, char __b9, char __b8, char __b7, char __b6, char __b5,\n" " char __b4, char __b3, char __b2, char __b1, char __b0) {\n" " return __extension__(__m128i)(__v16qi){\n" " __b0, __b1, __b2, __b3, __b4, __b5, __b6, __b7,\n" " __b8, __b9, __b10, __b11, __b12, __b13, __b14, __b15};\n" "}\n" "\n" "/// Initializes both values in a 128-bit integer vector with the\n" "/// specified 64-bit integer value.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic is a utility function and does not correspond to a specific\n" "/// instruction.\n" "///\n" "/// \\param __q\n" "/// Integer value used to initialize the elements of the destination integer\n" "/// vector.\n" "/// \\returns An initialized 128-bit integer vector of [2 x i64] with both\n" "/// elements containing the value provided in the operand.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi64x(long long __q) {\n" " return _mm_set_epi64x(__q, __q);\n" "}\n" "\n" 
"/// Initializes both values in a 128-bit vector of [2 x i64] with the\n" "/// specified 64-bit value.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic is a utility function and does not correspond to a specific\n" "/// instruction.\n" "///\n" "/// \\param __q\n" "/// A 64-bit value used to initialize the elements of the destination integer\n" "/// vector.\n" "/// \\returns An initialized 128-bit vector of [2 x i64] with all elements\n" "/// containing the value provided in the operand.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi64(__m64 __q) {\n" " return _mm_set_epi64(__q, __q);\n" "}\n" "\n" "/// Initializes all values in a 128-bit vector of [4 x i32] with the\n" "/// specified 32-bit value.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic is a utility function and does not correspond to a specific\n" "/// instruction.\n" "///\n" "/// \\param __i\n" "/// A 32-bit value used to initialize the elements of the destination integer\n" "/// vector.\n" "/// \\returns An initialized 128-bit vector of [4 x i32] with all elements\n" "/// containing the value provided in the operand.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi32(int __i) {\n" " return _mm_set_epi32(__i, __i, __i, __i);\n" "}\n" "\n" "/// Initializes all values in a 128-bit vector of [8 x i16] with the\n" "/// specified 16-bit value.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic is a utility function and does not correspond to a specific\n" "/// instruction.\n" "///\n" "/// \\param __w\n" "/// A 16-bit value used to initialize the elements of the destination integer\n" "/// vector.\n" "/// \\returns An initialized 128-bit vector of [8 x i16] with all elements\n" "/// containing the value provided in the operand.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi16(short __w) {\n" " return _mm_set_epi16(__w, __w, __w, __w, __w, __w, __w, __w);\n" "}\n" "\n" "/// Initializes all values in a 128-bit vector of [16 x i8] with 
the\n" "/// specified 8-bit value.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic is a utility function and does not correspond to a specific\n" "/// instruction.\n" "///\n" "/// \\param __b\n" "/// An 8-bit value used to initialize the elements of the destination integer\n" "/// vector.\n" "/// \\returns An initialized 128-bit vector of [16 x i8] with all elements\n" "/// containing the value provided in the operand.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi8(char __b) {\n" " return _mm_set_epi8(__b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b,\n" " __b, __b, __b, __b, __b);\n" "}\n" "\n" "/// Constructs a 128-bit integer vector, initialized in reverse order\n" "/// with the specified 64-bit integral values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic does not correspond to a specific instruction.\n" "///\n" "/// \\param __q0\n" "/// A 64-bit integral value used to initialize the lower 64 bits of the\n" "/// result.\n" "/// \\param __q1\n" "/// A 64-bit integral value used to initialize the upper 64 bits of the\n" "/// result.\n" "/// \\returns An initialized 128-bit integer vector.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_setr_epi64(__m64 __q0,\n" " __m64 __q1) {\n" " return _mm_set_epi64(__q1, __q0);\n" "}\n" "\n" "/// Constructs a 128-bit integer vector, initialized in reverse order\n" "/// with the specified 32-bit integral values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic is a utility function and does not correspond to a specific\n" "/// instruction.\n" "///\n" "/// \\param __i0\n" "/// A 32-bit integral value used to initialize bits [31:0] of the result.\n" "/// \\param __i1\n" "/// A 32-bit integral value used to initialize bits [63:32] of the result.\n" "/// \\param __i2\n" "/// A 32-bit integral value used to initialize bits [95:64] of the result.\n" "/// \\param __i3\n" "/// A 32-bit integral value used to initialize bits [127:96] of the result.\n" "/// \\returns An 
initialized 128-bit integer vector.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_setr_epi32(int __i0, int __i1,\n" " int __i2,\n" " int __i3) {\n" " return _mm_set_epi32(__i3, __i2, __i1, __i0);\n" "}\n" "\n" "/// Constructs a 128-bit integer vector, initialized in reverse order\n" "/// with the specified 16-bit integral values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic is a utility function and does not correspond to a specific\n" "/// instruction.\n" "///\n" "/// \\param __w0\n" "/// A 16-bit integral value used to initialize bits [15:0] of the result.\n" "/// \\param __w1\n" "/// A 16-bit integral value used to initialize bits [31:16] of the result.\n" "/// \\param __w2\n" "/// A 16-bit integral value used to initialize bits [47:32] of the result.\n" "/// \\param __w3\n" "/// A 16-bit integral value used to initialize bits [63:48] of the result.\n" "/// \\param __w4\n" "/// A 16-bit integral value used to initialize bits [79:64] of the result.\n" "/// \\param __w5\n" "/// A 16-bit integral value used to initialize bits [95:80] of the result.\n" "/// \\param __w6\n" "/// A 16-bit integral value used to initialize bits [111:96] of the result.\n" "/// \\param __w7\n" "/// A 16-bit integral value used to initialize bits [127:112] of the result.\n" "/// \\returns An initialized 128-bit integer vector.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_setr_epi16(short __w0, short __w1, short __w2, short __w3, short __w4,\n" " short __w5, short __w6, short __w7) {\n" " return _mm_set_epi16(__w7, __w6, __w5, __w4, __w3, __w2, __w1, __w0);\n" "}\n" "\n" "/// Constructs a 128-bit integer vector, initialized in reverse order\n" "/// with the specified 8-bit integral values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic is a utility function and does not correspond to a specific\n" "/// instruction.\n" "///\n" "/// \\param __b0\n" "/// An 8-bit integral value used to initialize bits [7:0] of the result.\n" "/// \\param 
__b1\n" "/// An 8-bit integral value used to initialize bits [15:8] of the result.\n" "/// \\param __b2\n" "/// An 8-bit integral value used to initialize bits [23:16] of the result.\n" "/// \\param __b3\n" "/// An 8-bit integral value used to initialize bits [31:24] of the result.\n" "/// \\param __b4\n" "/// An 8-bit integral value used to initialize bits [39:32] of the result.\n" "/// \\param __b5\n" "/// An 8-bit integral value used to initialize bits [47:40] of the result.\n" "/// \\param __b6\n" "/// An 8-bit integral value used to initialize bits [55:48] of the result.\n" "/// \\param __b7\n" "/// An 8-bit integral value used to initialize bits [63:56] of the result.\n" "/// \\param __b8\n" "/// An 8-bit integral value used to initialize bits [71:64] of the result.\n" "/// \\param __b9\n" "/// An 8-bit integral value used to initialize bits [79:72] of the result.\n" "/// \\param __b10\n" "/// An 8-bit integral value used to initialize bits [87:80] of the result.\n" "/// \\param __b11\n" "/// An 8-bit integral value used to initialize bits [95:88] of the result.\n" "/// \\param __b12\n" "/// An 8-bit integral value used to initialize bits [103:96] of the result.\n" "/// \\param __b13\n" "/// An 8-bit integral value used to initialize bits [111:104] of the result.\n" "/// \\param __b14\n" "/// An 8-bit integral value used to initialize bits [119:112] of the result.\n" "/// \\param __b15\n" "/// An 8-bit integral value used to initialize bits [127:120] of the result.\n" "/// \\returns An initialized 128-bit integer vector.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_setr_epi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5,\n" " char __b6, char __b7, char __b8, char __b9, char __b10,\n" " char __b11, char __b12, char __b13, char __b14, char __b15) {\n" " return _mm_set_epi8(__b15, __b14, __b13, __b12, __b11, __b10, __b9, __b8,\n" " __b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);\n" "}\n" "\n" "/// Creates a 128-bit integer vector 
initialized to zero.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VXORPS / XORPS instruction.\n" "///\n" "/// \\returns An initialized 128-bit integer vector with all elements set to\n" "/// zero.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_setzero_si128(void) {\n" " return __extension__(__m128i)(__v2di){0LL, 0LL};\n" "}\n" "\n" "/// Stores a 128-bit integer vector to a memory location aligned on a\n" "/// 128-bit boundary.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVAPS / MOVAPS instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to an aligned memory location that will receive the integer\n" "/// values.\n" "/// \\param __b\n" "/// A 128-bit integer vector containing the values to be moved.\n" "static __inline__ void __DEFAULT_FN_ATTRS _mm_store_si128(__m128i *__p,\n" " __m128i __b) {\n" " *__p = __b;\n" "}\n" "\n" "/// Stores a 128-bit integer vector to an unaligned memory location.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVUPS / MOVUPS instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a memory location that will receive the integer values.\n" "/// \\param __b\n" "/// A 128-bit integer vector containing the values to be moved.\n" "static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si128(__m128i_u *__p,\n" " __m128i __b) {\n" " struct __storeu_si128 {\n" " __m128i_u __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " ((struct __storeu_si128 *)__p)->__v = __b;\n" "}\n" "\n" "/// Stores a 64-bit integer value from the low element of a 128-bit integer\n" "/// vector.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVQ / MOVQ instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a 64-bit memory location. 
The address of the memory\n" "/// location does not have to be aligned.\n" "/// \\param __b\n" "/// A 128-bit integer vector containing the value to be stored.\n" "static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si64(void *__p,\n" " __m128i __b) {\n" " struct __storeu_si64 {\n" " long long __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " ((struct __storeu_si64 *)__p)->__v = ((__v2di)__b)[0];\n" "}\n" "\n" "/// Stores a 32-bit integer value from the low element of a 128-bit integer\n" "/// vector.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVD / MOVD instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a 32-bit memory location. The address of the memory\n" "/// location does not have to be aligned.\n" "/// \\param __b\n" "/// A 128-bit integer vector containing the value to be stored.\n" "static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si32(void *__p,\n" " __m128i __b) {\n" " struct __storeu_si32 {\n" " int __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " ((struct __storeu_si32 *)__p)->__v = ((__v4si)__b)[0];\n" "}\n" "\n" "/// Stores a 16-bit integer value from the low element of a 128-bit integer\n" "/// vector.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic does not correspond to a specific instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a 16-bit memory location. The address of the memory\n" "/// location does not have to be aligned.\n" "/// \\param __b\n" "/// A 128-bit integer vector containing the value to be stored.\n" "static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si16(void *__p,\n" " __m128i __b) {\n" " struct __storeu_si16 {\n" " short __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " ((struct __storeu_si16 *)__p)->__v = ((__v8hi)__b)[0];\n" "}\n" "\n" "/// Moves bytes selected by the mask from the first operand to the\n" "/// specified unaligned memory location. 
When a mask bit is 1, the\n" "/// corresponding byte is written, otherwise it is not written.\n" "///\n" "/// To minimize caching, the data is flagged as non-temporal (unlikely to be\n" "/// used again soon). Exception and trap behavior for elements not selected\n" "/// for storage to memory are implementation dependent.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMASKMOVDQU / MASKMOVDQU \n" "/// instruction.\n" "///\n" "/// \\param __d\n" "/// A 128-bit integer vector containing the values to be moved.\n" "/// \\param __n\n" "/// A 128-bit integer vector containing the mask. The most significant bit of\n" "/// each byte represents the mask bits.\n" "/// \\param __p\n" "/// A pointer to an unaligned 128-bit memory location where the specified\n" "/// values are moved.\n" "static __inline__ void __DEFAULT_FN_ATTRS _mm_maskmoveu_si128(__m128i __d,\n" " __m128i __n,\n" " char *__p) {\n" " __builtin_ia32_maskmovdqu((__v16qi)__d, (__v16qi)__n, __p);\n" "}\n" "\n" "/// Stores the lower 64 bits of a 128-bit integer vector of [2 x i64] to\n" "/// a memory location.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVLPS / MOVLPS instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a 64-bit memory location that will receive the lower 64 bits\n" "/// of the integer vector parameter.\n" "/// \\param __a\n" "/// A 128-bit integer vector of [2 x i64]. 
The lower 64 bits contain the\n" "/// value to be stored.\n" "static __inline__ void __DEFAULT_FN_ATTRS _mm_storel_epi64(__m128i_u *__p,\n" " __m128i __a) {\n" " struct __mm_storel_epi64_struct {\n" " long long __u;\n" " } __attribute__((__packed__, __may_alias__));\n" " ((struct __mm_storel_epi64_struct *)__p)->__u = __a[0];\n" "}\n" "\n" "/// Stores a 128-bit floating point vector of [2 x double] to a 128-bit\n" "/// aligned memory location.\n" "///\n" "/// To minimize caching, the data is flagged as non-temporal (unlikely to be\n" "/// used again soon).\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVNTPS / MOVNTPS instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to the 128-bit aligned memory location used to store the value.\n" "/// \\param __a\n" "/// A vector of [2 x double] containing the 64-bit values to be stored.\n" "static __inline__ void __DEFAULT_FN_ATTRS _mm_stream_pd(double *__p,\n" " __m128d __a) {\n" " __builtin_nontemporal_store((__v2df)__a, (__v2df *)__p);\n" "}\n" "\n" "/// Stores a 128-bit integer vector to a 128-bit aligned memory location.\n" "///\n" "/// To minimize caching, the data is flagged as non-temporal (unlikely to be\n" "/// used again soon).\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVNTPS / MOVNTPS instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to the 128-bit aligned memory location used to store the value.\n" "/// \\param __a\n" "/// A 128-bit integer vector containing the values to be stored.\n" "static __inline__ void __DEFAULT_FN_ATTRS _mm_stream_si128(__m128i *__p,\n" " __m128i __a) {\n" " __builtin_nontemporal_store((__v2di)__a, (__v2di *)__p);\n" "}\n" "\n" "/// Stores a 32-bit integer value in the specified memory location.\n" "///\n" "/// To minimize caching, the data is flagged as non-temporal (unlikely to be\n" "/// used again soon).\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the MOVNTI 
instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to the 32-bit memory location used to store the value.\n" "/// \\param __a\n" "/// A 32-bit integer containing the value to be stored.\n" "static __inline__ void\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"sse2\")))\n" " _mm_stream_si32(int *__p, int __a) {\n" " __builtin_ia32_movnti(__p, __a);\n" "}\n" "\n" "#ifdef __x86_64__\n" "/// Stores a 64-bit integer value in the specified memory location.\n" "///\n" "/// To minimize caching, the data is flagged as non-temporal (unlikely to be\n" "/// used again soon).\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the MOVNTIQ instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to the 64-bit memory location used to store the value.\n" "/// \\param __a\n" "/// A 64-bit integer containing the value to be stored.\n" "static __inline__ void\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"sse2\")))\n" " _mm_stream_si64(long long *__p, long long __a) {\n" " __builtin_ia32_movnti64(__p, __a);\n" "}\n" "#endif\n" "\n" "#if defined(__cplusplus)\n" "extern \"C\" {\n" "#endif\n" "\n" "/// The cache line containing \\a __p is flushed and invalidated from all\n" "/// caches in the coherency domain.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the CLFLUSH instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to the memory location used to identify the cache line to be\n" "/// flushed.\n" "void _mm_clflush(void const *__p);\n" "\n" "/// Forces strong memory ordering (serialization) between load\n" "/// instructions preceding this instruction and load instructions following\n" "/// this instruction, ensuring the system completes all previous loads before\n" "/// executing subsequent loads.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the LFENCE instruction.\n" "///\n" "void _mm_lfence(void);\n" "\n" "/// Forces strong memory ordering 
(serialization) between load and store\n" "/// instructions preceding this instruction and load and store instructions\n" "/// following this instruction, ensuring that the system completes all\n" "/// previous memory accesses before executing subsequent memory accesses.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the MFENCE instruction.\n" "///\n" "void _mm_mfence(void);\n" "\n" "#if defined(__cplusplus)\n" "} // extern \"C\"\n" "#endif\n" "\n" "/// Converts 16-bit signed integers from both 128-bit integer vector\n" "/// operands into 8-bit signed integers, and packs the results into the\n" "/// destination. Positive values greater than 0x7F are saturated to 0x7F.\n" "/// Negative values less than 0x80 are saturated to 0x80.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPACKSSWB / PACKSSWB instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as\n" "/// a signed integer and is converted to a 8-bit signed integer with\n" "/// saturation. Values greater than 0x7F are saturated to 0x7F. Values less\n" "/// than 0x80 are saturated to 0x80. The converted [8 x i8] values are\n" "/// written to the lower 64 bits of the result.\n" "/// \\param __b\n" "/// A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as\n" "/// a signed integer and is converted to a 8-bit signed integer with\n" "/// saturation. Values greater than 0x7F are saturated to 0x7F. Values less\n" "/// than 0x80 are saturated to 0x80. 
The converted [8 x i8] values are\n" "/// written to the higher 64 bits of the result.\n" "/// \\returns A 128-bit vector of [16 x i8] containing the converted values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi16(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)__builtin_ia32_packsswb128((__v8hi)__a, (__v8hi)__b);\n" "}\n" "\n" "/// Converts 32-bit signed integers from both 128-bit integer vector\n" "/// operands into 16-bit signed integers, and packs the results into the\n" "/// destination. Positive values greater than 0x7FFF are saturated to 0x7FFF.\n" "/// Negative values less than 0x8000 are saturated to 0x8000.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPACKSSDW / PACKSSDW instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector of [4 x i32]. Each 32-bit element is treated as\n" "/// a signed integer and is converted to a 16-bit signed integer with\n" "/// saturation. Values greater than 0x7FFF are saturated to 0x7FFF. Values\n" "/// less than 0x8000 are saturated to 0x8000. The converted [4 x i16] values\n" "/// are written to the lower 64 bits of the result.\n" "/// \\param __b\n" "/// A 128-bit integer vector of [4 x i32]. Each 32-bit element is treated as\n" "/// a signed integer and is converted to a 16-bit signed integer with\n" "/// saturation. Values greater than 0x7FFF are saturated to 0x7FFF. Values\n" "/// less than 0x8000 are saturated to 0x8000. The converted [4 x i16] values\n" "/// are written to the higher 64 bits of the result.\n" "/// \\returns A 128-bit vector of [8 x i16] containing the converted values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi32(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)__builtin_ia32_packssdw128((__v4si)__a, (__v4si)__b);\n" "}\n" "\n" "/// Converts 16-bit signed integers from both 128-bit integer vector\n" "/// operands into 8-bit unsigned integers, and packs the results into the\n" "/// destination. 
Values greater than 0xFF are saturated to 0xFF. Values less\n" "/// than 0x00 are saturated to 0x00.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPACKUSWB / PACKUSWB instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as\n" "/// a signed integer and is converted to an 8-bit unsigned integer with\n" "/// saturation. Values greater than 0xFF are saturated to 0xFF. Values less\n" "/// than 0x00 are saturated to 0x00. The converted [8 x i8] values are\n" "/// written to the lower 64 bits of the result.\n" "/// \\param __b\n" "/// A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as\n" "/// a signed integer and is converted to an 8-bit unsigned integer with\n" "/// saturation. Values greater than 0xFF are saturated to 0xFF. Values less\n" "/// than 0x00 are saturated to 0x00. The converted [8 x i8] values are\n" "/// written to the higher 64 bits of the result.\n" "/// \\returns A 128-bit vector of [16 x i8] containing the converted values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packus_epi16(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)__builtin_ia32_packuswb128((__v8hi)__a, (__v8hi)__b);\n" "}\n" "\n" "/// Extracts 16 bits from a 128-bit integer vector of [8 x i16], using\n" "/// the immediate-value parameter as a selector.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm_extract_epi16(__m128i a, const int imm);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VPEXTRW / PEXTRW instruction.\n" "///\n" "/// \\param a\n" "/// A 128-bit integer vector.\n" "/// \\param imm\n" "/// An immediate value. Bits [2:0] selects values from \\a a to be assigned\n" "/// to bits[15:0] of the result. \\n\n" "/// 000: assign values from bits [15:0] of \\a a. \\n\n" "/// 001: assign values from bits [31:16] of \\a a. \\n\n" "/// 010: assign values from bits [47:32] of \\a a. 
\\n\n" "/// 011: assign values from bits [63:48] of \\a a. \\n\n" "/// 100: assign values from bits [79:64] of \\a a. \\n\n" "/// 101: assign values from bits [95:80] of \\a a. \\n\n" "/// 110: assign values from bits [111:96] of \\a a. \\n\n" "/// 111: assign values from bits [127:112] of \\a a.\n" "/// \\returns An integer, whose lower 16 bits are selected from the 128-bit\n" "/// integer vector parameter and the remaining bits are assigned zeros.\n" "#define _mm_extract_epi16(a, imm) \\\n" " ((int)(unsigned short)__builtin_ia32_vec_ext_v8hi((__v8hi)(__m128i)(a), \\\n" " (int)(imm)))\n" "\n" "/// Constructs a 128-bit integer vector by first making a copy of the\n" "/// 128-bit integer vector parameter, and then inserting the lower 16 bits\n" "/// of an integer parameter into an offset specified by the immediate-value\n" "/// parameter.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm_insert_epi16(__m128i a, int b, const int imm);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VPINSRW / PINSRW instruction.\n" "///\n" "/// \\param a\n" "/// A 128-bit integer vector of [8 x i16]. This vector is copied to the\n" "/// result and then one of the eight elements in the result is replaced by\n" "/// the lower 16 bits of \\a b.\n" "/// \\param b\n" "/// An integer. 
The lower 16 bits of this parameter are written to the\n" "/// result beginning at an offset specified by \\a imm.\n" "/// \\param imm\n" "/// An immediate value specifying the bit offset in the result at which the\n" "/// lower 16 bits of \\a b are written.\n" "/// \\returns A 128-bit integer vector containing the constructed values.\n" "#define _mm_insert_epi16(a, b, imm) \\\n" " ((__m128i)__builtin_ia32_vec_set_v8hi((__v8hi)(__m128i)(a), (int)(b), \\\n" " (int)(imm)))\n" "\n" "/// Copies the values of the most significant bits from each 8-bit\n" "/// element in a 128-bit integer vector of [16 x i8] to create a 16-bit mask\n" "/// value, zero-extends the value, and writes it to the destination.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPMOVMSKB / PMOVMSKB instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector containing the values with bits to be extracted.\n" "/// \\returns The most significant bits from each 8-bit element in \\a __a,\n" "/// written to bits [15:0]. The other bits are assigned zeros.\n" "static __inline__ int __DEFAULT_FN_ATTRS _mm_movemask_epi8(__m128i __a) {\n" " return __builtin_ia32_pmovmskb128((__v16qi)__a);\n" "}\n" "\n" "/// Constructs a 128-bit integer vector by shuffling four 32-bit\n" "/// elements of a 128-bit integer vector parameter, using the immediate-value\n" "/// parameter as a specifier.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm_shuffle_epi32(__m128i a, const int imm);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VPSHUFD / PSHUFD instruction.\n" "///\n" "/// \\param a\n" "/// A 128-bit integer vector containing the values to be copied.\n" "/// \\param imm\n" "/// An immediate value containing an 8-bit value specifying which elements to\n" "/// copy from a. 
The destinations within the 128-bit destination are assigned\n" "/// values as follows: \\n\n" "/// Bits [1:0] are used to assign values to bits [31:0] of the result. \\n\n" "/// Bits [3:2] are used to assign values to bits [63:32] of the result. \\n\n" "/// Bits [5:4] are used to assign values to bits [95:64] of the result. \\n\n" "/// Bits [7:6] are used to assign values to bits [127:96] of the result. \\n\n" "/// Bit value assignments: \\n\n" "/// 00: assign values from bits [31:0] of \\a a. \\n\n" "/// 01: assign values from bits [63:32] of \\a a. \\n\n" "/// 10: assign values from bits [95:64] of \\a a. \\n\n" "/// 11: assign values from bits [127:96] of \\a a. \\n\n" "/// Note: To generate a mask, you can use the \\c _MM_SHUFFLE macro.\n" "/// _MM_SHUFFLE(b6, b4, b2, b0) can create an 8-bit mask of the form\n" "/// [b6, b4, b2, b0].\n" "/// \\returns A 128-bit integer vector containing the shuffled values.\n" "#define _mm_shuffle_epi32(a, imm) \\\n" " ((__m128i)__builtin_ia32_pshufd((__v4si)(__m128i)(a), (int)(imm)))\n" "\n" "/// Constructs a 128-bit integer vector by shuffling four lower 16-bit\n" "/// elements of a 128-bit integer vector of [8 x i16], using the immediate\n" "/// value parameter as a specifier.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm_shufflelo_epi16(__m128i a, const int imm);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VPSHUFLW / PSHUFLW instruction.\n" "///\n" "/// \\param a\n" "/// A 128-bit integer vector of [8 x i16]. Bits [127:64] are copied to bits\n" "/// [127:64] of the result.\n" "/// \\param imm\n" "/// An 8-bit immediate value specifying which elements to copy from \\a a. \\n\n" "/// Bits[1:0] are used to assign values to bits [15:0] of the result. \\n\n" "/// Bits[3:2] are used to assign values to bits [31:16] of the result. \\n\n" "/// Bits[5:4] are used to assign values to bits [47:32] of the result. 
\\n\n" "/// Bits[7:6] are used to assign values to bits [63:48] of the result. \\n\n" "/// Bit value assignments: \\n\n" "/// 00: assign values from bits [15:0] of \\a a. \\n\n" "/// 01: assign values from bits [31:16] of \\a a. \\n\n" "/// 10: assign values from bits [47:32] of \\a a. \\n\n" "/// 11: assign values from bits [63:48] of \\a a. \\n\n" "/// Note: To generate a mask, you can use the \\c _MM_SHUFFLE macro.\n" "/// _MM_SHUFFLE(b6, b4, b2, b0) can create an 8-bit mask of the form\n" "/// [b6, b4, b2, b0].\n" "/// \\returns A 128-bit integer vector containing the shuffled values.\n" "#define _mm_shufflelo_epi16(a, imm) \\\n" " ((__m128i)__builtin_ia32_pshuflw((__v8hi)(__m128i)(a), (int)(imm)))\n" "\n" "/// Constructs a 128-bit integer vector by shuffling four upper 16-bit\n" "/// elements of a 128-bit integer vector of [8 x i16], using the immediate\n" "/// value parameter as a specifier.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm_shufflehi_epi16(__m128i a, const int imm);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VPSHUFHW / PSHUFHW instruction.\n" "///\n" "/// \\param a\n" "/// A 128-bit integer vector of [8 x i16]. Bits [63:0] are copied to bits\n" "/// [63:0] of the result.\n" "/// \\param imm\n" "/// An 8-bit immediate value specifying which elements to copy from \\a a. \\n\n" "/// Bits[1:0] are used to assign values to bits [79:64] of the result. \\n\n" "/// Bits[3:2] are used to assign values to bits [95:80] of the result. \\n\n" "/// Bits[5:4] are used to assign values to bits [111:96] of the result. \\n\n" "/// Bits[7:6] are used to assign values to bits [127:112] of the result. \\n\n" "/// Bit value assignments: \\n\n" "/// 00: assign values from bits [79:64] of \\a a. \\n\n" "/// 01: assign values from bits [95:80] of \\a a. \\n\n" "/// 10: assign values from bits [111:96] of \\a a. \\n\n" "/// 11: assign values from bits [127:112] of \\a a. 
\\n\n" "/// Note: To generate a mask, you can use the \\c _MM_SHUFFLE macro.\n" "/// _MM_SHUFFLE(b6, b4, b2, b0) can create an 8-bit mask of the form\n" "/// [b6, b4, b2, b0].\n" "/// \\returns A 128-bit integer vector containing the shuffled values.\n" "#define _mm_shufflehi_epi16(a, imm) \\\n" " ((__m128i)__builtin_ia32_pshufhw((__v8hi)(__m128i)(a), (int)(imm)))\n" "\n" "/// Unpacks the high-order (index 8-15) values from two 128-bit vectors\n" "/// of [16 x i8] and interleaves them into a 128-bit vector of [16 x i8].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPUNPCKHBW / PUNPCKHBW \n" "/// instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [16 x i8].\n" "/// Bits [71:64] are written to bits [7:0] of the result. \\n\n" "/// Bits [79:72] are written to bits [23:16] of the result. \\n\n" "/// Bits [87:80] are written to bits [39:32] of the result. \\n\n" "/// Bits [95:88] are written to bits [55:48] of the result. \\n\n" "/// Bits [103:96] are written to bits [71:64] of the result. \\n\n" "/// Bits [111:104] are written to bits [87:80] of the result. \\n\n" "/// Bits [119:112] are written to bits [103:96] of the result. \\n\n" "/// Bits [127:120] are written to bits [119:112] of the result.\n" "/// \\param __b\n" "/// A 128-bit vector of [16 x i8]. \\n\n" "/// Bits [71:64] are written to bits [15:8] of the result. \\n\n" "/// Bits [79:72] are written to bits [31:24] of the result. \\n\n" "/// Bits [87:80] are written to bits [47:40] of the result. \\n\n" "/// Bits [95:88] are written to bits [63:56] of the result. \\n\n" "/// Bits [103:96] are written to bits [79:72] of the result. \\n\n" "/// Bits [111:104] are written to bits [95:88] of the result. \\n\n" "/// Bits [119:112] are written to bits [111:104] of the result. 
\\n\n" "/// Bits [127:120] are written to bits [127:120] of the result.\n" "/// \\returns A 128-bit vector of [16 x i8] containing the interleaved values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi8(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)__builtin_shufflevector(\n" " (__v16qi)__a, (__v16qi)__b, 8, 16 + 8, 9, 16 + 9, 10, 16 + 10, 11,\n" " 16 + 11, 12, 16 + 12, 13, 16 + 13, 14, 16 + 14, 15, 16 + 15);\n" "}\n" "\n" "/// Unpacks the high-order (index 4-7) values from two 128-bit vectors of\n" "/// [8 x i16] and interleaves them into a 128-bit vector of [8 x i16].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPUNPCKHWD / PUNPCKHWD \n" "/// instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [8 x i16].\n" "/// Bits [79:64] are written to bits [15:0] of the result. \\n\n" "/// Bits [95:80] are written to bits [47:32] of the result. \\n\n" "/// Bits [111:96] are written to bits [79:64] of the result. \\n\n" "/// Bits [127:112] are written to bits [111:96] of the result.\n" "/// \\param __b\n" "/// A 128-bit vector of [8 x i16].\n" "/// Bits [79:64] are written to bits [31:16] of the result. \\n\n" "/// Bits [95:80] are written to bits [63:48] of the result. \\n\n" "/// Bits [111:96] are written to bits [95:80] of the result. 
\\n\n" "/// Bits [127:112] are written to bits [127:112] of the result.\n" "/// \\returns A 128-bit vector of [8 x i16] containing the interleaved values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi16(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 4, 8 + 4, 5,\n" " 8 + 5, 6, 8 + 6, 7, 8 + 7);\n" "}\n" "\n" "/// Unpacks the high-order (index 2,3) values from two 128-bit vectors of\n" "/// [4 x i32] and interleaves them into a 128-bit vector of [4 x i32].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPUNPCKHDQ / PUNPCKHDQ \n" "/// instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x i32]. \\n\n" "/// Bits [95:64] are written to bits [31:0] of the destination. \\n\n" "/// Bits [127:96] are written to bits [95:64] of the destination.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x i32]. \\n\n" "/// Bits [95:64] are written to bits [64:32] of the destination. \\n\n" "/// Bits [127:96] are written to bits [127:96] of the destination.\n" "/// \\returns A 128-bit vector of [4 x i32] containing the interleaved values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi32(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 2, 4 + 2, 3,\n" " 4 + 3);\n" "}\n" "\n" "/// Unpacks the high-order 64-bit elements from two 128-bit vectors of\n" "/// [2 x i64] and interleaves them into a 128-bit vector of [2 x i64].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPUNPCKHQDQ / PUNPCKHQDQ \n" "/// instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x i64]. \\n\n" "/// Bits [127:64] are written to bits [63:0] of the destination.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x i64]. 
\\n\n" "/// Bits [127:64] are written to bits [127:64] of the destination.\n" "/// \\returns A 128-bit vector of [2 x i64] containing the interleaved values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi64(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 1, 2 + 1);\n" "}\n" "\n" "/// Unpacks the low-order (index 0-7) values from two 128-bit vectors of\n" "/// [16 x i8] and interleaves them into a 128-bit vector of [16 x i8].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPUNPCKLBW / PUNPCKLBW \n" "/// instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [16 x i8]. \\n\n" "/// Bits [7:0] are written to bits [7:0] of the result. \\n\n" "/// Bits [15:8] are written to bits [23:16] of the result. \\n\n" "/// Bits [23:16] are written to bits [39:32] of the result. \\n\n" "/// Bits [31:24] are written to bits [55:48] of the result. \\n\n" "/// Bits [39:32] are written to bits [71:64] of the result. \\n\n" "/// Bits [47:40] are written to bits [87:80] of the result. \\n\n" "/// Bits [55:48] are written to bits [103:96] of the result. \\n\n" "/// Bits [63:56] are written to bits [119:112] of the result.\n" "/// \\param __b\n" "/// A 128-bit vector of [16 x i8].\n" "/// Bits [7:0] are written to bits [15:8] of the result. \\n\n" "/// Bits [15:8] are written to bits [31:24] of the result. \\n\n" "/// Bits [23:16] are written to bits [47:40] of the result. \\n\n" "/// Bits [31:24] are written to bits [63:56] of the result. \\n\n" "/// Bits [39:32] are written to bits [79:72] of the result. \\n\n" "/// Bits [47:40] are written to bits [95:88] of the result. \\n\n" "/// Bits [55:48] are written to bits [111:104] of the result. 
\\n\n" "/// Bits [63:56] are written to bits [127:120] of the result.\n" "/// \\returns A 128-bit vector of [16 x i8] containing the interleaved values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi8(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)__builtin_shufflevector(\n" " (__v16qi)__a, (__v16qi)__b, 0, 16 + 0, 1, 16 + 1, 2, 16 + 2, 3, 16 + 3, 4,\n" " 16 + 4, 5, 16 + 5, 6, 16 + 6, 7, 16 + 7);\n" "}\n" "\n" "/// Unpacks the low-order (index 0-3) values from each of the two 128-bit\n" "/// vectors of [8 x i16] and interleaves them into a 128-bit vector of\n" "/// [8 x i16].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPUNPCKLWD / PUNPCKLWD \n" "/// instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [8 x i16].\n" "/// Bits [15:0] are written to bits [15:0] of the result. \\n\n" "/// Bits [31:16] are written to bits [47:32] of the result. \\n\n" "/// Bits [47:32] are written to bits [79:64] of the result. \\n\n" "/// Bits [63:48] are written to bits [111:96] of the result.\n" "/// \\param __b\n" "/// A 128-bit vector of [8 x i16].\n" "/// Bits [15:0] are written to bits [31:16] of the result. \\n\n" "/// Bits [31:16] are written to bits [63:48] of the result. \\n\n" "/// Bits [47:32] are written to bits [95:80] of the result. 
\\n\n" "/// Bits [63:48] are written to bits [127:112] of the result.\n" "/// \\returns A 128-bit vector of [8 x i16] containing the interleaved values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi16(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 0, 8 + 0, 1,\n" " 8 + 1, 2, 8 + 2, 3, 8 + 3);\n" "}\n" "\n" "/// Unpacks the low-order (index 0,1) values from two 128-bit vectors of\n" "/// [4 x i32] and interleaves them into a 128-bit vector of [4 x i32].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPUNPCKLDQ / PUNPCKLDQ \n" "/// instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x i32]. \\n\n" "/// Bits [31:0] are written to bits [31:0] of the destination. \\n\n" "/// Bits [63:32] are written to bits [95:64] of the destination.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x i32]. \\n\n" "/// Bits [31:0] are written to bits [64:32] of the destination. \\n\n" "/// Bits [63:32] are written to bits [127:96] of the destination.\n" "/// \\returns A 128-bit vector of [4 x i32] containing the interleaved values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi32(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 0, 4 + 0, 1,\n" " 4 + 1);\n" "}\n" "\n" "/// Unpacks the low-order 64-bit elements from two 128-bit vectors of\n" "/// [2 x i64] and interleaves them into a 128-bit vector of [2 x i64].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPUNPCKLQDQ / PUNPCKLQDQ \n" "/// instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x i64]. \\n\n" "/// Bits [63:0] are written to bits [63:0] of the destination. \\n\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x i64]. \\n\n" "/// Bits [63:0] are written to bits [127:64] of the destination. 
\\n\n" "/// \\returns A 128-bit vector of [2 x i64] containing the interleaved values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi64(__m128i __a,\n" " __m128i __b) {\n" " return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 0, 2 + 0);\n" "}\n" "\n" "/// Returns the lower 64 bits of a 128-bit integer vector as a 64-bit\n" "/// integer.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the MOVDQ2Q instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector operand. The lower 64 bits are moved to the\n" "/// destination.\n" "/// \\returns A 64-bit integer containing the lower 64 bits of the parameter.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_movepi64_pi64(__m128i __a) {\n" " return (__m64)__a[0];\n" "}\n" "\n" "/// Moves the 64-bit operand to a 128-bit integer vector, zeroing the\n" "/// upper bits.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the MOVD+VMOVQ instruction.\n" "///\n" "/// \\param __a\n" "/// A 64-bit value.\n" "/// \\returns A 128-bit integer vector. The lower 64 bits contain the value from\n" "/// the operand. The upper 64 bits are assigned zeros.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_movpi64_epi64(__m64 __a) {\n" " return __extension__(__m128i)(__v2di){(long long)__a, 0};\n" "}\n" "\n" "/// Moves the lower 64 bits of a 128-bit integer vector to a 128-bit\n" "/// integer vector, zeroing the upper bits.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVQ / MOVQ instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector operand. The lower 64 bits are moved to the\n" "/// destination.\n" "/// \\returns A 128-bit integer vector. The lower 64 bits contain the value from\n" "/// the operand. 
The upper 64 bits are assigned zeros.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_move_epi64(__m128i __a) {\n" " return __builtin_shufflevector((__v2di)__a, _mm_setzero_si128(), 0, 2);\n" "}\n" "\n" "/// Unpacks the high-order 64-bit elements from two 128-bit vectors of\n" "/// [2 x double] and interleaves them into a 128-bit vector of [2 x\n" "/// double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VUNPCKHPD / UNPCKHPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double]. \\n\n" "/// Bits [127:64] are written to bits [63:0] of the destination.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double]. \\n\n" "/// Bits [127:64] are written to bits [127:64] of the destination.\n" "/// \\returns A 128-bit vector of [2 x double] containing the interleaved values.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_unpackhi_pd(__m128d __a,\n" " __m128d __b) {\n" " return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 1, 2 + 1);\n" "}\n" "\n" "/// Unpacks the low-order 64-bit elements from two 128-bit vectors\n" "/// of [2 x double] and interleaves them into a 128-bit vector of [2 x\n" "/// double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VUNPCKLPD / UNPCKLPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double]. \\n\n" "/// Bits [63:0] are written to bits [63:0] of the destination.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double]. 
\\n\n" "/// Bits [63:0] are written to bits [127:64] of the destination.\n" "/// \\returns A 128-bit vector of [2 x double] containing the interleaved values.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_unpacklo_pd(__m128d __a,\n" " __m128d __b) {\n" " return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 0, 2 + 0);\n" "}\n" "\n" "/// Extracts the sign bits of the double-precision values in the 128-bit\n" "/// vector of [2 x double], zero-extends the value, and writes it to the\n" "/// low-order bits of the destination.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVMSKPD / MOVMSKPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double] containing the values with sign bits to\n" "/// be extracted.\n" "/// \\returns The sign bits from each of the double-precision elements in \\a __a,\n" "/// written to bits [1:0]. The remaining bits are assigned values of zero.\n" "static __inline__ int __DEFAULT_FN_ATTRS _mm_movemask_pd(__m128d __a) {\n" " return __builtin_ia32_movmskpd((__v2df)__a);\n" "}\n" "\n" "/// Constructs a 128-bit floating-point vector of [2 x double] from two\n" "/// 128-bit vector parameters of [2 x double], using the immediate-value\n" "/// parameter as a specifier.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128d _mm_shuffle_pd(__m128d a, __m128d b, const int i);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VSHUFPD / SHUFPD instruction.\n" "///\n" "/// \\param a\n" "/// A 128-bit vector of [2 x double].\n" "/// \\param b\n" "/// A 128-bit vector of [2 x double].\n" "/// \\param i\n" "/// An 8-bit immediate value. The least significant two bits specify which\n" "/// elements to copy from \\a a and \\a b: \\n\n" "/// Bit[0] = 0: lower element of \\a a copied to lower element of result. \\n\n" "/// Bit[0] = 1: upper element of \\a a copied to lower element of result. 
\\n\n" "/// Bit[1] = 0: lower element of \\a b copied to upper element of result. \\n\n" "/// Bit[1] = 1: upper element of \\a b copied to upper element of result. \\n\n" "/// Note: To generate a mask, you can use the \\c _MM_SHUFFLE2 macro.\n" "/// _MM_SHUFFLE2(b1, b0) can create a 2-bit mask of the form\n" "/// [b1, b0].\n" "/// \\returns A 128-bit vector of [2 x double] containing the shuffled values.\n" "#define _mm_shuffle_pd(a, b, i) \\\n" " ((__m128d)__builtin_ia32_shufpd((__v2df)(__m128d)(a), (__v2df)(__m128d)(b), \\\n" " (int)(i)))\n" "\n" "/// Casts a 128-bit floating-point vector of [2 x double] into a 128-bit\n" "/// floating-point vector of [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic has no corresponding instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit floating-point vector of [2 x double].\n" "/// \\returns A 128-bit floating-point vector of [4 x float] containing the same\n" "/// bitwise pattern as the parameter.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_castpd_ps(__m128d __a) {\n" " return (__m128)__a;\n" "}\n" "\n" "/// Casts a 128-bit floating-point vector of [2 x double] into a 128-bit\n" "/// integer vector.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic has no corresponding instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit floating-point vector of [2 x double].\n" "/// \\returns A 128-bit integer vector containing the same bitwise pattern as the\n" "/// parameter.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_castpd_si128(__m128d __a) {\n" " return (__m128i)__a;\n" "}\n" "\n" "/// Casts a 128-bit floating-point vector of [4 x float] into a 128-bit\n" "/// floating-point vector of [2 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic has no corresponding instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit floating-point vector of [4 x float].\n" "/// \\returns A 128-bit floating-point vector of [2 x double] containing the same\n" "/// 
bitwise pattern as the parameter.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_castps_pd(__m128 __a) {\n" " return (__m128d)__a;\n" "}\n" "\n" "/// Casts a 128-bit floating-point vector of [4 x float] into a 128-bit\n" "/// integer vector.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic has no corresponding instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit floating-point vector of [4 x float].\n" "/// \\returns A 128-bit integer vector containing the same bitwise pattern as the\n" "/// parameter.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_castps_si128(__m128 __a) {\n" " return (__m128i)__a;\n" "}\n" "\n" "/// Casts a 128-bit integer vector into a 128-bit floating-point vector\n" "/// of [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic has no corresponding instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector.\n" "/// \\returns A 128-bit floating-point vector of [4 x float] containing the same\n" "/// bitwise pattern as the parameter.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_castsi128_ps(__m128i __a) {\n" " return (__m128)__a;\n" "}\n" "\n" "/// Casts a 128-bit integer vector into a 128-bit floating-point vector\n" "/// of [2 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic has no corresponding instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector.\n" "/// \\returns A 128-bit floating-point vector of [2 x double] containing the same\n" "/// bitwise pattern as the parameter.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_castsi128_pd(__m128i __a) {\n" " return (__m128d)__a;\n" "}\n" "\n" "#if defined(__cplusplus)\n" "extern \"C\" {\n" "#endif\n" "\n" "/// Indicates that a spin loop is being executed for the purposes of\n" "/// optimizing power consumption during the loop.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PAUSE instruction.\n" "///\n" "void _mm_pause(void);\n" "\n" "#if 
defined(__cplusplus)\n" "} // extern \"C\"\n" "#endif\n" "#undef __DEFAULT_FN_ATTRS\n" "#undef __DEFAULT_FN_ATTRS_MMX\n" "\n" "#define _MM_SHUFFLE2(x, y) (((x) << 1) | (y))\n" "\n" "#define _MM_DENORMALS_ZERO_ON (0x0040U)\n" "#define _MM_DENORMALS_ZERO_OFF (0x0000U)\n" "\n" "#define _MM_DENORMALS_ZERO_MASK (0x0040U)\n" "\n" "#define _MM_GET_DENORMALS_ZERO_MODE() (_mm_getcsr() & _MM_DENORMALS_ZERO_MASK)\n" "#define _MM_SET_DENORMALS_ZERO_MODE(x) \\\n" " (_mm_setcsr((_mm_getcsr() & ~_MM_DENORMALS_ZERO_MASK) | (x)))\n" "\n" "#endif /* __EMMINTRIN_H */\n" "" } , { "/builtins/enqcmdintrin.h" , "/*===------------------ enqcmdintrin.h - enqcmd intrinsics -----------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __ENQCMDINTRIN_H\n" "#define __ENQCMDINTRIN_H\n" "\n" "/* Define the default attributes for the functions in this file */\n" "#define _DEFAULT_FN_ATTRS \\\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"enqcmd\")))\n" "\n" "/// Reads 64-byte command pointed by \\a __src, formats 64-byte enqueue store\n" "/// data, and performs 64-byte enqueue store to memory pointed by \\a __dst.\n" "/// This intrinsics may only be used in User mode.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsics corresponds to the ENQCMD instruction.\n" "///\n" "/// \\param __dst\n" "/// Pointer to the destination of the enqueue store.\n" "/// \\param __src\n" "/// Pointer to 64-byte command data.\n" "/// \\returns If the command data is successfully written to \\a __dst then 0 is\n" "/// returned. 
Otherwise 1 is returned.\n" "static __inline__ int _DEFAULT_FN_ATTRS\n" "_enqcmd (void *__dst, const void *__src)\n" "{\n" " return __builtin_ia32_enqcmd(__dst, __src);\n" "}\n" "\n" "/// Reads 64-byte command pointed by \\a __src, formats 64-byte enqueue store\n" "/// data, and performs 64-byte enqueue store to memory pointed by \\a __dst\n" "/// This intrinsic may only be used in Privileged mode.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsics corresponds to the ENQCMDS instruction.\n" "///\n" "/// \\param __dst\n" "/// Pointer to the destination of the enqueue store.\n" "/// \\param __src\n" "/// Pointer to 64-byte command data.\n" "/// \\returns If the command data is successfully written to \\a __dst then 0 is\n" "/// returned. Otherwise 1 is returned.\n" "static __inline__ int _DEFAULT_FN_ATTRS\n" "_enqcmds (void *__dst, const void *__src)\n" "{\n" " return __builtin_ia32_enqcmds(__dst, __src);\n" "}\n" "\n" "#undef _DEFAULT_FN_ATTRS\n" "\n" "#endif /* __ENQCMDINTRIN_H */\n" "" } , { "/builtins/f16cintrin.h" , "/*===---- f16cintrin.h - F16C intrinsics -----------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#if !defined __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __F16CINTRIN_H\n" "#define __F16CINTRIN_H\n" "\n" "/* Define the default attributes for the functions in this file. 
*/\n" "#define __DEFAULT_FN_ATTRS128 \\\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"f16c\"), __min_vector_width__(128)))\n" "#define __DEFAULT_FN_ATTRS256 \\\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"f16c\"), __min_vector_width__(256)))\n" "\n" "/* NOTE: Intel documents the 128-bit versions of these as being in emmintrin.h,\n" " * but that's because icc can emulate these without f16c using a library call.\n" " * Since we don't do that let's leave these in f16cintrin.h.\n" " */\n" "\n" "/// Converts a 16-bit half-precision float value into a 32-bit float\n" "/// value.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCVTPH2PS instruction.\n" "///\n" "/// \\param __a\n" "/// A 16-bit half-precision float value.\n" "/// \\returns The converted 32-bit float value.\n" "static __inline float __DEFAULT_FN_ATTRS128\n" "_cvtsh_ss(unsigned short __a)\n" "{\n" " __v8hi __v = {(short)__a, 0, 0, 0, 0, 0, 0, 0};\n" " __v4sf __r = __builtin_ia32_vcvtph2ps(__v);\n" " return __r[0];\n" "}\n" "\n" "/// Converts a 32-bit single-precision float value to a 16-bit\n" "/// half-precision float value.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// unsigned short _cvtss_sh(float a, const int imm);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VCVTPS2PH instruction.\n" "///\n" "/// \\param a\n" "/// A 32-bit single-precision float value to be converted to a 16-bit\n" "/// half-precision float value.\n" "/// \\param imm\n" "/// An immediate value controlling rounding using bits [2:0]: \\n\n" "/// 000: Nearest \\n\n" "/// 001: Down \\n\n" "/// 010: Up \\n\n" "/// 011: Truncate \\n\n" "/// 1XX: Use MXCSR.RC for rounding\n" "/// \\returns The converted 16-bit half-precision float value.\n" "#define _cvtss_sh(a, imm) __extension__ ({ \\\n" " (unsigned short)(((__v8hi)__builtin_ia32_vcvtps2ph((__v4sf){a, 0, 0, 0}, \\\n" " (imm)))[0]); })\n" "\n" "/// Converts a 128-bit vector containing 
32-bit float values into a\n" "/// 128-bit vector containing 16-bit half-precision float values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm_cvtps_ph(__m128 a, const int imm);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VCVTPS2PH instruction.\n" "///\n" "/// \\param a\n" "/// A 128-bit vector containing 32-bit float values.\n" "/// \\param imm\n" "/// An immediate value controlling rounding using bits [2:0]: \\n\n" "/// 000: Nearest \\n\n" "/// 001: Down \\n\n" "/// 010: Up \\n\n" "/// 011: Truncate \\n\n" "/// 1XX: Use MXCSR.RC for rounding\n" "/// \\returns A 128-bit vector containing converted 16-bit half-precision float\n" "/// values. The lower 64 bits are used to store the converted 16-bit\n" "/// half-precision floating-point values.\n" "#define _mm_cvtps_ph(a, imm) \\\n" " ((__m128i)__builtin_ia32_vcvtps2ph((__v4sf)(__m128)(a), (imm)))\n" "\n" "/// Converts a 128-bit vector containing 16-bit half-precision float\n" "/// values into a 128-bit vector containing 32-bit float values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCVTPH2PS instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector containing 16-bit half-precision float values. 
The lower\n" "/// 64 bits are used in the conversion.\n" "/// \\returns A 128-bit vector of [4 x float] containing converted float values.\n" "static __inline __m128 __DEFAULT_FN_ATTRS128\n" "_mm_cvtph_ps(__m128i __a)\n" "{\n" " return (__m128)__builtin_ia32_vcvtph2ps((__v8hi)__a);\n" "}\n" "\n" "/// Converts a 256-bit vector of [8 x float] into a 128-bit vector\n" "/// containing 16-bit half-precision float values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm256_cvtps_ph(__m256 a, const int imm);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VCVTPS2PH instruction.\n" "///\n" "/// \\param a\n" "/// A 256-bit vector containing 32-bit single-precision float values to be\n" "/// converted to 16-bit half-precision float values.\n" "/// \\param imm\n" "/// An immediate value controlling rounding using bits [2:0]: \\n\n" "/// 000: Nearest \\n\n" "/// 001: Down \\n\n" "/// 010: Up \\n\n" "/// 011: Truncate \\n\n" "/// 1XX: Use MXCSR.RC for rounding\n" "/// \\returns A 128-bit vector containing the converted 16-bit half-precision\n" "/// float values.\n" "#define _mm256_cvtps_ph(a, imm) \\\n" " ((__m128i)__builtin_ia32_vcvtps2ph256((__v8sf)(__m256)(a), (imm)))\n" "\n" "/// Converts a 128-bit vector containing 16-bit half-precision float\n" "/// values into a 256-bit vector of [8 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCVTPH2PS instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector containing 16-bit half-precision float values to be\n" "/// converted to 32-bit single-precision float values.\n" "/// \\returns A vector of [8 x float] containing the converted 32-bit\n" "/// single-precision float values.\n" "static __inline __m256 __DEFAULT_FN_ATTRS256\n" "_mm256_cvtph_ps(__m128i __a)\n" "{\n" " return (__m256)__builtin_ia32_vcvtph2ps256((__v8hi)__a);\n" "}\n" "\n" "#undef __DEFAULT_FN_ATTRS128\n" "#undef __DEFAULT_FN_ATTRS256\n" "\n" "#endif /* __F16CINTRIN_H */\n" 
"" } , { "/builtins/float.h" , "/*===---- float.h - Characteristics of floating point types ----------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __CLANG_FLOAT_H\n" "#define __CLANG_FLOAT_H\n" "\n" "/* If we're on MinGW, fall back to the system's float.h, which might have\n" " * additional definitions provided for Windows.\n" " * For more details see http://msdn.microsoft.com/en-us/library/y0ybw9fy.aspx\n" " *\n" " * Also fall back on Darwin and AIX to allow additional definitions and\n" " * implementation-defined values.\n" " */\n" "#if (defined(__APPLE__) || defined(__MINGW32__) || defined(_MSC_VER) || \\\n" " defined(_AIX)) && \\\n" " __STDC_HOSTED__ && __has_include_next()\n" "\n" "/* Prior to Apple's 10.7 SDK, float.h SDK header used to apply an extra level\n" " * of #include_next to keep Metrowerks compilers happy. Avoid this\n" " * extra indirection.\n" " */\n" "#ifdef __APPLE__\n" "#define _FLOAT_H_\n" "#endif\n" "\n" "# include_next \n" "\n" "/* Undefine anything that we'll be redefining below. 
*/\n" "# undef FLT_EVAL_METHOD\n" "# undef FLT_ROUNDS\n" "# undef FLT_RADIX\n" "# undef FLT_MANT_DIG\n" "# undef DBL_MANT_DIG\n" "# undef LDBL_MANT_DIG\n" "#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || \\\n" " !defined(__STRICT_ANSI__) || \\\n" " (defined(__cplusplus) && __cplusplus >= 201103L) || \\\n" " (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))\n" "# undef DECIMAL_DIG\n" "# endif\n" "# undef FLT_DIG\n" "# undef DBL_DIG\n" "# undef LDBL_DIG\n" "# undef FLT_MIN_EXP\n" "# undef DBL_MIN_EXP\n" "# undef LDBL_MIN_EXP\n" "# undef FLT_MIN_10_EXP\n" "# undef DBL_MIN_10_EXP\n" "# undef LDBL_MIN_10_EXP\n" "# undef FLT_MAX_EXP\n" "# undef DBL_MAX_EXP\n" "# undef LDBL_MAX_EXP\n" "# undef FLT_MAX_10_EXP\n" "# undef DBL_MAX_10_EXP\n" "# undef LDBL_MAX_10_EXP\n" "# undef FLT_MAX\n" "# undef DBL_MAX\n" "# undef LDBL_MAX\n" "# undef FLT_EPSILON\n" "# undef DBL_EPSILON\n" "# undef LDBL_EPSILON\n" "# undef FLT_MIN\n" "# undef DBL_MIN\n" "# undef LDBL_MIN\n" "#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) || \\\n" " !defined(__STRICT_ANSI__) || \\\n" " (defined(__cplusplus) && __cplusplus >= 201703L) || \\\n" " (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))\n" "# undef FLT_TRUE_MIN\n" "# undef DBL_TRUE_MIN\n" "# undef LDBL_TRUE_MIN\n" "# undef FLT_DECIMAL_DIG\n" "# undef DBL_DECIMAL_DIG\n" "# undef LDBL_DECIMAL_DIG\n" "# undef FLT_HAS_SUBNORM\n" "# undef DBL_HAS_SUBNORM\n" "# undef LDBL_HAS_SUBNORM\n" "# endif\n" "#endif\n" "\n" "/* Characteristics of floating point types, C99 5.2.4.2.2 */\n" "\n" "#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || \\\n" " (defined(__cplusplus) && __cplusplus >= 201103L)\n" "#define FLT_EVAL_METHOD __FLT_EVAL_METHOD__\n" "#endif\n" "#define FLT_ROUNDS (__builtin_flt_rounds())\n" "#define FLT_RADIX __FLT_RADIX__\n" "\n" "#define FLT_MANT_DIG __FLT_MANT_DIG__\n" "#define DBL_MANT_DIG __DBL_MANT_DIG__\n" "#define LDBL_MANT_DIG __LDBL_MANT_DIG__\n" "\n" "#if 
(defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || \\\n" " !defined(__STRICT_ANSI__) || \\\n" " (defined(__cplusplus) && __cplusplus >= 201103L) || \\\n" " (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))\n" "# define DECIMAL_DIG __DECIMAL_DIG__\n" "#endif\n" "\n" "#define FLT_DIG __FLT_DIG__\n" "#define DBL_DIG __DBL_DIG__\n" "#define LDBL_DIG __LDBL_DIG__\n" "\n" "#define FLT_MIN_EXP __FLT_MIN_EXP__\n" "#define DBL_MIN_EXP __DBL_MIN_EXP__\n" "#define LDBL_MIN_EXP __LDBL_MIN_EXP__\n" "\n" "#define FLT_MIN_10_EXP __FLT_MIN_10_EXP__\n" "#define DBL_MIN_10_EXP __DBL_MIN_10_EXP__\n" "#define LDBL_MIN_10_EXP __LDBL_MIN_10_EXP__\n" "\n" "#define FLT_MAX_EXP __FLT_MAX_EXP__\n" "#define DBL_MAX_EXP __DBL_MAX_EXP__\n" "#define LDBL_MAX_EXP __LDBL_MAX_EXP__\n" "\n" "#define FLT_MAX_10_EXP __FLT_MAX_10_EXP__\n" "#define DBL_MAX_10_EXP __DBL_MAX_10_EXP__\n" "#define LDBL_MAX_10_EXP __LDBL_MAX_10_EXP__\n" "\n" "#define FLT_MAX __FLT_MAX__\n" "#define DBL_MAX __DBL_MAX__\n" "#define LDBL_MAX __LDBL_MAX__\n" "\n" "#define FLT_EPSILON __FLT_EPSILON__\n" "#define DBL_EPSILON __DBL_EPSILON__\n" "#define LDBL_EPSILON __LDBL_EPSILON__\n" "\n" "#define FLT_MIN __FLT_MIN__\n" "#define DBL_MIN __DBL_MIN__\n" "#define LDBL_MIN __LDBL_MIN__\n" "\n" "#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) || \\\n" " !defined(__STRICT_ANSI__) || \\\n" " (defined(__cplusplus) && __cplusplus >= 201703L) || \\\n" " (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))\n" "# define FLT_TRUE_MIN __FLT_DENORM_MIN__\n" "# define DBL_TRUE_MIN __DBL_DENORM_MIN__\n" "# define LDBL_TRUE_MIN __LDBL_DENORM_MIN__\n" "# define FLT_DECIMAL_DIG __FLT_DECIMAL_DIG__\n" "# define DBL_DECIMAL_DIG __DBL_DECIMAL_DIG__\n" "# define LDBL_DECIMAL_DIG __LDBL_DECIMAL_DIG__\n" "# define FLT_HAS_SUBNORM __FLT_HAS_DENORM__\n" "# define DBL_HAS_SUBNORM __DBL_HAS_DENORM__\n" "# define LDBL_HAS_SUBNORM __LDBL_HAS_DENORM__\n" "#endif\n" "\n" "#ifdef __STDC_WANT_IEC_60559_TYPES_EXT__\n" "# 
define FLT16_MANT_DIG __FLT16_MANT_DIG__\n" "# define FLT16_DECIMAL_DIG __FLT16_DECIMAL_DIG__\n" "# define FLT16_DIG __FLT16_DIG__\n" "# define FLT16_MIN_EXP __FLT16_MIN_EXP__\n" "# define FLT16_MIN_10_EXP __FLT16_MIN_10_EXP__\n" "# define FLT16_MAX_EXP __FLT16_MAX_EXP__\n" "# define FLT16_MAX_10_EXP __FLT16_MAX_10_EXP__\n" "# define FLT16_MAX __FLT16_MAX__\n" "# define FLT16_EPSILON __FLT16_EPSILON__\n" "# define FLT16_MIN __FLT16_MIN__\n" "# define FLT16_TRUE_MIN __FLT16_TRUE_MIN__\n" "#endif /* __STDC_WANT_IEC_60559_TYPES_EXT__ */\n" "\n" "#endif /* __CLANG_FLOAT_H */\n" "" } , { "/builtins/fma4intrin.h" , "/*===---- fma4intrin.h - FMA4 intrinsics -----------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __X86INTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __FMA4INTRIN_H\n" "#define __FMA4INTRIN_H\n" "\n" "#include \n" "\n" "/* Define the default attributes for the functions in this file. 
*/\n" "#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__(\"fma4\"), __min_vector_width__(128)))\n" "#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__(\"fma4\"), __min_vector_width__(256)))\n" "\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS128\n" "_mm_macc_ps(__m128 __A, __m128 __B, __m128 __C)\n" "{\n" " return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);\n" "}\n" "\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS128\n" "_mm_macc_pd(__m128d __A, __m128d __B, __m128d __C)\n" "{\n" " return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C);\n" "}\n" "\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS128\n" "_mm_macc_ss(__m128 __A, __m128 __B, __m128 __C)\n" "{\n" " return (__m128)__builtin_ia32_vfmaddss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);\n" "}\n" "\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS128\n" "_mm_macc_sd(__m128d __A, __m128d __B, __m128d __C)\n" "{\n" " return (__m128d)__builtin_ia32_vfmaddsd((__v2df)__A, (__v2df)__B, (__v2df)__C);\n" "}\n" "\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS128\n" "_mm_msub_ps(__m128 __A, __m128 __B, __m128 __C)\n" "{\n" " return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);\n" "}\n" "\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS128\n" "_mm_msub_pd(__m128d __A, __m128d __B, __m128d __C)\n" "{\n" " return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, -(__v2df)__C);\n" "}\n" "\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS128\n" "_mm_msub_ss(__m128 __A, __m128 __B, __m128 __C)\n" "{\n" " return (__m128)__builtin_ia32_vfmaddss((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);\n" "}\n" "\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS128\n" "_mm_msub_sd(__m128d __A, __m128d __B, __m128d __C)\n" "{\n" " return (__m128d)__builtin_ia32_vfmaddsd((__v2df)__A, (__v2df)__B, -(__v2df)__C);\n" "}\n" "\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS128\n" 
"_mm_nmacc_ps(__m128 __A, __m128 __B, __m128 __C)\n" "{\n" " return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C);\n" "}\n" "\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS128\n" "_mm_nmacc_pd(__m128d __A, __m128d __B, __m128d __C)\n" "{\n" " return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, (__v2df)__C);\n" "}\n" "\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS128\n" "_mm_nmacc_ss(__m128 __A, __m128 __B, __m128 __C)\n" "{\n" " return (__m128)__builtin_ia32_vfmaddss(-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C);\n" "}\n" "\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS128\n" "_mm_nmacc_sd(__m128d __A, __m128d __B, __m128d __C)\n" "{\n" " return (__m128d)__builtin_ia32_vfmaddsd(-(__v2df)__A, (__v2df)__B, (__v2df)__C);\n" "}\n" "\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS128\n" "_mm_nmsub_ps(__m128 __A, __m128 __B, __m128 __C)\n" "{\n" " return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);\n" "}\n" "\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS128\n" "_mm_nmsub_pd(__m128d __A, __m128d __B, __m128d __C)\n" "{\n" " return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, -(__v2df)__C);\n" "}\n" "\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS128\n" "_mm_nmsub_ss(__m128 __A, __m128 __B, __m128 __C)\n" "{\n" " return (__m128)__builtin_ia32_vfmaddss(-(__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);\n" "}\n" "\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS128\n" "_mm_nmsub_sd(__m128d __A, __m128d __B, __m128d __C)\n" "{\n" " return (__m128d)__builtin_ia32_vfmaddsd(-(__v2df)__A, (__v2df)__B, -(__v2df)__C);\n" "}\n" "\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS128\n" "_mm_maddsub_ps(__m128 __A, __m128 __B, __m128 __C)\n" "{\n" " return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);\n" "}\n" "\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS128\n" "_mm_maddsub_pd(__m128d __A, __m128d __B, __m128d __C)\n" "{\n" " return 
(__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, (__v2df)__C);\n" "}\n" "\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS128\n" "_mm_msubadd_ps(__m128 __A, __m128 __B, __m128 __C)\n" "{\n" " return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);\n" "}\n" "\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS128\n" "_mm_msubadd_pd(__m128d __A, __m128d __B, __m128d __C)\n" "{\n" " return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, -(__v2df)__C);\n" "}\n" "\n" "static __inline__ __m256 __DEFAULT_FN_ATTRS256\n" "_mm256_macc_ps(__m256 __A, __m256 __B, __m256 __C)\n" "{\n" " return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);\n" "}\n" "\n" "static __inline__ __m256d __DEFAULT_FN_ATTRS256\n" "_mm256_macc_pd(__m256d __A, __m256d __B, __m256d __C)\n" "{\n" " return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);\n" "}\n" "\n" "static __inline__ __m256 __DEFAULT_FN_ATTRS256\n" "_mm256_msub_ps(__m256 __A, __m256 __B, __m256 __C)\n" "{\n" " return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C);\n" "}\n" "\n" "static __inline__ __m256d __DEFAULT_FN_ATTRS256\n" "_mm256_msub_pd(__m256d __A, __m256d __B, __m256d __C)\n" "{\n" " return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, -(__v4df)__C);\n" "}\n" "\n" "static __inline__ __m256 __DEFAULT_FN_ATTRS256\n" "_mm256_nmacc_ps(__m256 __A, __m256 __B, __m256 __C)\n" "{\n" " return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, (__v8sf)__C);\n" "}\n" "\n" "static __inline__ __m256d __DEFAULT_FN_ATTRS256\n" "_mm256_nmacc_pd(__m256d __A, __m256d __B, __m256d __C)\n" "{\n" " return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, (__v4df)__C);\n" "}\n" "\n" "static __inline__ __m256 __DEFAULT_FN_ATTRS256\n" "_mm256_nmsub_ps(__m256 __A, __m256 __B, __m256 __C)\n" "{\n" " return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, -(__v8sf)__C);\n" 
"}\n" "\n" "static __inline__ __m256d __DEFAULT_FN_ATTRS256\n" "_mm256_nmsub_pd(__m256d __A, __m256d __B, __m256d __C)\n" "{\n" " return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, -(__v4df)__C);\n" "}\n" "\n" "static __inline__ __m256 __DEFAULT_FN_ATTRS256\n" "_mm256_maddsub_ps(__m256 __A, __m256 __B, __m256 __C)\n" "{\n" " return (__m256)__builtin_ia32_vfmaddsubps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);\n" "}\n" "\n" "static __inline__ __m256d __DEFAULT_FN_ATTRS256\n" "_mm256_maddsub_pd(__m256d __A, __m256d __B, __m256d __C)\n" "{\n" " return (__m256d)__builtin_ia32_vfmaddsubpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);\n" "}\n" "\n" "static __inline__ __m256 __DEFAULT_FN_ATTRS256\n" "_mm256_msubadd_ps(__m256 __A, __m256 __B, __m256 __C)\n" "{\n" " return (__m256)__builtin_ia32_vfmaddsubps256((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C);\n" "}\n" "\n" "static __inline__ __m256d __DEFAULT_FN_ATTRS256\n" "_mm256_msubadd_pd(__m256d __A, __m256d __B, __m256d __C)\n" "{\n" " return (__m256d)__builtin_ia32_vfmaddsubpd256((__v4df)__A, (__v4df)__B, -(__v4df)__C);\n" "}\n" "\n" "#undef __DEFAULT_FN_ATTRS128\n" "#undef __DEFAULT_FN_ATTRS256\n" "\n" "#endif /* __FMA4INTRIN_H */\n" "" } , { "/builtins/fmaintrin.h" , "/*===---- fmaintrin.h - FMA intrinsics -------------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __FMAINTRIN_H\n" "#define __FMAINTRIN_H\n" "\n" "/* Define the default attributes for the functions in this file. 
*/\n" "#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__(\"fma\"), __min_vector_width__(128)))\n" "#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__(\"fma\"), __min_vector_width__(256)))\n" "\n" "/// Computes a multiply-add of 128-bit vectors of [4 x float].\n" "/// For each element, computes (__A * __B) + __C .\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VFMADD213PS instruction.\n" "///\n" "/// \\param __A\n" "/// A 128-bit vector of [4 x float] containing the multiplicand.\n" "/// \\param __B\n" "/// A 128-bit vector of [4 x float] containing the multiplier.\n" "/// \\param __C\n" "/// A 128-bit vector of [4 x float] containing the addend.\n" "/// \\returns A 128-bit vector of [4 x float] containing the result.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS128\n" "_mm_fmadd_ps(__m128 __A, __m128 __B, __m128 __C)\n" "{\n" " return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);\n" "}\n" "\n" "/// Computes a multiply-add of 128-bit vectors of [2 x double].\n" "/// For each element, computes (__A * __B) + __C .\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VFMADD213PD instruction.\n" "///\n" "/// \\param __A\n" "/// A 128-bit vector of [2 x double] containing the multiplicand.\n" "/// \\param __B\n" "/// A 128-bit vector of [2 x double] containing the multiplier.\n" "/// \\param __C\n" "/// A 128-bit vector of [2 x double] containing the addend.\n" "/// \\returns A 128-bit [2 x double] vector containing the result.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS128\n" "_mm_fmadd_pd(__m128d __A, __m128d __B, __m128d __C)\n" "{\n" " return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C);\n" "}\n" "\n" "/// Computes a scalar multiply-add of the single-precision values in the\n" "/// low 32 bits of 128-bit vectors of [4 x float].\n" "/// \\code\n" "/// result[31:0] = 
(__A[31:0] * __B[31:0]) + __C[31:0]\n" "/// result[127:32] = __A[127:32]\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VFMADD213SS instruction.\n" "///\n" "/// \\param __A\n" "/// A 128-bit vector of [4 x float] containing the multiplicand in the low\n" "/// 32 bits.\n" "/// \\param __B\n" "/// A 128-bit vector of [4 x float] containing the multiplier in the low\n" "/// 32 bits.\n" "/// \\param __C\n" "/// A 128-bit vector of [4 x float] containing the addend in the low\n" "/// 32 bits.\n" "/// \\returns A 128-bit vector of [4 x float] containing the result in the low\n" "/// 32 bits and a copy of \\a __A[127:32] in the upper 96 bits.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS128\n" "_mm_fmadd_ss(__m128 __A, __m128 __B, __m128 __C)\n" "{\n" " return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);\n" "}\n" "\n" "/// Computes a scalar multiply-add of the double-precision values in the\n" "/// low 64 bits of 128-bit vectors of [2 x double].\n" "/// \\code\n" "/// result[63:0] = (__A[63:0] * __B[63:0]) + __C[63:0]\n" "/// result[127:64] = __A[127:64]\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VFMADD213SD instruction.\n" "///\n" "/// \\param __A\n" "/// A 128-bit vector of [2 x double] containing the multiplicand in the low\n" "/// 64 bits.\n" "/// \\param __B\n" "/// A 128-bit vector of [2 x double] containing the multiplier in the low\n" "/// 64 bits.\n" "/// \\param __C\n" "/// A 128-bit vector of [2 x double] containing the addend in the low\n" "/// 64 bits.\n" "/// \\returns A 128-bit vector of [2 x double] containing the result in the low\n" "/// 64 bits and a copy of \\a __A[127:64] in the upper 64 bits.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS128\n" "_mm_fmadd_sd(__m128d __A, __m128d __B, __m128d __C)\n" "{\n" " return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, (__v2df)__B, (__v2df)__C);\n" "}\n" "\n" "/// 
Computes a multiply-subtract of 128-bit vectors of [4 x float].\n" "/// For each element, computes (__A * __B) - __C .\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VFMSUB213PS instruction.\n" "///\n" "/// \\param __A\n" "/// A 128-bit vector of [4 x float] containing the multiplicand.\n" "/// \\param __B\n" "/// A 128-bit vector of [4 x float] containing the multiplier.\n" "/// \\param __C\n" "/// A 128-bit vector of [4 x float] containing the subtrahend.\n" "/// \\returns A 128-bit vector of [4 x float] containing the result.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS128\n" "_mm_fmsub_ps(__m128 __A, __m128 __B, __m128 __C)\n" "{\n" " return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);\n" "}\n" "\n" "/// Computes a multiply-subtract of 128-bit vectors of [2 x double].\n" "/// For each element, computes (__A * __B) - __C .\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VFMSUB213PD instruction.\n" "///\n" "/// \\param __A\n" "/// A 128-bit vector of [2 x double] containing the multiplicand.\n" "/// \\param __B\n" "/// A 128-bit vector of [2 x double] containing the multiplier.\n" "/// \\param __C\n" "/// A 128-bit vector of [2 x double] containing the addend.\n" "/// \\returns A 128-bit vector of [2 x double] containing the result.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS128\n" "_mm_fmsub_pd(__m128d __A, __m128d __B, __m128d __C)\n" "{\n" " return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, -(__v2df)__C);\n" "}\n" "\n" "/// Computes a scalar multiply-subtract of the single-precision values in\n" "/// the low 32 bits of 128-bit vectors of [4 x float].\n" "/// \\code\n" "/// result[31:0] = (__A[31:0] * __B[31:0]) - __C[31:0]\n" "/// result[127:32] = __A[127:32]\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VFMSUB213SS instruction.\n" "///\n" "/// \\param __A\n" "/// A 128-bit vector 
of [4 x float] containing the multiplicand in the low\n" "/// 32 bits.\n" "/// \\param __B\n" "/// A 128-bit vector of [4 x float] containing the multiplier in the low\n" "/// 32 bits.\n" "/// \\param __C\n" "/// A 128-bit vector of [4 x float] containing the subtrahend in the low\n" "/// 32 bits.\n" "/// \\returns A 128-bit vector of [4 x float] containing the result in the low\n" "/// 32 bits, and a copy of \\a __A[127:32] in the upper 96 bits.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS128\n" "_mm_fmsub_ss(__m128 __A, __m128 __B, __m128 __C)\n" "{\n" " return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);\n" "}\n" "\n" "/// Computes a scalar multiply-subtract of the double-precision values in\n" "/// the low 64 bits of 128-bit vectors of [2 x double].\n" "/// \\code\n" "/// result[63:0] = (__A[63:0] * __B[63:0]) - __C[63:0]\n" "/// result[127:64] = __A[127:64]\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VFMSUB213SD instruction.\n" "///\n" "/// \\param __A\n" "/// A 128-bit vector of [2 x double] containing the multiplicand in the low\n" "/// 64 bits.\n" "/// \\param __B\n" "/// A 128-bit vector of [2 x double] containing the multiplier in the low\n" "/// 64 bits.\n" "/// \\param __C\n" "/// A 128-bit vector of [2 x double] containing the subtrahend in the low\n" "/// 64 bits.\n" "/// \\returns A 128-bit vector of [2 x double] containing the result in the low\n" "/// 64 bits, and a copy of \\a __A[127:64] in the upper 64 bits.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS128\n" "_mm_fmsub_sd(__m128d __A, __m128d __B, __m128d __C)\n" "{\n" " return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, (__v2df)__B, -(__v2df)__C);\n" "}\n" "\n" "/// Computes a negated multiply-add of 128-bit vectors of [4 x float].\n" "/// For each element, computes -(__A * __B) + __C .\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VFNMADD213DPS instruction.\n" 
"///\n" "/// \\param __A\n" "/// A 128-bit vector of [4 x float] containing the multiplicand.\n" "/// \\param __B\n" "/// A 128-bit vector of [4 x float] containing the multiplier.\n" "/// \\param __C\n" "/// A 128-bit vector of [4 x float] containing the addend.\n" "/// \\returns A 128-bit [4 x float] vector containing the result.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS128\n" "_mm_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C)\n" "{\n" " return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C);\n" "}\n" "\n" "/// Computes a negated multiply-add of 128-bit vectors of [2 x double].\n" "/// For each element, computes -(__A * __B) + __C .\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VFNMADD213PD instruction.\n" "///\n" "/// \\param __A\n" "/// A 128-bit vector of [2 x double] containing the multiplicand.\n" "/// \\param __B\n" "/// A 128-bit vector of [2 x double] containing the multiplier.\n" "/// \\param __C\n" "/// A 128-bit vector of [2 x double] containing the addend.\n" "/// \\returns A 128-bit vector of [2 x double] containing the result.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS128\n" "_mm_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C)\n" "{\n" " return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, (__v2df)__C);\n" "}\n" "\n" "/// Computes a scalar negated multiply-add of the single-precision values in\n" "/// the low 32 bits of 128-bit vectors of [4 x float].\n" "/// \\code\n" "/// result[31:0] = -(__A[31:0] * __B[31:0]) + __C[31:0]\n" "/// result[127:32] = __A[127:32]\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VFNMADD213SS instruction.\n" "///\n" "/// \\param __A\n" "/// A 128-bit vector of [4 x float] containing the multiplicand in the low\n" "/// 32 bits.\n" "/// \\param __B\n" "/// A 128-bit vector of [4 x float] containing the multiplier in the low\n" "/// 32 bits.\n" "/// \\param __C\n" "/// A 128-bit 
vector of [4 x float] containing the addend in the low\n" "/// 32 bits.\n" "/// \\returns A 128-bit vector of [4 x float] containing the result in the low\n" "/// 32 bits, and a copy of \\a __A[127:32] in the upper 96 bits.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS128\n" "_mm_fnmadd_ss(__m128 __A, __m128 __B, __m128 __C)\n" "{\n" " return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, -(__v4sf)__B, (__v4sf)__C);\n" "}\n" "\n" "/// Computes a scalar negated multiply-add of the double-precision values\n" "/// in the low 64 bits of 128-bit vectors of [2 x double].\n" "/// \\code\n" "/// result[63:0] = -(__A[63:0] * __B[63:0]) + __C[63:0]\n" "/// result[127:64] = __A[127:64]\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VFNMADD213SD instruction.\n" "///\n" "/// \\param __A\n" "/// A 128-bit vector of [2 x double] containing the multiplicand in the low\n" "/// 64 bits.\n" "/// \\param __B\n" "/// A 128-bit vector of [2 x double] containing the multiplier in the low\n" "/// 64 bits.\n" "/// \\param __C\n" "/// A 128-bit vector of [2 x double] containing the addend in the low\n" "/// 64 bits.\n" "/// \\returns A 128-bit vector of [2 x double] containing the result in the low\n" "/// 64 bits, and a copy of \\a __A[127:64] in the upper 64 bits.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS128\n" "_mm_fnmadd_sd(__m128d __A, __m128d __B, __m128d __C)\n" "{\n" " return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, -(__v2df)__B, (__v2df)__C);\n" "}\n" "\n" "/// Computes a negated multiply-subtract of 128-bit vectors of [4 x float].\n" "/// For each element, computes -(__A * __B) - __C .\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VFNMSUB213PS instruction.\n" "///\n" "/// \\param __A\n" "/// A 128-bit vector of [4 x float] containing the multiplicand.\n" "/// \\param __B\n" "/// A 128-bit vector of [4 x float] containing the multiplier.\n" "/// \\param __C\n" "/// A 128-bit 
vector of [4 x float] containing the subtrahend.\n" "/// \\returns A 128-bit vector of [4 x float] containing the result.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS128\n" "_mm_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C)\n" "{\n" " return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);\n" "}\n" "\n" "/// Computes a negated multiply-subtract of 128-bit vectors of [2 x double].\n" "/// For each element, computes -(__A * __B) - __C .\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VFNMSUB213PD instruction.\n" "///\n" "/// \\param __A\n" "/// A 128-bit vector of [2 x double] containing the multiplicand.\n" "/// \\param __B\n" "/// A 128-bit vector of [2 x double] containing the multiplier.\n" "/// \\param __C\n" "/// A 128-bit vector of [2 x double] containing the subtrahend.\n" "/// \\returns A 128-bit vector of [2 x double] containing the result.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS128\n" "_mm_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C)\n" "{\n" " return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, -(__v2df)__C);\n" "}\n" "\n" "/// Computes a scalar negated multiply-subtract of the single-precision\n" "/// values in the low 32 bits of 128-bit vectors of [4 x float].\n" "/// \\code\n" "/// result[31:0] = -(__A[31:0] * __B[31:0]) - __C[31:0]\n" "/// result[127:32] = __A[127:32]\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VFNMSUB213SS instruction.\n" "///\n" "/// \\param __A\n" "/// A 128-bit vector of [4 x float] containing the multiplicand in the low\n" "/// 32 bits.\n" "/// \\param __B\n" "/// A 128-bit vector of [4 x float] containing the multiplier in the low\n" "/// 32 bits.\n" "/// \\param __C\n" "/// A 128-bit vector of [4 x float] containing the subtrahend in the low\n" "/// 32 bits.\n" "/// \\returns A 128-bit vector of [4 x float] containing the result in the low\n" "/// 32 bits, and a copy of \\a 
__A[127:32] in the upper 96 bits.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS128\n" "_mm_fnmsub_ss(__m128 __A, __m128 __B, __m128 __C)\n" "{\n" " return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, -(__v4sf)__B, -(__v4sf)__C);\n" "}\n" "\n" "/// Computes a scalar negated multiply-subtract of the double-precision\n" "/// values in the low 64 bits of 128-bit vectors of [2 x double].\n" "/// \\code\n" "/// result[63:0] = -(__A[63:0] * __B[63:0]) - __C[63:0]\n" "/// result[127:64] = __A[127:64]\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VFNMSUB213SD instruction.\n" "///\n" "/// \\param __A\n" "/// A 128-bit vector of [2 x double] containing the multiplicand in the low\n" "/// 64 bits.\n" "/// \\param __B\n" "/// A 128-bit vector of [2 x double] containing the multiplier in the low\n" "/// 64 bits.\n" "/// \\param __C\n" "/// A 128-bit vector of [2 x double] containing the subtrahend in the low\n" "/// 64 bits.\n" "/// \\returns A 128-bit vector of [2 x double] containing the result in the low\n" "/// 64 bits, and a copy of \\a __A[127:64] in the upper 64 bits.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS128\n" "_mm_fnmsub_sd(__m128d __A, __m128d __B, __m128d __C)\n" "{\n" " return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, -(__v2df)__B, -(__v2df)__C);\n" "}\n" "\n" "/// Computes a multiply with alternating add/subtract of 128-bit vectors of\n" "/// [4 x float].\n" "/// \\code\n" "/// result[31:0] = (__A[31:0] * __B[31:0]) - __C[31:0]\n" "/// result[63:32] = (__A[63:32] * __B[63:32]) + __C[63:32]\n" "/// result[95:64] = (__A[95:64] * __B[95:64]) - __C[95:64]\n" "/// result[127:96] = (__A[127:96] * __B[127:96]) + __C[127:96]\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VFMADDSUB213PS instruction.\n" "///\n" "/// \\param __A\n" "/// A 128-bit vector of [4 x float] containing the multiplicand.\n" "/// \\param __B\n" "/// A 128-bit vector of [4 x 
float] containing the multiplier.\n" "/// \\param __C\n" "/// A 128-bit vector of [4 x float] containing the addend/subtrahend.\n" "/// \\returns A 128-bit vector of [4 x float] containing the result.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS128\n" "_mm_fmaddsub_ps(__m128 __A, __m128 __B, __m128 __C)\n" "{\n" " return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);\n" "}\n" "\n" "/// Computes a multiply with alternating add/subtract of 128-bit vectors of\n" "/// [2 x double].\n" "/// \\code\n" "/// result[63:0] = (__A[63:0] * __B[63:0]) - __C[63:0]\n" "/// result[127:64] = (__A[127:64] * __B[127:64]) + __C[127:64]\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VFMADDSUB213PD instruction.\n" "///\n" "/// \\param __A\n" "/// A 128-bit vector of [2 x double] containing the multiplicand.\n" "/// \\param __B\n" "/// A 128-bit vector of [2 x double] containing the multiplier.\n" "/// \\param __C\n" "/// A 128-bit vector of [2 x double] containing the addend/subtrahend.\n" "/// \\returns A 128-bit vector of [2 x double] containing the result.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS128\n" "_mm_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C)\n" "{\n" " return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, (__v2df)__C);\n" "}\n" "\n" "/// Computes a multiply with alternating add/subtract of 128-bit vectors of\n" "/// [4 x float].\n" "/// \\code\n" "/// result[31:0] = (__A[31:0] * __B[31:0]) + __C[31:0]\n" "/// result[63:32] = (__A[63:32] * __B[63:32]) - __C[63:32]\n" "/// result[95:64] = (__A[95:64] * __B[95:64]) + __C[95:64]\n" "/// result[127:96 = (__A[127:96] * __B[127:96]) - __C[127:96]\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VFMSUBADD213PS instruction.\n" "///\n" "/// \\param __A\n" "/// A 128-bit vector of [4 x float] containing the multiplicand.\n" "/// \\param __B\n" "/// A 128-bit vector of 
[4 x float] containing the multiplier.\n" "/// \\param __C\n" "/// A 128-bit vector of [4 x float] containing the addend/subtrahend.\n" "/// \\returns A 128-bit vector of [4 x float] containing the result.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS128\n" "_mm_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C)\n" "{\n" " return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);\n" "}\n" "\n" "/// Computes a multiply with alternating add/subtract of 128-bit vectors of\n" "/// [2 x double].\n" "/// \\code\n" "/// result[63:0] = (__A[63:0] * __B[63:0]) + __C[63:0]\n" "/// result[127:64] = (__A[127:64] * __B[127:64]) - __C[127:64]\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VFMADDSUB213PD instruction.\n" "///\n" "/// \\param __A\n" "/// A 128-bit vector of [2 x double] containing the multiplicand.\n" "/// \\param __B\n" "/// A 128-bit vector of [2 x double] containing the multiplier.\n" "/// \\param __C\n" "/// A 128-bit vector of [2 x double] containing the addend/subtrahend.\n" "/// \\returns A 128-bit vector of [2 x double] containing the result.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS128\n" "_mm_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C)\n" "{\n" " return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, -(__v2df)__C);\n" "}\n" "\n" "/// Computes a multiply-add of 256-bit vectors of [8 x float].\n" "/// For each element, computes (__A * __B) + __C .\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VFMADD213PS instruction.\n" "///\n" "/// \\param __A\n" "/// A 256-bit vector of [8 x float] containing the multiplicand.\n" "/// \\param __B\n" "/// A 256-bit vector of [8 x float] containing the multiplier.\n" "/// \\param __C\n" "/// A 256-bit vector of [8 x float] containing the addend.\n" "/// \\returns A 256-bit vector of [8 x float] containing the result.\n" "static __inline__ __m256 __DEFAULT_FN_ATTRS256\n" 
"_mm256_fmadd_ps(__m256 __A, __m256 __B, __m256 __C)\n" "{\n" " return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);\n" "}\n" "\n" "/// Computes a multiply-add of 256-bit vectors of [4 x double].\n" "/// For each element, computes (__A * __B) + __C .\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VFMADD213PD instruction.\n" "///\n" "/// \\param __A\n" "/// A 256-bit vector of [4 x double] containing the multiplicand.\n" "/// \\param __B\n" "/// A 256-bit vector of [4 x double] containing the multiplier.\n" "/// \\param __C\n" "/// A 256-bit vector of [4 x double] containing the addend.\n" "/// \\returns A 256-bit vector of [4 x double] containing the result.\n" "static __inline__ __m256d __DEFAULT_FN_ATTRS256\n" "_mm256_fmadd_pd(__m256d __A, __m256d __B, __m256d __C)\n" "{\n" " return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);\n" "}\n" "\n" "/// Computes a multiply-subtract of 256-bit vectors of [8 x float].\n" "/// For each element, computes (__A * __B) - __C .\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VFMSUB213PS instruction.\n" "///\n" "/// \\param __A\n" "/// A 256-bit vector of [8 x float] containing the multiplicand.\n" "/// \\param __B\n" "/// A 256-bit vector of [8 x float] containing the multiplier.\n" "/// \\param __C\n" "/// A 256-bit vector of [8 x float] containing the subtrahend.\n" "/// \\returns A 256-bit vector of [8 x float] containing the result.\n" "static __inline__ __m256 __DEFAULT_FN_ATTRS256\n" "_mm256_fmsub_ps(__m256 __A, __m256 __B, __m256 __C)\n" "{\n" " return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C);\n" "}\n" "\n" "/// Computes a multiply-subtract of 256-bit vectors of [4 x double].\n" "/// For each element, computes (__A * __B) - __C .\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VFMSUB213PD instruction.\n" "///\n" "/// 
\\param __A\n" "/// A 256-bit vector of [4 x double] containing the multiplicand.\n" "/// \\param __B\n" "/// A 256-bit vector of [4 x double] containing the multiplier.\n" "/// \\param __C\n" "/// A 256-bit vector of [4 x double] containing the subtrahend.\n" "/// \\returns A 256-bit vector of [4 x double] containing the result.\n" "static __inline__ __m256d __DEFAULT_FN_ATTRS256\n" "_mm256_fmsub_pd(__m256d __A, __m256d __B, __m256d __C)\n" "{\n" " return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, -(__v4df)__C);\n" "}\n" "\n" "/// Computes a negated multiply-add of 256-bit vectors of [8 x float].\n" "/// For each element, computes -(__A * __B) + __C .\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VFNMADD213PS instruction.\n" "///\n" "/// \\param __A\n" "/// A 256-bit vector of [8 x float] containing the multiplicand.\n" "/// \\param __B\n" "/// A 256-bit vector of [8 x float] containing the multiplier.\n" "/// \\param __C\n" "/// A 256-bit vector of [8 x float] containing the addend.\n" "/// \\returns A 256-bit vector of [8 x float] containing the result.\n" "static __inline__ __m256 __DEFAULT_FN_ATTRS256\n" "_mm256_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C)\n" "{\n" " return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, (__v8sf)__C);\n" "}\n" "\n" "/// Computes a negated multiply-add of 256-bit vectors of [4 x double].\n" "/// For each element, computes -(__A * __B) + __C .\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VFNMADD213PD instruction.\n" "///\n" "/// \\param __A\n" "/// A 256-bit vector of [4 x double] containing the multiplicand.\n" "/// \\param __B\n" "/// A 256-bit vector of [4 x double] containing the multiplier.\n" "/// \\param __C\n" "/// A 256-bit vector of [4 x double] containing the addend.\n" "/// \\returns A 256-bit vector of [4 x double] containing the result.\n" "static __inline__ __m256d __DEFAULT_FN_ATTRS256\n" 
"_mm256_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C)\n" "{\n" " return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, (__v4df)__C);\n" "}\n" "\n" "/// Computes a negated multiply-subtract of 256-bit vectors of [8 x float].\n" "/// For each element, computes -(__A * __B) - __C .\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VFNMSUB213PS instruction.\n" "///\n" "/// \\param __A\n" "/// A 256-bit vector of [8 x float] containing the multiplicand.\n" "/// \\param __B\n" "/// A 256-bit vector of [8 x float] containing the multiplier.\n" "/// \\param __C\n" "/// A 256-bit vector of [8 x float] containing the subtrahend.\n" "/// \\returns A 256-bit vector of [8 x float] containing the result.\n" "static __inline__ __m256 __DEFAULT_FN_ATTRS256\n" "_mm256_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C)\n" "{\n" " return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, -(__v8sf)__C);\n" "}\n" "\n" "/// Computes a negated multiply-subtract of 256-bit vectors of [4 x double].\n" "/// For each element, computes -(__A * __B) - __C .\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VFNMSUB213PD instruction.\n" "///\n" "/// \\param __A\n" "/// A 256-bit vector of [4 x double] containing the multiplicand.\n" "/// \\param __B\n" "/// A 256-bit vector of [4 x double] containing the multiplier.\n" "/// \\param __C\n" "/// A 256-bit vector of [4 x double] containing the subtrahend.\n" "/// \\returns A 256-bit vector of [4 x double] containing the result.\n" "static __inline__ __m256d __DEFAULT_FN_ATTRS256\n" "_mm256_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C)\n" "{\n" " return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, -(__v4df)__C);\n" "}\n" "\n" "/// Computes a multiply with alternating add/subtract of 256-bit vectors of\n" "/// [8 x float].\n" "/// \\code\n" "/// result[31:0] = (__A[31:0] * __B[31:0]) - __C[31:0]\n" "/// result[63:32] = (__A[63:32] * 
__B[63:32]) + __C[63:32]\n" "/// result[95:64] = (__A[95:64] * __B[95:64]) - __C[95:64]\n" "/// result[127:96] = (__A[127:96] * __B[127:96]) + __C[127:96]\n" "/// result[159:128] = (__A[159:128] * __B[159:128]) - __C[159:128]\n" "/// result[191:160] = (__A[191:160] * __B[191:160]) + __C[191:160]\n" "/// result[223:192] = (__A[223:192] * __B[223:192]) - __C[223:192]\n" "/// result[255:224] = (__A[255:224] * __B[255:224]) + __C[255:224]\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VFMADDSUB213PS instruction.\n" "///\n" "/// \\param __A\n" "/// A 256-bit vector of [8 x float] containing the multiplicand.\n" "/// \\param __B\n" "/// A 256-bit vector of [8 x float] containing the multiplier.\n" "/// \\param __C\n" "/// A 256-bit vector of [8 x float] containing the addend/subtrahend.\n" "/// \\returns A 256-bit vector of [8 x float] containing the result.\n" "static __inline__ __m256 __DEFAULT_FN_ATTRS256\n" "_mm256_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C)\n" "{\n" " return (__m256)__builtin_ia32_vfmaddsubps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);\n" "}\n" "\n" "/// Computes a multiply with alternating add/subtract of 256-bit vectors of\n" "/// [4 x double].\n" "/// \\code\n" "/// result[63:0] = (__A[63:0] * __B[63:0]) - __C[63:0]\n" "/// result[127:64] = (__A[127:64] * __B[127:64]) + __C[127:64]\n" "/// result[191:128] = (__A[191:128] * __B[191:128]) - __C[191:128]\n" "/// result[255:192] = (__A[255:192] * __B[255:192]) + __C[255:192]\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VFMADDSUB213PD instruction.\n" "///\n" "/// \\param __A\n" "/// A 256-bit vector of [4 x double] containing the multiplicand.\n" "/// \\param __B\n" "/// A 256-bit vector of [4 x double] containing the multiplier.\n" "/// \\param __C\n" "/// A 256-bit vector of [4 x double] containing the addend/subtrahend.\n" "/// \\returns A 256-bit vector of [4 x double] containing the 
result.\n" "static __inline__ __m256d __DEFAULT_FN_ATTRS256\n" "_mm256_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C)\n" "{\n" " return (__m256d)__builtin_ia32_vfmaddsubpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);\n" "}\n" "\n" "/// Computes a vector multiply with alternating add/subtract of 256-bit\n" "/// vectors of [8 x float].\n" "/// \\code\n" "/// result[31:0] = (__A[31:0] * __B[31:0]) + __C[31:0]\n" "/// result[63:32] = (__A[63:32] * __B[63:32]) - __C[63:32]\n" "/// result[95:64] = (__A[95:64] * __B[95:64]) + __C[95:64]\n" "/// result[127:96] = (__A[127:96] * __B[127:96]) - __C[127:96]\n" "/// result[159:128] = (__A[159:128] * __B[159:128]) + __C[159:128]\n" "/// result[191:160] = (__A[191:160] * __B[191:160]) - __C[191:160]\n" "/// result[223:192] = (__A[223:192] * __B[223:192]) + __C[223:192]\n" "/// result[255:224] = (__A[255:224] * __B[255:224]) - __C[255:224]\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VFMSUBADD213PS instruction.\n" "///\n" "/// \\param __A\n" "/// A 256-bit vector of [8 x float] containing the multiplicand.\n" "/// \\param __B\n" "/// A 256-bit vector of [8 x float] containing the multiplier.\n" "/// \\param __C\n" "/// A 256-bit vector of [8 x float] containing the addend/subtrahend.\n" "/// \\returns A 256-bit vector of [8 x float] containing the result.\n" "static __inline__ __m256 __DEFAULT_FN_ATTRS256\n" "_mm256_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C)\n" "{\n" " return (__m256)__builtin_ia32_vfmaddsubps256((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C);\n" "}\n" "\n" "/// Computes a vector multiply with alternating add/subtract of 256-bit\n" "/// vectors of [4 x double].\n" "/// \\code\n" "/// result[63:0] = (__A[63:0] * __B[63:0]) + __C[63:0]\n" "/// result[127:64] = (__A[127:64] * __B[127:64]) - __C[127:64]\n" "/// result[191:128] = (__A[191:128] * __B[191:128]) + __C[191:128]\n" "/// result[255:192] = (__A[255:192] * __B[255:192]) - __C[255:192]\n" "/// 
\\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VFMSUBADD213PD instruction.\n" "///\n" "/// \\param __A\n" "/// A 256-bit vector of [4 x double] containing the multiplicand.\n" "/// \\param __B\n" "/// A 256-bit vector of [4 x double] containing the multiplier.\n" "/// \\param __C\n" "/// A 256-bit vector of [4 x double] containing the addend/subtrahend.\n" "/// \\returns A 256-bit vector of [4 x double] containing the result.\n" "static __inline__ __m256d __DEFAULT_FN_ATTRS256\n" "_mm256_fmsubadd_pd(__m256d __A, __m256d __B, __m256d __C)\n" "{\n" " return (__m256d)__builtin_ia32_vfmaddsubpd256((__v4df)__A, (__v4df)__B, -(__v4df)__C);\n" "}\n" "\n" "#undef __DEFAULT_FN_ATTRS128\n" "#undef __DEFAULT_FN_ATTRS256\n" "\n" "#endif /* __FMAINTRIN_H */\n" "" } , { "/builtins/fxsrintrin.h" , "/*===---- fxsrintrin.h - FXSR intrinsic ------------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __FXSRINTRIN_H\n" "#define __FXSRINTRIN_H\n" "\n" "#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__(\"fxsr\")))\n" "\n" "/// Saves the XMM, MMX, MXCSR and x87 FPU registers into a 512-byte\n" "/// memory region pointed to by the input parameter \\a __p.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the FXSAVE instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a 512-byte memory region. 
The beginning of this memory\n" "/// region should be aligned on a 16-byte boundary.\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_fxsave(void *__p)\n" "{\n" " __builtin_ia32_fxsave(__p);\n" "}\n" "\n" "/// Restores the XMM, MMX, MXCSR and x87 FPU registers from the 512-byte\n" "/// memory region pointed to by the input parameter \\a __p. The contents of\n" "/// this memory region should have been written to by a previous \\c _fxsave\n" "/// or \\c _fxsave64 intrinsic.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the FXRSTOR instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a 512-byte memory region. The beginning of this memory\n" "/// region should be aligned on a 16-byte boundary.\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_fxrstor(void *__p)\n" "{\n" " __builtin_ia32_fxrstor(__p);\n" "}\n" "\n" "#ifdef __x86_64__\n" "/// Saves the XMM, MMX, MXCSR and x87 FPU registers into a 512-byte\n" "/// memory region pointed to by the input parameter \\a __p.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the FXSAVE64 instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a 512-byte memory region. The beginning of this memory\n" "/// region should be aligned on a 16-byte boundary.\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_fxsave64(void *__p)\n" "{\n" " __builtin_ia32_fxsave64(__p);\n" "}\n" "\n" "/// Restores the XMM, MMX, MXCSR and x87 FPU registers from the 512-byte\n" "/// memory region pointed to by the input parameter \\a __p. The contents of\n" "/// this memory region should have been written to by a previous \\c _fxsave\n" "/// or \\c _fxsave64 intrinsic.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the FXRSTOR64 instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a 512-byte memory region. 
The beginning of this memory\n" "/// region should be aligned on a 16-byte boundary.\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_fxrstor64(void *__p)\n" "{\n" " __builtin_ia32_fxrstor64(__p);\n" "}\n" "#endif\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "\n" "#endif\n" "" } , { "/builtins/gfniintrin.h" , "/*===----------------- gfniintrin.h - GFNI intrinsics ----------------------===\n" " *\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "#ifndef __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __GFNIINTRIN_H\n" "#define __GFNIINTRIN_H\n" "\n" "/* Default attributes for simple form (no masking). */\n" "#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__(\"gfni\"), __min_vector_width__(128)))\n" "\n" "/* Default attributes for YMM unmasked form. */\n" "#define __DEFAULT_FN_ATTRS_Y __attribute__((__always_inline__, __nodebug__, __target__(\"avx,gfni\"), __min_vector_width__(256)))\n" "\n" "/* Default attributes for ZMM unmasked forms. */\n" "#define __DEFAULT_FN_ATTRS_Z __attribute__((__always_inline__, __nodebug__, __target__(\"avx512f,gfni\"), __min_vector_width__(512)))\n" "/* Default attributes for ZMM masked forms. */\n" "#define __DEFAULT_FN_ATTRS_Z_MASK __attribute__((__always_inline__, __nodebug__, __target__(\"avx512bw,gfni\"), __min_vector_width__(512)))\n" "\n" "/* Default attributes for VLX masked forms. 
*/\n" "#define __DEFAULT_FN_ATTRS_VL128 __attribute__((__always_inline__, __nodebug__, __target__(\"avx512bw,avx512vl,gfni\"), __min_vector_width__(128)))\n" "#define __DEFAULT_FN_ATTRS_VL256 __attribute__((__always_inline__, __nodebug__, __target__(\"avx512bw,avx512vl,gfni\"), __min_vector_width__(256)))\n" "\n" "#define _mm_gf2p8affineinv_epi64_epi8(A, B, I) \\\n" " ((__m128i)__builtin_ia32_vgf2p8affineinvqb_v16qi((__v16qi)(__m128i)(A), \\\n" " (__v16qi)(__m128i)(B), \\\n" " (char)(I)))\n" "\n" "#define _mm_gf2p8affine_epi64_epi8(A, B, I) \\\n" " ((__m128i)__builtin_ia32_vgf2p8affineqb_v16qi((__v16qi)(__m128i)(A), \\\n" " (__v16qi)(__m128i)(B), \\\n" " (char)(I)))\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_gf2p8mul_epi8(__m128i __A, __m128i __B)\n" "{\n" " return (__m128i) __builtin_ia32_vgf2p8mulb_v16qi((__v16qi) __A,\n" " (__v16qi) __B);\n" "}\n" "\n" "#ifdef __AVXINTRIN_H\n" "#define _mm256_gf2p8affineinv_epi64_epi8(A, B, I) \\\n" " ((__m256i)__builtin_ia32_vgf2p8affineinvqb_v32qi((__v32qi)(__m256i)(A), \\\n" " (__v32qi)(__m256i)(B), \\\n" " (char)(I)))\n" "\n" "#define _mm256_gf2p8affine_epi64_epi8(A, B, I) \\\n" " ((__m256i)__builtin_ia32_vgf2p8affineqb_v32qi((__v32qi)(__m256i)(A), \\\n" " (__v32qi)(__m256i)(B), \\\n" " (char)(I)))\n" "\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS_Y\n" "_mm256_gf2p8mul_epi8(__m256i __A, __m256i __B)\n" "{\n" " return (__m256i) __builtin_ia32_vgf2p8mulb_v32qi((__v32qi) __A,\n" " (__v32qi) __B);\n" "}\n" "#endif /* __AVXINTRIN_H */\n" "\n" "#ifdef __AVX512BWINTRIN_H\n" "#define _mm512_gf2p8affineinv_epi64_epi8(A, B, I) \\\n" " ((__m512i)__builtin_ia32_vgf2p8affineinvqb_v64qi((__v64qi)(__m512i)(A), \\\n" " (__v64qi)(__m512i)(B), \\\n" " (char)(I)))\n" "\n" "#define _mm512_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) \\\n" " ((__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \\\n" " (__v64qi)_mm512_gf2p8affineinv_epi64_epi8(A, B, I), \\\n" " (__v64qi)(__m512i)(S)))\n" "\n" "#define 
_mm512_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) \\\n" " _mm512_mask_gf2p8affineinv_epi64_epi8((__m512i)_mm512_setzero_si512(), \\\n" " U, A, B, I)\n" "\n" "#define _mm512_gf2p8affine_epi64_epi8(A, B, I) \\\n" " ((__m512i)__builtin_ia32_vgf2p8affineqb_v64qi((__v64qi)(__m512i)(A), \\\n" " (__v64qi)(__m512i)(B), \\\n" " (char)(I)))\n" "\n" "#define _mm512_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) \\\n" " ((__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \\\n" " (__v64qi)_mm512_gf2p8affine_epi64_epi8((A), (B), (I)), \\\n" " (__v64qi)(__m512i)(S)))\n" "\n" "#define _mm512_maskz_gf2p8affine_epi64_epi8(U, A, B, I) \\\n" " _mm512_mask_gf2p8affine_epi64_epi8((__m512i)_mm512_setzero_si512(), \\\n" " U, A, B, I)\n" "\n" "static __inline__ __m512i __DEFAULT_FN_ATTRS_Z\n" "_mm512_gf2p8mul_epi8(__m512i __A, __m512i __B)\n" "{\n" " return (__m512i) __builtin_ia32_vgf2p8mulb_v64qi((__v64qi) __A,\n" " (__v64qi) __B);\n" "}\n" "\n" "static __inline__ __m512i __DEFAULT_FN_ATTRS_Z_MASK\n" "_mm512_mask_gf2p8mul_epi8(__m512i __S, __mmask64 __U, __m512i __A, __m512i __B)\n" "{\n" " return (__m512i) __builtin_ia32_selectb_512(__U,\n" " (__v64qi) _mm512_gf2p8mul_epi8(__A, __B),\n" " (__v64qi) __S);\n" "}\n" "\n" "static __inline__ __m512i __DEFAULT_FN_ATTRS_Z_MASK\n" "_mm512_maskz_gf2p8mul_epi8(__mmask64 __U, __m512i __A, __m512i __B)\n" "{\n" " return _mm512_mask_gf2p8mul_epi8((__m512i)_mm512_setzero_si512(),\n" " __U, __A, __B);\n" "}\n" "#endif /* __AVX512BWINTRIN_H */\n" "\n" "#ifdef __AVX512VLBWINTRIN_H\n" "#define _mm_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) \\\n" " ((__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \\\n" " (__v16qi)_mm_gf2p8affineinv_epi64_epi8(A, B, I), \\\n" " (__v16qi)(__m128i)(S)))\n" "\n" "#define _mm_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) \\\n" " _mm_mask_gf2p8affineinv_epi64_epi8((__m128i)_mm_setzero_si128(), \\\n" " U, A, B, I)\n" "\n" "#define _mm256_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) \\\n" " 
((__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \\\n" " (__v32qi)_mm256_gf2p8affineinv_epi64_epi8(A, B, I), \\\n" " (__v32qi)(__m256i)(S)))\n" "\n" "#define _mm256_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) \\\n" " _mm256_mask_gf2p8affineinv_epi64_epi8((__m256i)_mm256_setzero_si256(), \\\n" " U, A, B, I)\n" "\n" "#define _mm_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) \\\n" " ((__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \\\n" " (__v16qi)_mm_gf2p8affine_epi64_epi8(A, B, I), \\\n" " (__v16qi)(__m128i)(S)))\n" "\n" "#define _mm_maskz_gf2p8affine_epi64_epi8(U, A, B, I) \\\n" " _mm_mask_gf2p8affine_epi64_epi8((__m128i)_mm_setzero_si128(), U, A, B, I)\n" "\n" "#define _mm256_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) \\\n" " ((__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \\\n" " (__v32qi)_mm256_gf2p8affine_epi64_epi8(A, B, I), \\\n" " (__v32qi)(__m256i)(S)))\n" "\n" "#define _mm256_maskz_gf2p8affine_epi64_epi8(U, A, B, I) \\\n" " _mm256_mask_gf2p8affine_epi64_epi8((__m256i)_mm256_setzero_si256(), \\\n" " U, A, B, I)\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS_VL128\n" "_mm_mask_gf2p8mul_epi8(__m128i __S, __mmask16 __U, __m128i __A, __m128i __B)\n" "{\n" " return (__m128i) __builtin_ia32_selectb_128(__U,\n" " (__v16qi) _mm_gf2p8mul_epi8(__A, __B),\n" " (__v16qi) __S);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS_VL128\n" "_mm_maskz_gf2p8mul_epi8(__mmask16 __U, __m128i __A, __m128i __B)\n" "{\n" " return _mm_mask_gf2p8mul_epi8((__m128i)_mm_setzero_si128(),\n" " __U, __A, __B);\n" "}\n" "\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS_VL256\n" "_mm256_mask_gf2p8mul_epi8(__m256i __S, __mmask32 __U, __m256i __A, __m256i __B)\n" "{\n" " return (__m256i) __builtin_ia32_selectb_256(__U,\n" " (__v32qi) _mm256_gf2p8mul_epi8(__A, __B),\n" " (__v32qi) __S);\n" "}\n" "\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS_VL256\n" "_mm256_maskz_gf2p8mul_epi8(__mmask32 __U, __m256i __A, __m256i __B)\n" "{\n" " return 
_mm256_mask_gf2p8mul_epi8((__m256i)_mm256_setzero_si256(),\n" " __U, __A, __B);\n" "}\n" "#endif /* __AVX512VLBWINTRIN_H */\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "#undef __DEFAULT_FN_ATTRS_Y\n" "#undef __DEFAULT_FN_ATTRS_Z\n" "#undef __DEFAULT_FN_ATTRS_VL128\n" "#undef __DEFAULT_FN_ATTRS_VL256\n" "\n" "#endif /* __GFNIINTRIN_H */\n" "\n" "" } , { "/builtins/hexagon_circ_brev_intrinsics.h" , "//===----------------------------------------------------------------------===//\n" "//\n" "// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" "// See https://llvm.org/LICENSE.txt for license information.\n" "// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" "//\n" "//===----------------------------------------------------------------------===//\n" "\n" "#ifndef _HEXAGON_CIRC_BREV_INTRINSICS_H_\n" "#define _HEXAGON_CIRC_BREV_INTRINSICS_H_ 1\n" "\n" "#include \n" "#include \n" "\n" "/* Circular Load */\n" "/* ==========================================================================\n" " Assembly Syntax: Return=instruction()\n" " C Intrinsic Prototype: void Q6_circ_load_update_D(Word64 dst, Word64 *ptr, UWord32 incr, UWord32 bufsize, UWord32 K)\n" " Instruction Type: InstructionType\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "#define Q6_circ_load_update_D(dest,ptr,incr,bufsize,K) \\\n" " { ptr = (int64_t *) HEXAGON_circ_ldd (ptr, &(dest), ((((K)+1)<<24)|((bufsize)<<3)), ((incr)*8)); }\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Return=instruction()\n" " C Intrinsic Prototype: void Q6_circ_load_update_W(Word32 dst, Word32 *ptr, UWord32 incr, UWord32 bufsize, UWord32 K)\n" " Instruction Type: InstructionType\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "#define Q6_circ_load_update_W(dest,ptr,incr,bufsize,K) \\\n" " { ptr = (int 
*) HEXAGON_circ_ldw (ptr, &(dest), (((K)<<24)|((bufsize)<<2)), ((incr)*4)); }\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Return=instruction()\n" " C Intrinsic Prototype: void Q6_circ_load_update_H(Word16 dst, Word16 *ptr, UWord32 incr, UWord32 bufsize, UWord32 K)\n" " Instruction Type: InstructionType\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "#define Q6_circ_load_update_H(dest,ptr,incr,bufsize,K) \\\n" " { ptr = (int16_t *) HEXAGON_circ_ldh (ptr, &(dest), ((((K)-1)<<24)|((bufsize)<<1)), ((incr)*2)); }\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Return=instruction()\n" " C Intrinsic Prototype: void Q6_circ_load_update_UH( UWord16 dst, UWord16 *ptr, UWord32 incr, UWord32 bufsize, UWord32 K)\n" " Instruction Type: InstructionType\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "#define Q6_circ_load_update_UH(dest,ptr,incr,bufsize,K) \\\n" " { ptr = (uint16_t *) HEXAGON_circ_lduh (ptr, &(dest), ((((K)-1)<<24)|((bufsize)<<1)), ((incr)*2)); }\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Return=instruction()\n" " C Intrinsic Prototype: void Q6_circ_load_update_B(Word8 dst, Word8 *ptr, UWord32 incr, UWord32 bufsize, UWord32 K)\n" " Instruction Type: InstructionType\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "#define Q6_circ_load_update_B(dest,ptr,incr,bufsize,K) \\\n" " { ptr = (int8_t *) HEXAGON_circ_ldb (ptr, &(dest), ((((K)-2)<<24)|(bufsize)), incr); }\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Return=instruction()\n" " C Intrinsic Prototype: void Q6_circ_load_update_UB(UWord8 dst, UWord8 *ptr, 
UWord32 incr, UWord32 bufsize, UWord32 K)\n" " Instruction Type: InstructionType\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "#define Q6_circ_load_update_UB(dest,ptr,incr,bufsize,K) \\\n" " { ptr = (uint8_t *) HEXAGON_circ_ldub (ptr, &(dest), ((((K)-2)<<24)|(bufsize)), incr); }\n" "\n" "/* Circular Store */\n" "/* ==========================================================================\n" " Assembly Syntax: Return=instruction()\n" " C Intrinsic Prototype: void Q6_circ_store_update_D(Word64 *src, Word64 *ptr, UWord32 incr, UWord32 bufsize, UWord32 K)\n" " Instruction Type: InstructionType\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "#define Q6_circ_store_update_D(src,ptr,incr,bufsize,K) \\\n" " { ptr = (int64_t *) HEXAGON_circ_std (ptr, src, ((((K)+1)<<24)|((bufsize)<<3)), ((incr)*8)); }\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Return=instruction()\n" " C Intrinsic Prototype: void Q6_circ_store_update_W(Word32 *src, Word32 *ptr, UWord32 incr, UWord32 bufsize, UWord32 K)\n" " Instruction Type: InstructionType\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "#define Q6_circ_store_update_W(src,ptr,incr,bufsize,K) \\\n" " { ptr = (int *) HEXAGON_circ_stw (ptr, src, (((K)<<24)|((bufsize)<<2)), ((incr)*4)); }\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Return=instruction()\n" " C Intrinsic Prototype: void Q6_circ_store_update_HL(Word16 *src, Word16 *ptr, UWord32 incr, UWord32 bufsize, UWord32 K)\n" " Instruction Type: InstructionType\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "#define Q6_circ_store_update_HL(src,ptr,incr,bufsize,K) \\\n" " { ptr = (int16_t 
*) HEXAGON_circ_sth (ptr, src, ((((K)-1)<<24)|((bufsize)<<1)), ((incr)*2)); }\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Return=instruction()\n" " C Intrinsic Prototype: void Q6_circ_store_update_HH(Word16 *src, Word16 *ptr, UWord32 incr, UWord32 bufsize, UWord32 K)\n" " Instruction Type: InstructionType\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "#define Q6_circ_store_update_HH(src,ptr,incr,bufsize,K) \\\n" " { ptr = (int16_t *) HEXAGON_circ_sthhi (ptr, src, ((((K)-1)<<24)|((bufsize)<<1)), ((incr)*2)); }\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Return=instruction()\n" " C Intrinsic Prototype: void Q6_circ_store_update_B(Word8 *src, Word8 *ptr, UWord32 I4, UWord32 bufsize, UWord64 K)\n" " Instruction Type: InstructionType\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "#define Q6_circ_store_update_B(src,ptr,incr,bufsize,K) \\\n" " { ptr = (int8_t *) HEXAGON_circ_stb (ptr, src, ((((K)-2)<<24)|(bufsize)), incr); }\n" "\n" "\n" "/* Bit Reverse Load */\n" "/* ==========================================================================\n" " Assembly Syntax: Return=instruction()\n" " C Intrinsic Prototype: void Q6_bitrev_load_update_D(Word64 dst, Word64 *ptr, UWord32 Iu4)\n" " Instruction Type: InstructionType\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "#define Q6_bitrev_load_update_D(dest,ptr,log2bufsize) \\\n" " { ptr = (int64_t *) HEXAGON_brev_ldd (ptr, &(dest), (1<<(16-((log2bufsize) + 3)))); }\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Return=instruction()\n" " C Intrinsic Prototype: void Q6_bitrev_load_update_W(Word32 dst, Word32 *ptr, UWord32 Iu4)\n" " 
Instruction Type: InstructionType\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "#define Q6_bitrev_load_update_W(dest,ptr,log2bufsize) \\\n" " { ptr = (int *) HEXAGON_brev_ldw (ptr, &(dest), (1<<(16-((log2bufsize) + 2)))); }\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Return=instruction()\n" " C Intrinsic Prototype: void Q6_bitrev_load_update_H(Word16 dst, Word16 *ptr, UWord32 Iu4)\n" " Instruction Type: InstructionType\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "#define Q6_bitrev_load_update_H(dest,ptr,log2bufsize) \\\n" " { ptr = (int16_t *) HEXAGON_brev_ldh (ptr, &(dest), (1<<(16-((log2bufsize) + 1)))); }\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Return=instruction()\n" " C Intrinsic Prototype: void Q6_bitrev_load_update_UH(UWord16 dst, UWord16 *ptr, UWord32 Iu4)\n" " Instruction Type: InstructionType\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "#define Q6_bitrev_load_update_UH(dest,ptr,log2bufsize) \\\n" " { ptr = (uint16_t *) HEXAGON_brev_lduh (ptr, &(dest), (1<<(16-((log2bufsize) + 1)))); }\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Return=instruction()\n" " C Intrinsic Prototype: void Q6_bitrev_load_update_B(Word8 dst, Word8 *ptr, UWord32 Iu4)\n" " Instruction Type: InstructionType\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "#define Q6_bitrev_load_update_B(dest,ptr,log2bufsize) \\\n" " { ptr = (int8_t *) HEXAGON_brev_ldb (ptr, &(dest), (1<<(16-((log2bufsize))))); }\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: 
Return=instruction()\n" " C Intrinsic Prototype: void Q6_bitrev_load_update_UB(UWord8 dst, UWord8 *ptr, UWord32 Iu4)\n" " Instruction Type: InstructionType\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "#define Q6_bitrev_load_update_UB(dest,ptr,log2bufsize) \\\n" " { ptr = (uint8_t *) HEXAGON_brev_ldub (ptr, &(dest), (1<<(16-((log2bufsize))))); }\n" "\n" "/* Bit Reverse Store */\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Return=instruction()\n" " C Intrinsic Prototype: void Q6_bitrev_store_update_D(Word64 *src, Word64 *ptr, UWord32 Iu4)\n" " Instruction Type: InstructionType\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "#define Q6_bitrev_store_update_D(src,ptr,log2bufsize) \\\n" " { ptr = (int64_t *) HEXAGON_brev_std (ptr, src, (1<<(16-((log2bufsize) + 3)))); }\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Return=instruction()\n" " C Intrinsic Prototype: void Q6_bitrev_store_update_W(Word32 *src, Word32 *ptr, UWord32 Iu4)\n" " Instruction Type: InstructionType\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "#define Q6_bitrev_store_update_W(src,ptr,log2bufsize) \\\n" " { ptr = (int *) HEXAGON_brev_stw (ptr, src, (1<<(16-((log2bufsize) + 2)))); }\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Return=instruction()\n" " C Intrinsic Prototype: void Q6_bitrev_store_update_HL(Word16 *src, Word16 *ptr, Word32 Iu4)\n" " Instruction Type: InstructionType\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "#define Q6_bitrev_store_update_HL(src,ptr,log2bufsize) \\\n" " { ptr = (int16_t *) HEXAGON_brev_sth (ptr, src, 
(1<<(16-((log2bufsize) + 1)))); }\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Return=instruction()\n" " C Intrinsic Prototype: void Q6_bitrev_store_update_HH(Word16 *src, Word16 *ptr, UWord32 Iu4)\n" " Instruction Type: InstructionType\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "#define Q6_bitrev_store_update_HH(src,ptr,log2bufsize) \\\n" " { ptr = (int16_t *) HEXAGON_brev_sthhi (ptr, src, (1<<(16-((log2bufsize) + 1)))); }\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Return=instruction()\n" " C Intrinsic Prototype: void Q6_bitrev_store_update_B(Word8 *src, Word8 *ptr, UWord32 Iu4)\n" " Instruction Type: InstructionType\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "#define Q6_bitrev_store_update_B(src,ptr,log2bufsize) \\\n" " { ptr = (int8_t *) HEXAGON_brev_stb (ptr, src, (1<<(16-((log2bufsize))))); }\n" "\n" "\n" "#define HEXAGON_circ_ldd __builtin_circ_ldd\n" "#define HEXAGON_circ_ldw __builtin_circ_ldw\n" "#define HEXAGON_circ_ldh __builtin_circ_ldh\n" "#define HEXAGON_circ_lduh __builtin_circ_lduh\n" "#define HEXAGON_circ_ldb __builtin_circ_ldb\n" "#define HEXAGON_circ_ldub __builtin_circ_ldub\n" "\n" "\n" "#define HEXAGON_circ_std __builtin_circ_std\n" "#define HEXAGON_circ_stw __builtin_circ_stw\n" "#define HEXAGON_circ_sth __builtin_circ_sth\n" "#define HEXAGON_circ_sthhi __builtin_circ_sthhi\n" "#define HEXAGON_circ_stb __builtin_circ_stb\n" "\n" "\n" "#define HEXAGON_brev_ldd __builtin_brev_ldd\n" "#define HEXAGON_brev_ldw __builtin_brev_ldw\n" "#define HEXAGON_brev_ldh __builtin_brev_ldh\n" "#define HEXAGON_brev_lduh __builtin_brev_lduh\n" "#define HEXAGON_brev_ldb __builtin_brev_ldb\n" "#define HEXAGON_brev_ldub __builtin_brev_ldub\n" "\n" "#define HEXAGON_brev_std 
__builtin_brev_std\n" "#define HEXAGON_brev_stw __builtin_brev_stw\n" "#define HEXAGON_brev_sth __builtin_brev_sth\n" "#define HEXAGON_brev_sthhi __builtin_brev_sthhi\n" "#define HEXAGON_brev_stb __builtin_brev_stb\n" "\n" "#ifdef __HVX__\n" "/* ==========================================================================\n" " Assembly Syntax: if (Qt) vmem(Rt+#0) = Vs\n" " C Intrinsic Prototype: void Q6_vmaskedstoreq_QAV(HVX_VectorPred Qt, HVX_VectorAddress A, HVX_Vector Vs)\n" " Instruction Type: COPROC_VMEM\n" " Execution Slots: SLOT0\n" " ========================================================================== */\n" "\n" "#define Q6_vmaskedstoreq_QAV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaskedstoreq)\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: if (!Qt) vmem(Rt+#0) = Vs\n" " C Intrinsic Prototype: void Q6_vmaskedstorenq_QAV(HVX_VectorPred Qt, HVX_VectorAddress A, HVX_Vector Vs)\n" " Instruction Type: COPROC_VMEM\n" " Execution Slots: SLOT0\n" " ========================================================================== */\n" "\n" "#define Q6_vmaskedstorenq_QAV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaskedstorenq)\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: if (Qt) vmem(Rt+#0):nt = Vs\n" " C Intrinsic Prototype: void Q6_vmaskedstorentq_QAV(HVX_VectorPred Qt, HVX_VectorAddress A, HVX_Vector Vs)\n" " Instruction Type: COPROC_VMEM\n" " Execution Slots: SLOT0\n" " ========================================================================== */\n" "\n" "#define Q6_vmaskedstorentq_QAV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaskedstorentq)\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: if (!Qt) vmem(Rt+#0):nt = Vs\n" " C Intrinsic Prototype: void Q6_vmaskedstorentnq_QAV(HVX_VectorPred Qt, HVX_VectorAddress A, HVX_Vector Vs)\n" " Instruction Type: COPROC_VMEM\n" " 
Execution Slots: SLOT0\n" " ========================================================================== */\n" "\n" "#define Q6_vmaskedstorentnq_QAV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaskedstorentnq)\n" "\n" "#endif\n" "\n" "\n" "#endif /* #ifndef _HEXAGON_CIRC_BREV_INTRINSICS_H_ */\n" "\n" "#ifdef __NOT_DEFINED__\n" "/*** comment block template ***/\n" "/* ==========================================================================\n" " Assembly Syntax: Return=instruction()\n" " C Intrinsic Prototype: ReturnType Intrinsic(ParamType Rs, ParamType Rt)\n" " Instruction Type: InstructionType\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "#endif /*** __NOT_DEFINED__ ***/\n" "" } , { "/builtins/hexagon_protos.h" , "//===----------------------------------------------------------------------===//\n" "//\n" "// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" "// See https://llvm.org/LICENSE.txt for license information.\n" "// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" "//\n" "//===----------------------------------------------------------------------===//\n" "// Automatically generated file, do not edit!\n" "//===----------------------------------------------------------------------===//\n" "\n" "\n" "\n" "#ifndef __HEXAGON_PROTOS_H_\n" "#define __HEXAGON_PROTOS_H_ 1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=abs(Rs32)\n" " C Intrinsic Prototype: Word32 Q6_R_abs_R(Word32 Rs)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_abs_R __builtin_HEXAGON_A2_abs\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=abs(Rss32)\n" " C Intrinsic Prototype: Word64 Q6_P_abs_P(Word64 Rss)\n" " Instruction Type: S_2op\n" " 
Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_abs_P __builtin_HEXAGON_A2_absp\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=abs(Rs32):sat\n" " C Intrinsic Prototype: Word32 Q6_R_abs_R_sat(Word32 Rs)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_abs_R_sat __builtin_HEXAGON_A2_abssat\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=add(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_add_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: ALU32_3op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_add_RR __builtin_HEXAGON_A2_add\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=add(Rt32.h,Rs32.h):<<16\n" " C Intrinsic Prototype: Word32 Q6_R_add_RhRh_s16(Word32 Rt, Word32 Rs)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_add_RhRh_s16 __builtin_HEXAGON_A2_addh_h16_hh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=add(Rt32.h,Rs32.l):<<16\n" " C Intrinsic Prototype: Word32 Q6_R_add_RhRl_s16(Word32 Rt, Word32 Rs)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_add_RhRl_s16 __builtin_HEXAGON_A2_addh_h16_hl\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=add(Rt32.l,Rs32.h):<<16\n" " C Intrinsic Prototype: Word32 Q6_R_add_RlRh_s16(Word32 Rt, 
Word32 Rs)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_add_RlRh_s16 __builtin_HEXAGON_A2_addh_h16_lh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=add(Rt32.l,Rs32.l):<<16\n" " C Intrinsic Prototype: Word32 Q6_R_add_RlRl_s16(Word32 Rt, Word32 Rs)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_add_RlRl_s16 __builtin_HEXAGON_A2_addh_h16_ll\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=add(Rt32.h,Rs32.h):sat:<<16\n" " C Intrinsic Prototype: Word32 Q6_R_add_RhRh_sat_s16(Word32 Rt, Word32 Rs)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_add_RhRh_sat_s16 __builtin_HEXAGON_A2_addh_h16_sat_hh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=add(Rt32.h,Rs32.l):sat:<<16\n" " C Intrinsic Prototype: Word32 Q6_R_add_RhRl_sat_s16(Word32 Rt, Word32 Rs)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_add_RhRl_sat_s16 __builtin_HEXAGON_A2_addh_h16_sat_hl\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=add(Rt32.l,Rs32.h):sat:<<16\n" " C Intrinsic Prototype: Word32 Q6_R_add_RlRh_sat_s16(Word32 Rt, Word32 Rs)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_add_RlRh_sat_s16 __builtin_HEXAGON_A2_addh_h16_sat_lh\n" "\n" "/* 
==========================================================================\n" " Assembly Syntax: Rd32=add(Rt32.l,Rs32.l):sat:<<16\n" " C Intrinsic Prototype: Word32 Q6_R_add_RlRl_sat_s16(Word32 Rt, Word32 Rs)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_add_RlRl_sat_s16 __builtin_HEXAGON_A2_addh_h16_sat_ll\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=add(Rt32.l,Rs32.h)\n" " C Intrinsic Prototype: Word32 Q6_R_add_RlRh(Word32 Rt, Word32 Rs)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_add_RlRh __builtin_HEXAGON_A2_addh_l16_hl\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=add(Rt32.l,Rs32.l)\n" " C Intrinsic Prototype: Word32 Q6_R_add_RlRl(Word32 Rt, Word32 Rs)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_add_RlRl __builtin_HEXAGON_A2_addh_l16_ll\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=add(Rt32.l,Rs32.h):sat\n" " C Intrinsic Prototype: Word32 Q6_R_add_RlRh_sat(Word32 Rt, Word32 Rs)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_add_RlRh_sat __builtin_HEXAGON_A2_addh_l16_sat_hl\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=add(Rt32.l,Rs32.l):sat\n" " C Intrinsic Prototype: Word32 Q6_R_add_RlRl_sat(Word32 Rt, Word32 Rs)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " 
========================================================================== */\n" "\n" "#define Q6_R_add_RlRl_sat __builtin_HEXAGON_A2_addh_l16_sat_ll\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=add(Rs32,#s16)\n" " C Intrinsic Prototype: Word32 Q6_R_add_RI(Word32 Rs, Word32 Is16)\n" " Instruction Type: ALU32_ADDI\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_add_RI __builtin_HEXAGON_A2_addi\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=add(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_add_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_add_PP __builtin_HEXAGON_A2_addp\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=add(Rss32,Rtt32):sat\n" " C Intrinsic Prototype: Word64 Q6_P_add_PP_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_add_PP_sat __builtin_HEXAGON_A2_addpsat\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=add(Rs32,Rt32):sat\n" " C Intrinsic Prototype: Word32 Q6_R_add_RR_sat(Word32 Rs, Word32 Rt)\n" " Instruction Type: ALU32_3op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_add_RR_sat __builtin_HEXAGON_A2_addsat\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=add(Rs32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_add_RP(Word32 Rs, Word64 Rtt)\n" " Instruction 
Type: ALU64\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_P_add_RP __builtin_HEXAGON_A2_addsp\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=and(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_and_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: ALU32_3op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_and_RR __builtin_HEXAGON_A2_and\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=and(Rs32,#s10)\n" " C Intrinsic Prototype: Word32 Q6_R_and_RI(Word32 Rs, Word32 Is10)\n" " Instruction Type: ALU32_2op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_and_RI __builtin_HEXAGON_A2_andir\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=and(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_and_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_and_PP __builtin_HEXAGON_A2_andp\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=aslh(Rs32)\n" " C Intrinsic Prototype: Word32 Q6_R_aslh_R(Word32 Rs)\n" " Instruction Type: ALU32_2op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_aslh_R __builtin_HEXAGON_A2_aslh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=asrh(Rs32)\n" " C Intrinsic Prototype: Word32 Q6_R_asrh_R(Word32 Rs)\n" " Instruction Type: ALU32_2op\n" " Execution 
Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_asrh_R __builtin_HEXAGON_A2_asrh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=combine(Rt32.h,Rs32.h)\n" " C Intrinsic Prototype: Word32 Q6_R_combine_RhRh(Word32 Rt, Word32 Rs)\n" " Instruction Type: ALU32_3op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_combine_RhRh __builtin_HEXAGON_A2_combine_hh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=combine(Rt32.h,Rs32.l)\n" " C Intrinsic Prototype: Word32 Q6_R_combine_RhRl(Word32 Rt, Word32 Rs)\n" " Instruction Type: ALU32_3op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_combine_RhRl __builtin_HEXAGON_A2_combine_hl\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=combine(Rt32.l,Rs32.h)\n" " C Intrinsic Prototype: Word32 Q6_R_combine_RlRh(Word32 Rt, Word32 Rs)\n" " Instruction Type: ALU32_3op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_combine_RlRh __builtin_HEXAGON_A2_combine_lh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=combine(Rt32.l,Rs32.l)\n" " C Intrinsic Prototype: Word32 Q6_R_combine_RlRl(Word32 Rt, Word32 Rs)\n" " Instruction Type: ALU32_3op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_combine_RlRl __builtin_HEXAGON_A2_combine_ll\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=combine(#s8,#S8)\n" " C 
Intrinsic Prototype: Word64 Q6_P_combine_II(Word32 Is8, Word32 IS8)\n" " Instruction Type: ALU32_2op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_P_combine_II __builtin_HEXAGON_A2_combineii\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=combine(Rs32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_combine_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: ALU32_3op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_P_combine_RR __builtin_HEXAGON_A2_combinew\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=max(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_max_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_max_RR __builtin_HEXAGON_A2_max\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=max(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_max_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_max_PP __builtin_HEXAGON_A2_maxp\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=maxu(Rs32,Rt32)\n" " C Intrinsic Prototype: UWord32 Q6_R_maxu_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_maxu_RR __builtin_HEXAGON_A2_maxu\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: 
Rdd32=maxu(Rss32,Rtt32)\n" " C Intrinsic Prototype: UWord64 Q6_P_maxu_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_maxu_PP __builtin_HEXAGON_A2_maxup\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=min(Rt32,Rs32)\n" " C Intrinsic Prototype: Word32 Q6_R_min_RR(Word32 Rt, Word32 Rs)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_min_RR __builtin_HEXAGON_A2_min\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=min(Rtt32,Rss32)\n" " C Intrinsic Prototype: Word64 Q6_P_min_PP(Word64 Rtt, Word64 Rss)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_min_PP __builtin_HEXAGON_A2_minp\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=minu(Rt32,Rs32)\n" " C Intrinsic Prototype: UWord32 Q6_R_minu_RR(Word32 Rt, Word32 Rs)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_minu_RR __builtin_HEXAGON_A2_minu\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=minu(Rtt32,Rss32)\n" " C Intrinsic Prototype: UWord64 Q6_P_minu_PP(Word64 Rtt, Word64 Rss)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_minu_PP __builtin_HEXAGON_A2_minup\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: 
Rd32=neg(Rs32)\n" " C Intrinsic Prototype: Word32 Q6_R_neg_R(Word32 Rs)\n" " Instruction Type: ALU32_2op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_neg_R __builtin_HEXAGON_A2_neg\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=neg(Rss32)\n" " C Intrinsic Prototype: Word64 Q6_P_neg_P(Word64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_neg_P __builtin_HEXAGON_A2_negp\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=neg(Rs32):sat\n" " C Intrinsic Prototype: Word32 Q6_R_neg_R_sat(Word32 Rs)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_neg_R_sat __builtin_HEXAGON_A2_negsat\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=not(Rs32)\n" " C Intrinsic Prototype: Word32 Q6_R_not_R(Word32 Rs)\n" " Instruction Type: ALU32_2op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_not_R __builtin_HEXAGON_A2_not\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=not(Rss32)\n" " C Intrinsic Prototype: Word64 Q6_P_not_P(Word64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_not_P __builtin_HEXAGON_A2_notp\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=or(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_or_RR(Word32 Rs, Word32 Rt)\n" 
" Instruction Type: ALU32_3op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_or_RR __builtin_HEXAGON_A2_or\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=or(Rs32,#s10)\n" " C Intrinsic Prototype: Word32 Q6_R_or_RI(Word32 Rs, Word32 Is10)\n" " Instruction Type: ALU32_2op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_or_RI __builtin_HEXAGON_A2_orir\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=or(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_or_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_or_PP __builtin_HEXAGON_A2_orp\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=round(Rss32):sat\n" " C Intrinsic Prototype: Word32 Q6_R_round_P_sat(Word64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_round_P_sat __builtin_HEXAGON_A2_roundsat\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=sat(Rss32)\n" " C Intrinsic Prototype: Word32 Q6_R_sat_P(Word64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_sat_P __builtin_HEXAGON_A2_sat\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=satb(Rs32)\n" " C Intrinsic Prototype: Word32 Q6_R_satb_R(Word32 Rs)\n" " Instruction Type: S_2op\n" " Execution 
Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_satb_R __builtin_HEXAGON_A2_satb\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=sath(Rs32)\n" " C Intrinsic Prototype: Word32 Q6_R_sath_R(Word32 Rs)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_sath_R __builtin_HEXAGON_A2_sath\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=satub(Rs32)\n" " C Intrinsic Prototype: Word32 Q6_R_satub_R(Word32 Rs)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_satub_R __builtin_HEXAGON_A2_satub\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=satuh(Rs32)\n" " C Intrinsic Prototype: Word32 Q6_R_satuh_R(Word32 Rs)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_satuh_R __builtin_HEXAGON_A2_satuh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=sub(Rt32,Rs32)\n" " C Intrinsic Prototype: Word32 Q6_R_sub_RR(Word32 Rt, Word32 Rs)\n" " Instruction Type: ALU32_3op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_sub_RR __builtin_HEXAGON_A2_sub\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=sub(Rt32.h,Rs32.h):<<16\n" " C Intrinsic Prototype: Word32 Q6_R_sub_RhRh_s16(Word32 Rt, Word32 Rs)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " 
========================================================================== */\n" "\n" "#define Q6_R_sub_RhRh_s16 __builtin_HEXAGON_A2_subh_h16_hh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=sub(Rt32.h,Rs32.l):<<16\n" " C Intrinsic Prototype: Word32 Q6_R_sub_RhRl_s16(Word32 Rt, Word32 Rs)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_sub_RhRl_s16 __builtin_HEXAGON_A2_subh_h16_hl\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=sub(Rt32.l,Rs32.h):<<16\n" " C Intrinsic Prototype: Word32 Q6_R_sub_RlRh_s16(Word32 Rt, Word32 Rs)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_sub_RlRh_s16 __builtin_HEXAGON_A2_subh_h16_lh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=sub(Rt32.l,Rs32.l):<<16\n" " C Intrinsic Prototype: Word32 Q6_R_sub_RlRl_s16(Word32 Rt, Word32 Rs)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_sub_RlRl_s16 __builtin_HEXAGON_A2_subh_h16_ll\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=sub(Rt32.h,Rs32.h):sat:<<16\n" " C Intrinsic Prototype: Word32 Q6_R_sub_RhRh_sat_s16(Word32 Rt, Word32 Rs)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_sub_RhRh_sat_s16 __builtin_HEXAGON_A2_subh_h16_sat_hh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=sub(Rt32.h,Rs32.l):sat:<<16\n" " C 
Intrinsic Prototype: Word32 Q6_R_sub_RhRl_sat_s16(Word32 Rt, Word32 Rs)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_sub_RhRl_sat_s16 __builtin_HEXAGON_A2_subh_h16_sat_hl\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=sub(Rt32.l,Rs32.h):sat:<<16\n" " C Intrinsic Prototype: Word32 Q6_R_sub_RlRh_sat_s16(Word32 Rt, Word32 Rs)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_sub_RlRh_sat_s16 __builtin_HEXAGON_A2_subh_h16_sat_lh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=sub(Rt32.l,Rs32.l):sat:<<16\n" " C Intrinsic Prototype: Word32 Q6_R_sub_RlRl_sat_s16(Word32 Rt, Word32 Rs)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_sub_RlRl_sat_s16 __builtin_HEXAGON_A2_subh_h16_sat_ll\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=sub(Rt32.l,Rs32.h)\n" " C Intrinsic Prototype: Word32 Q6_R_sub_RlRh(Word32 Rt, Word32 Rs)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_sub_RlRh __builtin_HEXAGON_A2_subh_l16_hl\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=sub(Rt32.l,Rs32.l)\n" " C Intrinsic Prototype: Word32 Q6_R_sub_RlRl(Word32 Rt, Word32 Rs)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_sub_RlRl __builtin_HEXAGON_A2_subh_l16_ll\n" "\n" "/* 
==========================================================================\n" " Assembly Syntax: Rd32=sub(Rt32.l,Rs32.h):sat\n" " C Intrinsic Prototype: Word32 Q6_R_sub_RlRh_sat(Word32 Rt, Word32 Rs)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_sub_RlRh_sat __builtin_HEXAGON_A2_subh_l16_sat_hl\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=sub(Rt32.l,Rs32.l):sat\n" " C Intrinsic Prototype: Word32 Q6_R_sub_RlRl_sat(Word32 Rt, Word32 Rs)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_sub_RlRl_sat __builtin_HEXAGON_A2_subh_l16_sat_ll\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=sub(Rtt32,Rss32)\n" " C Intrinsic Prototype: Word64 Q6_P_sub_PP(Word64 Rtt, Word64 Rss)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_sub_PP __builtin_HEXAGON_A2_subp\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=sub(#s10,Rs32)\n" " C Intrinsic Prototype: Word32 Q6_R_sub_IR(Word32 Is10, Word32 Rs)\n" " Instruction Type: ALU32_2op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_sub_IR __builtin_HEXAGON_A2_subri\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=sub(Rt32,Rs32):sat\n" " C Intrinsic Prototype: Word32 Q6_R_sub_RR_sat(Word32 Rt, Word32 Rs)\n" " Instruction Type: ALU32_3op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" 
"\n" "#define Q6_R_sub_RR_sat __builtin_HEXAGON_A2_subsat\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=vaddh(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_vaddh_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: ALU32_3op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_vaddh_RR __builtin_HEXAGON_A2_svaddh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=vaddh(Rs32,Rt32):sat\n" " C Intrinsic Prototype: Word32 Q6_R_vaddh_RR_sat(Word32 Rs, Word32 Rt)\n" " Instruction Type: ALU32_3op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_vaddh_RR_sat __builtin_HEXAGON_A2_svaddhs\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=vadduh(Rs32,Rt32):sat\n" " C Intrinsic Prototype: Word32 Q6_R_vadduh_RR_sat(Word32 Rs, Word32 Rt)\n" " Instruction Type: ALU32_3op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_vadduh_RR_sat __builtin_HEXAGON_A2_svadduhs\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=vavgh(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_vavgh_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: ALU32_3op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_vavgh_RR __builtin_HEXAGON_A2_svavgh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=vavgh(Rs32,Rt32):rnd\n" " C Intrinsic Prototype: Word32 Q6_R_vavgh_RR_rnd(Word32 Rs, Word32 Rt)\n" " Instruction Type: ALU32_3op\n" " Execution Slots: SLOT0123\n" " 
========================================================================== */\n" "\n" "#define Q6_R_vavgh_RR_rnd __builtin_HEXAGON_A2_svavghs\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=vnavgh(Rt32,Rs32)\n" " C Intrinsic Prototype: Word32 Q6_R_vnavgh_RR(Word32 Rt, Word32 Rs)\n" " Instruction Type: ALU32_3op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_vnavgh_RR __builtin_HEXAGON_A2_svnavgh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=vsubh(Rt32,Rs32)\n" " C Intrinsic Prototype: Word32 Q6_R_vsubh_RR(Word32 Rt, Word32 Rs)\n" " Instruction Type: ALU32_3op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_vsubh_RR __builtin_HEXAGON_A2_svsubh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=vsubh(Rt32,Rs32):sat\n" " C Intrinsic Prototype: Word32 Q6_R_vsubh_RR_sat(Word32 Rt, Word32 Rs)\n" " Instruction Type: ALU32_3op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_vsubh_RR_sat __builtin_HEXAGON_A2_svsubhs\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=vsubuh(Rt32,Rs32):sat\n" " C Intrinsic Prototype: Word32 Q6_R_vsubuh_RR_sat(Word32 Rt, Word32 Rs)\n" " Instruction Type: ALU32_3op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_vsubuh_RR_sat __builtin_HEXAGON_A2_svsubuhs\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=swiz(Rs32)\n" " C Intrinsic Prototype: Word32 Q6_R_swiz_R(Word32 Rs)\n" " 
Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_swiz_R __builtin_HEXAGON_A2_swiz\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=sxtb(Rs32)\n" " C Intrinsic Prototype: Word32 Q6_R_sxtb_R(Word32 Rs)\n" " Instruction Type: ALU32_2op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_sxtb_R __builtin_HEXAGON_A2_sxtb\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=sxth(Rs32)\n" " C Intrinsic Prototype: Word32 Q6_R_sxth_R(Word32 Rs)\n" " Instruction Type: ALU32_2op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_sxth_R __builtin_HEXAGON_A2_sxth\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=sxtw(Rs32)\n" " C Intrinsic Prototype: Word64 Q6_P_sxtw_R(Word32 Rs)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_sxtw_R __builtin_HEXAGON_A2_sxtw\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=Rs32\n" " C Intrinsic Prototype: Word32 Q6_R_equals_R(Word32 Rs)\n" " Instruction Type: ALU32_2op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_equals_R __builtin_HEXAGON_A2_tfr\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32.h=#u16\n" " C Intrinsic Prototype: Word32 Q6_Rh_equals_I(Word32 Rx, Word32 Iu16)\n" " Instruction Type: ALU32_2op\n" " Execution Slots: SLOT0123\n" " 
========================================================================== */\n" "\n" "#define Q6_Rh_equals_I __builtin_HEXAGON_A2_tfrih\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32.l=#u16\n" " C Intrinsic Prototype: Word32 Q6_Rl_equals_I(Word32 Rx, Word32 Iu16)\n" " Instruction Type: ALU32_2op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Rl_equals_I __builtin_HEXAGON_A2_tfril\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=Rss32\n" " C Intrinsic Prototype: Word64 Q6_P_equals_P(Word64 Rss)\n" " Instruction Type: ALU32_2op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_P_equals_P __builtin_HEXAGON_A2_tfrp\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=#s8\n" " C Intrinsic Prototype: Word64 Q6_P_equals_I(Word32 Is8)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_P_equals_I __builtin_HEXAGON_A2_tfrpi\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=#s16\n" " C Intrinsic Prototype: Word32 Q6_R_equals_I(Word32 Is16)\n" " Instruction Type: ALU32_2op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_equals_I __builtin_HEXAGON_A2_tfrsi\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vabsh(Rss32)\n" " C Intrinsic Prototype: Word64 Q6_P_vabsh_P(Word64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " 
========================================================================== */\n" "\n" "#define Q6_P_vabsh_P __builtin_HEXAGON_A2_vabsh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vabsh(Rss32):sat\n" " C Intrinsic Prototype: Word64 Q6_P_vabsh_P_sat(Word64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vabsh_P_sat __builtin_HEXAGON_A2_vabshsat\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vabsw(Rss32)\n" " C Intrinsic Prototype: Word64 Q6_P_vabsw_P(Word64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vabsw_P __builtin_HEXAGON_A2_vabsw\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vabsw(Rss32):sat\n" " C Intrinsic Prototype: Word64 Q6_P_vabsw_P_sat(Word64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vabsw_P_sat __builtin_HEXAGON_A2_vabswsat\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vaddb(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vaddb_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: MAPPING\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_P_vaddb_PP __builtin_HEXAGON_A2_vaddb_map\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vaddh(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vaddh_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: 
SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vaddh_PP __builtin_HEXAGON_A2_vaddh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vaddh(Rss32,Rtt32):sat\n" " C Intrinsic Prototype: Word64 Q6_P_vaddh_PP_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vaddh_PP_sat __builtin_HEXAGON_A2_vaddhs\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vaddub(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vaddub_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vaddub_PP __builtin_HEXAGON_A2_vaddub\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vaddub(Rss32,Rtt32):sat\n" " C Intrinsic Prototype: Word64 Q6_P_vaddub_PP_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vaddub_PP_sat __builtin_HEXAGON_A2_vaddubs\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vadduh(Rss32,Rtt32):sat\n" " C Intrinsic Prototype: Word64 Q6_P_vadduh_PP_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vadduh_PP_sat __builtin_HEXAGON_A2_vadduhs\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vaddw(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 
Q6_P_vaddw_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vaddw_PP __builtin_HEXAGON_A2_vaddw\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vaddw(Rss32,Rtt32):sat\n" " C Intrinsic Prototype: Word64 Q6_P_vaddw_PP_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vaddw_PP_sat __builtin_HEXAGON_A2_vaddws\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vavgh(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vavgh_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vavgh_PP __builtin_HEXAGON_A2_vavgh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vavgh(Rss32,Rtt32):crnd\n" " C Intrinsic Prototype: Word64 Q6_P_vavgh_PP_crnd(Word64 Rss, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vavgh_PP_crnd __builtin_HEXAGON_A2_vavghcr\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vavgh(Rss32,Rtt32):rnd\n" " C Intrinsic Prototype: Word64 Q6_P_vavgh_PP_rnd(Word64 Rss, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vavgh_PP_rnd __builtin_HEXAGON_A2_vavghr\n" "\n" "/* ==========================================================================\n" " 
Assembly Syntax: Rdd32=vavgub(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vavgub_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vavgub_PP __builtin_HEXAGON_A2_vavgub\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vavgub(Rss32,Rtt32):rnd\n" " C Intrinsic Prototype: Word64 Q6_P_vavgub_PP_rnd(Word64 Rss, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vavgub_PP_rnd __builtin_HEXAGON_A2_vavgubr\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vavguh(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vavguh_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vavguh_PP __builtin_HEXAGON_A2_vavguh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vavguh(Rss32,Rtt32):rnd\n" " C Intrinsic Prototype: Word64 Q6_P_vavguh_PP_rnd(Word64 Rss, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vavguh_PP_rnd __builtin_HEXAGON_A2_vavguhr\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vavguw(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vavguw_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vavguw_PP __builtin_HEXAGON_A2_vavguw\n" "\n" "/* 
==========================================================================\n" " Assembly Syntax: Rdd32=vavguw(Rss32,Rtt32):rnd\n" " C Intrinsic Prototype: Word64 Q6_P_vavguw_PP_rnd(Word64 Rss, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vavguw_PP_rnd __builtin_HEXAGON_A2_vavguwr\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vavgw(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vavgw_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vavgw_PP __builtin_HEXAGON_A2_vavgw\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vavgw(Rss32,Rtt32):crnd\n" " C Intrinsic Prototype: Word64 Q6_P_vavgw_PP_crnd(Word64 Rss, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vavgw_PP_crnd __builtin_HEXAGON_A2_vavgwcr\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vavgw(Rss32,Rtt32):rnd\n" " C Intrinsic Prototype: Word64 Q6_P_vavgw_PP_rnd(Word64 Rss, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vavgw_PP_rnd __builtin_HEXAGON_A2_vavgwr\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=vcmpb.eq(Rss32,Rtt32)\n" " C Intrinsic Prototype: Byte Q6_p_vcmpb_eq_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " 
========================================================================== */\n" "\n" "#define Q6_p_vcmpb_eq_PP __builtin_HEXAGON_A2_vcmpbeq\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=vcmpb.gtu(Rss32,Rtt32)\n" " C Intrinsic Prototype: Byte Q6_p_vcmpb_gtu_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_vcmpb_gtu_PP __builtin_HEXAGON_A2_vcmpbgtu\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=vcmph.eq(Rss32,Rtt32)\n" " C Intrinsic Prototype: Byte Q6_p_vcmph_eq_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_vcmph_eq_PP __builtin_HEXAGON_A2_vcmpheq\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=vcmph.gt(Rss32,Rtt32)\n" " C Intrinsic Prototype: Byte Q6_p_vcmph_gt_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_vcmph_gt_PP __builtin_HEXAGON_A2_vcmphgt\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=vcmph.gtu(Rss32,Rtt32)\n" " C Intrinsic Prototype: Byte Q6_p_vcmph_gtu_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_vcmph_gtu_PP __builtin_HEXAGON_A2_vcmphgtu\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=vcmpw.eq(Rss32,Rtt32)\n" " C Intrinsic Prototype: Byte Q6_p_vcmpw_eq_PP(Word64 Rss, 
Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_vcmpw_eq_PP __builtin_HEXAGON_A2_vcmpweq\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=vcmpw.gt(Rss32,Rtt32)\n" " C Intrinsic Prototype: Byte Q6_p_vcmpw_gt_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_vcmpw_gt_PP __builtin_HEXAGON_A2_vcmpwgt\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=vcmpw.gtu(Rss32,Rtt32)\n" " C Intrinsic Prototype: Byte Q6_p_vcmpw_gtu_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_vcmpw_gtu_PP __builtin_HEXAGON_A2_vcmpwgtu\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vconj(Rss32):sat\n" " C Intrinsic Prototype: Word64 Q6_P_vconj_P_sat(Word64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vconj_P_sat __builtin_HEXAGON_A2_vconj\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vmaxb(Rtt32,Rss32)\n" " C Intrinsic Prototype: Word64 Q6_P_vmaxb_PP(Word64 Rtt, Word64 Rss)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmaxb_PP __builtin_HEXAGON_A2_vmaxb\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vmaxh(Rtt32,Rss32)\n" " C 
Intrinsic Prototype: Word64 Q6_P_vmaxh_PP(Word64 Rtt, Word64 Rss)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmaxh_PP __builtin_HEXAGON_A2_vmaxh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vmaxub(Rtt32,Rss32)\n" " C Intrinsic Prototype: Word64 Q6_P_vmaxub_PP(Word64 Rtt, Word64 Rss)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmaxub_PP __builtin_HEXAGON_A2_vmaxub\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vmaxuh(Rtt32,Rss32)\n" " C Intrinsic Prototype: Word64 Q6_P_vmaxuh_PP(Word64 Rtt, Word64 Rss)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmaxuh_PP __builtin_HEXAGON_A2_vmaxuh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vmaxuw(Rtt32,Rss32)\n" " C Intrinsic Prototype: Word64 Q6_P_vmaxuw_PP(Word64 Rtt, Word64 Rss)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmaxuw_PP __builtin_HEXAGON_A2_vmaxuw\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vmaxw(Rtt32,Rss32)\n" " C Intrinsic Prototype: Word64 Q6_P_vmaxw_PP(Word64 Rtt, Word64 Rss)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmaxw_PP __builtin_HEXAGON_A2_vmaxw\n" "\n" "/* ==========================================================================\n" " 
Assembly Syntax: Rdd32=vminb(Rtt32,Rss32)\n" " C Intrinsic Prototype: Word64 Q6_P_vminb_PP(Word64 Rtt, Word64 Rss)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vminb_PP __builtin_HEXAGON_A2_vminb\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vminh(Rtt32,Rss32)\n" " C Intrinsic Prototype: Word64 Q6_P_vminh_PP(Word64 Rtt, Word64 Rss)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vminh_PP __builtin_HEXAGON_A2_vminh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vminub(Rtt32,Rss32)\n" " C Intrinsic Prototype: Word64 Q6_P_vminub_PP(Word64 Rtt, Word64 Rss)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vminub_PP __builtin_HEXAGON_A2_vminub\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vminuh(Rtt32,Rss32)\n" " C Intrinsic Prototype: Word64 Q6_P_vminuh_PP(Word64 Rtt, Word64 Rss)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vminuh_PP __builtin_HEXAGON_A2_vminuh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vminuw(Rtt32,Rss32)\n" " C Intrinsic Prototype: Word64 Q6_P_vminuw_PP(Word64 Rtt, Word64 Rss)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vminuw_PP __builtin_HEXAGON_A2_vminuw\n" "\n" "/* 
==========================================================================\n" " Assembly Syntax: Rdd32=vminw(Rtt32,Rss32)\n" " C Intrinsic Prototype: Word64 Q6_P_vminw_PP(Word64 Rtt, Word64 Rss)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vminw_PP __builtin_HEXAGON_A2_vminw\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vnavgh(Rtt32,Rss32)\n" " C Intrinsic Prototype: Word64 Q6_P_vnavgh_PP(Word64 Rtt, Word64 Rss)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vnavgh_PP __builtin_HEXAGON_A2_vnavgh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vnavgh(Rtt32,Rss32):crnd:sat\n" " C Intrinsic Prototype: Word64 Q6_P_vnavgh_PP_crnd_sat(Word64 Rtt, Word64 Rss)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vnavgh_PP_crnd_sat __builtin_HEXAGON_A2_vnavghcr\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vnavgh(Rtt32,Rss32):rnd:sat\n" " C Intrinsic Prototype: Word64 Q6_P_vnavgh_PP_rnd_sat(Word64 Rtt, Word64 Rss)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vnavgh_PP_rnd_sat __builtin_HEXAGON_A2_vnavghr\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vnavgw(Rtt32,Rss32)\n" " C Intrinsic Prototype: Word64 Q6_P_vnavgw_PP(Word64 Rtt, Word64 Rss)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " 
========================================================================== */\n" "\n" "#define Q6_P_vnavgw_PP __builtin_HEXAGON_A2_vnavgw\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vnavgw(Rtt32,Rss32):crnd:sat\n" " C Intrinsic Prototype: Word64 Q6_P_vnavgw_PP_crnd_sat(Word64 Rtt, Word64 Rss)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vnavgw_PP_crnd_sat __builtin_HEXAGON_A2_vnavgwcr\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vnavgw(Rtt32,Rss32):rnd:sat\n" " C Intrinsic Prototype: Word64 Q6_P_vnavgw_PP_rnd_sat(Word64 Rtt, Word64 Rss)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vnavgw_PP_rnd_sat __builtin_HEXAGON_A2_vnavgwr\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vraddub(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vraddub_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vraddub_PP __builtin_HEXAGON_A2_vraddub\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vraddub(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vraddubacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vraddubacc_PP __builtin_HEXAGON_A2_vraddub_acc\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vrsadub(Rss32,Rtt32)\n" " C 
Intrinsic Prototype: Word64 Q6_P_vrsadub_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vrsadub_PP __builtin_HEXAGON_A2_vrsadub\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vrsadub(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vrsadubacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vrsadubacc_PP __builtin_HEXAGON_A2_vrsadub_acc\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vsubb(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vsubb_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: MAPPING\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_P_vsubb_PP __builtin_HEXAGON_A2_vsubb_map\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vsubh(Rtt32,Rss32)\n" " C Intrinsic Prototype: Word64 Q6_P_vsubh_PP(Word64 Rtt, Word64 Rss)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vsubh_PP __builtin_HEXAGON_A2_vsubh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vsubh(Rtt32,Rss32):sat\n" " C Intrinsic Prototype: Word64 Q6_P_vsubh_PP_sat(Word64 Rtt, Word64 Rss)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vsubh_PP_sat __builtin_HEXAGON_A2_vsubhs\n" "\n" "/* 
==========================================================================\n" " Assembly Syntax: Rdd32=vsubub(Rtt32,Rss32)\n" " C Intrinsic Prototype: Word64 Q6_P_vsubub_PP(Word64 Rtt, Word64 Rss)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vsubub_PP __builtin_HEXAGON_A2_vsubub\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vsubub(Rtt32,Rss32):sat\n" " C Intrinsic Prototype: Word64 Q6_P_vsubub_PP_sat(Word64 Rtt, Word64 Rss)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vsubub_PP_sat __builtin_HEXAGON_A2_vsububs\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vsubuh(Rtt32,Rss32):sat\n" " C Intrinsic Prototype: Word64 Q6_P_vsubuh_PP_sat(Word64 Rtt, Word64 Rss)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vsubuh_PP_sat __builtin_HEXAGON_A2_vsubuhs\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vsubw(Rtt32,Rss32)\n" " C Intrinsic Prototype: Word64 Q6_P_vsubw_PP(Word64 Rtt, Word64 Rss)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vsubw_PP __builtin_HEXAGON_A2_vsubw\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vsubw(Rtt32,Rss32):sat\n" " C Intrinsic Prototype: Word64 Q6_P_vsubw_PP_sat(Word64 Rtt, Word64 Rss)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " 
========================================================================== */\n" "\n" "#define Q6_P_vsubw_PP_sat __builtin_HEXAGON_A2_vsubws\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=xor(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_xor_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: ALU32_3op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_xor_RR __builtin_HEXAGON_A2_xor\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=xor(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_xor_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_xor_PP __builtin_HEXAGON_A2_xorp\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=zxtb(Rs32)\n" " C Intrinsic Prototype: Word32 Q6_R_zxtb_R(Word32 Rs)\n" " Instruction Type: ALU32_2op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_zxtb_R __builtin_HEXAGON_A2_zxtb\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=zxth(Rs32)\n" " C Intrinsic Prototype: Word32 Q6_R_zxth_R(Word32 Rs)\n" " Instruction Type: ALU32_2op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_zxth_R __builtin_HEXAGON_A2_zxth\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=and(Rt32,~Rs32)\n" " C Intrinsic Prototype: Word32 Q6_R_and_RnR(Word32 Rt, Word32 Rs)\n" " Instruction Type: ALU32_3op\n" " Execution Slots: SLOT0123\n" " 
========================================================================== */\n" "\n" "#define Q6_R_and_RnR __builtin_HEXAGON_A4_andn\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=and(Rtt32,~Rss32)\n" " C Intrinsic Prototype: Word64 Q6_P_and_PnP(Word64 Rtt, Word64 Rss)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_and_PnP __builtin_HEXAGON_A4_andnp\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=bitsplit(Rs32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_bitsplit_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_bitsplit_RR __builtin_HEXAGON_A4_bitsplit\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=bitsplit(Rs32,#u5)\n" " C Intrinsic Prototype: Word64 Q6_P_bitsplit_RI(Word32 Rs, Word32 Iu5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_bitsplit_RI __builtin_HEXAGON_A4_bitspliti\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=boundscheck(Rs32,Rtt32)\n" " C Intrinsic Prototype: Byte Q6_p_boundscheck_RP(Word32 Rs, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_p_boundscheck_RP __builtin_HEXAGON_A4_boundscheck\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=cmpb.eq(Rs32,Rt32)\n" " C Intrinsic Prototype: Byte Q6_p_cmpb_eq_RR(Word32 Rs, Word32 Rt)\n" " 
Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_cmpb_eq_RR __builtin_HEXAGON_A4_cmpbeq\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=cmpb.eq(Rs32,#u8)\n" " C Intrinsic Prototype: Byte Q6_p_cmpb_eq_RI(Word32 Rs, Word32 Iu8)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_cmpb_eq_RI __builtin_HEXAGON_A4_cmpbeqi\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=cmpb.gt(Rs32,Rt32)\n" " C Intrinsic Prototype: Byte Q6_p_cmpb_gt_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_cmpb_gt_RR __builtin_HEXAGON_A4_cmpbgt\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=cmpb.gt(Rs32,#s8)\n" " C Intrinsic Prototype: Byte Q6_p_cmpb_gt_RI(Word32 Rs, Word32 Is8)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_cmpb_gt_RI __builtin_HEXAGON_A4_cmpbgti\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=cmpb.gtu(Rs32,Rt32)\n" " C Intrinsic Prototype: Byte Q6_p_cmpb_gtu_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_cmpb_gtu_RR __builtin_HEXAGON_A4_cmpbgtu\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=cmpb.gtu(Rs32,#u7)\n" " C Intrinsic Prototype: Byte 
Q6_p_cmpb_gtu_RI(Word32 Rs, Word32 Iu7)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_cmpb_gtu_RI __builtin_HEXAGON_A4_cmpbgtui\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=cmph.eq(Rs32,Rt32)\n" " C Intrinsic Prototype: Byte Q6_p_cmph_eq_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_cmph_eq_RR __builtin_HEXAGON_A4_cmpheq\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=cmph.eq(Rs32,#s8)\n" " C Intrinsic Prototype: Byte Q6_p_cmph_eq_RI(Word32 Rs, Word32 Is8)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_cmph_eq_RI __builtin_HEXAGON_A4_cmpheqi\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=cmph.gt(Rs32,Rt32)\n" " C Intrinsic Prototype: Byte Q6_p_cmph_gt_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_cmph_gt_RR __builtin_HEXAGON_A4_cmphgt\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=cmph.gt(Rs32,#s8)\n" " C Intrinsic Prototype: Byte Q6_p_cmph_gt_RI(Word32 Rs, Word32 Is8)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_cmph_gt_RI __builtin_HEXAGON_A4_cmphgti\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: 
Pd4=cmph.gtu(Rs32,Rt32)\n" " C Intrinsic Prototype: Byte Q6_p_cmph_gtu_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_cmph_gtu_RR __builtin_HEXAGON_A4_cmphgtu\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=cmph.gtu(Rs32,#u7)\n" " C Intrinsic Prototype: Byte Q6_p_cmph_gtu_RI(Word32 Rs, Word32 Iu7)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_cmph_gtu_RI __builtin_HEXAGON_A4_cmphgtui\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=combine(#s8,Rs32)\n" " C Intrinsic Prototype: Word64 Q6_P_combine_IR(Word32 Is8, Word32 Rs)\n" " Instruction Type: ALU32_2op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_P_combine_IR __builtin_HEXAGON_A4_combineir\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=combine(Rs32,#s8)\n" " C Intrinsic Prototype: Word64 Q6_P_combine_RI(Word32 Rs, Word32 Is8)\n" " Instruction Type: ALU32_2op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_P_combine_RI __builtin_HEXAGON_A4_combineri\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=cround(Rs32,#u5)\n" " C Intrinsic Prototype: Word32 Q6_R_cround_RI(Word32 Rs, Word32 Iu5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_cround_RI __builtin_HEXAGON_A4_cround_ri\n" "\n" "/* 
==========================================================================\n" " Assembly Syntax: Rd32=cround(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_cround_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_cround_RR __builtin_HEXAGON_A4_cround_rr\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=modwrap(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_modwrap_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_modwrap_RR __builtin_HEXAGON_A4_modwrapu\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=or(Rt32,~Rs32)\n" " C Intrinsic Prototype: Word32 Q6_R_or_RnR(Word32 Rt, Word32 Rs)\n" " Instruction Type: ALU32_3op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_or_RnR __builtin_HEXAGON_A4_orn\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=or(Rtt32,~Rss32)\n" " C Intrinsic Prototype: Word64 Q6_P_or_PnP(Word64 Rtt, Word64 Rss)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_or_PnP __builtin_HEXAGON_A4_ornp\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=cmp.eq(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_cmp_eq_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: ALU32_3op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_cmp_eq_RR 
__builtin_HEXAGON_A4_rcmpeq\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=cmp.eq(Rs32,#s8)\n" " C Intrinsic Prototype: Word32 Q6_R_cmp_eq_RI(Word32 Rs, Word32 Is8)\n" " Instruction Type: ALU32_2op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_cmp_eq_RI __builtin_HEXAGON_A4_rcmpeqi\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=!cmp.eq(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_not_cmp_eq_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: ALU32_3op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_not_cmp_eq_RR __builtin_HEXAGON_A4_rcmpneq\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=!cmp.eq(Rs32,#s8)\n" " C Intrinsic Prototype: Word32 Q6_R_not_cmp_eq_RI(Word32 Rs, Word32 Is8)\n" " Instruction Type: ALU32_2op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_not_cmp_eq_RI __builtin_HEXAGON_A4_rcmpneqi\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=round(Rs32,#u5)\n" " C Intrinsic Prototype: Word32 Q6_R_round_RI(Word32 Rs, Word32 Iu5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_round_RI __builtin_HEXAGON_A4_round_ri\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=round(Rs32,#u5):sat\n" " C Intrinsic Prototype: Word32 Q6_R_round_RI_sat(Word32 Rs, Word32 Iu5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " 
========================================================================== */\n" "\n" "#define Q6_R_round_RI_sat __builtin_HEXAGON_A4_round_ri_sat\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=round(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_round_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_round_RR __builtin_HEXAGON_A4_round_rr\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=round(Rs32,Rt32):sat\n" " C Intrinsic Prototype: Word32 Q6_R_round_RR_sat(Word32 Rs, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_round_RR_sat __builtin_HEXAGON_A4_round_rr_sat\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=tlbmatch(Rss32,Rt32)\n" " C Intrinsic Prototype: Byte Q6_p_tlbmatch_PR(Word64 Rss, Word32 Rt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_tlbmatch_PR __builtin_HEXAGON_A4_tlbmatch\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=any8(vcmpb.eq(Rss32,Rtt32))\n" " C Intrinsic Prototype: Byte Q6_p_any8_vcmpb_eq_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_any8_vcmpb_eq_PP __builtin_HEXAGON_A4_vcmpbeq_any\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=vcmpb.eq(Rss32,#u8)\n" " C Intrinsic Prototype: Byte 
Q6_p_vcmpb_eq_PI(Word64 Rss, Word32 Iu8)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_vcmpb_eq_PI __builtin_HEXAGON_A4_vcmpbeqi\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=vcmpb.gt(Rss32,Rtt32)\n" " C Intrinsic Prototype: Byte Q6_p_vcmpb_gt_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_vcmpb_gt_PP __builtin_HEXAGON_A4_vcmpbgt\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=vcmpb.gt(Rss32,#s8)\n" " C Intrinsic Prototype: Byte Q6_p_vcmpb_gt_PI(Word64 Rss, Word32 Is8)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_vcmpb_gt_PI __builtin_HEXAGON_A4_vcmpbgti\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=vcmpb.gtu(Rss32,#u7)\n" " C Intrinsic Prototype: Byte Q6_p_vcmpb_gtu_PI(Word64 Rss, Word32 Iu7)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_vcmpb_gtu_PI __builtin_HEXAGON_A4_vcmpbgtui\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=vcmph.eq(Rss32,#s8)\n" " C Intrinsic Prototype: Byte Q6_p_vcmph_eq_PI(Word64 Rss, Word32 Is8)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_vcmph_eq_PI __builtin_HEXAGON_A4_vcmpheqi\n" "\n" "/* ==========================================================================\n" " Assembly 
Syntax: Pd4=vcmph.gt(Rss32,#s8)\n" " C Intrinsic Prototype: Byte Q6_p_vcmph_gt_PI(Word64 Rss, Word32 Is8)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_vcmph_gt_PI __builtin_HEXAGON_A4_vcmphgti\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=vcmph.gtu(Rss32,#u7)\n" " C Intrinsic Prototype: Byte Q6_p_vcmph_gtu_PI(Word64 Rss, Word32 Iu7)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_vcmph_gtu_PI __builtin_HEXAGON_A4_vcmphgtui\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=vcmpw.eq(Rss32,#s8)\n" " C Intrinsic Prototype: Byte Q6_p_vcmpw_eq_PI(Word64 Rss, Word32 Is8)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_vcmpw_eq_PI __builtin_HEXAGON_A4_vcmpweqi\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=vcmpw.gt(Rss32,#s8)\n" " C Intrinsic Prototype: Byte Q6_p_vcmpw_gt_PI(Word64 Rss, Word32 Is8)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_vcmpw_gt_PI __builtin_HEXAGON_A4_vcmpwgti\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=vcmpw.gtu(Rss32,#u7)\n" " C Intrinsic Prototype: Byte Q6_p_vcmpw_gtu_PI(Word64 Rss, Word32 Iu7)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_vcmpw_gtu_PI __builtin_HEXAGON_A4_vcmpwgtui\n" "\n" "/* 
==========================================================================\n" " Assembly Syntax: Rxx32=vrmaxh(Rss32,Ru32)\n" " C Intrinsic Prototype: Word64 Q6_P_vrmaxh_PR(Word64 Rxx, Word64 Rss, Word32 Ru)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vrmaxh_PR __builtin_HEXAGON_A4_vrmaxh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32=vrmaxuh(Rss32,Ru32)\n" " C Intrinsic Prototype: Word64 Q6_P_vrmaxuh_PR(Word64 Rxx, Word64 Rss, Word32 Ru)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vrmaxuh_PR __builtin_HEXAGON_A4_vrmaxuh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32=vrmaxuw(Rss32,Ru32)\n" " C Intrinsic Prototype: Word64 Q6_P_vrmaxuw_PR(Word64 Rxx, Word64 Rss, Word32 Ru)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vrmaxuw_PR __builtin_HEXAGON_A4_vrmaxuw\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32=vrmaxw(Rss32,Ru32)\n" " C Intrinsic Prototype: Word64 Q6_P_vrmaxw_PR(Word64 Rxx, Word64 Rss, Word32 Ru)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vrmaxw_PR __builtin_HEXAGON_A4_vrmaxw\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32=vrminh(Rss32,Ru32)\n" " C Intrinsic Prototype: Word64 Q6_P_vrminh_PR(Word64 Rxx, Word64 Rss, Word32 Ru)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " 
========================================================================== */\n" "\n" "#define Q6_P_vrminh_PR __builtin_HEXAGON_A4_vrminh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32=vrminuh(Rss32,Ru32)\n" " C Intrinsic Prototype: Word64 Q6_P_vrminuh_PR(Word64 Rxx, Word64 Rss, Word32 Ru)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vrminuh_PR __builtin_HEXAGON_A4_vrminuh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32=vrminuw(Rss32,Ru32)\n" " C Intrinsic Prototype: Word64 Q6_P_vrminuw_PR(Word64 Rxx, Word64 Rss, Word32 Ru)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vrminuw_PR __builtin_HEXAGON_A4_vrminuw\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32=vrminw(Rss32,Ru32)\n" " C Intrinsic Prototype: Word64 Q6_P_vrminw_PR(Word64 Rxx, Word64 Rss, Word32 Ru)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vrminw_PR __builtin_HEXAGON_A4_vrminw\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=vaddhub(Rss32,Rtt32):sat\n" " C Intrinsic Prototype: Word32 Q6_R_vaddhub_PP_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_vaddhub_PP_sat __builtin_HEXAGON_A5_vaddhubs\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=all8(Ps4)\n" " C Intrinsic Prototype: Byte 
Q6_p_all8_p(Byte Ps)\n" " Instruction Type: CR\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_all8_p __builtin_HEXAGON_C2_all8\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=and(Pt4,Ps4)\n" " C Intrinsic Prototype: Byte Q6_p_and_pp(Byte Pt, Byte Ps)\n" " Instruction Type: CR\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_and_pp __builtin_HEXAGON_C2_and\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=and(Pt4,!Ps4)\n" " C Intrinsic Prototype: Byte Q6_p_and_pnp(Byte Pt, Byte Ps)\n" " Instruction Type: CR\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_and_pnp __builtin_HEXAGON_C2_andn\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=any8(Ps4)\n" " C Intrinsic Prototype: Byte Q6_p_any8_p(Byte Ps)\n" " Instruction Type: CR\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_any8_p __builtin_HEXAGON_C2_any8\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=bitsclr(Rs32,Rt32)\n" " C Intrinsic Prototype: Byte Q6_p_bitsclr_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_bitsclr_RR __builtin_HEXAGON_C2_bitsclr\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=bitsclr(Rs32,#u6)\n" " C Intrinsic Prototype: Byte Q6_p_bitsclr_RI(Word32 Rs, Word32 Iu6)\n" " Instruction Type: S_2op\n" " 
Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_bitsclr_RI __builtin_HEXAGON_C2_bitsclri\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=bitsset(Rs32,Rt32)\n" " C Intrinsic Prototype: Byte Q6_p_bitsset_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_bitsset_RR __builtin_HEXAGON_C2_bitsset\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=cmp.eq(Rs32,Rt32)\n" " C Intrinsic Prototype: Byte Q6_p_cmp_eq_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: ALU32_3op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_p_cmp_eq_RR __builtin_HEXAGON_C2_cmpeq\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=cmp.eq(Rs32,#s10)\n" " C Intrinsic Prototype: Byte Q6_p_cmp_eq_RI(Word32 Rs, Word32 Is10)\n" " Instruction Type: ALU32_2op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_p_cmp_eq_RI __builtin_HEXAGON_C2_cmpeqi\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=cmp.eq(Rss32,Rtt32)\n" " C Intrinsic Prototype: Byte Q6_p_cmp_eq_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_cmp_eq_PP __builtin_HEXAGON_C2_cmpeqp\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=cmp.ge(Rs32,#s8)\n" " C Intrinsic Prototype: Byte Q6_p_cmp_ge_RI(Word32 Rs, Word32 
Is8)\n" " Instruction Type: ALU32_2op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_p_cmp_ge_RI __builtin_HEXAGON_C2_cmpgei\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=cmp.geu(Rs32,#u8)\n" " C Intrinsic Prototype: Byte Q6_p_cmp_geu_RI(Word32 Rs, Word32 Iu8)\n" " Instruction Type: ALU32_2op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_p_cmp_geu_RI __builtin_HEXAGON_C2_cmpgeui\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=cmp.gt(Rs32,Rt32)\n" " C Intrinsic Prototype: Byte Q6_p_cmp_gt_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: ALU32_3op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_p_cmp_gt_RR __builtin_HEXAGON_C2_cmpgt\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=cmp.gt(Rs32,#s10)\n" " C Intrinsic Prototype: Byte Q6_p_cmp_gt_RI(Word32 Rs, Word32 Is10)\n" " Instruction Type: ALU32_2op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_p_cmp_gt_RI __builtin_HEXAGON_C2_cmpgti\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=cmp.gt(Rss32,Rtt32)\n" " C Intrinsic Prototype: Byte Q6_p_cmp_gt_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_cmp_gt_PP __builtin_HEXAGON_C2_cmpgtp\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=cmp.gtu(Rs32,Rt32)\n" " C Intrinsic 
Prototype: Byte Q6_p_cmp_gtu_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: ALU32_3op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_p_cmp_gtu_RR __builtin_HEXAGON_C2_cmpgtu\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=cmp.gtu(Rs32,#u9)\n" " C Intrinsic Prototype: Byte Q6_p_cmp_gtu_RI(Word32 Rs, Word32 Iu9)\n" " Instruction Type: ALU32_2op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_p_cmp_gtu_RI __builtin_HEXAGON_C2_cmpgtui\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=cmp.gtu(Rss32,Rtt32)\n" " C Intrinsic Prototype: Byte Q6_p_cmp_gtu_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_cmp_gtu_PP __builtin_HEXAGON_C2_cmpgtup\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=cmp.lt(Rs32,Rt32)\n" " C Intrinsic Prototype: Byte Q6_p_cmp_lt_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: ALU32_3op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_p_cmp_lt_RR __builtin_HEXAGON_C2_cmplt\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=cmp.ltu(Rs32,Rt32)\n" " C Intrinsic Prototype: Byte Q6_p_cmp_ltu_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: ALU32_3op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_p_cmp_ltu_RR __builtin_HEXAGON_C2_cmpltu\n" "\n" "/* ==========================================================================\n" " 
Assembly Syntax: Rdd32=mask(Pt4)\n" " C Intrinsic Prototype: Word64 Q6_P_mask_p(Byte Pt)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mask_p __builtin_HEXAGON_C2_mask\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mux(Pu4,Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_mux_pRR(Byte Pu, Word32 Rs, Word32 Rt)\n" " Instruction Type: ALU32_3op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_mux_pRR __builtin_HEXAGON_C2_mux\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mux(Pu4,#s8,#S8)\n" " C Intrinsic Prototype: Word32 Q6_R_mux_pII(Byte Pu, Word32 Is8, Word32 IS8)\n" " Instruction Type: ALU32_2op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_mux_pII __builtin_HEXAGON_C2_muxii\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mux(Pu4,Rs32,#s8)\n" " C Intrinsic Prototype: Word32 Q6_R_mux_pRI(Byte Pu, Word32 Rs, Word32 Is8)\n" " Instruction Type: ALU32_2op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_mux_pRI __builtin_HEXAGON_C2_muxir\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mux(Pu4,#s8,Rs32)\n" " C Intrinsic Prototype: Word32 Q6_R_mux_pIR(Byte Pu, Word32 Is8, Word32 Rs)\n" " Instruction Type: ALU32_2op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_mux_pIR __builtin_HEXAGON_C2_muxri\n" "\n" "/* 
==========================================================================\n" " Assembly Syntax: Pd4=not(Ps4)\n" " C Intrinsic Prototype: Byte Q6_p_not_p(Byte Ps)\n" " Instruction Type: CR\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_not_p __builtin_HEXAGON_C2_not\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=or(Pt4,Ps4)\n" " C Intrinsic Prototype: Byte Q6_p_or_pp(Byte Pt, Byte Ps)\n" " Instruction Type: CR\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_or_pp __builtin_HEXAGON_C2_or\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=or(Pt4,!Ps4)\n" " C Intrinsic Prototype: Byte Q6_p_or_pnp(Byte Pt, Byte Ps)\n" " Instruction Type: CR\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_or_pnp __builtin_HEXAGON_C2_orn\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=Ps4\n" " C Intrinsic Prototype: Byte Q6_p_equals_p(Byte Ps)\n" " Instruction Type: MAPPING\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_p_equals_p __builtin_HEXAGON_C2_pxfer_map\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=Ps4\n" " C Intrinsic Prototype: Word32 Q6_R_equals_p(Byte Ps)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_equals_p __builtin_HEXAGON_C2_tfrpr\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=Rs32\n" " C 
Intrinsic Prototype: Byte Q6_p_equals_R(Word32 Rs)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_equals_R __builtin_HEXAGON_C2_tfrrp\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=vitpack(Ps4,Pt4)\n" " C Intrinsic Prototype: Word32 Q6_R_vitpack_pp(Byte Ps, Byte Pt)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_vitpack_pp __builtin_HEXAGON_C2_vitpack\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vmux(Pu4,Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vmux_pPP(Byte Pu, Word64 Rss, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmux_pPP __builtin_HEXAGON_C2_vmux\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=xor(Ps4,Pt4)\n" " C Intrinsic Prototype: Byte Q6_p_xor_pp(Byte Ps, Byte Pt)\n" " Instruction Type: CR\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_xor_pp __builtin_HEXAGON_C2_xor\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=and(Ps4,and(Pt4,Pu4))\n" " C Intrinsic Prototype: Byte Q6_p_and_and_ppp(Byte Ps, Byte Pt, Byte Pu)\n" " Instruction Type: CR\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_and_and_ppp __builtin_HEXAGON_C4_and_and\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: 
Pd4=and(Ps4,and(Pt4,!Pu4))\n" " C Intrinsic Prototype: Byte Q6_p_and_and_ppnp(Byte Ps, Byte Pt, Byte Pu)\n" " Instruction Type: CR\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_and_and_ppnp __builtin_HEXAGON_C4_and_andn\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=and(Ps4,or(Pt4,Pu4))\n" " C Intrinsic Prototype: Byte Q6_p_and_or_ppp(Byte Ps, Byte Pt, Byte Pu)\n" " Instruction Type: CR\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_and_or_ppp __builtin_HEXAGON_C4_and_or\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=and(Ps4,or(Pt4,!Pu4))\n" " C Intrinsic Prototype: Byte Q6_p_and_or_ppnp(Byte Ps, Byte Pt, Byte Pu)\n" " Instruction Type: CR\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_and_or_ppnp __builtin_HEXAGON_C4_and_orn\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=!cmp.gt(Rs32,Rt32)\n" " C Intrinsic Prototype: Byte Q6_p_not_cmp_gt_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: ALU32_3op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_p_not_cmp_gt_RR __builtin_HEXAGON_C4_cmplte\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=!cmp.gt(Rs32,#s10)\n" " C Intrinsic Prototype: Byte Q6_p_not_cmp_gt_RI(Word32 Rs, Word32 Is10)\n" " Instruction Type: ALU32_2op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_p_not_cmp_gt_RI __builtin_HEXAGON_C4_cmpltei\n" "\n" "/* 
==========================================================================\n" " Assembly Syntax: Pd4=!cmp.gtu(Rs32,Rt32)\n" " C Intrinsic Prototype: Byte Q6_p_not_cmp_gtu_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: ALU32_3op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_p_not_cmp_gtu_RR __builtin_HEXAGON_C4_cmplteu\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=!cmp.gtu(Rs32,#u9)\n" " C Intrinsic Prototype: Byte Q6_p_not_cmp_gtu_RI(Word32 Rs, Word32 Iu9)\n" " Instruction Type: ALU32_2op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_p_not_cmp_gtu_RI __builtin_HEXAGON_C4_cmplteui\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=!cmp.eq(Rs32,Rt32)\n" " C Intrinsic Prototype: Byte Q6_p_not_cmp_eq_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: ALU32_3op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_p_not_cmp_eq_RR __builtin_HEXAGON_C4_cmpneq\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=!cmp.eq(Rs32,#s10)\n" " C Intrinsic Prototype: Byte Q6_p_not_cmp_eq_RI(Word32 Rs, Word32 Is10)\n" " Instruction Type: ALU32_2op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_p_not_cmp_eq_RI __builtin_HEXAGON_C4_cmpneqi\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=fastcorner9(Ps4,Pt4)\n" " C Intrinsic Prototype: Byte Q6_p_fastcorner9_pp(Byte Ps, Byte Pt)\n" " Instruction Type: CR\n" " Execution Slots: SLOT23\n" " 
========================================================================== */\n" "\n" "#define Q6_p_fastcorner9_pp __builtin_HEXAGON_C4_fastcorner9\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=!fastcorner9(Ps4,Pt4)\n" " C Intrinsic Prototype: Byte Q6_p_not_fastcorner9_pp(Byte Ps, Byte Pt)\n" " Instruction Type: CR\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_not_fastcorner9_pp __builtin_HEXAGON_C4_fastcorner9_not\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=!bitsclr(Rs32,Rt32)\n" " C Intrinsic Prototype: Byte Q6_p_not_bitsclr_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_not_bitsclr_RR __builtin_HEXAGON_C4_nbitsclr\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=!bitsclr(Rs32,#u6)\n" " C Intrinsic Prototype: Byte Q6_p_not_bitsclr_RI(Word32 Rs, Word32 Iu6)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_not_bitsclr_RI __builtin_HEXAGON_C4_nbitsclri\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=!bitsset(Rs32,Rt32)\n" " C Intrinsic Prototype: Byte Q6_p_not_bitsset_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_not_bitsset_RR __builtin_HEXAGON_C4_nbitsset\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=or(Ps4,and(Pt4,Pu4))\n" " C Intrinsic Prototype: Byte 
Q6_p_or_and_ppp(Byte Ps, Byte Pt, Byte Pu)\n" " Instruction Type: CR\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_or_and_ppp __builtin_HEXAGON_C4_or_and\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=or(Ps4,and(Pt4,!Pu4))\n" " C Intrinsic Prototype: Byte Q6_p_or_and_ppnp(Byte Ps, Byte Pt, Byte Pu)\n" " Instruction Type: CR\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_or_and_ppnp __builtin_HEXAGON_C4_or_andn\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=or(Ps4,or(Pt4,Pu4))\n" " C Intrinsic Prototype: Byte Q6_p_or_or_ppp(Byte Ps, Byte Pt, Byte Pu)\n" " Instruction Type: CR\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_or_or_ppp __builtin_HEXAGON_C4_or_or\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=or(Ps4,or(Pt4,!Pu4))\n" " C Intrinsic Prototype: Byte Q6_p_or_or_ppnp(Byte Ps, Byte Pt, Byte Pu)\n" " Instruction Type: CR\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_or_or_ppnp __builtin_HEXAGON_C4_or_orn\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=convert_d2df(Rss32)\n" " C Intrinsic Prototype: Float64 Q6_P_convert_d2df_P(Word64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_convert_d2df_P __builtin_HEXAGON_F2_conv_d2df\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: 
Rd32=convert_d2sf(Rss32)\n" " C Intrinsic Prototype: Float32 Q6_R_convert_d2sf_P(Word64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_convert_d2sf_P __builtin_HEXAGON_F2_conv_d2sf\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=convert_df2d(Rss32)\n" " C Intrinsic Prototype: Word64 Q6_P_convert_df2d_P(Float64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_convert_df2d_P __builtin_HEXAGON_F2_conv_df2d\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=convert_df2d(Rss32):chop\n" " C Intrinsic Prototype: Word64 Q6_P_convert_df2d_P_chop(Float64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_convert_df2d_P_chop __builtin_HEXAGON_F2_conv_df2d_chop\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=convert_df2sf(Rss32)\n" " C Intrinsic Prototype: Float32 Q6_R_convert_df2sf_P(Float64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_convert_df2sf_P __builtin_HEXAGON_F2_conv_df2sf\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=convert_df2ud(Rss32)\n" " C Intrinsic Prototype: Word64 Q6_P_convert_df2ud_P(Float64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_convert_df2ud_P __builtin_HEXAGON_F2_conv_df2ud\n" "\n" "/* 
==========================================================================\n" " Assembly Syntax: Rdd32=convert_df2ud(Rss32):chop\n" " C Intrinsic Prototype: Word64 Q6_P_convert_df2ud_P_chop(Float64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_convert_df2ud_P_chop __builtin_HEXAGON_F2_conv_df2ud_chop\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=convert_df2uw(Rss32)\n" " C Intrinsic Prototype: Word32 Q6_R_convert_df2uw_P(Float64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_convert_df2uw_P __builtin_HEXAGON_F2_conv_df2uw\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=convert_df2uw(Rss32):chop\n" " C Intrinsic Prototype: Word32 Q6_R_convert_df2uw_P_chop(Float64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_convert_df2uw_P_chop __builtin_HEXAGON_F2_conv_df2uw_chop\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=convert_df2w(Rss32)\n" " C Intrinsic Prototype: Word32 Q6_R_convert_df2w_P(Float64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_convert_df2w_P __builtin_HEXAGON_F2_conv_df2w\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=convert_df2w(Rss32):chop\n" " C Intrinsic Prototype: Word32 Q6_R_convert_df2w_P_chop(Float64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " 
========================================================================== */\n" "\n" "#define Q6_R_convert_df2w_P_chop __builtin_HEXAGON_F2_conv_df2w_chop\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=convert_sf2d(Rs32)\n" " C Intrinsic Prototype: Word64 Q6_P_convert_sf2d_R(Float32 Rs)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_convert_sf2d_R __builtin_HEXAGON_F2_conv_sf2d\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=convert_sf2d(Rs32):chop\n" " C Intrinsic Prototype: Word64 Q6_P_convert_sf2d_R_chop(Float32 Rs)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_convert_sf2d_R_chop __builtin_HEXAGON_F2_conv_sf2d_chop\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=convert_sf2df(Rs32)\n" " C Intrinsic Prototype: Float64 Q6_P_convert_sf2df_R(Float32 Rs)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_convert_sf2df_R __builtin_HEXAGON_F2_conv_sf2df\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=convert_sf2ud(Rs32)\n" " C Intrinsic Prototype: Word64 Q6_P_convert_sf2ud_R(Float32 Rs)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_convert_sf2ud_R __builtin_HEXAGON_F2_conv_sf2ud\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=convert_sf2ud(Rs32):chop\n" " C Intrinsic Prototype: Word64 
Q6_P_convert_sf2ud_R_chop(Float32 Rs)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_convert_sf2ud_R_chop __builtin_HEXAGON_F2_conv_sf2ud_chop\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=convert_sf2uw(Rs32)\n" " C Intrinsic Prototype: Word32 Q6_R_convert_sf2uw_R(Float32 Rs)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_convert_sf2uw_R __builtin_HEXAGON_F2_conv_sf2uw\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=convert_sf2uw(Rs32):chop\n" " C Intrinsic Prototype: Word32 Q6_R_convert_sf2uw_R_chop(Float32 Rs)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_convert_sf2uw_R_chop __builtin_HEXAGON_F2_conv_sf2uw_chop\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=convert_sf2w(Rs32)\n" " C Intrinsic Prototype: Word32 Q6_R_convert_sf2w_R(Float32 Rs)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_convert_sf2w_R __builtin_HEXAGON_F2_conv_sf2w\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=convert_sf2w(Rs32):chop\n" " C Intrinsic Prototype: Word32 Q6_R_convert_sf2w_R_chop(Float32 Rs)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_convert_sf2w_R_chop __builtin_HEXAGON_F2_conv_sf2w_chop\n" "\n" "/* 
==========================================================================\n" " Assembly Syntax: Rdd32=convert_ud2df(Rss32)\n" " C Intrinsic Prototype: Float64 Q6_P_convert_ud2df_P(Word64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_convert_ud2df_P __builtin_HEXAGON_F2_conv_ud2df\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=convert_ud2sf(Rss32)\n" " C Intrinsic Prototype: Float32 Q6_R_convert_ud2sf_P(Word64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_convert_ud2sf_P __builtin_HEXAGON_F2_conv_ud2sf\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=convert_uw2df(Rs32)\n" " C Intrinsic Prototype: Float64 Q6_P_convert_uw2df_R(Word32 Rs)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_convert_uw2df_R __builtin_HEXAGON_F2_conv_uw2df\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=convert_uw2sf(Rs32)\n" " C Intrinsic Prototype: Float32 Q6_R_convert_uw2sf_R(Word32 Rs)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_convert_uw2sf_R __builtin_HEXAGON_F2_conv_uw2sf\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=convert_w2df(Rs32)\n" " C Intrinsic Prototype: Float64 Q6_P_convert_w2df_R(Word32 Rs)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" 
"#define Q6_P_convert_w2df_R __builtin_HEXAGON_F2_conv_w2df\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=convert_w2sf(Rs32)\n" " C Intrinsic Prototype: Float32 Q6_R_convert_w2sf_R(Word32 Rs)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_convert_w2sf_R __builtin_HEXAGON_F2_conv_w2sf\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=dfclass(Rss32,#u5)\n" " C Intrinsic Prototype: Byte Q6_p_dfclass_PI(Float64 Rss, Word32 Iu5)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_dfclass_PI __builtin_HEXAGON_F2_dfclass\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=dfcmp.eq(Rss32,Rtt32)\n" " C Intrinsic Prototype: Byte Q6_p_dfcmp_eq_PP(Float64 Rss, Float64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_dfcmp_eq_PP __builtin_HEXAGON_F2_dfcmpeq\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=dfcmp.ge(Rss32,Rtt32)\n" " C Intrinsic Prototype: Byte Q6_p_dfcmp_ge_PP(Float64 Rss, Float64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_dfcmp_ge_PP __builtin_HEXAGON_F2_dfcmpge\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=dfcmp.gt(Rss32,Rtt32)\n" " C Intrinsic Prototype: Byte Q6_p_dfcmp_gt_PP(Float64 Rss, Float64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " 
========================================================================== */\n" "\n" "#define Q6_p_dfcmp_gt_PP __builtin_HEXAGON_F2_dfcmpgt\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=dfcmp.uo(Rss32,Rtt32)\n" " C Intrinsic Prototype: Byte Q6_p_dfcmp_uo_PP(Float64 Rss, Float64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_dfcmp_uo_PP __builtin_HEXAGON_F2_dfcmpuo\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=dfmake(#u10):neg\n" " C Intrinsic Prototype: Float64 Q6_P_dfmake_I_neg(Word32 Iu10)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_dfmake_I_neg __builtin_HEXAGON_F2_dfimm_n\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=dfmake(#u10):pos\n" " C Intrinsic Prototype: Float64 Q6_P_dfmake_I_pos(Word32 Iu10)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_dfmake_I_pos __builtin_HEXAGON_F2_dfimm_p\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=sfadd(Rs32,Rt32)\n" " C Intrinsic Prototype: Float32 Q6_R_sfadd_RR(Float32 Rs, Float32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_sfadd_RR __builtin_HEXAGON_F2_sfadd\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=sfclass(Rs32,#u5)\n" " C Intrinsic Prototype: Byte Q6_p_sfclass_RI(Float32 Rs, Word32 Iu5)\n" " Instruction Type: S_2op\n" 
" Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_sfclass_RI __builtin_HEXAGON_F2_sfclass\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=sfcmp.eq(Rs32,Rt32)\n" " C Intrinsic Prototype: Byte Q6_p_sfcmp_eq_RR(Float32 Rs, Float32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_sfcmp_eq_RR __builtin_HEXAGON_F2_sfcmpeq\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=sfcmp.ge(Rs32,Rt32)\n" " C Intrinsic Prototype: Byte Q6_p_sfcmp_ge_RR(Float32 Rs, Float32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_sfcmp_ge_RR __builtin_HEXAGON_F2_sfcmpge\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=sfcmp.gt(Rs32,Rt32)\n" " C Intrinsic Prototype: Byte Q6_p_sfcmp_gt_RR(Float32 Rs, Float32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_sfcmp_gt_RR __builtin_HEXAGON_F2_sfcmpgt\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=sfcmp.uo(Rs32,Rt32)\n" " C Intrinsic Prototype: Byte Q6_p_sfcmp_uo_RR(Float32 Rs, Float32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_sfcmp_uo_RR __builtin_HEXAGON_F2_sfcmpuo\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=sffixupd(Rs32,Rt32)\n" " C Intrinsic Prototype: Float32 
Q6_R_sffixupd_RR(Float32 Rs, Float32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_sffixupd_RR __builtin_HEXAGON_F2_sffixupd\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=sffixupn(Rs32,Rt32)\n" " C Intrinsic Prototype: Float32 Q6_R_sffixupn_RR(Float32 Rs, Float32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_sffixupn_RR __builtin_HEXAGON_F2_sffixupn\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=sffixupr(Rs32)\n" " C Intrinsic Prototype: Float32 Q6_R_sffixupr_R(Float32 Rs)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_sffixupr_R __builtin_HEXAGON_F2_sffixupr\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32+=sfmpy(Rs32,Rt32)\n" " C Intrinsic Prototype: Float32 Q6_R_sfmpyacc_RR(Float32 Rx, Float32 Rs, Float32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_sfmpyacc_RR __builtin_HEXAGON_F2_sffma\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32+=sfmpy(Rs32,Rt32):lib\n" " C Intrinsic Prototype: Float32 Q6_R_sfmpyacc_RR_lib(Float32 Rx, Float32 Rs, Float32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_sfmpyacc_RR_lib __builtin_HEXAGON_F2_sffma_lib\n" "\n" "/* ==========================================================================\n" " 
Assembly Syntax: Rx32+=sfmpy(Rs32,Rt32,Pu4):scale\n" " C Intrinsic Prototype: Float32 Q6_R_sfmpyacc_RRp_scale(Float32 Rx, Float32 Rs, Float32 Rt, Byte Pu)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_sfmpyacc_RRp_scale __builtin_HEXAGON_F2_sffma_sc\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32-=sfmpy(Rs32,Rt32)\n" " C Intrinsic Prototype: Float32 Q6_R_sfmpynac_RR(Float32 Rx, Float32 Rs, Float32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_sfmpynac_RR __builtin_HEXAGON_F2_sffms\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32-=sfmpy(Rs32,Rt32):lib\n" " C Intrinsic Prototype: Float32 Q6_R_sfmpynac_RR_lib(Float32 Rx, Float32 Rs, Float32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_sfmpynac_RR_lib __builtin_HEXAGON_F2_sffms_lib\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=sfmake(#u10):neg\n" " C Intrinsic Prototype: Float32 Q6_R_sfmake_I_neg(Word32 Iu10)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_sfmake_I_neg __builtin_HEXAGON_F2_sfimm_n\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=sfmake(#u10):pos\n" " C Intrinsic Prototype: Float32 Q6_R_sfmake_I_pos(Word32 Iu10)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_sfmake_I_pos 
__builtin_HEXAGON_F2_sfimm_p\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=sfmax(Rs32,Rt32)\n" " C Intrinsic Prototype: Float32 Q6_R_sfmax_RR(Float32 Rs, Float32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_sfmax_RR __builtin_HEXAGON_F2_sfmax\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=sfmin(Rs32,Rt32)\n" " C Intrinsic Prototype: Float32 Q6_R_sfmin_RR(Float32 Rs, Float32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_sfmin_RR __builtin_HEXAGON_F2_sfmin\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=sfmpy(Rs32,Rt32)\n" " C Intrinsic Prototype: Float32 Q6_R_sfmpy_RR(Float32 Rs, Float32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_sfmpy_RR __builtin_HEXAGON_F2_sfmpy\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=sfsub(Rs32,Rt32)\n" " C Intrinsic Prototype: Float32 Q6_R_sfsub_RR(Float32 Rs, Float32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_sfsub_RR __builtin_HEXAGON_F2_sfsub\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=memb(Rx32++#s4:0:circ(Mu2))\n" " C Intrinsic Prototype: Word32 Q6_R_memb_IM_circ(void** Rx, Word32 Is4_0, Word32 Mu, void* BaseAddress)\n" " Instruction Type: LD\n" " Execution Slots: SLOT01\n" " 
========================================================================== */\n" "\n" "#define Q6_R_memb_IM_circ __builtin_HEXAGON_L2_loadrb_pci\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=memb(Rx32++I:circ(Mu2))\n" " C Intrinsic Prototype: Word32 Q6_R_memb_M_circ(void** Rx, Word32 Mu, void* BaseAddress)\n" " Instruction Type: LD\n" " Execution Slots: SLOT01\n" " ========================================================================== */\n" "\n" "#define Q6_R_memb_M_circ __builtin_HEXAGON_L2_loadrb_pcr\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=memd(Rx32++#s4:3:circ(Mu2))\n" " C Intrinsic Prototype: Word64 Q6_P_memd_IM_circ(void** Rx, Word32 Is4_3, Word32 Mu, void* BaseAddress)\n" " Instruction Type: LD\n" " Execution Slots: SLOT01\n" " ========================================================================== */\n" "\n" "#define Q6_P_memd_IM_circ __builtin_HEXAGON_L2_loadrd_pci\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=memd(Rx32++I:circ(Mu2))\n" " C Intrinsic Prototype: Word64 Q6_P_memd_M_circ(void** Rx, Word32 Mu, void* BaseAddress)\n" " Instruction Type: LD\n" " Execution Slots: SLOT01\n" " ========================================================================== */\n" "\n" "#define Q6_P_memd_M_circ __builtin_HEXAGON_L2_loadrd_pcr\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=memh(Rx32++#s4:1:circ(Mu2))\n" " C Intrinsic Prototype: Word32 Q6_R_memh_IM_circ(void** Rx, Word32 Is4_1, Word32 Mu, void* BaseAddress)\n" " Instruction Type: LD\n" " Execution Slots: SLOT01\n" " ========================================================================== */\n" "\n" "#define Q6_R_memh_IM_circ __builtin_HEXAGON_L2_loadrh_pci\n" "\n" "/* 
==========================================================================\n" " Assembly Syntax: Rd32=memh(Rx32++I:circ(Mu2))\n" " C Intrinsic Prototype: Word32 Q6_R_memh_M_circ(void** Rx, Word32 Mu, void* BaseAddress)\n" " Instruction Type: LD\n" " Execution Slots: SLOT01\n" " ========================================================================== */\n" "\n" "#define Q6_R_memh_M_circ __builtin_HEXAGON_L2_loadrh_pcr\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=memw(Rx32++#s4:2:circ(Mu2))\n" " C Intrinsic Prototype: Word32 Q6_R_memw_IM_circ(void** Rx, Word32 Is4_2, Word32 Mu, void* BaseAddress)\n" " Instruction Type: LD\n" " Execution Slots: SLOT01\n" " ========================================================================== */\n" "\n" "#define Q6_R_memw_IM_circ __builtin_HEXAGON_L2_loadri_pci\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=memw(Rx32++I:circ(Mu2))\n" " C Intrinsic Prototype: Word32 Q6_R_memw_M_circ(void** Rx, Word32 Mu, void* BaseAddress)\n" " Instruction Type: LD\n" " Execution Slots: SLOT01\n" " ========================================================================== */\n" "\n" "#define Q6_R_memw_M_circ __builtin_HEXAGON_L2_loadri_pcr\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=memub(Rx32++#s4:0:circ(Mu2))\n" " C Intrinsic Prototype: Word32 Q6_R_memub_IM_circ(void** Rx, Word32 Is4_0, Word32 Mu, void* BaseAddress)\n" " Instruction Type: LD\n" " Execution Slots: SLOT01\n" " ========================================================================== */\n" "\n" "#define Q6_R_memub_IM_circ __builtin_HEXAGON_L2_loadrub_pci\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=memub(Rx32++I:circ(Mu2))\n" " C Intrinsic Prototype: Word32 Q6_R_memub_M_circ(void** Rx, Word32 Mu, void* 
BaseAddress)\n" " Instruction Type: LD\n" " Execution Slots: SLOT01\n" " ========================================================================== */\n" "\n" "#define Q6_R_memub_M_circ __builtin_HEXAGON_L2_loadrub_pcr\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=memuh(Rx32++#s4:1:circ(Mu2))\n" " C Intrinsic Prototype: Word32 Q6_R_memuh_IM_circ(void** Rx, Word32 Is4_1, Word32 Mu, void* BaseAddress)\n" " Instruction Type: LD\n" " Execution Slots: SLOT01\n" " ========================================================================== */\n" "\n" "#define Q6_R_memuh_IM_circ __builtin_HEXAGON_L2_loadruh_pci\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=memuh(Rx32++I:circ(Mu2))\n" " C Intrinsic Prototype: Word32 Q6_R_memuh_M_circ(void** Rx, Word32 Mu, void* BaseAddress)\n" " Instruction Type: LD\n" " Execution Slots: SLOT01\n" " ========================================================================== */\n" "\n" "#define Q6_R_memuh_M_circ __builtin_HEXAGON_L2_loadruh_pcr\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32+=add(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_addacc_RR(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_addacc_RR __builtin_HEXAGON_M2_acci\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32+=add(Rs32,#s8)\n" " C Intrinsic Prototype: Word32 Q6_R_addacc_RI(Word32 Rx, Word32 Rs, Word32 Is8)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_addacc_RI __builtin_HEXAGON_M2_accii\n" "\n" "/* 
==========================================================================\n" " Assembly Syntax: Rxx32+=cmpyi(Rs32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_cmpyiacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_cmpyiacc_RR __builtin_HEXAGON_M2_cmaci_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=cmpyr(Rs32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_cmpyracc_RR(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_cmpyracc_RR __builtin_HEXAGON_M2_cmacr_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=cmpy(Rs32,Rt32):sat\n" " C Intrinsic Prototype: Word64 Q6_P_cmpyacc_RR_sat(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_cmpyacc_RR_sat __builtin_HEXAGON_M2_cmacs_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=cmpy(Rs32,Rt32):<<1:sat\n" " C Intrinsic Prototype: Word64 Q6_P_cmpyacc_RR_s1_sat(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_cmpyacc_RR_s1_sat __builtin_HEXAGON_M2_cmacs_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=cmpy(Rs32,Rt32*):sat\n" " C Intrinsic Prototype: Word64 Q6_P_cmpyacc_RR_conj_sat(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " 
========================================================================== */\n" "\n" "#define Q6_P_cmpyacc_RR_conj_sat __builtin_HEXAGON_M2_cmacsc_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=cmpy(Rs32,Rt32*):<<1:sat\n" " C Intrinsic Prototype: Word64 Q6_P_cmpyacc_RR_conj_s1_sat(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_cmpyacc_RR_conj_s1_sat __builtin_HEXAGON_M2_cmacsc_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=cmpyi(Rs32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_cmpyi_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_cmpyi_RR __builtin_HEXAGON_M2_cmpyi_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=cmpyr(Rs32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_cmpyr_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_cmpyr_RR __builtin_HEXAGON_M2_cmpyr_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=cmpy(Rs32,Rt32):rnd:sat\n" " C Intrinsic Prototype: Word32 Q6_R_cmpy_RR_rnd_sat(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_cmpy_RR_rnd_sat __builtin_HEXAGON_M2_cmpyrs_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=cmpy(Rs32,Rt32):<<1:rnd:sat\n" " C Intrinsic Prototype: 
Word32 Q6_R_cmpy_RR_s1_rnd_sat(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_cmpy_RR_s1_rnd_sat __builtin_HEXAGON_M2_cmpyrs_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=cmpy(Rs32,Rt32*):rnd:sat\n" " C Intrinsic Prototype: Word32 Q6_R_cmpy_RR_conj_rnd_sat(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_cmpy_RR_conj_rnd_sat __builtin_HEXAGON_M2_cmpyrsc_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=cmpy(Rs32,Rt32*):<<1:rnd:sat\n" " C Intrinsic Prototype: Word32 Q6_R_cmpy_RR_conj_s1_rnd_sat(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_cmpy_RR_conj_s1_rnd_sat __builtin_HEXAGON_M2_cmpyrsc_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=cmpy(Rs32,Rt32):sat\n" " C Intrinsic Prototype: Word64 Q6_P_cmpy_RR_sat(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_cmpy_RR_sat __builtin_HEXAGON_M2_cmpys_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=cmpy(Rs32,Rt32):<<1:sat\n" " C Intrinsic Prototype: Word64 Q6_P_cmpy_RR_s1_sat(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_cmpy_RR_s1_sat __builtin_HEXAGON_M2_cmpys_s1\n" "\n" "/* 
==========================================================================\n" " Assembly Syntax: Rdd32=cmpy(Rs32,Rt32*):sat\n" " C Intrinsic Prototype: Word64 Q6_P_cmpy_RR_conj_sat(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_cmpy_RR_conj_sat __builtin_HEXAGON_M2_cmpysc_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=cmpy(Rs32,Rt32*):<<1:sat\n" " C Intrinsic Prototype: Word64 Q6_P_cmpy_RR_conj_s1_sat(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_cmpy_RR_conj_s1_sat __builtin_HEXAGON_M2_cmpysc_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32-=cmpy(Rs32,Rt32):sat\n" " C Intrinsic Prototype: Word64 Q6_P_cmpynac_RR_sat(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_cmpynac_RR_sat __builtin_HEXAGON_M2_cnacs_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32-=cmpy(Rs32,Rt32):<<1:sat\n" " C Intrinsic Prototype: Word64 Q6_P_cmpynac_RR_s1_sat(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_cmpynac_RR_s1_sat __builtin_HEXAGON_M2_cnacs_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32-=cmpy(Rs32,Rt32*):sat\n" " C Intrinsic Prototype: Word64 Q6_P_cmpynac_RR_conj_sat(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " 
========================================================================== */\n" "\n" "#define Q6_P_cmpynac_RR_conj_sat __builtin_HEXAGON_M2_cnacsc_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32-=cmpy(Rs32,Rt32*):<<1:sat\n" " C Intrinsic Prototype: Word64 Q6_P_cmpynac_RR_conj_s1_sat(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_cmpynac_RR_conj_s1_sat __builtin_HEXAGON_M2_cnacsc_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=mpy(Rs32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_mpyacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpyacc_RR __builtin_HEXAGON_M2_dpmpyss_acc_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32-=mpy(Rs32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_mpynac_RR(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpynac_RR __builtin_HEXAGON_M2_dpmpyss_nac_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpy(Rs32,Rt32):rnd\n" " C Intrinsic Prototype: Word32 Q6_R_mpy_RR_rnd(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpy_RR_rnd __builtin_HEXAGON_M2_dpmpyss_rnd_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=mpy(Rs32,Rt32)\n" " C 
Intrinsic Prototype: Word64 Q6_P_mpy_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpy_RR __builtin_HEXAGON_M2_dpmpyss_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=mpyu(Rs32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpyuacc_RR __builtin_HEXAGON_M2_dpmpyuu_acc_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32-=mpyu(Rs32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_mpyunac_RR(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpyunac_RR __builtin_HEXAGON_M2_dpmpyuu_nac_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=mpyu(Rs32,Rt32)\n" " C Intrinsic Prototype: UWord64 Q6_P_mpyu_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpyu_RR __builtin_HEXAGON_M2_dpmpyuu_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpy(Rs32,Rt32.h):<<1:rnd:sat\n" " C Intrinsic Prototype: Word32 Q6_R_mpy_RRh_s1_rnd_sat(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpy_RRh_s1_rnd_sat __builtin_HEXAGON_M2_hmmpyh_rs1\n" "\n" "/* 
==========================================================================\n" " Assembly Syntax: Rd32=mpy(Rs32,Rt32.h):<<1:sat\n" " C Intrinsic Prototype: Word32 Q6_R_mpy_RRh_s1_sat(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpy_RRh_s1_sat __builtin_HEXAGON_M2_hmmpyh_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpy(Rs32,Rt32.l):<<1:rnd:sat\n" " C Intrinsic Prototype: Word32 Q6_R_mpy_RRl_s1_rnd_sat(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpy_RRl_s1_rnd_sat __builtin_HEXAGON_M2_hmmpyl_rs1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpy(Rs32,Rt32.l):<<1:sat\n" " C Intrinsic Prototype: Word32 Q6_R_mpy_RRl_s1_sat(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpy_RRl_s1_sat __builtin_HEXAGON_M2_hmmpyl_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32+=mpyi(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_mpyiacc_RR(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyiacc_RR __builtin_HEXAGON_M2_maci\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32-=mpyi(Rs32,#u8)\n" " C Intrinsic Prototype: Word32 Q6_R_mpyinac_RI(Word32 Rx, Word32 Rs, Word32 Iu8)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " 
========================================================================== */\n" "\n" "#define Q6_R_mpyinac_RI __builtin_HEXAGON_M2_macsin\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32+=mpyi(Rs32,#u8)\n" " C Intrinsic Prototype: Word32 Q6_R_mpyiacc_RI(Word32 Rx, Word32 Rs, Word32 Iu8)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyiacc_RI __builtin_HEXAGON_M2_macsip\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vmpywoh(Rss32,Rtt32):rnd:sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpywohacc_PP_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpywohacc_PP_rnd_sat __builtin_HEXAGON_M2_mmachs_rs0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vmpywoh(Rss32,Rtt32):<<1:rnd:sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpywohacc_PP_s1_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpywohacc_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmachs_rs1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vmpywoh(Rss32,Rtt32):sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpywohacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpywohacc_PP_sat __builtin_HEXAGON_M2_mmachs_s0\n" "\n" "/* 
==========================================================================\n" " Assembly Syntax: Rxx32+=vmpywoh(Rss32,Rtt32):<<1:sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpywohacc_PP_s1_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpywohacc_PP_s1_sat __builtin_HEXAGON_M2_mmachs_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vmpyweh(Rss32,Rtt32):rnd:sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpywehacc_PP_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpywehacc_PP_rnd_sat __builtin_HEXAGON_M2_mmacls_rs0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vmpyweh(Rss32,Rtt32):<<1:rnd:sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpywehacc_PP_s1_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpywehacc_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmacls_rs1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vmpyweh(Rss32,Rtt32):sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpywehacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpywehacc_PP_sat __builtin_HEXAGON_M2_mmacls_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vmpyweh(Rss32,Rtt32):<<1:sat\n" " C Intrinsic Prototype: Word64 
Q6_P_vmpywehacc_PP_s1_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpywehacc_PP_s1_sat __builtin_HEXAGON_M2_mmacls_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vmpywouh(Rss32,Rtt32):rnd:sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpywouhacc_PP_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpywouhacc_PP_rnd_sat __builtin_HEXAGON_M2_mmacuhs_rs0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vmpywouh(Rss32,Rtt32):<<1:rnd:sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpywouhacc_PP_s1_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpywouhacc_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmacuhs_rs1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vmpywouh(Rss32,Rtt32):sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpywouhacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpywouhacc_PP_sat __builtin_HEXAGON_M2_mmacuhs_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vmpywouh(Rss32,Rtt32):<<1:sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpywouhacc_PP_s1_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " 
========================================================================== */\n" "\n" "#define Q6_P_vmpywouhacc_PP_s1_sat __builtin_HEXAGON_M2_mmacuhs_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vmpyweuh(Rss32,Rtt32):rnd:sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpyweuhacc_PP_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpyweuhacc_PP_rnd_sat __builtin_HEXAGON_M2_mmaculs_rs0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vmpyweuh(Rss32,Rtt32):<<1:rnd:sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpyweuhacc_PP_s1_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpyweuhacc_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmaculs_rs1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vmpyweuh(Rss32,Rtt32):sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpyweuhacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpyweuhacc_PP_sat __builtin_HEXAGON_M2_mmaculs_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vmpyweuh(Rss32,Rtt32):<<1:sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpyweuhacc_PP_s1_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpyweuhacc_PP_s1_sat __builtin_HEXAGON_M2_mmaculs_s1\n" "\n" "/* 
==========================================================================\n" " Assembly Syntax: Rdd32=vmpywoh(Rss32,Rtt32):rnd:sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpywoh_PP_rnd_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpywoh_PP_rnd_sat __builtin_HEXAGON_M2_mmpyh_rs0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vmpywoh(Rss32,Rtt32):<<1:rnd:sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpywoh_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpywoh_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmpyh_rs1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vmpywoh(Rss32,Rtt32):sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpywoh_PP_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpywoh_PP_sat __builtin_HEXAGON_M2_mmpyh_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vmpywoh(Rss32,Rtt32):<<1:sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpywoh_PP_s1_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpywoh_PP_s1_sat __builtin_HEXAGON_M2_mmpyh_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vmpyweh(Rss32,Rtt32):rnd:sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpyweh_PP_rnd_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: 
SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpyweh_PP_rnd_sat __builtin_HEXAGON_M2_mmpyl_rs0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vmpyweh(Rss32,Rtt32):<<1:rnd:sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpyweh_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpyweh_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmpyl_rs1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vmpyweh(Rss32,Rtt32):sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpyweh_PP_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpyweh_PP_sat __builtin_HEXAGON_M2_mmpyl_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vmpyweh(Rss32,Rtt32):<<1:sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpyweh_PP_s1_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpyweh_PP_s1_sat __builtin_HEXAGON_M2_mmpyl_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vmpywouh(Rss32,Rtt32):rnd:sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpywouh_PP_rnd_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpywouh_PP_rnd_sat __builtin_HEXAGON_M2_mmpyuh_rs0\n" "\n" "/* ==========================================================================\n" " 
Assembly Syntax: Rdd32=vmpywouh(Rss32,Rtt32):<<1:rnd:sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpywouh_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpywouh_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmpyuh_rs1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vmpywouh(Rss32,Rtt32):sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpywouh_PP_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpywouh_PP_sat __builtin_HEXAGON_M2_mmpyuh_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vmpywouh(Rss32,Rtt32):<<1:sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpywouh_PP_s1_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpywouh_PP_s1_sat __builtin_HEXAGON_M2_mmpyuh_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vmpyweuh(Rss32,Rtt32):rnd:sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpyweuh_PP_rnd_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpyweuh_PP_rnd_sat __builtin_HEXAGON_M2_mmpyul_rs0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vmpyweuh(Rss32,Rtt32):<<1:rnd:sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpyweuh_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " 
========================================================================== */\n" "\n" "#define Q6_P_vmpyweuh_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmpyul_rs1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vmpyweuh(Rss32,Rtt32):sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpyweuh_PP_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpyweuh_PP_sat __builtin_HEXAGON_M2_mmpyul_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vmpyweuh(Rss32,Rtt32):<<1:sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpyweuh_PP_s1_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpyweuh_PP_s1_sat __builtin_HEXAGON_M2_mmpyul_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32+=mpy(Rs32.h,Rt32.h)\n" " C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRh(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyacc_RhRh __builtin_HEXAGON_M2_mpy_acc_hh_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32+=mpy(Rs32.h,Rt32.h):<<1\n" " C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRh_s1(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyacc_RhRh_s1 __builtin_HEXAGON_M2_mpy_acc_hh_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: 
Rx32+=mpy(Rs32.h,Rt32.l)\n" " C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRl(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyacc_RhRl __builtin_HEXAGON_M2_mpy_acc_hl_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32+=mpy(Rs32.h,Rt32.l):<<1\n" " C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRl_s1(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyacc_RhRl_s1 __builtin_HEXAGON_M2_mpy_acc_hl_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32+=mpy(Rs32.l,Rt32.h)\n" " C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRh(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyacc_RlRh __builtin_HEXAGON_M2_mpy_acc_lh_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32+=mpy(Rs32.l,Rt32.h):<<1\n" " C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRh_s1(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyacc_RlRh_s1 __builtin_HEXAGON_M2_mpy_acc_lh_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32+=mpy(Rs32.l,Rt32.l)\n" " C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRl(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define 
Q6_R_mpyacc_RlRl __builtin_HEXAGON_M2_mpy_acc_ll_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32+=mpy(Rs32.l,Rt32.l):<<1\n" " C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRl_s1(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyacc_RlRl_s1 __builtin_HEXAGON_M2_mpy_acc_ll_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32+=mpy(Rs32.h,Rt32.h):sat\n" " C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRh_sat(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyacc_RhRh_sat __builtin_HEXAGON_M2_mpy_acc_sat_hh_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32+=mpy(Rs32.h,Rt32.h):<<1:sat\n" " C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRh_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyacc_RhRh_s1_sat __builtin_HEXAGON_M2_mpy_acc_sat_hh_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32+=mpy(Rs32.h,Rt32.l):sat\n" " C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRl_sat(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyacc_RhRl_sat __builtin_HEXAGON_M2_mpy_acc_sat_hl_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32+=mpy(Rs32.h,Rt32.l):<<1:sat\n" " C Intrinsic Prototype: Word32 
Q6_R_mpyacc_RhRl_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyacc_RhRl_s1_sat __builtin_HEXAGON_M2_mpy_acc_sat_hl_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32+=mpy(Rs32.l,Rt32.h):sat\n" " C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRh_sat(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyacc_RlRh_sat __builtin_HEXAGON_M2_mpy_acc_sat_lh_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32+=mpy(Rs32.l,Rt32.h):<<1:sat\n" " C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRh_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyacc_RlRh_s1_sat __builtin_HEXAGON_M2_mpy_acc_sat_lh_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32+=mpy(Rs32.l,Rt32.l):sat\n" " C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRl_sat(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyacc_RlRl_sat __builtin_HEXAGON_M2_mpy_acc_sat_ll_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32+=mpy(Rs32.l,Rt32.l):<<1:sat\n" " C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRl_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define 
Q6_R_mpyacc_RlRl_s1_sat __builtin_HEXAGON_M2_mpy_acc_sat_ll_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.h)\n" " C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpy_RhRh __builtin_HEXAGON_M2_mpy_hh_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.h):<<1\n" " C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh_s1(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpy_RhRh_s1 __builtin_HEXAGON_M2_mpy_hh_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.l)\n" " C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpy_RhRl __builtin_HEXAGON_M2_mpy_hl_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.l):<<1\n" " C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_s1(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpy_RhRl_s1 __builtin_HEXAGON_M2_mpy_hl_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.h)\n" " C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " 
========================================================================== */\n" "\n" "#define Q6_R_mpy_RlRh __builtin_HEXAGON_M2_mpy_lh_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.h):<<1\n" " C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_s1(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpy_RlRh_s1 __builtin_HEXAGON_M2_mpy_lh_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.l)\n" " C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpy_RlRl __builtin_HEXAGON_M2_mpy_ll_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.l):<<1\n" " C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_s1(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpy_RlRl_s1 __builtin_HEXAGON_M2_mpy_ll_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32-=mpy(Rs32.h,Rt32.h)\n" " C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRh(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpynac_RhRh __builtin_HEXAGON_M2_mpy_nac_hh_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32-=mpy(Rs32.h,Rt32.h):<<1\n" " C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRh_s1(Word32 Rx, 
Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpynac_RhRh_s1 __builtin_HEXAGON_M2_mpy_nac_hh_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32-=mpy(Rs32.h,Rt32.l)\n" " C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRl(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpynac_RhRl __builtin_HEXAGON_M2_mpy_nac_hl_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32-=mpy(Rs32.h,Rt32.l):<<1\n" " C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRl_s1(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpynac_RhRl_s1 __builtin_HEXAGON_M2_mpy_nac_hl_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32-=mpy(Rs32.l,Rt32.h)\n" " C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRh(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpynac_RlRh __builtin_HEXAGON_M2_mpy_nac_lh_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32-=mpy(Rs32.l,Rt32.h):<<1\n" " C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRh_s1(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpynac_RlRh_s1 __builtin_HEXAGON_M2_mpy_nac_lh_s1\n" "\n" "/* 
==========================================================================\n" " Assembly Syntax: Rx32-=mpy(Rs32.l,Rt32.l)\n" " C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRl(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpynac_RlRl __builtin_HEXAGON_M2_mpy_nac_ll_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32-=mpy(Rs32.l,Rt32.l):<<1\n" " C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRl_s1(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpynac_RlRl_s1 __builtin_HEXAGON_M2_mpy_nac_ll_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32-=mpy(Rs32.h,Rt32.h):sat\n" " C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRh_sat(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpynac_RhRh_sat __builtin_HEXAGON_M2_mpy_nac_sat_hh_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32-=mpy(Rs32.h,Rt32.h):<<1:sat\n" " C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRh_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpynac_RhRh_s1_sat __builtin_HEXAGON_M2_mpy_nac_sat_hh_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32-=mpy(Rs32.h,Rt32.l):sat\n" " C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRl_sat(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " 
Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpynac_RhRl_sat __builtin_HEXAGON_M2_mpy_nac_sat_hl_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32-=mpy(Rs32.h,Rt32.l):<<1:sat\n" " C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRl_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpynac_RhRl_s1_sat __builtin_HEXAGON_M2_mpy_nac_sat_hl_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32-=mpy(Rs32.l,Rt32.h):sat\n" " C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRh_sat(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpynac_RlRh_sat __builtin_HEXAGON_M2_mpy_nac_sat_lh_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32-=mpy(Rs32.l,Rt32.h):<<1:sat\n" " C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRh_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpynac_RlRh_s1_sat __builtin_HEXAGON_M2_mpy_nac_sat_lh_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32-=mpy(Rs32.l,Rt32.l):sat\n" " C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRl_sat(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpynac_RlRl_sat __builtin_HEXAGON_M2_mpy_nac_sat_ll_s0\n" "\n" "/* 
==========================================================================\n" " Assembly Syntax: Rx32-=mpy(Rs32.l,Rt32.l):<<1:sat\n" " C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRl_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpynac_RlRl_s1_sat __builtin_HEXAGON_M2_mpy_nac_sat_ll_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.h):rnd\n" " C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh_rnd(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpy_RhRh_rnd __builtin_HEXAGON_M2_mpy_rnd_hh_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.h):<<1:rnd\n" " C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh_s1_rnd(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpy_RhRh_s1_rnd __builtin_HEXAGON_M2_mpy_rnd_hh_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.l):rnd\n" " C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_rnd(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpy_RhRl_rnd __builtin_HEXAGON_M2_mpy_rnd_hl_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.l):<<1:rnd\n" " C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_s1_rnd(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " 
========================================================================== */\n" "\n" "#define Q6_R_mpy_RhRl_s1_rnd __builtin_HEXAGON_M2_mpy_rnd_hl_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.h):rnd\n" " C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_rnd(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpy_RlRh_rnd __builtin_HEXAGON_M2_mpy_rnd_lh_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.h):<<1:rnd\n" " C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_s1_rnd(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpy_RlRh_s1_rnd __builtin_HEXAGON_M2_mpy_rnd_lh_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.l):rnd\n" " C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_rnd(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpy_RlRl_rnd __builtin_HEXAGON_M2_mpy_rnd_ll_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.l):<<1:rnd\n" " C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_s1_rnd(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpy_RlRl_s1_rnd __builtin_HEXAGON_M2_mpy_rnd_ll_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.h):sat\n" " C 
Intrinsic Prototype: Word32 Q6_R_mpy_RhRh_sat(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpy_RhRh_sat __builtin_HEXAGON_M2_mpy_sat_hh_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.h):<<1:sat\n" " C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh_s1_sat(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpy_RhRh_s1_sat __builtin_HEXAGON_M2_mpy_sat_hh_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.l):sat\n" " C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_sat(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpy_RhRl_sat __builtin_HEXAGON_M2_mpy_sat_hl_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.l):<<1:sat\n" " C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_s1_sat(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpy_RhRl_s1_sat __builtin_HEXAGON_M2_mpy_sat_hl_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.h):sat\n" " C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_sat(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpy_RlRh_sat __builtin_HEXAGON_M2_mpy_sat_lh_s0\n" "\n" "/* 
==========================================================================\n" " Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.h):<<1:sat\n" " C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_s1_sat(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpy_RlRh_s1_sat __builtin_HEXAGON_M2_mpy_sat_lh_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.l):sat\n" " C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_sat(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpy_RlRl_sat __builtin_HEXAGON_M2_mpy_sat_ll_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.l):<<1:sat\n" " C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_s1_sat(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpy_RlRl_s1_sat __builtin_HEXAGON_M2_mpy_sat_ll_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.h):rnd:sat\n" " C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh_rnd_sat(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpy_RhRh_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_hh_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.h):<<1:rnd:sat\n" " C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh_s1_rnd_sat(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " 
========================================================================== */\n" "\n" "#define Q6_R_mpy_RhRh_s1_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_hh_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.l):rnd:sat\n" " C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_rnd_sat(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpy_RhRl_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_hl_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.l):<<1:rnd:sat\n" " C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_s1_rnd_sat(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpy_RhRl_s1_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_hl_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.h):rnd:sat\n" " C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_rnd_sat(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpy_RlRh_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_lh_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.h):<<1:rnd:sat\n" " C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_s1_rnd_sat(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpy_RlRh_s1_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_lh_s1\n" "\n" "/* 
==========================================================================\n" " Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.l):rnd:sat\n" " C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_rnd_sat(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpy_RlRl_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_ll_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.l):<<1:rnd:sat\n" " C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_s1_rnd_sat(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpy_RlRl_s1_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_ll_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpy(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_mpy_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpy_RR __builtin_HEXAGON_M2_mpy_up\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpy(Rs32,Rt32):<<1\n" " C Intrinsic Prototype: Word32 Q6_R_mpy_RR_s1(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpy_RR_s1 __builtin_HEXAGON_M2_mpy_up_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpy(Rs32,Rt32):<<1:sat\n" " C Intrinsic Prototype: Word32 Q6_R_mpy_RR_s1_sat(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " 
========================================================================== */\n" "\n" "#define Q6_R_mpy_RR_s1_sat __builtin_HEXAGON_M2_mpy_up_s1_sat\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=mpy(Rs32.h,Rt32.h)\n" " C Intrinsic Prototype: Word64 Q6_P_mpyacc_RhRh(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpyacc_RhRh __builtin_HEXAGON_M2_mpyd_acc_hh_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=mpy(Rs32.h,Rt32.h):<<1\n" " C Intrinsic Prototype: Word64 Q6_P_mpyacc_RhRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpyacc_RhRh_s1 __builtin_HEXAGON_M2_mpyd_acc_hh_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=mpy(Rs32.h,Rt32.l)\n" " C Intrinsic Prototype: Word64 Q6_P_mpyacc_RhRl(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpyacc_RhRl __builtin_HEXAGON_M2_mpyd_acc_hl_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=mpy(Rs32.h,Rt32.l):<<1\n" " C Intrinsic Prototype: Word64 Q6_P_mpyacc_RhRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpyacc_RhRl_s1 __builtin_HEXAGON_M2_mpyd_acc_hl_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: 
Rxx32+=mpy(Rs32.l,Rt32.h)\n" " C Intrinsic Prototype: Word64 Q6_P_mpyacc_RlRh(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpyacc_RlRh __builtin_HEXAGON_M2_mpyd_acc_lh_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=mpy(Rs32.l,Rt32.h):<<1\n" " C Intrinsic Prototype: Word64 Q6_P_mpyacc_RlRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpyacc_RlRh_s1 __builtin_HEXAGON_M2_mpyd_acc_lh_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=mpy(Rs32.l,Rt32.l)\n" " C Intrinsic Prototype: Word64 Q6_P_mpyacc_RlRl(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpyacc_RlRl __builtin_HEXAGON_M2_mpyd_acc_ll_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=mpy(Rs32.l,Rt32.l):<<1\n" " C Intrinsic Prototype: Word64 Q6_P_mpyacc_RlRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpyacc_RlRl_s1 __builtin_HEXAGON_M2_mpyd_acc_ll_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=mpy(Rs32.h,Rt32.h)\n" " C Intrinsic Prototype: Word64 Q6_P_mpy_RhRh(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpy_RhRh 
__builtin_HEXAGON_M2_mpyd_hh_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=mpy(Rs32.h,Rt32.h):<<1\n" " C Intrinsic Prototype: Word64 Q6_P_mpy_RhRh_s1(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpy_RhRh_s1 __builtin_HEXAGON_M2_mpyd_hh_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=mpy(Rs32.h,Rt32.l)\n" " C Intrinsic Prototype: Word64 Q6_P_mpy_RhRl(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpy_RhRl __builtin_HEXAGON_M2_mpyd_hl_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=mpy(Rs32.h,Rt32.l):<<1\n" " C Intrinsic Prototype: Word64 Q6_P_mpy_RhRl_s1(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpy_RhRl_s1 __builtin_HEXAGON_M2_mpyd_hl_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=mpy(Rs32.l,Rt32.h)\n" " C Intrinsic Prototype: Word64 Q6_P_mpy_RlRh(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpy_RlRh __builtin_HEXAGON_M2_mpyd_lh_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=mpy(Rs32.l,Rt32.h):<<1\n" " C Intrinsic Prototype: Word64 Q6_P_mpy_RlRh_s1(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " 
========================================================================== */\n" "\n" "#define Q6_P_mpy_RlRh_s1 __builtin_HEXAGON_M2_mpyd_lh_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=mpy(Rs32.l,Rt32.l)\n" " C Intrinsic Prototype: Word64 Q6_P_mpy_RlRl(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpy_RlRl __builtin_HEXAGON_M2_mpyd_ll_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=mpy(Rs32.l,Rt32.l):<<1\n" " C Intrinsic Prototype: Word64 Q6_P_mpy_RlRl_s1(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpy_RlRl_s1 __builtin_HEXAGON_M2_mpyd_ll_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32-=mpy(Rs32.h,Rt32.h)\n" " C Intrinsic Prototype: Word64 Q6_P_mpynac_RhRh(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpynac_RhRh __builtin_HEXAGON_M2_mpyd_nac_hh_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32-=mpy(Rs32.h,Rt32.h):<<1\n" " C Intrinsic Prototype: Word64 Q6_P_mpynac_RhRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpynac_RhRh_s1 __builtin_HEXAGON_M2_mpyd_nac_hh_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32-=mpy(Rs32.h,Rt32.l)\n" " C Intrinsic Prototype: Word64 
Q6_P_mpynac_RhRl(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpynac_RhRl __builtin_HEXAGON_M2_mpyd_nac_hl_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32-=mpy(Rs32.h,Rt32.l):<<1\n" " C Intrinsic Prototype: Word64 Q6_P_mpynac_RhRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpynac_RhRl_s1 __builtin_HEXAGON_M2_mpyd_nac_hl_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32-=mpy(Rs32.l,Rt32.h)\n" " C Intrinsic Prototype: Word64 Q6_P_mpynac_RlRh(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpynac_RlRh __builtin_HEXAGON_M2_mpyd_nac_lh_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32-=mpy(Rs32.l,Rt32.h):<<1\n" " C Intrinsic Prototype: Word64 Q6_P_mpynac_RlRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpynac_RlRh_s1 __builtin_HEXAGON_M2_mpyd_nac_lh_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32-=mpy(Rs32.l,Rt32.l)\n" " C Intrinsic Prototype: Word64 Q6_P_mpynac_RlRl(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpynac_RlRl __builtin_HEXAGON_M2_mpyd_nac_ll_s0\n" "\n" 
"/* ==========================================================================\n" " Assembly Syntax: Rxx32-=mpy(Rs32.l,Rt32.l):<<1\n" " C Intrinsic Prototype: Word64 Q6_P_mpynac_RlRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpynac_RlRl_s1 __builtin_HEXAGON_M2_mpyd_nac_ll_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=mpy(Rs32.h,Rt32.h):rnd\n" " C Intrinsic Prototype: Word64 Q6_P_mpy_RhRh_rnd(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpy_RhRh_rnd __builtin_HEXAGON_M2_mpyd_rnd_hh_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=mpy(Rs32.h,Rt32.h):<<1:rnd\n" " C Intrinsic Prototype: Word64 Q6_P_mpy_RhRh_s1_rnd(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpy_RhRh_s1_rnd __builtin_HEXAGON_M2_mpyd_rnd_hh_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=mpy(Rs32.h,Rt32.l):rnd\n" " C Intrinsic Prototype: Word64 Q6_P_mpy_RhRl_rnd(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpy_RhRl_rnd __builtin_HEXAGON_M2_mpyd_rnd_hl_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=mpy(Rs32.h,Rt32.l):<<1:rnd\n" " C Intrinsic Prototype: Word64 Q6_P_mpy_RhRl_s1_rnd(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " 
========================================================================== */\n" "\n" "#define Q6_P_mpy_RhRl_s1_rnd __builtin_HEXAGON_M2_mpyd_rnd_hl_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=mpy(Rs32.l,Rt32.h):rnd\n" " C Intrinsic Prototype: Word64 Q6_P_mpy_RlRh_rnd(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpy_RlRh_rnd __builtin_HEXAGON_M2_mpyd_rnd_lh_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=mpy(Rs32.l,Rt32.h):<<1:rnd\n" " C Intrinsic Prototype: Word64 Q6_P_mpy_RlRh_s1_rnd(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpy_RlRh_s1_rnd __builtin_HEXAGON_M2_mpyd_rnd_lh_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=mpy(Rs32.l,Rt32.l):rnd\n" " C Intrinsic Prototype: Word64 Q6_P_mpy_RlRl_rnd(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpy_RlRl_rnd __builtin_HEXAGON_M2_mpyd_rnd_ll_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=mpy(Rs32.l,Rt32.l):<<1:rnd\n" " C Intrinsic Prototype: Word64 Q6_P_mpy_RlRl_s1_rnd(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpy_RlRl_s1_rnd __builtin_HEXAGON_M2_mpyd_rnd_ll_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpyi(Rs32,Rt32)\n" " C 
Intrinsic Prototype: Word32 Q6_R_mpyi_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyi_RR __builtin_HEXAGON_M2_mpyi\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpyi(Rs32,#m9)\n" " C Intrinsic Prototype: Word32 Q6_R_mpyi_RI(Word32 Rs, Word32 Im9)\n" " Instruction Type: M\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyi_RI __builtin_HEXAGON_M2_mpysmi\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpysu(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_mpysu_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpysu_RR __builtin_HEXAGON_M2_mpysu_up\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32+=mpyu(Rs32.h,Rt32.h)\n" " C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RhRh(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyuacc_RhRh __builtin_HEXAGON_M2_mpyu_acc_hh_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32+=mpyu(Rs32.h,Rt32.h):<<1\n" " C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RhRh_s1(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyuacc_RhRh_s1 __builtin_HEXAGON_M2_mpyu_acc_hh_s1\n" "\n" "/* 
==========================================================================\n" " Assembly Syntax: Rx32+=mpyu(Rs32.h,Rt32.l)\n" " C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RhRl(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyuacc_RhRl __builtin_HEXAGON_M2_mpyu_acc_hl_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32+=mpyu(Rs32.h,Rt32.l):<<1\n" " C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RhRl_s1(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyuacc_RhRl_s1 __builtin_HEXAGON_M2_mpyu_acc_hl_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32+=mpyu(Rs32.l,Rt32.h)\n" " C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RlRh(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyuacc_RlRh __builtin_HEXAGON_M2_mpyu_acc_lh_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32+=mpyu(Rs32.l,Rt32.h):<<1\n" " C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RlRh_s1(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyuacc_RlRh_s1 __builtin_HEXAGON_M2_mpyu_acc_lh_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32+=mpyu(Rs32.l,Rt32.l)\n" " C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RlRl(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: 
SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyuacc_RlRl __builtin_HEXAGON_M2_mpyu_acc_ll_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32+=mpyu(Rs32.l,Rt32.l):<<1\n" " C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RlRl_s1(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyuacc_RlRl_s1 __builtin_HEXAGON_M2_mpyu_acc_ll_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpyu(Rs32.h,Rt32.h)\n" " C Intrinsic Prototype: UWord32 Q6_R_mpyu_RhRh(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyu_RhRh __builtin_HEXAGON_M2_mpyu_hh_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpyu(Rs32.h,Rt32.h):<<1\n" " C Intrinsic Prototype: UWord32 Q6_R_mpyu_RhRh_s1(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyu_RhRh_s1 __builtin_HEXAGON_M2_mpyu_hh_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpyu(Rs32.h,Rt32.l)\n" " C Intrinsic Prototype: UWord32 Q6_R_mpyu_RhRl(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyu_RhRl __builtin_HEXAGON_M2_mpyu_hl_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpyu(Rs32.h,Rt32.l):<<1\n" " C Intrinsic Prototype: 
UWord32 Q6_R_mpyu_RhRl_s1(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyu_RhRl_s1 __builtin_HEXAGON_M2_mpyu_hl_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpyu(Rs32.l,Rt32.h)\n" " C Intrinsic Prototype: UWord32 Q6_R_mpyu_RlRh(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyu_RlRh __builtin_HEXAGON_M2_mpyu_lh_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpyu(Rs32.l,Rt32.h):<<1\n" " C Intrinsic Prototype: UWord32 Q6_R_mpyu_RlRh_s1(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyu_RlRh_s1 __builtin_HEXAGON_M2_mpyu_lh_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpyu(Rs32.l,Rt32.l)\n" " C Intrinsic Prototype: UWord32 Q6_R_mpyu_RlRl(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyu_RlRl __builtin_HEXAGON_M2_mpyu_ll_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpyu(Rs32.l,Rt32.l):<<1\n" " C Intrinsic Prototype: UWord32 Q6_R_mpyu_RlRl_s1(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyu_RlRl_s1 __builtin_HEXAGON_M2_mpyu_ll_s1\n" "\n" "/* ==========================================================================\n" " 
Assembly Syntax: Rx32-=mpyu(Rs32.h,Rt32.h)\n" " C Intrinsic Prototype: Word32 Q6_R_mpyunac_RhRh(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyunac_RhRh __builtin_HEXAGON_M2_mpyu_nac_hh_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32-=mpyu(Rs32.h,Rt32.h):<<1\n" " C Intrinsic Prototype: Word32 Q6_R_mpyunac_RhRh_s1(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyunac_RhRh_s1 __builtin_HEXAGON_M2_mpyu_nac_hh_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32-=mpyu(Rs32.h,Rt32.l)\n" " C Intrinsic Prototype: Word32 Q6_R_mpyunac_RhRl(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyunac_RhRl __builtin_HEXAGON_M2_mpyu_nac_hl_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32-=mpyu(Rs32.h,Rt32.l):<<1\n" " C Intrinsic Prototype: Word32 Q6_R_mpyunac_RhRl_s1(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyunac_RhRl_s1 __builtin_HEXAGON_M2_mpyu_nac_hl_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32-=mpyu(Rs32.l,Rt32.h)\n" " C Intrinsic Prototype: Word32 Q6_R_mpyunac_RlRh(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== 
*/\n" "\n" "#define Q6_R_mpyunac_RlRh __builtin_HEXAGON_M2_mpyu_nac_lh_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32-=mpyu(Rs32.l,Rt32.h):<<1\n" " C Intrinsic Prototype: Word32 Q6_R_mpyunac_RlRh_s1(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyunac_RlRh_s1 __builtin_HEXAGON_M2_mpyu_nac_lh_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32-=mpyu(Rs32.l,Rt32.l)\n" " C Intrinsic Prototype: Word32 Q6_R_mpyunac_RlRl(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyunac_RlRl __builtin_HEXAGON_M2_mpyu_nac_ll_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32-=mpyu(Rs32.l,Rt32.l):<<1\n" " C Intrinsic Prototype: Word32 Q6_R_mpyunac_RlRl_s1(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyunac_RlRl_s1 __builtin_HEXAGON_M2_mpyu_nac_ll_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpyu(Rs32,Rt32)\n" " C Intrinsic Prototype: UWord32 Q6_R_mpyu_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyu_RR __builtin_HEXAGON_M2_mpyu_up\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=mpyu(Rs32.h,Rt32.h)\n" " C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RhRh(Word64 Rxx, Word32 Rs, Word32 
Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpyuacc_RhRh __builtin_HEXAGON_M2_mpyud_acc_hh_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=mpyu(Rs32.h,Rt32.h):<<1\n" " C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RhRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpyuacc_RhRh_s1 __builtin_HEXAGON_M2_mpyud_acc_hh_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=mpyu(Rs32.h,Rt32.l)\n" " C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RhRl(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpyuacc_RhRl __builtin_HEXAGON_M2_mpyud_acc_hl_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=mpyu(Rs32.h,Rt32.l):<<1\n" " C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RhRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpyuacc_RhRl_s1 __builtin_HEXAGON_M2_mpyud_acc_hl_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=mpyu(Rs32.l,Rt32.h)\n" " C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RlRh(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpyuacc_RlRh __builtin_HEXAGON_M2_mpyud_acc_lh_s0\n" "\n" "/* 
==========================================================================\n" " Assembly Syntax: Rxx32+=mpyu(Rs32.l,Rt32.h):<<1\n" " C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RlRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpyuacc_RlRh_s1 __builtin_HEXAGON_M2_mpyud_acc_lh_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=mpyu(Rs32.l,Rt32.l)\n" " C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RlRl(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpyuacc_RlRl __builtin_HEXAGON_M2_mpyud_acc_ll_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=mpyu(Rs32.l,Rt32.l):<<1\n" " C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RlRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpyuacc_RlRl_s1 __builtin_HEXAGON_M2_mpyud_acc_ll_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=mpyu(Rs32.h,Rt32.h)\n" " C Intrinsic Prototype: UWord64 Q6_P_mpyu_RhRh(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpyu_RhRh __builtin_HEXAGON_M2_mpyud_hh_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=mpyu(Rs32.h,Rt32.h):<<1\n" " C Intrinsic Prototype: UWord64 Q6_P_mpyu_RhRh_s1(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " 
========================================================================== */\n" "\n" "#define Q6_P_mpyu_RhRh_s1 __builtin_HEXAGON_M2_mpyud_hh_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=mpyu(Rs32.h,Rt32.l)\n" " C Intrinsic Prototype: UWord64 Q6_P_mpyu_RhRl(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpyu_RhRl __builtin_HEXAGON_M2_mpyud_hl_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=mpyu(Rs32.h,Rt32.l):<<1\n" " C Intrinsic Prototype: UWord64 Q6_P_mpyu_RhRl_s1(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpyu_RhRl_s1 __builtin_HEXAGON_M2_mpyud_hl_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=mpyu(Rs32.l,Rt32.h)\n" " C Intrinsic Prototype: UWord64 Q6_P_mpyu_RlRh(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpyu_RlRh __builtin_HEXAGON_M2_mpyud_lh_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=mpyu(Rs32.l,Rt32.h):<<1\n" " C Intrinsic Prototype: UWord64 Q6_P_mpyu_RlRh_s1(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpyu_RlRh_s1 __builtin_HEXAGON_M2_mpyud_lh_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=mpyu(Rs32.l,Rt32.l)\n" " C Intrinsic Prototype: UWord64 Q6_P_mpyu_RlRl(Word32 
Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpyu_RlRl __builtin_HEXAGON_M2_mpyud_ll_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=mpyu(Rs32.l,Rt32.l):<<1\n" " C Intrinsic Prototype: UWord64 Q6_P_mpyu_RlRl_s1(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpyu_RlRl_s1 __builtin_HEXAGON_M2_mpyud_ll_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32-=mpyu(Rs32.h,Rt32.h)\n" " C Intrinsic Prototype: Word64 Q6_P_mpyunac_RhRh(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpyunac_RhRh __builtin_HEXAGON_M2_mpyud_nac_hh_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32-=mpyu(Rs32.h,Rt32.h):<<1\n" " C Intrinsic Prototype: Word64 Q6_P_mpyunac_RhRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpyunac_RhRh_s1 __builtin_HEXAGON_M2_mpyud_nac_hh_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32-=mpyu(Rs32.h,Rt32.l)\n" " C Intrinsic Prototype: Word64 Q6_P_mpyunac_RhRl(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpyunac_RhRl __builtin_HEXAGON_M2_mpyud_nac_hl_s0\n" "\n" "/* 
==========================================================================\n" " Assembly Syntax: Rxx32-=mpyu(Rs32.h,Rt32.l):<<1\n" " C Intrinsic Prototype: Word64 Q6_P_mpyunac_RhRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpyunac_RhRl_s1 __builtin_HEXAGON_M2_mpyud_nac_hl_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32-=mpyu(Rs32.l,Rt32.h)\n" " C Intrinsic Prototype: Word64 Q6_P_mpyunac_RlRh(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpyunac_RlRh __builtin_HEXAGON_M2_mpyud_nac_lh_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32-=mpyu(Rs32.l,Rt32.h):<<1\n" " C Intrinsic Prototype: Word64 Q6_P_mpyunac_RlRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpyunac_RlRh_s1 __builtin_HEXAGON_M2_mpyud_nac_lh_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32-=mpyu(Rs32.l,Rt32.l)\n" " C Intrinsic Prototype: Word64 Q6_P_mpyunac_RlRl(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpyunac_RlRl __builtin_HEXAGON_M2_mpyud_nac_ll_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32-=mpyu(Rs32.l,Rt32.l):<<1\n" " C Intrinsic Prototype: Word64 Q6_P_mpyunac_RlRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " 
Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_mpyunac_RlRl_s1 __builtin_HEXAGON_M2_mpyud_nac_ll_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mpyui(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_mpyui_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyui_RR __builtin_HEXAGON_M2_mpyui\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32-=add(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_addnac_RR(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_addnac_RR __builtin_HEXAGON_M2_nacci\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32-=add(Rs32,#s8)\n" " C Intrinsic Prototype: Word32 Q6_R_addnac_RI(Word32 Rx, Word32 Rs, Word32 Is8)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_addnac_RI __builtin_HEXAGON_M2_naccii\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32+=sub(Rt32,Rs32)\n" " C Intrinsic Prototype: Word32 Q6_R_subacc_RR(Word32 Rx, Word32 Rt, Word32 Rs)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_subacc_RR __builtin_HEXAGON_M2_subacc\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vabsdiffh(Rtt32,Rss32)\n" " C Intrinsic Prototype: Word64 
Q6_P_vabsdiffh_PP(Word64 Rtt, Word64 Rss)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vabsdiffh_PP __builtin_HEXAGON_M2_vabsdiffh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vabsdiffw(Rtt32,Rss32)\n" " C Intrinsic Prototype: Word64 Q6_P_vabsdiffw_PP(Word64 Rtt, Word64 Rss)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vabsdiffw_PP __builtin_HEXAGON_M2_vabsdiffw\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vcmpyi(Rss32,Rtt32):sat\n" " C Intrinsic Prototype: Word64 Q6_P_vcmpyiacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vcmpyiacc_PP_sat __builtin_HEXAGON_M2_vcmac_s0_sat_i\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vcmpyr(Rss32,Rtt32):sat\n" " C Intrinsic Prototype: Word64 Q6_P_vcmpyracc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vcmpyracc_PP_sat __builtin_HEXAGON_M2_vcmac_s0_sat_r\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vcmpyi(Rss32,Rtt32):sat\n" " C Intrinsic Prototype: Word64 Q6_P_vcmpyi_PP_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vcmpyi_PP_sat __builtin_HEXAGON_M2_vcmpy_s0_sat_i\n" "\n" "/* 
==========================================================================\n" " Assembly Syntax: Rdd32=vcmpyr(Rss32,Rtt32):sat\n" " C Intrinsic Prototype: Word64 Q6_P_vcmpyr_PP_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vcmpyr_PP_sat __builtin_HEXAGON_M2_vcmpy_s0_sat_r\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vcmpyi(Rss32,Rtt32):<<1:sat\n" " C Intrinsic Prototype: Word64 Q6_P_vcmpyi_PP_s1_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vcmpyi_PP_s1_sat __builtin_HEXAGON_M2_vcmpy_s1_sat_i\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vcmpyr(Rss32,Rtt32):<<1:sat\n" " C Intrinsic Prototype: Word64 Q6_P_vcmpyr_PP_s1_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vcmpyr_PP_s1_sat __builtin_HEXAGON_M2_vcmpy_s1_sat_r\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vdmpy(Rss32,Rtt32):sat\n" " C Intrinsic Prototype: Word64 Q6_P_vdmpyacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vdmpyacc_PP_sat __builtin_HEXAGON_M2_vdmacs_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vdmpy(Rss32,Rtt32):<<1:sat\n" " C Intrinsic Prototype: Word64 Q6_P_vdmpyacc_PP_s1_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " 
Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vdmpyacc_PP_s1_sat __builtin_HEXAGON_M2_vdmacs_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=vdmpy(Rss32,Rtt32):rnd:sat\n" " C Intrinsic Prototype: Word32 Q6_R_vdmpy_PP_rnd_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_vdmpy_PP_rnd_sat __builtin_HEXAGON_M2_vdmpyrs_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=vdmpy(Rss32,Rtt32):<<1:rnd:sat\n" " C Intrinsic Prototype: Word32 Q6_R_vdmpy_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_vdmpy_PP_s1_rnd_sat __builtin_HEXAGON_M2_vdmpyrs_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vdmpy(Rss32,Rtt32):sat\n" " C Intrinsic Prototype: Word64 Q6_P_vdmpy_PP_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vdmpy_PP_sat __builtin_HEXAGON_M2_vdmpys_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vdmpy(Rss32,Rtt32):<<1:sat\n" " C Intrinsic Prototype: Word64 Q6_P_vdmpy_PP_s1_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vdmpy_PP_s1_sat __builtin_HEXAGON_M2_vdmpys_s1\n" "\n" "/* ==========================================================================\n" " Assembly 
Syntax: Rxx32+=vmpyh(Rs32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vmpyhacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpyhacc_RR __builtin_HEXAGON_M2_vmac2\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vmpyeh(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vmpyehacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpyehacc_PP __builtin_HEXAGON_M2_vmac2es\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vmpyeh(Rss32,Rtt32):sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpyehacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpyehacc_PP_sat __builtin_HEXAGON_M2_vmac2es_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vmpyeh(Rss32,Rtt32):<<1:sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpyehacc_PP_s1_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpyehacc_PP_s1_sat __builtin_HEXAGON_M2_vmac2es_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vmpyh(Rs32,Rt32):sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpyhacc_RR_sat(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== 
*/\n" "\n" "#define Q6_P_vmpyhacc_RR_sat __builtin_HEXAGON_M2_vmac2s_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vmpyh(Rs32,Rt32):<<1:sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpyhacc_RR_s1_sat(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpyhacc_RR_s1_sat __builtin_HEXAGON_M2_vmac2s_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vmpyhsu(Rs32,Rt32):sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpyhsuacc_RR_sat(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpyhsuacc_RR_sat __builtin_HEXAGON_M2_vmac2su_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vmpyhsu(Rs32,Rt32):<<1:sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpyhsuacc_RR_s1_sat(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpyhsuacc_RR_s1_sat __builtin_HEXAGON_M2_vmac2su_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vmpyeh(Rss32,Rtt32):sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpyeh_PP_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpyeh_PP_sat __builtin_HEXAGON_M2_vmpy2es_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vmpyeh(Rss32,Rtt32):<<1:sat\n" " C Intrinsic Prototype: 
Word64 Q6_P_vmpyeh_PP_s1_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpyeh_PP_s1_sat __builtin_HEXAGON_M2_vmpy2es_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vmpyh(Rs32,Rt32):sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpyh_RR_sat(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpyh_RR_sat __builtin_HEXAGON_M2_vmpy2s_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=vmpyh(Rs32,Rt32):rnd:sat\n" " C Intrinsic Prototype: Word32 Q6_R_vmpyh_RR_rnd_sat(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_vmpyh_RR_rnd_sat __builtin_HEXAGON_M2_vmpy2s_s0pack\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vmpyh(Rs32,Rt32):<<1:sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpyh_RR_s1_sat(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpyh_RR_s1_sat __builtin_HEXAGON_M2_vmpy2s_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=vmpyh(Rs32,Rt32):<<1:rnd:sat\n" " C Intrinsic Prototype: Word32 Q6_R_vmpyh_RR_s1_rnd_sat(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_vmpyh_RR_s1_rnd_sat __builtin_HEXAGON_M2_vmpy2s_s1pack\n" "\n" "/* 
==========================================================================\n" " Assembly Syntax: Rdd32=vmpyhsu(Rs32,Rt32):sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpyhsu_RR_sat(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpyhsu_RR_sat __builtin_HEXAGON_M2_vmpy2su_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vmpyhsu(Rs32,Rt32):<<1:sat\n" " C Intrinsic Prototype: Word64 Q6_P_vmpyhsu_RR_s1_sat(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpyhsu_RR_s1_sat __builtin_HEXAGON_M2_vmpy2su_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=vraddh(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word32 Q6_R_vraddh_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_vraddh_PP __builtin_HEXAGON_M2_vraddh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=vradduh(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word32 Q6_R_vradduh_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_vradduh_PP __builtin_HEXAGON_M2_vradduh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vrcmpyi(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vrcmpyiacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " 
========================================================================== */\n" "\n" "#define Q6_P_vrcmpyiacc_PP __builtin_HEXAGON_M2_vrcmaci_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vrcmpyi(Rss32,Rtt32*)\n" " C Intrinsic Prototype: Word64 Q6_P_vrcmpyiacc_PP_conj(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vrcmpyiacc_PP_conj __builtin_HEXAGON_M2_vrcmaci_s0c\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vrcmpyr(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vrcmpyracc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vrcmpyracc_PP __builtin_HEXAGON_M2_vrcmacr_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vrcmpyr(Rss32,Rtt32*)\n" " C Intrinsic Prototype: Word64 Q6_P_vrcmpyracc_PP_conj(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vrcmpyracc_PP_conj __builtin_HEXAGON_M2_vrcmacr_s0c\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vrcmpyi(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vrcmpyi_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vrcmpyi_PP __builtin_HEXAGON_M2_vrcmpyi_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: 
Rdd32=vrcmpyi(Rss32,Rtt32*)\n" " C Intrinsic Prototype: Word64 Q6_P_vrcmpyi_PP_conj(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vrcmpyi_PP_conj __builtin_HEXAGON_M2_vrcmpyi_s0c\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vrcmpyr(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vrcmpyr_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vrcmpyr_PP __builtin_HEXAGON_M2_vrcmpyr_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vrcmpyr(Rss32,Rtt32*)\n" " C Intrinsic Prototype: Word64 Q6_P_vrcmpyr_PP_conj(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vrcmpyr_PP_conj __builtin_HEXAGON_M2_vrcmpyr_s0c\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vrcmpys(Rss32,Rt32):<<1:sat\n" " C Intrinsic Prototype: Word64 Q6_P_vrcmpysacc_PR_s1_sat(Word64 Rxx, Word64 Rss, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_P_vrcmpysacc_PR_s1_sat __builtin_HEXAGON_M2_vrcmpys_acc_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vrcmpys(Rss32,Rt32):<<1:sat\n" " C Intrinsic Prototype: Word64 Q6_P_vrcmpys_PR_s1_sat(Word64 Rss, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define 
Q6_P_vrcmpys_PR_s1_sat __builtin_HEXAGON_M2_vrcmpys_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=vrcmpys(Rss32,Rt32):<<1:rnd:sat\n" " C Intrinsic Prototype: Word32 Q6_R_vrcmpys_PR_s1_rnd_sat(Word64 Rss, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_vrcmpys_PR_s1_rnd_sat __builtin_HEXAGON_M2_vrcmpys_s1rp\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vrmpyh(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vrmpyhacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vrmpyhacc_PP __builtin_HEXAGON_M2_vrmac_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vrmpyh(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vrmpyh_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vrmpyh_PP __builtin_HEXAGON_M2_vrmpy_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32^=xor(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_xorxacc_RR(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_xorxacc_RR __builtin_HEXAGON_M2_xor_xacc\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32&=and(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_andand_RR(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " 
Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_andand_RR __builtin_HEXAGON_M4_and_and\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32&=and(Rs32,~Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_andand_RnR(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_andand_RnR __builtin_HEXAGON_M4_and_andn\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32&=or(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_orand_RR(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_orand_RR __builtin_HEXAGON_M4_and_or\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32&=xor(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_xorand_RR(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_xorand_RR __builtin_HEXAGON_M4_and_xor\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=cmpyiwh(Rss32,Rt32):<<1:rnd:sat\n" " C Intrinsic Prototype: Word32 Q6_R_cmpyiwh_PR_s1_rnd_sat(Word64 Rss, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_cmpyiwh_PR_s1_rnd_sat __builtin_HEXAGON_M4_cmpyi_wh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=cmpyiwh(Rss32,Rt32*):<<1:rnd:sat\n" " 
C Intrinsic Prototype: Word32 Q6_R_cmpyiwh_PR_conj_s1_rnd_sat(Word64 Rss, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_cmpyiwh_PR_conj_s1_rnd_sat __builtin_HEXAGON_M4_cmpyi_whc\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=cmpyrwh(Rss32,Rt32):<<1:rnd:sat\n" " C Intrinsic Prototype: Word32 Q6_R_cmpyrwh_PR_s1_rnd_sat(Word64 Rss, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_cmpyrwh_PR_s1_rnd_sat __builtin_HEXAGON_M4_cmpyr_wh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=cmpyrwh(Rss32,Rt32*):<<1:rnd:sat\n" " C Intrinsic Prototype: Word32 Q6_R_cmpyrwh_PR_conj_s1_rnd_sat(Word64 Rss, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_cmpyrwh_PR_conj_s1_rnd_sat __builtin_HEXAGON_M4_cmpyr_whc\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32+=mpy(Rs32,Rt32):<<1:sat\n" " C Intrinsic Prototype: Word32 Q6_R_mpyacc_RR_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpyacc_RR_s1_sat __builtin_HEXAGON_M4_mac_up_s1_sat\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=add(#u6,mpyi(Rs32,#U6))\n" " C Intrinsic Prototype: Word32 Q6_R_add_mpyi_IRI(Word32 Iu6, Word32 Rs, Word32 IU6)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " 
========================================================================== */\n" "\n" "#define Q6_R_add_mpyi_IRI __builtin_HEXAGON_M4_mpyri_addi\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=add(Ru32,mpyi(Rs32,#u6))\n" " C Intrinsic Prototype: Word32 Q6_R_add_mpyi_RRI(Word32 Ru, Word32 Rs, Word32 Iu6)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_add_mpyi_RRI __builtin_HEXAGON_M4_mpyri_addr\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=add(Ru32,mpyi(#u6:2,Rs32))\n" " C Intrinsic Prototype: Word32 Q6_R_add_mpyi_RIR(Word32 Ru, Word32 Iu6_2, Word32 Rs)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_add_mpyi_RIR __builtin_HEXAGON_M4_mpyri_addr_u2\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=add(#u6,mpyi(Rs32,Rt32))\n" " C Intrinsic Prototype: Word32 Q6_R_add_mpyi_IRR(Word32 Iu6, Word32 Rs, Word32 Rt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_add_mpyi_IRR __builtin_HEXAGON_M4_mpyrr_addi\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Ry32=add(Ru32,mpyi(Ry32,Rs32))\n" " C Intrinsic Prototype: Word32 Q6_R_add_mpyi_RRR(Word32 Ru, Word32 Ry, Word32 Rs)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_add_mpyi_RRR __builtin_HEXAGON_M4_mpyrr_addr\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: 
Rx32-=mpy(Rs32,Rt32):<<1:sat\n" " C Intrinsic Prototype: Word32 Q6_R_mpynac_RR_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mpynac_RR_s1_sat __builtin_HEXAGON_M4_nac_up_s1_sat\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32|=and(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_andor_RR(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_andor_RR __builtin_HEXAGON_M4_or_and\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32|=and(Rs32,~Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_andor_RnR(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_andor_RnR __builtin_HEXAGON_M4_or_andn\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32|=or(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_oror_RR(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_oror_RR __builtin_HEXAGON_M4_or_or\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32|=xor(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_xoror_RR(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_xoror_RR __builtin_HEXAGON_M4_or_xor\n" "\n" "/* 
==========================================================================\n" " Assembly Syntax: Rdd32=pmpyw(Rs32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_pmpyw_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_pmpyw_RR __builtin_HEXAGON_M4_pmpyw\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32^=pmpyw(Rs32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_pmpywxacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_pmpywxacc_RR __builtin_HEXAGON_M4_pmpyw_acc\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vpmpyh(Rs32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vpmpyh_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vpmpyh_RR __builtin_HEXAGON_M4_vpmpyh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32^=vpmpyh(Rs32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vpmpyhxacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vpmpyhxacc_RR __builtin_HEXAGON_M4_vpmpyh_acc\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vrmpyweh(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vrmpywehacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " 
========================================================================== */\n" "\n" "#define Q6_P_vrmpywehacc_PP __builtin_HEXAGON_M4_vrmpyeh_acc_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vrmpyweh(Rss32,Rtt32):<<1\n" " C Intrinsic Prototype: Word64 Q6_P_vrmpywehacc_PP_s1(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vrmpywehacc_PP_s1 __builtin_HEXAGON_M4_vrmpyeh_acc_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vrmpyweh(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vrmpyweh_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vrmpyweh_PP __builtin_HEXAGON_M4_vrmpyeh_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vrmpyweh(Rss32,Rtt32):<<1\n" " C Intrinsic Prototype: Word64 Q6_P_vrmpyweh_PP_s1(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vrmpyweh_PP_s1 __builtin_HEXAGON_M4_vrmpyeh_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vrmpywoh(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vrmpywohacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vrmpywohacc_PP __builtin_HEXAGON_M4_vrmpyoh_acc_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: 
Rxx32+=vrmpywoh(Rss32,Rtt32):<<1\n" " C Intrinsic Prototype: Word64 Q6_P_vrmpywohacc_PP_s1(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vrmpywohacc_PP_s1 __builtin_HEXAGON_M4_vrmpyoh_acc_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vrmpywoh(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vrmpywoh_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vrmpywoh_PP __builtin_HEXAGON_M4_vrmpyoh_s0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vrmpywoh(Rss32,Rtt32):<<1\n" " C Intrinsic Prototype: Word64 Q6_P_vrmpywoh_PP_s1(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vrmpywoh_PP_s1 __builtin_HEXAGON_M4_vrmpyoh_s1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32^=and(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_andxacc_RR(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_andxacc_RR __builtin_HEXAGON_M4_xor_and\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32^=and(Rs32,~Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_andxacc_RnR(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_andxacc_RnR 
__builtin_HEXAGON_M4_xor_andn\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32^=or(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_orxacc_RR(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_orxacc_RR __builtin_HEXAGON_M4_xor_or\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32^=xor(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_xorxacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_xorxacc_PP __builtin_HEXAGON_M4_xor_xacc\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vdmpybsu(Rss32,Rtt32):sat\n" " C Intrinsic Prototype: Word64 Q6_P_vdmpybsuacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vdmpybsuacc_PP_sat __builtin_HEXAGON_M5_vdmacbsu\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vdmpybsu(Rss32,Rtt32):sat\n" " C Intrinsic Prototype: Word64 Q6_P_vdmpybsu_PP_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vdmpybsu_PP_sat __builtin_HEXAGON_M5_vdmpybsu\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vmpybsu(Rs32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vmpybsuacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution 
Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpybsuacc_RR __builtin_HEXAGON_M5_vmacbsu\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vmpybu(Rs32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vmpybuacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpybuacc_RR __builtin_HEXAGON_M5_vmacbuu\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vmpybsu(Rs32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vmpybsu_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpybsu_RR __builtin_HEXAGON_M5_vmpybsu\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vmpybu(Rs32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vmpybu_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vmpybu_RR __builtin_HEXAGON_M5_vmpybuu\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vrmpybsu(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vrmpybsuacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vrmpybsuacc_PP __builtin_HEXAGON_M5_vrmacbsu\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vrmpybu(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 
Q6_P_vrmpybuacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vrmpybuacc_PP __builtin_HEXAGON_M5_vrmacbuu\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vrmpybsu(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vrmpybsu_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vrmpybsu_PP __builtin_HEXAGON_M5_vrmpybsu\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vrmpybu(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vrmpybu_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vrmpybu_PP __builtin_HEXAGON_M5_vrmpybuu\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=addasl(Rt32,Rs32,#u3)\n" " C Intrinsic Prototype: Word32 Q6_R_addasl_RRI(Word32 Rt, Word32 Rs, Word32 Iu3)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_addasl_RRI __builtin_HEXAGON_S2_addasl_rrri\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=asl(Rss32,#u6)\n" " C Intrinsic Prototype: Word64 Q6_P_asl_PI(Word64 Rss, Word32 Iu6)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_asl_PI __builtin_HEXAGON_S2_asl_i_p\n" "\n" "/* 
==========================================================================\n" " Assembly Syntax: Rxx32+=asl(Rss32,#u6)\n" " C Intrinsic Prototype: Word64 Q6_P_aslacc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_aslacc_PI __builtin_HEXAGON_S2_asl_i_p_acc\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32&=asl(Rss32,#u6)\n" " C Intrinsic Prototype: Word64 Q6_P_asland_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_asland_PI __builtin_HEXAGON_S2_asl_i_p_and\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32-=asl(Rss32,#u6)\n" " C Intrinsic Prototype: Word64 Q6_P_aslnac_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_aslnac_PI __builtin_HEXAGON_S2_asl_i_p_nac\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32|=asl(Rss32,#u6)\n" " C Intrinsic Prototype: Word64 Q6_P_aslor_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_aslor_PI __builtin_HEXAGON_S2_asl_i_p_or\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32^=asl(Rss32,#u6)\n" " C Intrinsic Prototype: Word64 Q6_P_aslxacc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " 
========================================================================== */\n" "\n" "#define Q6_P_aslxacc_PI __builtin_HEXAGON_S2_asl_i_p_xacc\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=asl(Rs32,#u5)\n" " C Intrinsic Prototype: Word32 Q6_R_asl_RI(Word32 Rs, Word32 Iu5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_asl_RI __builtin_HEXAGON_S2_asl_i_r\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32+=asl(Rs32,#u5)\n" " C Intrinsic Prototype: Word32 Q6_R_aslacc_RI(Word32 Rx, Word32 Rs, Word32 Iu5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_aslacc_RI __builtin_HEXAGON_S2_asl_i_r_acc\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32&=asl(Rs32,#u5)\n" " C Intrinsic Prototype: Word32 Q6_R_asland_RI(Word32 Rx, Word32 Rs, Word32 Iu5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_asland_RI __builtin_HEXAGON_S2_asl_i_r_and\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32-=asl(Rs32,#u5)\n" " C Intrinsic Prototype: Word32 Q6_R_aslnac_RI(Word32 Rx, Word32 Rs, Word32 Iu5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_aslnac_RI __builtin_HEXAGON_S2_asl_i_r_nac\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32|=asl(Rs32,#u5)\n" " C Intrinsic Prototype: Word32 Q6_R_aslor_RI(Word32 Rx, Word32 Rs, 
Word32 Iu5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_aslor_RI __builtin_HEXAGON_S2_asl_i_r_or\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=asl(Rs32,#u5):sat\n" " C Intrinsic Prototype: Word32 Q6_R_asl_RI_sat(Word32 Rs, Word32 Iu5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_asl_RI_sat __builtin_HEXAGON_S2_asl_i_r_sat\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32^=asl(Rs32,#u5)\n" " C Intrinsic Prototype: Word32 Q6_R_aslxacc_RI(Word32 Rx, Word32 Rs, Word32 Iu5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_aslxacc_RI __builtin_HEXAGON_S2_asl_i_r_xacc\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vaslh(Rss32,#u4)\n" " C Intrinsic Prototype: Word64 Q6_P_vaslh_PI(Word64 Rss, Word32 Iu4)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vaslh_PI __builtin_HEXAGON_S2_asl_i_vh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vaslw(Rss32,#u5)\n" " C Intrinsic Prototype: Word64 Q6_P_vaslw_PI(Word64 Rss, Word32 Iu5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vaslw_PI __builtin_HEXAGON_S2_asl_i_vw\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=asl(Rss32,Rt32)\n" 
" C Intrinsic Prototype: Word64 Q6_P_asl_PR(Word64 Rss, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_asl_PR __builtin_HEXAGON_S2_asl_r_p\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=asl(Rss32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_aslacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_aslacc_PR __builtin_HEXAGON_S2_asl_r_p_acc\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32&=asl(Rss32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_asland_PR(Word64 Rxx, Word64 Rss, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_asland_PR __builtin_HEXAGON_S2_asl_r_p_and\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32-=asl(Rss32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_aslnac_PR(Word64 Rxx, Word64 Rss, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_aslnac_PR __builtin_HEXAGON_S2_asl_r_p_nac\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32|=asl(Rss32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_aslor_PR(Word64 Rxx, Word64 Rss, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_aslor_PR __builtin_HEXAGON_S2_asl_r_p_or\n" "\n" "/* 
==========================================================================\n" " Assembly Syntax: Rxx32^=asl(Rss32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_aslxacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_aslxacc_PR __builtin_HEXAGON_S2_asl_r_p_xor\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=asl(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_asl_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_asl_RR __builtin_HEXAGON_S2_asl_r_r\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32+=asl(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_aslacc_RR(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_aslacc_RR __builtin_HEXAGON_S2_asl_r_r_acc\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32&=asl(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_asland_RR(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_asland_RR __builtin_HEXAGON_S2_asl_r_r_and\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32-=asl(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_aslnac_RR(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== 
*/\n" "\n" "#define Q6_R_aslnac_RR __builtin_HEXAGON_S2_asl_r_r_nac\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32|=asl(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_aslor_RR(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_aslor_RR __builtin_HEXAGON_S2_asl_r_r_or\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=asl(Rs32,Rt32):sat\n" " C Intrinsic Prototype: Word32 Q6_R_asl_RR_sat(Word32 Rs, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_asl_RR_sat __builtin_HEXAGON_S2_asl_r_r_sat\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vaslh(Rss32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vaslh_PR(Word64 Rss, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vaslh_PR __builtin_HEXAGON_S2_asl_r_vh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vaslw(Rss32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vaslw_PR(Word64 Rss, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vaslw_PR __builtin_HEXAGON_S2_asl_r_vw\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=asr(Rss32,#u6)\n" " C Intrinsic Prototype: Word64 Q6_P_asr_PI(Word64 Rss, Word32 Iu6)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " 
========================================================================== */\n" "\n" "#define Q6_P_asr_PI __builtin_HEXAGON_S2_asr_i_p\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=asr(Rss32,#u6)\n" " C Intrinsic Prototype: Word64 Q6_P_asracc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_asracc_PI __builtin_HEXAGON_S2_asr_i_p_acc\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32&=asr(Rss32,#u6)\n" " C Intrinsic Prototype: Word64 Q6_P_asrand_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_asrand_PI __builtin_HEXAGON_S2_asr_i_p_and\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32-=asr(Rss32,#u6)\n" " C Intrinsic Prototype: Word64 Q6_P_asrnac_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_asrnac_PI __builtin_HEXAGON_S2_asr_i_p_nac\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32|=asr(Rss32,#u6)\n" " C Intrinsic Prototype: Word64 Q6_P_asror_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_asror_PI __builtin_HEXAGON_S2_asr_i_p_or\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=asr(Rss32,#u6):rnd\n" " C Intrinsic Prototype: Word64 
Q6_P_asr_PI_rnd(Word64 Rss, Word32 Iu6)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_asr_PI_rnd __builtin_HEXAGON_S2_asr_i_p_rnd\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=asrrnd(Rss32,#u6)\n" " C Intrinsic Prototype: Word64 Q6_P_asrrnd_PI(Word64 Rss, Word32 Iu6)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_P_asrrnd_PI __builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=asr(Rs32,#u5)\n" " C Intrinsic Prototype: Word32 Q6_R_asr_RI(Word32 Rs, Word32 Iu5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_asr_RI __builtin_HEXAGON_S2_asr_i_r\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32+=asr(Rs32,#u5)\n" " C Intrinsic Prototype: Word32 Q6_R_asracc_RI(Word32 Rx, Word32 Rs, Word32 Iu5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_asracc_RI __builtin_HEXAGON_S2_asr_i_r_acc\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32&=asr(Rs32,#u5)\n" " C Intrinsic Prototype: Word32 Q6_R_asrand_RI(Word32 Rx, Word32 Rs, Word32 Iu5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_asrand_RI __builtin_HEXAGON_S2_asr_i_r_and\n" "\n" "/* ==========================================================================\n" " 
Assembly Syntax: Rx32-=asr(Rs32,#u5)\n" " C Intrinsic Prototype: Word32 Q6_R_asrnac_RI(Word32 Rx, Word32 Rs, Word32 Iu5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_asrnac_RI __builtin_HEXAGON_S2_asr_i_r_nac\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32|=asr(Rs32,#u5)\n" " C Intrinsic Prototype: Word32 Q6_R_asror_RI(Word32 Rx, Word32 Rs, Word32 Iu5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_asror_RI __builtin_HEXAGON_S2_asr_i_r_or\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=asr(Rs32,#u5):rnd\n" " C Intrinsic Prototype: Word32 Q6_R_asr_RI_rnd(Word32 Rs, Word32 Iu5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_asr_RI_rnd __builtin_HEXAGON_S2_asr_i_r_rnd\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=asrrnd(Rs32,#u5)\n" " C Intrinsic Prototype: Word32 Q6_R_asrrnd_RI(Word32 Rs, Word32 Iu5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_asrrnd_RI __builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=vasrw(Rss32,#u5)\n" " C Intrinsic Prototype: Word32 Q6_R_vasrw_PI(Word64 Rss, Word32 Iu5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_vasrw_PI __builtin_HEXAGON_S2_asr_i_svw_trun\n" "\n" 
"/* ==========================================================================\n" " Assembly Syntax: Rdd32=vasrh(Rss32,#u4)\n" " C Intrinsic Prototype: Word64 Q6_P_vasrh_PI(Word64 Rss, Word32 Iu4)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vasrh_PI __builtin_HEXAGON_S2_asr_i_vh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vasrw(Rss32,#u5)\n" " C Intrinsic Prototype: Word64 Q6_P_vasrw_PI(Word64 Rss, Word32 Iu5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vasrw_PI __builtin_HEXAGON_S2_asr_i_vw\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=asr(Rss32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_asr_PR(Word64 Rss, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_asr_PR __builtin_HEXAGON_S2_asr_r_p\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=asr(Rss32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_asracc_PR(Word64 Rxx, Word64 Rss, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_asracc_PR __builtin_HEXAGON_S2_asr_r_p_acc\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32&=asr(Rss32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_asrand_PR(Word64 Rxx, Word64 Rss, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" 
"#define Q6_P_asrand_PR __builtin_HEXAGON_S2_asr_r_p_and\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32-=asr(Rss32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_asrnac_PR(Word64 Rxx, Word64 Rss, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_asrnac_PR __builtin_HEXAGON_S2_asr_r_p_nac\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32|=asr(Rss32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_asror_PR(Word64 Rxx, Word64 Rss, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_asror_PR __builtin_HEXAGON_S2_asr_r_p_or\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32^=asr(Rss32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_asrxacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_asrxacc_PR __builtin_HEXAGON_S2_asr_r_p_xor\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=asr(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_asr_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_asr_RR __builtin_HEXAGON_S2_asr_r_r\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32+=asr(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_asracc_RR(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " 
========================================================================== */\n" "\n" "#define Q6_R_asracc_RR __builtin_HEXAGON_S2_asr_r_r_acc\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32&=asr(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_asrand_RR(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_asrand_RR __builtin_HEXAGON_S2_asr_r_r_and\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32-=asr(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_asrnac_RR(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_asrnac_RR __builtin_HEXAGON_S2_asr_r_r_nac\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32|=asr(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_asror_RR(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_asror_RR __builtin_HEXAGON_S2_asr_r_r_or\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=asr(Rs32,Rt32):sat\n" " C Intrinsic Prototype: Word32 Q6_R_asr_RR_sat(Word32 Rs, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_asr_RR_sat __builtin_HEXAGON_S2_asr_r_r_sat\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=vasrw(Rss32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_vasrw_PR(Word64 
Rss, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_vasrw_PR __builtin_HEXAGON_S2_asr_r_svw_trun\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vasrh(Rss32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vasrh_PR(Word64 Rss, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vasrh_PR __builtin_HEXAGON_S2_asr_r_vh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vasrw(Rss32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vasrw_PR(Word64 Rss, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vasrw_PR __builtin_HEXAGON_S2_asr_r_vw\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=brev(Rs32)\n" " C Intrinsic Prototype: Word32 Q6_R_brev_R(Word32 Rs)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_brev_R __builtin_HEXAGON_S2_brev\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=brev(Rss32)\n" " C Intrinsic Prototype: Word64 Q6_P_brev_P(Word64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_brev_P __builtin_HEXAGON_S2_brevp\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=cl0(Rs32)\n" " C Intrinsic Prototype: Word32 Q6_R_cl0_R(Word32 Rs)\n" " Instruction 
Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_cl0_R __builtin_HEXAGON_S2_cl0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=cl0(Rss32)\n" " C Intrinsic Prototype: Word32 Q6_R_cl0_P(Word64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_cl0_P __builtin_HEXAGON_S2_cl0p\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=cl1(Rs32)\n" " C Intrinsic Prototype: Word32 Q6_R_cl1_R(Word32 Rs)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_cl1_R __builtin_HEXAGON_S2_cl1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=cl1(Rss32)\n" " C Intrinsic Prototype: Word32 Q6_R_cl1_P(Word64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_cl1_P __builtin_HEXAGON_S2_cl1p\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=clb(Rs32)\n" " C Intrinsic Prototype: Word32 Q6_R_clb_R(Word32 Rs)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_clb_R __builtin_HEXAGON_S2_clb\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=normamt(Rs32)\n" " C Intrinsic Prototype: Word32 Q6_R_normamt_R(Word32 Rs)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " 
========================================================================== */\n" "\n" "#define Q6_R_normamt_R __builtin_HEXAGON_S2_clbnorm\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=clb(Rss32)\n" " C Intrinsic Prototype: Word32 Q6_R_clb_P(Word64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_clb_P __builtin_HEXAGON_S2_clbp\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=clrbit(Rs32,#u5)\n" " C Intrinsic Prototype: Word32 Q6_R_clrbit_RI(Word32 Rs, Word32 Iu5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_clrbit_RI __builtin_HEXAGON_S2_clrbit_i\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=clrbit(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_clrbit_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_clrbit_RR __builtin_HEXAGON_S2_clrbit_r\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=ct0(Rs32)\n" " C Intrinsic Prototype: Word32 Q6_R_ct0_R(Word32 Rs)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_ct0_R __builtin_HEXAGON_S2_ct0\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=ct0(Rss32)\n" " C Intrinsic Prototype: Word32 Q6_R_ct0_P(Word64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " 
========================================================================== */\n" "\n" "#define Q6_R_ct0_P __builtin_HEXAGON_S2_ct0p\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=ct1(Rs32)\n" " C Intrinsic Prototype: Word32 Q6_R_ct1_R(Word32 Rs)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_ct1_R __builtin_HEXAGON_S2_ct1\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=ct1(Rss32)\n" " C Intrinsic Prototype: Word32 Q6_R_ct1_P(Word64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_ct1_P __builtin_HEXAGON_S2_ct1p\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=deinterleave(Rss32)\n" " C Intrinsic Prototype: Word64 Q6_P_deinterleave_P(Word64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_deinterleave_P __builtin_HEXAGON_S2_deinterleave\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=extractu(Rs32,#u5,#U5)\n" " C Intrinsic Prototype: Word32 Q6_R_extractu_RII(Word32 Rs, Word32 Iu5, Word32 IU5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_extractu_RII __builtin_HEXAGON_S2_extractu\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=extractu(Rs32,Rtt32)\n" " C Intrinsic Prototype: Word32 Q6_R_extractu_RP(Word32 Rs, Word64 Rtt)\n" " Instruction Type: S_3op\n" " Execution Slots: 
SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_extractu_RP __builtin_HEXAGON_S2_extractu_rp\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=extractu(Rss32,#u6,#U6)\n" " C Intrinsic Prototype: Word64 Q6_P_extractu_PII(Word64 Rss, Word32 Iu6, Word32 IU6)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_extractu_PII __builtin_HEXAGON_S2_extractup\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=extractu(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_extractu_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_extractu_PP __builtin_HEXAGON_S2_extractup_rp\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32=insert(Rs32,#u5,#U5)\n" " C Intrinsic Prototype: Word32 Q6_R_insert_RII(Word32 Rx, Word32 Rs, Word32 Iu5, Word32 IU5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_insert_RII __builtin_HEXAGON_S2_insert\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32=insert(Rs32,Rtt32)\n" " C Intrinsic Prototype: Word32 Q6_R_insert_RP(Word32 Rx, Word32 Rs, Word64 Rtt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_insert_RP __builtin_HEXAGON_S2_insert_rp\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: 
Rxx32=insert(Rss32,#u6,#U6)\n" " C Intrinsic Prototype: Word64 Q6_P_insert_PII(Word64 Rxx, Word64 Rss, Word32 Iu6, Word32 IU6)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_insert_PII __builtin_HEXAGON_S2_insertp\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32=insert(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_insert_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_insert_PP __builtin_HEXAGON_S2_insertp_rp\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=interleave(Rss32)\n" " C Intrinsic Prototype: Word64 Q6_P_interleave_P(Word64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_interleave_P __builtin_HEXAGON_S2_interleave\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=lfs(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_lfs_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_lfs_PP __builtin_HEXAGON_S2_lfsp\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=lsl(Rss32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_lsl_PR(Word64 Rss, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_lsl_PR __builtin_HEXAGON_S2_lsl_r_p\n" "\n" "/* 
==========================================================================\n" " Assembly Syntax: Rxx32+=lsl(Rss32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_lslacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_lslacc_PR __builtin_HEXAGON_S2_lsl_r_p_acc\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32&=lsl(Rss32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_lsland_PR(Word64 Rxx, Word64 Rss, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_lsland_PR __builtin_HEXAGON_S2_lsl_r_p_and\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32-=lsl(Rss32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_lslnac_PR(Word64 Rxx, Word64 Rss, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_lslnac_PR __builtin_HEXAGON_S2_lsl_r_p_nac\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32|=lsl(Rss32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_lslor_PR(Word64 Rxx, Word64 Rss, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_lslor_PR __builtin_HEXAGON_S2_lsl_r_p_or\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32^=lsl(Rss32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_lslxacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " 
========================================================================== */\n" "\n" "#define Q6_P_lslxacc_PR __builtin_HEXAGON_S2_lsl_r_p_xor\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=lsl(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_lsl_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_lsl_RR __builtin_HEXAGON_S2_lsl_r_r\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32+=lsl(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_lslacc_RR(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_lslacc_RR __builtin_HEXAGON_S2_lsl_r_r_acc\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32&=lsl(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_lsland_RR(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_lsland_RR __builtin_HEXAGON_S2_lsl_r_r_and\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32-=lsl(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_lslnac_RR(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_lslnac_RR __builtin_HEXAGON_S2_lsl_r_r_nac\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32|=lsl(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_lslor_RR(Word32 Rx, Word32 Rs, 
Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_lslor_RR __builtin_HEXAGON_S2_lsl_r_r_or\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vlslh(Rss32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vlslh_PR(Word64 Rss, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vlslh_PR __builtin_HEXAGON_S2_lsl_r_vh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vlslw(Rss32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vlslw_PR(Word64 Rss, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vlslw_PR __builtin_HEXAGON_S2_lsl_r_vw\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=lsr(Rss32,#u6)\n" " C Intrinsic Prototype: Word64 Q6_P_lsr_PI(Word64 Rss, Word32 Iu6)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_lsr_PI __builtin_HEXAGON_S2_lsr_i_p\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=lsr(Rss32,#u6)\n" " C Intrinsic Prototype: Word64 Q6_P_lsracc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_lsracc_PI __builtin_HEXAGON_S2_lsr_i_p_acc\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32&=lsr(Rss32,#u6)\n" " C Intrinsic 
Prototype: Word64 Q6_P_lsrand_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_lsrand_PI __builtin_HEXAGON_S2_lsr_i_p_and\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32-=lsr(Rss32,#u6)\n" " C Intrinsic Prototype: Word64 Q6_P_lsrnac_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_lsrnac_PI __builtin_HEXAGON_S2_lsr_i_p_nac\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32|=lsr(Rss32,#u6)\n" " C Intrinsic Prototype: Word64 Q6_P_lsror_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_lsror_PI __builtin_HEXAGON_S2_lsr_i_p_or\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32^=lsr(Rss32,#u6)\n" " C Intrinsic Prototype: Word64 Q6_P_lsrxacc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_lsrxacc_PI __builtin_HEXAGON_S2_lsr_i_p_xacc\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=lsr(Rs32,#u5)\n" " C Intrinsic Prototype: Word32 Q6_R_lsr_RI(Word32 Rs, Word32 Iu5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_lsr_RI __builtin_HEXAGON_S2_lsr_i_r\n" "\n" "/* 
==========================================================================\n" " Assembly Syntax: Rx32+=lsr(Rs32,#u5)\n" " C Intrinsic Prototype: Word32 Q6_R_lsracc_RI(Word32 Rx, Word32 Rs, Word32 Iu5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_lsracc_RI __builtin_HEXAGON_S2_lsr_i_r_acc\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32&=lsr(Rs32,#u5)\n" " C Intrinsic Prototype: Word32 Q6_R_lsrand_RI(Word32 Rx, Word32 Rs, Word32 Iu5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_lsrand_RI __builtin_HEXAGON_S2_lsr_i_r_and\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32-=lsr(Rs32,#u5)\n" " C Intrinsic Prototype: Word32 Q6_R_lsrnac_RI(Word32 Rx, Word32 Rs, Word32 Iu5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_lsrnac_RI __builtin_HEXAGON_S2_lsr_i_r_nac\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32|=lsr(Rs32,#u5)\n" " C Intrinsic Prototype: Word32 Q6_R_lsror_RI(Word32 Rx, Word32 Rs, Word32 Iu5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_lsror_RI __builtin_HEXAGON_S2_lsr_i_r_or\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32^=lsr(Rs32,#u5)\n" " C Intrinsic Prototype: Word32 Q6_R_lsrxacc_RI(Word32 Rx, Word32 Rs, Word32 Iu5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " 
========================================================================== */\n" "\n" "#define Q6_R_lsrxacc_RI __builtin_HEXAGON_S2_lsr_i_r_xacc\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vlsrh(Rss32,#u4)\n" " C Intrinsic Prototype: Word64 Q6_P_vlsrh_PI(Word64 Rss, Word32 Iu4)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vlsrh_PI __builtin_HEXAGON_S2_lsr_i_vh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vlsrw(Rss32,#u5)\n" " C Intrinsic Prototype: Word64 Q6_P_vlsrw_PI(Word64 Rss, Word32 Iu5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vlsrw_PI __builtin_HEXAGON_S2_lsr_i_vw\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=lsr(Rss32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_lsr_PR(Word64 Rss, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_lsr_PR __builtin_HEXAGON_S2_lsr_r_p\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=lsr(Rss32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_lsracc_PR(Word64 Rxx, Word64 Rss, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_lsracc_PR __builtin_HEXAGON_S2_lsr_r_p_acc\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32&=lsr(Rss32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_lsrand_PR(Word64 Rxx, Word64 Rss, Word32 
Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_lsrand_PR __builtin_HEXAGON_S2_lsr_r_p_and\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32-=lsr(Rss32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_lsrnac_PR(Word64 Rxx, Word64 Rss, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_lsrnac_PR __builtin_HEXAGON_S2_lsr_r_p_nac\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32|=lsr(Rss32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_lsror_PR(Word64 Rxx, Word64 Rss, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_lsror_PR __builtin_HEXAGON_S2_lsr_r_p_or\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32^=lsr(Rss32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_lsrxacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_lsrxacc_PR __builtin_HEXAGON_S2_lsr_r_p_xor\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=lsr(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_lsr_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_lsr_RR __builtin_HEXAGON_S2_lsr_r_r\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: 
Rx32+=lsr(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_lsracc_RR(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_lsracc_RR __builtin_HEXAGON_S2_lsr_r_r_acc\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32&=lsr(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_lsrand_RR(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_lsrand_RR __builtin_HEXAGON_S2_lsr_r_r_and\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32-=lsr(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_lsrnac_RR(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_lsrnac_RR __builtin_HEXAGON_S2_lsr_r_r_nac\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32|=lsr(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_lsror_RR(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_lsror_RR __builtin_HEXAGON_S2_lsr_r_r_or\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vlsrh(Rss32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vlsrh_PR(Word64 Rss, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vlsrh_PR __builtin_HEXAGON_S2_lsr_r_vh\n" "\n" "/* 
==========================================================================\n" " Assembly Syntax: Rdd32=vlsrw(Rss32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vlsrw_PR(Word64 Rss, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vlsrw_PR __builtin_HEXAGON_S2_lsr_r_vw\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=packhl(Rs32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_packhl_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: ALU32_3op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_P_packhl_RR __builtin_HEXAGON_S2_packhl\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=parity(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word32 Q6_R_parity_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_parity_PP __builtin_HEXAGON_S2_parityp\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=setbit(Rs32,#u5)\n" " C Intrinsic Prototype: Word32 Q6_R_setbit_RI(Word32 Rs, Word32 Iu5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_setbit_RI __builtin_HEXAGON_S2_setbit_i\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=setbit(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_setbit_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define 
Q6_R_setbit_RR __builtin_HEXAGON_S2_setbit_r\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=shuffeb(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_shuffeb_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_shuffeb_PP __builtin_HEXAGON_S2_shuffeb\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=shuffeh(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_shuffeh_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_shuffeh_PP __builtin_HEXAGON_S2_shuffeh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=shuffob(Rtt32,Rss32)\n" " C Intrinsic Prototype: Word64 Q6_P_shuffob_PP(Word64 Rtt, Word64 Rss)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_shuffob_PP __builtin_HEXAGON_S2_shuffob\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=shuffoh(Rtt32,Rss32)\n" " C Intrinsic Prototype: Word64 Q6_P_shuffoh_PP(Word64 Rtt, Word64 Rss)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_shuffoh_PP __builtin_HEXAGON_S2_shuffoh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: memb(Rx32++#s4:0:circ(Mu2))=Rt32\n" " C Intrinsic Prototype: void Q6_memb_IMR_circ(void** Rx, Word32 Is4_0, Word32 Mu, Word32 Rt, void* BaseAddress)\n" " Instruction Type: ST\n" " Execution 
Slots: SLOT01\n" " ========================================================================== */\n" "\n" "#define Q6_memb_IMR_circ __builtin_HEXAGON_S2_storerb_pci\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: memb(Rx32++I:circ(Mu2))=Rt32\n" " C Intrinsic Prototype: void Q6_memb_MR_circ(void** Rx, Word32 Mu, Word32 Rt, void* BaseAddress)\n" " Instruction Type: ST\n" " Execution Slots: SLOT01\n" " ========================================================================== */\n" "\n" "#define Q6_memb_MR_circ __builtin_HEXAGON_S2_storerb_pcr\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: memd(Rx32++#s4:3:circ(Mu2))=Rtt32\n" " C Intrinsic Prototype: void Q6_memd_IMP_circ(void** Rx, Word32 Is4_3, Word32 Mu, Word64 Rtt, void* BaseAddress)\n" " Instruction Type: ST\n" " Execution Slots: SLOT01\n" " ========================================================================== */\n" "\n" "#define Q6_memd_IMP_circ __builtin_HEXAGON_S2_storerd_pci\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: memd(Rx32++I:circ(Mu2))=Rtt32\n" " C Intrinsic Prototype: void Q6_memd_MP_circ(void** Rx, Word32 Mu, Word64 Rtt, void* BaseAddress)\n" " Instruction Type: ST\n" " Execution Slots: SLOT01\n" " ========================================================================== */\n" "\n" "#define Q6_memd_MP_circ __builtin_HEXAGON_S2_storerd_pcr\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: memh(Rx32++#s4:1:circ(Mu2))=Rt32.h\n" " C Intrinsic Prototype: void Q6_memh_IMRh_circ(void** Rx, Word32 Is4_1, Word32 Mu, Word32 Rt, void* BaseAddress)\n" " Instruction Type: ST\n" " Execution Slots: SLOT01\n" " ========================================================================== */\n" "\n" "#define Q6_memh_IMRh_circ __builtin_HEXAGON_S2_storerf_pci\n" "\n" "/* 
==========================================================================\n" " Assembly Syntax: memh(Rx32++I:circ(Mu2))=Rt32.h\n" " C Intrinsic Prototype: void Q6_memh_MRh_circ(void** Rx, Word32 Mu, Word32 Rt, void* BaseAddress)\n" " Instruction Type: ST\n" " Execution Slots: SLOT01\n" " ========================================================================== */\n" "\n" "#define Q6_memh_MRh_circ __builtin_HEXAGON_S2_storerf_pcr\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: memh(Rx32++#s4:1:circ(Mu2))=Rt32\n" " C Intrinsic Prototype: void Q6_memh_IMR_circ(void** Rx, Word32 Is4_1, Word32 Mu, Word32 Rt, void* BaseAddress)\n" " Instruction Type: ST\n" " Execution Slots: SLOT01\n" " ========================================================================== */\n" "\n" "#define Q6_memh_IMR_circ __builtin_HEXAGON_S2_storerh_pci\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: memh(Rx32++I:circ(Mu2))=Rt32\n" " C Intrinsic Prototype: void Q6_memh_MR_circ(void** Rx, Word32 Mu, Word32 Rt, void* BaseAddress)\n" " Instruction Type: ST\n" " Execution Slots: SLOT01\n" " ========================================================================== */\n" "\n" "#define Q6_memh_MR_circ __builtin_HEXAGON_S2_storerh_pcr\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: memw(Rx32++#s4:2:circ(Mu2))=Rt32\n" " C Intrinsic Prototype: void Q6_memw_IMR_circ(void** Rx, Word32 Is4_2, Word32 Mu, Word32 Rt, void* BaseAddress)\n" " Instruction Type: ST\n" " Execution Slots: SLOT01\n" " ========================================================================== */\n" "\n" "#define Q6_memw_IMR_circ __builtin_HEXAGON_S2_storeri_pci\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: memw(Rx32++I:circ(Mu2))=Rt32\n" " C Intrinsic Prototype: void Q6_memw_MR_circ(void** 
Rx, Word32 Mu, Word32 Rt, void* BaseAddress)\n" " Instruction Type: ST\n" " Execution Slots: SLOT01\n" " ========================================================================== */\n" "\n" "#define Q6_memw_MR_circ __builtin_HEXAGON_S2_storeri_pcr\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=vsathb(Rs32)\n" " C Intrinsic Prototype: Word32 Q6_R_vsathb_R(Word32 Rs)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_vsathb_R __builtin_HEXAGON_S2_svsathb\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=vsathub(Rs32)\n" " C Intrinsic Prototype: Word32 Q6_R_vsathub_R(Word32 Rs)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_vsathub_R __builtin_HEXAGON_S2_svsathub\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32=tableidxb(Rs32,#u4,#U5)\n" " C Intrinsic Prototype: Word32 Q6_R_tableidxb_RII(Word32 Rx, Word32 Rs, Word32 Iu4, Word32 IU5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_tableidxb_RII __builtin_HEXAGON_S2_tableidxb_goodsyntax\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32=tableidxd(Rs32,#u4,#U5)\n" " C Intrinsic Prototype: Word32 Q6_R_tableidxd_RII(Word32 Rx, Word32 Rs, Word32 Iu4, Word32 IU5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_tableidxd_RII __builtin_HEXAGON_S2_tableidxd_goodsyntax\n" "\n" "/* 
==========================================================================\n" " Assembly Syntax: Rx32=tableidxh(Rs32,#u4,#U5)\n" " C Intrinsic Prototype: Word32 Q6_R_tableidxh_RII(Word32 Rx, Word32 Rs, Word32 Iu4, Word32 IU5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_tableidxh_RII __builtin_HEXAGON_S2_tableidxh_goodsyntax\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32=tableidxw(Rs32,#u4,#U5)\n" " C Intrinsic Prototype: Word32 Q6_R_tableidxw_RII(Word32 Rx, Word32 Rs, Word32 Iu4, Word32 IU5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_tableidxw_RII __builtin_HEXAGON_S2_tableidxw_goodsyntax\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=togglebit(Rs32,#u5)\n" " C Intrinsic Prototype: Word32 Q6_R_togglebit_RI(Word32 Rs, Word32 Iu5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_togglebit_RI __builtin_HEXAGON_S2_togglebit_i\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=togglebit(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_togglebit_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_togglebit_RR __builtin_HEXAGON_S2_togglebit_r\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=tstbit(Rs32,#u5)\n" " C Intrinsic Prototype: Byte Q6_p_tstbit_RI(Word32 Rs, Word32 Iu5)\n" " Instruction Type: S_2op\n" " Execution Slots: 
SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_tstbit_RI __builtin_HEXAGON_S2_tstbit_i\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=tstbit(Rs32,Rt32)\n" " C Intrinsic Prototype: Byte Q6_p_tstbit_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_tstbit_RR __builtin_HEXAGON_S2_tstbit_r\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=valignb(Rtt32,Rss32,#u3)\n" " C Intrinsic Prototype: Word64 Q6_P_valignb_PPI(Word64 Rtt, Word64 Rss, Word32 Iu3)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_valignb_PPI __builtin_HEXAGON_S2_valignib\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=valignb(Rtt32,Rss32,Pu4)\n" " C Intrinsic Prototype: Word64 Q6_P_valignb_PPp(Word64 Rtt, Word64 Rss, Byte Pu)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_valignb_PPp __builtin_HEXAGON_S2_valignrb\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vcnegh(Rss32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vcnegh_PR(Word64 Rss, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vcnegh_PR __builtin_HEXAGON_S2_vcnegh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vcrotate(Rss32,Rt32)\n" " C Intrinsic Prototype: Word64 
Q6_P_vcrotate_PR(Word64 Rss, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vcrotate_PR __builtin_HEXAGON_S2_vcrotate\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vrcnegh(Rss32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vrcneghacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vrcneghacc_PR __builtin_HEXAGON_S2_vrcnegh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=vrndwh(Rss32)\n" " C Intrinsic Prototype: Word32 Q6_R_vrndwh_P(Word64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_vrndwh_P __builtin_HEXAGON_S2_vrndpackwh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=vrndwh(Rss32):sat\n" " C Intrinsic Prototype: Word32 Q6_R_vrndwh_P_sat(Word64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_vrndwh_P_sat __builtin_HEXAGON_S2_vrndpackwhs\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=vsathb(Rss32)\n" " C Intrinsic Prototype: Word32 Q6_R_vsathb_P(Word64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_vsathb_P __builtin_HEXAGON_S2_vsathb\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vsathb(Rss32)\n" " C 
Intrinsic Prototype: Word64 Q6_P_vsathb_P(Word64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vsathb_P __builtin_HEXAGON_S2_vsathb_nopack\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=vsathub(Rss32)\n" " C Intrinsic Prototype: Word32 Q6_R_vsathub_P(Word64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_vsathub_P __builtin_HEXAGON_S2_vsathub\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vsathub(Rss32)\n" " C Intrinsic Prototype: Word64 Q6_P_vsathub_P(Word64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vsathub_P __builtin_HEXAGON_S2_vsathub_nopack\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=vsatwh(Rss32)\n" " C Intrinsic Prototype: Word32 Q6_R_vsatwh_P(Word64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_vsatwh_P __builtin_HEXAGON_S2_vsatwh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vsatwh(Rss32)\n" " C Intrinsic Prototype: Word64 Q6_P_vsatwh_P(Word64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vsatwh_P __builtin_HEXAGON_S2_vsatwh_nopack\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=vsatwuh(Rss32)\n" " C Intrinsic Prototype: 
Word32 Q6_R_vsatwuh_P(Word64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_vsatwuh_P __builtin_HEXAGON_S2_vsatwuh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vsatwuh(Rss32)\n" " C Intrinsic Prototype: Word64 Q6_P_vsatwuh_P(Word64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vsatwuh_P __builtin_HEXAGON_S2_vsatwuh_nopack\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=vsplatb(Rs32)\n" " C Intrinsic Prototype: Word32 Q6_R_vsplatb_R(Word32 Rs)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_vsplatb_R __builtin_HEXAGON_S2_vsplatrb\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vsplath(Rs32)\n" " C Intrinsic Prototype: Word64 Q6_P_vsplath_R(Word32 Rs)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vsplath_R __builtin_HEXAGON_S2_vsplatrh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vspliceb(Rss32,Rtt32,#u3)\n" " C Intrinsic Prototype: Word64 Q6_P_vspliceb_PPI(Word64 Rss, Word64 Rtt, Word32 Iu3)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vspliceb_PPI __builtin_HEXAGON_S2_vspliceib\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: 
Rdd32=vspliceb(Rss32,Rtt32,Pu4)\n" " C Intrinsic Prototype: Word64 Q6_P_vspliceb_PPp(Word64 Rss, Word64 Rtt, Byte Pu)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vspliceb_PPp __builtin_HEXAGON_S2_vsplicerb\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vsxtbh(Rs32)\n" " C Intrinsic Prototype: Word64 Q6_P_vsxtbh_R(Word32 Rs)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vsxtbh_R __builtin_HEXAGON_S2_vsxtbh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vsxthw(Rs32)\n" " C Intrinsic Prototype: Word64 Q6_P_vsxthw_R(Word32 Rs)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vsxthw_R __builtin_HEXAGON_S2_vsxthw\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=vtrunehb(Rss32)\n" " C Intrinsic Prototype: Word32 Q6_R_vtrunehb_P(Word64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_vtrunehb_P __builtin_HEXAGON_S2_vtrunehb\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vtrunewh(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vtrunewh_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vtrunewh_PP __builtin_HEXAGON_S2_vtrunewh\n" "\n" "/* 
==========================================================================\n" " Assembly Syntax: Rd32=vtrunohb(Rss32)\n" " C Intrinsic Prototype: Word32 Q6_R_vtrunohb_P(Word64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_vtrunohb_P __builtin_HEXAGON_S2_vtrunohb\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vtrunowh(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vtrunowh_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vtrunowh_PP __builtin_HEXAGON_S2_vtrunowh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vzxtbh(Rs32)\n" " C Intrinsic Prototype: Word64 Q6_P_vzxtbh_R(Word32 Rs)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vzxtbh_R __builtin_HEXAGON_S2_vzxtbh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vzxthw(Rs32)\n" " C Intrinsic Prototype: Word64 Q6_P_vzxthw_R(Word32 Rs)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vzxthw_R __builtin_HEXAGON_S2_vzxthw\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=add(Rs32,add(Ru32,#s6))\n" " C Intrinsic Prototype: Word32 Q6_R_add_add_RRI(Word32 Rs, Word32 Ru, Word32 Is6)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_add_add_RRI 
__builtin_HEXAGON_S4_addaddi\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32=add(#u8,asl(Rx32,#U5))\n" " C Intrinsic Prototype: Word32 Q6_R_add_asl_IRI(Word32 Iu8, Word32 Rx, Word32 IU5)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_add_asl_IRI __builtin_HEXAGON_S4_addi_asl_ri\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32=add(#u8,lsr(Rx32,#U5))\n" " C Intrinsic Prototype: Word32 Q6_R_add_lsr_IRI(Word32 Iu8, Word32 Rx, Word32 IU5)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_add_lsr_IRI __builtin_HEXAGON_S4_addi_lsr_ri\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32=and(#u8,asl(Rx32,#U5))\n" " C Intrinsic Prototype: Word32 Q6_R_and_asl_IRI(Word32 Iu8, Word32 Rx, Word32 IU5)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_and_asl_IRI __builtin_HEXAGON_S4_andi_asl_ri\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32=and(#u8,lsr(Rx32,#U5))\n" " C Intrinsic Prototype: Word32 Q6_R_and_lsr_IRI(Word32 Iu8, Word32 Rx, Word32 IU5)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_and_lsr_IRI __builtin_HEXAGON_S4_andi_lsr_ri\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=add(clb(Rs32),#s6)\n" " C Intrinsic Prototype: Word32 Q6_R_add_clb_RI(Word32 Rs, Word32 Is6)\n" " Instruction Type: S_2op\n" " 
Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_add_clb_RI __builtin_HEXAGON_S4_clbaddi\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=add(clb(Rss32),#s6)\n" " C Intrinsic Prototype: Word32 Q6_R_add_clb_PI(Word64 Rss, Word32 Is6)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_add_clb_PI __builtin_HEXAGON_S4_clbpaddi\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=normamt(Rss32)\n" " C Intrinsic Prototype: Word32 Q6_R_normamt_P(Word64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_normamt_P __builtin_HEXAGON_S4_clbpnorm\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=extract(Rs32,#u5,#U5)\n" " C Intrinsic Prototype: Word32 Q6_R_extract_RII(Word32 Rs, Word32 Iu5, Word32 IU5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_extract_RII __builtin_HEXAGON_S4_extract\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=extract(Rs32,Rtt32)\n" " C Intrinsic Prototype: Word32 Q6_R_extract_RP(Word32 Rs, Word64 Rtt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_extract_RP __builtin_HEXAGON_S4_extract_rp\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=extract(Rss32,#u6,#U6)\n" " C Intrinsic Prototype: Word64 
Q6_P_extract_PII(Word64 Rss, Word32 Iu6, Word32 IU6)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_extract_PII __builtin_HEXAGON_S4_extractp\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=extract(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_extract_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_extract_PP __builtin_HEXAGON_S4_extractp_rp\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=lsl(#s6,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_lsl_IR(Word32 Is6, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_lsl_IR __builtin_HEXAGON_S4_lsli\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=!tstbit(Rs32,#u5)\n" " C Intrinsic Prototype: Byte Q6_p_not_tstbit_RI(Word32 Rs, Word32 Iu5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_not_tstbit_RI __builtin_HEXAGON_S4_ntstbit_i\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=!tstbit(Rs32,Rt32)\n" " C Intrinsic Prototype: Byte Q6_p_not_tstbit_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_not_tstbit_RR __builtin_HEXAGON_S4_ntstbit_r\n" "\n" "/* ==========================================================================\n" " Assembly 
Syntax: Rx32|=and(Rs32,#s10)\n" " C Intrinsic Prototype: Word32 Q6_R_andor_RI(Word32 Rx, Word32 Rs, Word32 Is10)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_andor_RI __builtin_HEXAGON_S4_or_andi\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32=or(Ru32,and(Rx32,#s10))\n" " C Intrinsic Prototype: Word32 Q6_R_or_and_RRI(Word32 Ru, Word32 Rx, Word32 Is10)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_or_and_RRI __builtin_HEXAGON_S4_or_andix\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32|=or(Rs32,#s10)\n" " C Intrinsic Prototype: Word32 Q6_R_oror_RI(Word32 Rx, Word32 Rs, Word32 Is10)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_oror_RI __builtin_HEXAGON_S4_or_ori\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32=or(#u8,asl(Rx32,#U5))\n" " C Intrinsic Prototype: Word32 Q6_R_or_asl_IRI(Word32 Iu8, Word32 Rx, Word32 IU5)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_or_asl_IRI __builtin_HEXAGON_S4_ori_asl_ri\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32=or(#u8,lsr(Rx32,#U5))\n" " C Intrinsic Prototype: Word32 Q6_R_or_lsr_IRI(Word32 Iu8, Word32 Rx, Word32 IU5)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_or_lsr_IRI 
__builtin_HEXAGON_S4_ori_lsr_ri\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=parity(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_parity_RR(Word32 Rs, Word32 Rt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_parity_RR __builtin_HEXAGON_S4_parity\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=add(Rs32,sub(#s6,Ru32))\n" " C Intrinsic Prototype: Word32 Q6_R_add_sub_RIR(Word32 Rs, Word32 Is6, Word32 Ru)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_add_sub_RIR __builtin_HEXAGON_S4_subaddi\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32=sub(#u8,asl(Rx32,#U5))\n" " C Intrinsic Prototype: Word32 Q6_R_sub_asl_IRI(Word32 Iu8, Word32 Rx, Word32 IU5)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_sub_asl_IRI __builtin_HEXAGON_S4_subi_asl_ri\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32=sub(#u8,lsr(Rx32,#U5))\n" " C Intrinsic Prototype: Word32 Q6_R_sub_lsr_IRI(Word32 Iu8, Word32 Rx, Word32 IU5)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_sub_lsr_IRI __builtin_HEXAGON_S4_subi_lsr_ri\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vrcrotate(Rss32,Rt32,#u2)\n" " C Intrinsic Prototype: Word64 Q6_P_vrcrotate_PRI(Word64 Rss, Word32 Rt, Word32 Iu2)\n" " Instruction Type: S_3op\n" " 
Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vrcrotate_PRI __builtin_HEXAGON_S4_vrcrotate\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vrcrotate(Rss32,Rt32,#u2)\n" " C Intrinsic Prototype: Word64 Q6_P_vrcrotateacc_PRI(Word64 Rxx, Word64 Rss, Word32 Rt, Word32 Iu2)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vrcrotateacc_PRI __builtin_HEXAGON_S4_vrcrotate_acc\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vxaddsubh(Rss32,Rtt32):sat\n" " C Intrinsic Prototype: Word64 Q6_P_vxaddsubh_PP_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vxaddsubh_PP_sat __builtin_HEXAGON_S4_vxaddsubh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vxaddsubh(Rss32,Rtt32):rnd:>>1:sat\n" " C Intrinsic Prototype: Word64 Q6_P_vxaddsubh_PP_rnd_rs1_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vxaddsubh_PP_rnd_rs1_sat __builtin_HEXAGON_S4_vxaddsubhr\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vxaddsubw(Rss32,Rtt32):sat\n" " C Intrinsic Prototype: Word64 Q6_P_vxaddsubw_PP_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vxaddsubw_PP_sat __builtin_HEXAGON_S4_vxaddsubw\n" "\n" "/* 
==========================================================================\n" " Assembly Syntax: Rdd32=vxsubaddh(Rss32,Rtt32):sat\n" " C Intrinsic Prototype: Word64 Q6_P_vxsubaddh_PP_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vxsubaddh_PP_sat __builtin_HEXAGON_S4_vxsubaddh\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vxsubaddh(Rss32,Rtt32):rnd:>>1:sat\n" " C Intrinsic Prototype: Word64 Q6_P_vxsubaddh_PP_rnd_rs1_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vxsubaddh_PP_rnd_rs1_sat __builtin_HEXAGON_S4_vxsubaddhr\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vxsubaddw(Rss32,Rtt32):sat\n" " C Intrinsic Prototype: Word64 Q6_P_vxsubaddw_PP_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vxsubaddw_PP_sat __builtin_HEXAGON_S4_vxsubaddw\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=vasrhub(Rss32,#u4):rnd:sat\n" " C Intrinsic Prototype: Word32 Q6_R_vasrhub_PI_rnd_sat(Word64 Rss, Word32 Iu4)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_R_vasrhub_PI_rnd_sat __builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=vasrhub(Rss32,#u4):sat\n" " C Intrinsic Prototype: Word32 Q6_R_vasrhub_PI_sat(Word64 Rss, Word32 Iu4)\n" " 
Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_vasrhub_PI_sat __builtin_HEXAGON_S5_asrhub_sat\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=popcount(Rss32)\n" " C Intrinsic Prototype: Word32 Q6_R_popcount_P(Word64 Rss)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_popcount_P __builtin_HEXAGON_S5_popcountp\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vasrh(Rss32,#u4):rnd\n" " C Intrinsic Prototype: Word64 Q6_P_vasrh_PI_rnd(Word64 Rss, Word32 Iu4)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_P_vasrh_PI_rnd __builtin_HEXAGON_S5_vasrhrnd_goodsyntax\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: dccleana(Rs32)\n" " C Intrinsic Prototype: void Q6_dccleana_A(Address Rs)\n" " Instruction Type: ST\n" " Execution Slots: SLOT0\n" " ========================================================================== */\n" "\n" "#define Q6_dccleana_A __builtin_HEXAGON_Y2_dccleana\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: dccleaninva(Rs32)\n" " C Intrinsic Prototype: void Q6_dccleaninva_A(Address Rs)\n" " Instruction Type: ST\n" " Execution Slots: SLOT0\n" " ========================================================================== */\n" "\n" "#define Q6_dccleaninva_A __builtin_HEXAGON_Y2_dccleaninva\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: dcfetch(Rs32)\n" " C Intrinsic Prototype: void Q6_dcfetch_A(Address Rs)\n" " 
Instruction Type: MAPPING\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_dcfetch_A __builtin_HEXAGON_Y2_dcfetch\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: dcinva(Rs32)\n" " C Intrinsic Prototype: void Q6_dcinva_A(Address Rs)\n" " Instruction Type: ST\n" " Execution Slots: SLOT0\n" " ========================================================================== */\n" "\n" "#define Q6_dcinva_A __builtin_HEXAGON_Y2_dcinva\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: dczeroa(Rs32)\n" " C Intrinsic Prototype: void Q6_dczeroa_A(Address Rs)\n" " Instruction Type: ST\n" " Execution Slots: SLOT0\n" " ========================================================================== */\n" "\n" "#define Q6_dczeroa_A __builtin_HEXAGON_Y2_dczeroa\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: l2fetch(Rs32,Rt32)\n" " C Intrinsic Prototype: void Q6_l2fetch_AR(Address Rs, Word32 Rt)\n" " Instruction Type: ST\n" " Execution Slots: SLOT0\n" " ========================================================================== */\n" "\n" "#define Q6_l2fetch_AR __builtin_HEXAGON_Y4_l2fetch\n" "\n" "/* ==========================================================================\n" " Assembly Syntax: l2fetch(Rs32,Rtt32)\n" " C Intrinsic Prototype: void Q6_l2fetch_AP(Address Rs, Word64 Rtt)\n" " Instruction Type: ST\n" " Execution Slots: SLOT0\n" " ========================================================================== */\n" "\n" "#define Q6_l2fetch_AP __builtin_HEXAGON_Y5_l2fetch\n" "\n" "#if __HEXAGON_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=rol(Rss32,#u6)\n" " C Intrinsic Prototype: Word64 Q6_P_rol_PI(Word64 Rss, Word32 Iu6)\n" " Instruction Type: 
S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_rol_PI __builtin_HEXAGON_S6_rol_i_p\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HEXAGON_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=rol(Rss32,#u6)\n" " C Intrinsic Prototype: Word64 Q6_P_rolacc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_rolacc_PI __builtin_HEXAGON_S6_rol_i_p_acc\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HEXAGON_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32&=rol(Rss32,#u6)\n" " C Intrinsic Prototype: Word64 Q6_P_roland_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_roland_PI __builtin_HEXAGON_S6_rol_i_p_and\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HEXAGON_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32-=rol(Rss32,#u6)\n" " C Intrinsic Prototype: Word64 Q6_P_rolnac_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_rolnac_PI __builtin_HEXAGON_S6_rol_i_p_nac\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HEXAGON_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32|=rol(Rss32,#u6)\n" " C Intrinsic Prototype: Word64 Q6_P_rolor_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)\n" " Instruction Type: S_2op\n" " Execution Slots: 
SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_rolor_PI __builtin_HEXAGON_S6_rol_i_p_or\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HEXAGON_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32^=rol(Rss32,#u6)\n" " C Intrinsic Prototype: Word64 Q6_P_rolxacc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_rolxacc_PI __builtin_HEXAGON_S6_rol_i_p_xacc\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HEXAGON_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=rol(Rs32,#u5)\n" " C Intrinsic Prototype: Word32 Q6_R_rol_RI(Word32 Rs, Word32 Iu5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_rol_RI __builtin_HEXAGON_S6_rol_i_r\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HEXAGON_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32+=rol(Rs32,#u5)\n" " C Intrinsic Prototype: Word32 Q6_R_rolacc_RI(Word32 Rx, Word32 Rs, Word32 Iu5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_rolacc_RI __builtin_HEXAGON_S6_rol_i_r_acc\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HEXAGON_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32&=rol(Rs32,#u5)\n" " C Intrinsic Prototype: Word32 Q6_R_roland_RI(Word32 Rx, Word32 Rs, Word32 Iu5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " 
========================================================================== */\n" "\n" "#define Q6_R_roland_RI __builtin_HEXAGON_S6_rol_i_r_and\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HEXAGON_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32-=rol(Rs32,#u5)\n" " C Intrinsic Prototype: Word32 Q6_R_rolnac_RI(Word32 Rx, Word32 Rs, Word32 Iu5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_rolnac_RI __builtin_HEXAGON_S6_rol_i_r_nac\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HEXAGON_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32|=rol(Rs32,#u5)\n" " C Intrinsic Prototype: Word32 Q6_R_rolor_RI(Word32 Rx, Word32 Rs, Word32 Iu5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_rolor_RI __builtin_HEXAGON_S6_rol_i_r_or\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HEXAGON_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32^=rol(Rs32,#u5)\n" " C Intrinsic Prototype: Word32 Q6_R_rolxacc_RI(Word32 Rx, Word32 Rs, Word32 Iu5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_rolxacc_RI __builtin_HEXAGON_S6_rol_i_r_xacc\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HEXAGON_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vabsdiffb(Rtt32,Rss32)\n" " C Intrinsic Prototype: Word64 Q6_P_vabsdiffb_PP(Word64 Rtt, Word64 Rss)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " 
========================================================================== */\n" "\n" "#define Q6_P_vabsdiffb_PP __builtin_HEXAGON_M6_vabsdiffb\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HEXAGON_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vabsdiffub(Rtt32,Rss32)\n" " C Intrinsic Prototype: Word64 Q6_P_vabsdiffub_PP(Word64 Rtt, Word64 Rss)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vabsdiffub_PP __builtin_HEXAGON_M6_vabsdiffub\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HEXAGON_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vsplatb(Rs32)\n" " C Intrinsic Prototype: Word64 Q6_P_vsplatb_R(Word32 Rs)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vsplatb_R __builtin_HEXAGON_S6_vsplatrbp\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HEXAGON_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vtrunehb(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vtrunehb_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vtrunehb_PP __builtin_HEXAGON_S6_vtrunehb_ppp\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HEXAGON_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vtrunohb(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vtrunohb_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: S_3op\n" " Execution Slots: SLOT23\n" " 
========================================================================== */\n" "\n" "#define Q6_P_vtrunohb_PP __builtin_HEXAGON_S6_vtrunohb_ppp\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HEXAGON_ARCH__ >= 65\n" "/* ==========================================================================\n" " Assembly Syntax: Pd4=!any8(vcmpb.eq(Rss32,Rtt32))\n" " C Intrinsic Prototype: Byte Q6_p_not_any8_vcmpb_eq_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: ALU64\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_p_not_any8_vcmpb_eq_PP __builtin_HEXAGON_A6_vcmpbeq_notany\n" "#endif /* __HEXAGON_ARCH___ >= 65 */\n" "\n" "#if __HEXAGON_ARCH__ >= 66\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=dfadd(Rss32,Rtt32)\n" " C Intrinsic Prototype: Float64 Q6_P_dfadd_PP(Float64 Rss, Float64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_dfadd_PP __builtin_HEXAGON_F2_dfadd\n" "#endif /* __HEXAGON_ARCH___ >= 66 */\n" "\n" "#if __HEXAGON_ARCH__ >= 66\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=dfsub(Rss32,Rtt32)\n" " C Intrinsic Prototype: Float64 Q6_P_dfsub_PP(Float64 Rss, Float64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_dfsub_PP __builtin_HEXAGON_F2_dfsub\n" "#endif /* __HEXAGON_ARCH___ >= 66 */\n" "\n" "#if __HEXAGON_ARCH__ >= 66\n" "/* ==========================================================================\n" " Assembly Syntax: Rx32-=mpyi(Rs32,Rt32)\n" " C Intrinsic Prototype: Word32 Q6_R_mpyinac_RR(Word32 Rx, Word32 Rs, Word32 Rt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " 
========================================================================== */\n" "\n" "#define Q6_R_mpyinac_RR __builtin_HEXAGON_M2_mnaci\n" "#endif /* __HEXAGON_ARCH___ >= 66 */\n" "\n" "#if __HEXAGON_ARCH__ >= 66\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=mask(#u5,#U5)\n" " C Intrinsic Prototype: Word32 Q6_R_mask_II(Word32 Iu5, Word32 IU5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_mask_II __builtin_HEXAGON_S2_mask\n" "#endif /* __HEXAGON_ARCH___ >= 66 */\n" "\n" "#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=clip(Rs32,#u5)\n" " C Intrinsic Prototype: Word32 Q6_R_clip_RI(Word32 Rs, Word32 Iu5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_R_clip_RI __builtin_HEXAGON_A7_clip\n" "#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/\n" "\n" "#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=cround(Rss32,#u6)\n" " C Intrinsic Prototype: Word64 Q6_P_cround_PI(Word64 Rss, Word32 Iu6)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_cround_PI __builtin_HEXAGON_A7_croundd_ri\n" "#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/\n" "\n" "#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=cround(Rss32,Rt32)\n" " C Intrinsic Prototype: Word64 Q6_P_cround_PR(Word64 Rss, Word32 Rt)\n" " Instruction 
Type: S_3op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_cround_PR __builtin_HEXAGON_A7_croundd_rr\n" "#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/\n" "\n" "#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vclip(Rss32,#u5)\n" " C Intrinsic Prototype: Word64 Q6_P_vclip_PI(Word64 Rss, Word32 Iu5)\n" " Instruction Type: S_2op\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_vclip_PI __builtin_HEXAGON_A7_vclip\n" "#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/\n" "\n" "#if __HEXAGON_ARCH__ >= 67\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=dfmax(Rss32,Rtt32)\n" " C Intrinsic Prototype: Float64 Q6_P_dfmax_PP(Float64 Rss, Float64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_dfmax_PP __builtin_HEXAGON_F2_dfmax\n" "#endif /* __HEXAGON_ARCH___ >= 67 */\n" "\n" "#if __HEXAGON_ARCH__ >= 67\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=dfmin(Rss32,Rtt32)\n" " C Intrinsic Prototype: Float64 Q6_P_dfmin_PP(Float64 Rss, Float64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_dfmin_PP __builtin_HEXAGON_F2_dfmin\n" "#endif /* __HEXAGON_ARCH___ >= 67 */\n" "\n" "#if __HEXAGON_ARCH__ >= 67\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=dfmpyfix(Rss32,Rtt32)\n" " C Intrinsic Prototype: Float64 Q6_P_dfmpyfix_PP(Float64 Rss, Float64 Rtt)\n" " 
Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_dfmpyfix_PP __builtin_HEXAGON_F2_dfmpyfix\n" "#endif /* __HEXAGON_ARCH___ >= 67 */\n" "\n" "#if __HEXAGON_ARCH__ >= 67\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=dfmpyhh(Rss32,Rtt32)\n" " C Intrinsic Prototype: Float64 Q6_P_dfmpyhhacc_PP(Float64 Rxx, Float64 Rss, Float64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_dfmpyhhacc_PP __builtin_HEXAGON_F2_dfmpyhh\n" "#endif /* __HEXAGON_ARCH___ >= 67 */\n" "\n" "#if __HEXAGON_ARCH__ >= 67\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=dfmpylh(Rss32,Rtt32)\n" " C Intrinsic Prototype: Float64 Q6_P_dfmpylhacc_PP(Float64 Rxx, Float64 Rss, Float64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_dfmpylhacc_PP __builtin_HEXAGON_F2_dfmpylh\n" "#endif /* __HEXAGON_ARCH___ >= 67 */\n" "\n" "#if __HEXAGON_ARCH__ >= 67\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=dfmpyll(Rss32,Rtt32)\n" " C Intrinsic Prototype: Float64 Q6_P_dfmpyll_PP(Float64 Rss, Float64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_P_dfmpyll_PP __builtin_HEXAGON_F2_dfmpyll\n" "#endif /* __HEXAGON_ARCH___ >= 67 */\n" "\n" "#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=cmpyiw(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_cmpyiw_PP(Word64 Rss, Word64 
Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT3\n" " ========================================================================== */\n" "\n" "#define Q6_P_cmpyiw_PP __builtin_HEXAGON_M7_dcmpyiw\n" "#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/\n" "\n" "#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=cmpyiw(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_cmpyiwacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT3\n" " ========================================================================== */\n" "\n" "#define Q6_P_cmpyiwacc_PP __builtin_HEXAGON_M7_dcmpyiw_acc\n" "#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/\n" "\n" "#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=cmpyiw(Rss32,Rtt32*)\n" " C Intrinsic Prototype: Word64 Q6_P_cmpyiw_PP_conj(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT3\n" " ========================================================================== */\n" "\n" "#define Q6_P_cmpyiw_PP_conj __builtin_HEXAGON_M7_dcmpyiwc\n" "#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/\n" "\n" "#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=cmpyiw(Rss32,Rtt32*)\n" " C Intrinsic Prototype: Word64 Q6_P_cmpyiwacc_PP_conj(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT3\n" " ========================================================================== */\n" "\n" "#define Q6_P_cmpyiwacc_PP_conj __builtin_HEXAGON_M7_dcmpyiwc_acc\n" "#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/\n" "\n" "#if __HEXAGON_ARCH__ >= 67 && defined 
__HEXAGON_AUDIO__\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=cmpyrw(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_cmpyrw_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT3\n" " ========================================================================== */\n" "\n" "#define Q6_P_cmpyrw_PP __builtin_HEXAGON_M7_dcmpyrw\n" "#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/\n" "\n" "#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=cmpyrw(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_cmpyrwacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT3\n" " ========================================================================== */\n" "\n" "#define Q6_P_cmpyrwacc_PP __builtin_HEXAGON_M7_dcmpyrw_acc\n" "#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/\n" "\n" "#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=cmpyrw(Rss32,Rtt32*)\n" " C Intrinsic Prototype: Word64 Q6_P_cmpyrw_PP_conj(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT3\n" " ========================================================================== */\n" "\n" "#define Q6_P_cmpyrw_PP_conj __builtin_HEXAGON_M7_dcmpyrwc\n" "#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/\n" "\n" "#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=cmpyrw(Rss32,Rtt32*)\n" " C Intrinsic Prototype: Word64 Q6_P_cmpyrwacc_PP_conj(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT3\n" " 
========================================================================== */\n" "\n" "#define Q6_P_cmpyrwacc_PP_conj __builtin_HEXAGON_M7_dcmpyrwc_acc\n" "#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/\n" "\n" "#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__\n" "/* ==========================================================================\n" " Assembly Syntax: Rdd32=vdmpyw(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vdmpyw_PP(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT3\n" " ========================================================================== */\n" "\n" "#define Q6_P_vdmpyw_PP __builtin_HEXAGON_M7_vdmpy\n" "#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/\n" "\n" "#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__\n" "/* ==========================================================================\n" " Assembly Syntax: Rxx32+=vdmpyw(Rss32,Rtt32)\n" " C Intrinsic Prototype: Word64 Q6_P_vdmpywacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT3\n" " ========================================================================== */\n" "\n" "#define Q6_P_vdmpywacc_PP __builtin_HEXAGON_M7_vdmpy_acc\n" "#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/\n" "\n" "#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=cmpyiw(Rss32,Rtt32):<<1:sat\n" " C Intrinsic Prototype: Word32 Q6_R_cmpyiw_PP_s1_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT3\n" " ========================================================================== */\n" "\n" "#define Q6_R_cmpyiw_PP_s1_sat __builtin_HEXAGON_M7_wcmpyiw\n" "#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/\n" "\n" "#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__\n" "/* 
==========================================================================\n" " Assembly Syntax: Rd32=cmpyiw(Rss32,Rtt32):<<1:rnd:sat\n" " C Intrinsic Prototype: Word32 Q6_R_cmpyiw_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT3\n" " ========================================================================== */\n" "\n" "#define Q6_R_cmpyiw_PP_s1_rnd_sat __builtin_HEXAGON_M7_wcmpyiw_rnd\n" "#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/\n" "\n" "#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=cmpyiw(Rss32,Rtt32*):<<1:sat\n" " C Intrinsic Prototype: Word32 Q6_R_cmpyiw_PP_conj_s1_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT3\n" " ========================================================================== */\n" "\n" "#define Q6_R_cmpyiw_PP_conj_s1_sat __builtin_HEXAGON_M7_wcmpyiwc\n" "#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/\n" "\n" "#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=cmpyiw(Rss32,Rtt32*):<<1:rnd:sat\n" " C Intrinsic Prototype: Word32 Q6_R_cmpyiw_PP_conj_s1_rnd_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT3\n" " ========================================================================== */\n" "\n" "#define Q6_R_cmpyiw_PP_conj_s1_rnd_sat __builtin_HEXAGON_M7_wcmpyiwc_rnd\n" "#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/\n" "\n" "#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=cmpyrw(Rss32,Rtt32):<<1:sat\n" " C Intrinsic Prototype: Word32 Q6_R_cmpyrw_PP_s1_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT3\n" " 
========================================================================== */\n" "\n" "#define Q6_R_cmpyrw_PP_s1_sat __builtin_HEXAGON_M7_wcmpyrw\n" "#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/\n" "\n" "#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=cmpyrw(Rss32,Rtt32):<<1:rnd:sat\n" " C Intrinsic Prototype: Word32 Q6_R_cmpyrw_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT3\n" " ========================================================================== */\n" "\n" "#define Q6_R_cmpyrw_PP_s1_rnd_sat __builtin_HEXAGON_M7_wcmpyrw_rnd\n" "#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/\n" "\n" "#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=cmpyrw(Rss32,Rtt32*):<<1:sat\n" " C Intrinsic Prototype: Word32 Q6_R_cmpyrw_PP_conj_s1_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT3\n" " ========================================================================== */\n" "\n" "#define Q6_R_cmpyrw_PP_conj_s1_sat __builtin_HEXAGON_M7_wcmpyrwc\n" "#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/\n" "\n" "#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=cmpyrw(Rss32,Rtt32*):<<1:rnd:sat\n" " C Intrinsic Prototype: Word32 Q6_R_cmpyrw_PP_conj_s1_rnd_sat(Word64 Rss, Word64 Rtt)\n" " Instruction Type: M\n" " Execution Slots: SLOT3\n" " ========================================================================== */\n" "\n" "#define Q6_R_cmpyrw_PP_conj_s1_rnd_sat __builtin_HEXAGON_M7_wcmpyrwc_rnd\n" "#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/\n" "\n" "#if __HEXAGON_ARCH__ >= 68\n" "/* 
==========================================================================\n" " Assembly Syntax: dmlink(Rs32,Rt32)\n" " C Intrinsic Prototype: void Q6_dmlink_AA(Address Rs, Address Rt)\n" " Instruction Type: ST\n" " Execution Slots: SLOT0\n" " ========================================================================== */\n" "\n" "#define Q6_dmlink_AA __builtin_HEXAGON_Y6_dmlink\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HEXAGON_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=dmpause\n" " C Intrinsic Prototype: Word32 Q6_R_dmpause()\n" " Instruction Type: ST\n" " Execution Slots: SLOT0\n" " ========================================================================== */\n" "\n" "#define Q6_R_dmpause __builtin_HEXAGON_Y6_dmpause\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HEXAGON_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=dmpoll\n" " C Intrinsic Prototype: Word32 Q6_R_dmpoll()\n" " Instruction Type: ST\n" " Execution Slots: SLOT0\n" " ========================================================================== */\n" "\n" "#define Q6_R_dmpoll __builtin_HEXAGON_Y6_dmpoll\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HEXAGON_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: dmresume(Rs32)\n" " C Intrinsic Prototype: void Q6_dmresume_A(Address Rs)\n" " Instruction Type: ST\n" " Execution Slots: SLOT0\n" " ========================================================================== */\n" "\n" "#define Q6_dmresume_A __builtin_HEXAGON_Y6_dmresume\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HEXAGON_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: dmstart(Rs32)\n" " C Intrinsic Prototype: void Q6_dmstart_A(Address Rs)\n" " Instruction Type: ST\n" " 
Execution Slots: SLOT0\n" " ========================================================================== */\n" "\n" "#define Q6_dmstart_A __builtin_HEXAGON_Y6_dmstart\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HEXAGON_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=dmwait\n" " C Intrinsic Prototype: Word32 Q6_R_dmwait()\n" " Instruction Type: ST\n" " Execution Slots: SLOT0\n" " ========================================================================== */\n" "\n" "#define Q6_R_dmwait __builtin_HEXAGON_Y6_dmwait\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#include \n" "#ifdef __HVX__\n" "#include \n" "#endif /* __HVX__ */\n" "#endif\n" "" } , { "/builtins/hexagon_types.h" , "/******************************************************************************/\n" "/* (c) 2020 Qualcomm Innovation Center, Inc. All rights reserved. */\n" "/* */\n" "/******************************************************************************/\n" "#ifndef HEXAGON_TYPES_H\n" "#define HEXAGON_TYPES_H\n" "\n" "#include \n" "\n" "/* Hexagon names */\n" "#define HEXAGON_Vect HEXAGON_Vect64\n" "#define HEXAGON_V_GET_D HEXAGON_V64_GET_D\n" "#define HEXAGON_V_GET_UD HEXAGON_V64_GET_UD\n" "#define HEXAGON_V_GET_W0 HEXAGON_V64_GET_W0\n" "#define HEXAGON_V_GET_W1 HEXAGON_V64_GET_W1\n" "#define HEXAGON_V_GET_UW0 HEXAGON_V64_GET_UW0\n" "#define HEXAGON_V_GET_UW1 HEXAGON_V64_GET_UW1\n" "#define HEXAGON_V_GET_H0 HEXAGON_V64_GET_H0\n" "#define HEXAGON_V_GET_H1 HEXAGON_V64_GET_H1\n" "#define HEXAGON_V_GET_H2 HEXAGON_V64_GET_H2\n" "#define HEXAGON_V_GET_H3 HEXAGON_V64_GET_H3\n" "#define HEXAGON_V_GET_UH0 HEXAGON_V64_GET_UH0\n" "#define HEXAGON_V_GET_UH1 HEXAGON_V64_GET_UH1\n" "#define HEXAGON_V_GET_UH2 HEXAGON_V64_GET_UH2\n" "#define HEXAGON_V_GET_UH3 HEXAGON_V64_GET_UH3\n" "#define HEXAGON_V_GET_B0 HEXAGON_V64_GET_B0\n" "#define HEXAGON_V_GET_B1 HEXAGON_V64_GET_B1\n" "#define HEXAGON_V_GET_B2 HEXAGON_V64_GET_B2\n" 
"#define HEXAGON_V_GET_B3 HEXAGON_V64_GET_B3\n" "#define HEXAGON_V_GET_B4 HEXAGON_V64_GET_B4\n" "#define HEXAGON_V_GET_B5 HEXAGON_V64_GET_B5\n" "#define HEXAGON_V_GET_B6 HEXAGON_V64_GET_B6\n" "#define HEXAGON_V_GET_B7 HEXAGON_V64_GET_B7\n" "#define HEXAGON_V_GET_UB0 HEXAGON_V64_GET_UB0\n" "#define HEXAGON_V_GET_UB1 HEXAGON_V64_GET_UB1\n" "#define HEXAGON_V_GET_UB2 HEXAGON_V64_GET_UB2\n" "#define HEXAGON_V_GET_UB3 HEXAGON_V64_GET_UB3\n" "#define HEXAGON_V_GET_UB4 HEXAGON_V64_GET_UB4\n" "#define HEXAGON_V_GET_UB5 HEXAGON_V64_GET_UB5\n" "#define HEXAGON_V_GET_UB6 HEXAGON_V64_GET_UB6\n" "#define HEXAGON_V_GET_UB7 HEXAGON_V64_GET_UB7\n" "#define HEXAGON_V_PUT_D HEXAGON_V64_PUT_D\n" "#define HEXAGON_V_PUT_W0 HEXAGON_V64_PUT_W0\n" "#define HEXAGON_V_PUT_W1 HEXAGON_V64_PUT_W1\n" "#define HEXAGON_V_PUT_H0 HEXAGON_V64_PUT_H0\n" "#define HEXAGON_V_PUT_H1 HEXAGON_V64_PUT_H1\n" "#define HEXAGON_V_PUT_H2 HEXAGON_V64_PUT_H2\n" "#define HEXAGON_V_PUT_H3 HEXAGON_V64_PUT_H3\n" "#define HEXAGON_V_PUT_B0 HEXAGON_V64_PUT_B0\n" "#define HEXAGON_V_PUT_B1 HEXAGON_V64_PUT_B1\n" "#define HEXAGON_V_PUT_B2 HEXAGON_V64_PUT_B2\n" "#define HEXAGON_V_PUT_B3 HEXAGON_V64_PUT_B3\n" "#define HEXAGON_V_PUT_B4 HEXAGON_V64_PUT_B4\n" "#define HEXAGON_V_PUT_B5 HEXAGON_V64_PUT_B5\n" "#define HEXAGON_V_PUT_B6 HEXAGON_V64_PUT_B6\n" "#define HEXAGON_V_PUT_B7 HEXAGON_V64_PUT_B7\n" "#define HEXAGON_V_CREATE_D HEXAGON_V64_CREATE_D\n" "#define HEXAGON_V_CREATE_W HEXAGON_V64_CREATE_W\n" "#define HEXAGON_V_CREATE_H HEXAGON_V64_CREATE_H\n" "#define HEXAGON_V_CREATE_B HEXAGON_V64_CREATE_B\n" "\n" "#ifdef __cplusplus\n" "#define HEXAGON_VectC HEXAGON_Vect64C\n" "#endif /* __cplusplus */\n" "\n" "/* 64 Bit Vectors */\n" "\n" "typedef long long __attribute__((__may_alias__)) HEXAGON_Vect64;\n" "\n" "/* Extract doubleword macros */\n" "\n" "#define HEXAGON_V64_GET_D(v) (v)\n" "#define HEXAGON_V64_GET_UD(v) ((unsigned long long)(v))\n" "\n" "/* Extract word macros */\n" "\n" "#define HEXAGON_V64_GET_W0(v) \\\n" " 
__extension__({ \\\n" " union { \\\n" " long long d; \\\n" " int w[2]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.d = (v); \\\n" " _HEXAGON_V64_internal_union.w[0]; \\\n" " })\n" "#define HEXAGON_V64_GET_W1(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " int w[2]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.d = (v); \\\n" " _HEXAGON_V64_internal_union.w[1]; \\\n" " })\n" "#define HEXAGON_V64_GET_UW0(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " unsigned int uw[2]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.d = (v); \\\n" " _HEXAGON_V64_internal_union.uw[0]; \\\n" " })\n" "#define HEXAGON_V64_GET_UW1(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " unsigned int uw[2]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.d = (v); \\\n" " _HEXAGON_V64_internal_union.uw[1]; \\\n" " })\n" "\n" "/* Extract half word macros */\n" "\n" "#define HEXAGON_V64_GET_H0(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " short h[4]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.d = (v); \\\n" " _HEXAGON_V64_internal_union.h[0]; \\\n" " })\n" "#define HEXAGON_V64_GET_H1(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " short h[4]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.d = (v); \\\n" " _HEXAGON_V64_internal_union.h[1]; \\\n" " })\n" "#define HEXAGON_V64_GET_H2(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " short h[4]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.d = (v); \\\n" " _HEXAGON_V64_internal_union.h[2]; \\\n" " })\n" "#define HEXAGON_V64_GET_H3(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " short h[4]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.d = (v); \\\n" " 
_HEXAGON_V64_internal_union.h[3]; \\\n" " })\n" "#define HEXAGON_V64_GET_UH0(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " unsigned short uh[4]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.d = (v); \\\n" " _HEXAGON_V64_internal_union.uh[0]; \\\n" " })\n" "#define HEXAGON_V64_GET_UH1(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " unsigned short uh[4]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.d = (v); \\\n" " _HEXAGON_V64_internal_union.uh[1]; \\\n" " })\n" "#define HEXAGON_V64_GET_UH2(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " unsigned short uh[4]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.d = (v); \\\n" " _HEXAGON_V64_internal_union.uh[2]; \\\n" " })\n" "#define HEXAGON_V64_GET_UH3(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " unsigned short uh[4]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.d = (v); \\\n" " _HEXAGON_V64_internal_union.uh[3]; \\\n" " })\n" "\n" "/* Extract byte macros */\n" "\n" "#define HEXAGON_V64_GET_B0(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " signed char b[8]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.d = (v); \\\n" " _HEXAGON_V64_internal_union.b[0]; \\\n" " })\n" "#define HEXAGON_V64_GET_B1(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " signed char b[8]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.d = (v); \\\n" " _HEXAGON_V64_internal_union.b[1]; \\\n" " })\n" "#define HEXAGON_V64_GET_B2(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " signed char b[8]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.d = (v); \\\n" " _HEXAGON_V64_internal_union.b[2]; \\\n" " })\n" "#define HEXAGON_V64_GET_B3(v) \\\n" " __extension__({ \\\n" " union { \\\n" " 
long long d; \\\n" " signed char b[8]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.d = (v); \\\n" " _HEXAGON_V64_internal_union.b[3]; \\\n" " })\n" "#define HEXAGON_V64_GET_B4(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " signed char b[8]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.d = (v); \\\n" " _HEXAGON_V64_internal_union.b[4]; \\\n" " })\n" "#define HEXAGON_V64_GET_B5(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " signed char b[8]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.d = (v); \\\n" " _HEXAGON_V64_internal_union.b[5]; \\\n" " })\n" "#define HEXAGON_V64_GET_B6(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " signed char b[8]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.d = (v); \\\n" " _HEXAGON_V64_internal_union.b[6]; \\\n" " })\n" "#define HEXAGON_V64_GET_B7(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " signed char b[8]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.d = (v); \\\n" " _HEXAGON_V64_internal_union.b[7]; \\\n" " })\n" "#define HEXAGON_V64_GET_UB0(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " unsigned char ub[8]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.d = (v); \\\n" " _HEXAGON_V64_internal_union.ub[0]; \\\n" " })\n" "#define HEXAGON_V64_GET_UB1(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " unsigned char ub[8]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.d = (v); \\\n" " _HEXAGON_V64_internal_union.ub[1]; \\\n" " })\n" "#define HEXAGON_V64_GET_UB2(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " unsigned char ub[8]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.d = (v); \\\n" " _HEXAGON_V64_internal_union.ub[2]; \\\n" " })\n" 
"#define HEXAGON_V64_GET_UB3(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " unsigned char ub[8]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.d = (v); \\\n" " _HEXAGON_V64_internal_union.ub[3]; \\\n" " })\n" "#define HEXAGON_V64_GET_UB4(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " unsigned char ub[8]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.d = (v); \\\n" " _HEXAGON_V64_internal_union.ub[4]; \\\n" " })\n" "#define HEXAGON_V64_GET_UB5(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " unsigned char ub[8]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.d = (v); \\\n" " _HEXAGON_V64_internal_union.ub[5]; \\\n" " })\n" "#define HEXAGON_V64_GET_UB6(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " unsigned char ub[8]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.d = (v); \\\n" " _HEXAGON_V64_internal_union.ub[6]; \\\n" " })\n" "#define HEXAGON_V64_GET_UB7(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " unsigned char ub[8]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.d = (v); \\\n" " _HEXAGON_V64_internal_union.ub[7]; \\\n" " })\n" "\n" "/* NOTE: All set macros return a HEXAGON_Vect64 type */\n" "\n" "/* Set doubleword macro */\n" "\n" "#define HEXAGON_V64_PUT_D(v, new) (new)\n" "\n" "/* Set word macros */\n" "\n" "#ifdef __hexagon__\n" "\n" "#define HEXAGON_V64_PUT_W0(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " int w[2]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.d = (v); \\\n" " _HEXAGON_V64_internal_union.w[0] = (new); \\\n" " _HEXAGON_V64_internal_union.d; \\\n" " })\n" "#define HEXAGON_V64_PUT_W1(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " int w[2]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " 
_HEXAGON_V64_internal_union.d = (v); \\\n" " _HEXAGON_V64_internal_union.w[1] = (new); \\\n" " _HEXAGON_V64_internal_union.d; \\\n" " })\n" "\n" "#else /* !__hexagon__ */\n" "\n" "#define HEXAGON_V64_PUT_W0(v, new) \\\n" " (((v) & 0xffffffff00000000LL) | ((HEXAGON_Vect64)((unsigned int)(new))))\n" "#define HEXAGON_V64_PUT_W1(v, new) \\\n" " (((v) & 0x00000000ffffffffLL) | (((HEXAGON_Vect64)(new)) << 32LL))\n" "\n" "#endif /* !__hexagon__ */\n" "\n" "/* Set half word macros */\n" "\n" "#ifdef __hexagon__\n" "\n" "#define HEXAGON_V64_PUT_H0(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " short h[4]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.d = (v); \\\n" " _HEXAGON_V64_internal_union.h[0] = (new); \\\n" " _HEXAGON_V64_internal_union.d; \\\n" " })\n" "#define HEXAGON_V64_PUT_H1(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " short h[4]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.d = (v); \\\n" " _HEXAGON_V64_internal_union.h[1] = (new); \\\n" " _HEXAGON_V64_internal_union.d; \\\n" " })\n" "#define HEXAGON_V64_PUT_H2(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " short h[4]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.d = (v); \\\n" " _HEXAGON_V64_internal_union.h[2] = (new); \\\n" " _HEXAGON_V64_internal_union.d; \\\n" " })\n" "#define HEXAGON_V64_PUT_H3(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " short h[4]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.d = (v); \\\n" " _HEXAGON_V64_internal_union.h[3] = (new); \\\n" " _HEXAGON_V64_internal_union.d; \\\n" " })\n" "\n" "#else /* !__hexagon__ */\n" "\n" "#define HEXAGON_V64_PUT_H0(v, new) \\\n" " (((v) & 0xffffffffffff0000LL) | ((HEXAGON_Vect64)((unsigned short)(new))))\n" "#define HEXAGON_V64_PUT_H1(v, new) \\\n" " (((v) & 0xffffffff0000ffffLL) | (((HEXAGON_Vect64)((unsigned 
short)(new))) << 16LL))\n" "#define HEXAGON_V64_PUT_H2(v, new) \\\n" " (((v) & 0xffff0000ffffffffLL) | (((HEXAGON_Vect64)((unsigned short)(new))) << 32LL))\n" "#define HEXAGON_V64_PUT_H3(v, new) \\\n" " (((v) & 0x0000ffffffffffffLL) | (((HEXAGON_Vect64)(new)) << 48LL))\n" "\n" "#endif /* !__hexagon__ */\n" "\n" "/* Set byte macros */\n" "\n" "#ifdef __hexagon__\n" "\n" "#define HEXAGON_V64_PUT_B0(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " char b[8]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.d = (v); \\\n" " _HEXAGON_V64_internal_union.b[0] = (new); \\\n" " _HEXAGON_V64_internal_union.d; \\\n" " })\n" "#define HEXAGON_V64_PUT_B1(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " char b[8]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.d = (v); \\\n" " _HEXAGON_V64_internal_union.b[1] = (new); \\\n" " _HEXAGON_V64_internal_union.d; \\\n" " })\n" "#define HEXAGON_V64_PUT_B2(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " char b[8]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.d = (v); \\\n" " _HEXAGON_V64_internal_union.b[2] = (new); \\\n" " _HEXAGON_V64_internal_union.d; \\\n" " })\n" "#define HEXAGON_V64_PUT_B3(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " char b[8]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.d = (v); \\\n" " _HEXAGON_V64_internal_union.b[3] = (new); \\\n" " _HEXAGON_V64_internal_union.d; \\\n" " })\n" "#define HEXAGON_V64_PUT_B4(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " char b[8]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.d = (v); \\\n" " _HEXAGON_V64_internal_union.b[4] = (new); \\\n" " _HEXAGON_V64_internal_union.d; \\\n" " })\n" "#define HEXAGON_V64_PUT_B5(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " char 
b[8]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.d = (v); \\\n" " _HEXAGON_V64_internal_union.b[5] = (new); \\\n" " _HEXAGON_V64_internal_union.d; \\\n" " })\n" "#define HEXAGON_V64_PUT_B6(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " char b[8]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.d = (v); \\\n" " _HEXAGON_V64_internal_union.b[6] = (new); \\\n" " _HEXAGON_V64_internal_union.d; \\\n" " })\n" "#define HEXAGON_V64_PUT_B7(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " char b[8]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.d = (v); \\\n" " _HEXAGON_V64_internal_union.b[7] = (new); \\\n" " _HEXAGON_V64_internal_union.d; \\\n" " })\n" "\n" "#else /* !__hexagon__ */\n" "\n" "#define HEXAGON_V64_PUT_B0(v, new) \\\n" " (((v) & 0xffffffffffffff00LL) | ((HEXAGON_Vect64)((unsigned char)(new))))\n" "#define HEXAGON_V64_PUT_B1(v, new) \\\n" " (((v) & 0xffffffffffff00ffLL) | (((HEXAGON_Vect64)((unsigned char)(new))) << 8LL))\n" "#define HEXAGON_V64_PUT_B2(v, new) \\\n" " (((v) & 0xffffffffff00ffffLL) | (((HEXAGON_Vect64)((unsigned char)(new))) << 16LL))\n" "#define HEXAGON_V64_PUT_B3(v, new) \\\n" " (((v) & 0xffffffff00ffffffLL) | (((HEXAGON_Vect64)((unsigned char)(new))) << 24LL))\n" "#define HEXAGON_V64_PUT_B4(v, new) \\\n" " (((v) & 0xffffff00ffffffffLL) | (((HEXAGON_Vect64)((unsigned char)(new))) << 32LL))\n" "#define HEXAGON_V64_PUT_B5(v, new) \\\n" " (((v) & 0xffff00ffffffffffLL) | (((HEXAGON_Vect64)((unsigned char)(new))) << 40LL))\n" "#define HEXAGON_V64_PUT_B6(v, new) \\\n" " (((v) & 0xff00ffffffffffffLL) | (((HEXAGON_Vect64)((unsigned char)(new))) << 48LL))\n" "#define HEXAGON_V64_PUT_B7(v, new) \\\n" " (((v) & 0x00ffffffffffffffLL) | (((HEXAGON_Vect64)(new)) << 56LL))\n" "\n" "#endif /* !__hexagon__ */\n" "\n" "/* NOTE: All create macros return a HEXAGON_Vect64 type */\n" "\n" "/* Create from a doubleword 
*/\n" "\n" "#define HEXAGON_V64_CREATE_D(d) (d)\n" "\n" "/* Create from words */\n" "\n" "#ifdef __hexagon__\n" "\n" "#define HEXAGON_V64_CREATE_W(w1, w0) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " int w[2]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.w[0] = (w0); \\\n" " _HEXAGON_V64_internal_union.w[1] = (w1); \\\n" " _HEXAGON_V64_internal_union.d; \\\n" " })\n" "\n" "#else /* !__hexagon__ */\n" "\n" "#define HEXAGON_V64_CREATE_W(w1, w0) \\\n" " ((((HEXAGON_Vect64)(w1)) << 32LL) | ((HEXAGON_Vect64)((w0) & 0xffffffff)))\n" "\n" "#endif /* !__hexagon__ */\n" "\n" "/* Create from half words */\n" "\n" "#ifdef __hexagon__\n" "\n" "#define HEXAGON_V64_CREATE_H(h3, h2, h1, h0) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " short h[4]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.h[0] = (h0); \\\n" " _HEXAGON_V64_internal_union.h[1] = (h1); \\\n" " _HEXAGON_V64_internal_union.h[2] = (h2); \\\n" " _HEXAGON_V64_internal_union.h[3] = (h3); \\\n" " _HEXAGON_V64_internal_union.d; \\\n" " })\n" "\n" "#else /* !__hexagon__ */\n" "\n" "#define HEXAGON_V64_CREATE_H(h3, h2, h1, h0) \\\n" " ((((HEXAGON_Vect64)(h3)) << 48LL) | (((HEXAGON_Vect64)((h2) & 0xffff)) << 32LL) | \\\n" " (((HEXAGON_Vect64)((h1) & 0xffff)) << 16LL) | ((HEXAGON_Vect64)((h0) & 0xffff)))\n" "\n" "#endif /* !__hexagon__ */\n" "\n" "/* Create from bytes */\n" "\n" "#ifdef __hexagon__\n" "\n" "#define HEXAGON_V64_CREATE_B(b7, b6, b5, b4, b3, b2, b1, b0) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " char b[8]; \\\n" " } _HEXAGON_V64_internal_union; \\\n" " _HEXAGON_V64_internal_union.b[0] = (b0); \\\n" " _HEXAGON_V64_internal_union.b[1] = (b1); \\\n" " _HEXAGON_V64_internal_union.b[2] = (b2); \\\n" " _HEXAGON_V64_internal_union.b[3] = (b3); \\\n" " _HEXAGON_V64_internal_union.b[4] = (b4); \\\n" " _HEXAGON_V64_internal_union.b[5] = (b5); \\\n" " _HEXAGON_V64_internal_union.b[6] = (b6); 
\\\n" " _HEXAGON_V64_internal_union.b[7] = (b7); \\\n" " _HEXAGON_V64_internal_union.d; \\\n" " })\n" "\n" "#else /* !__hexagon__ */\n" "\n" "#define HEXAGON_V64_CREATE_B(b7, b6, b5, b4, b3, b2, b1, b0) \\\n" " ((((HEXAGON_Vect64)(b7)) << 56LL) | (((HEXAGON_Vect64)((b6) & 0xff)) << 48LL) | \\\n" " (((HEXAGON_Vect64)((b5) & 0xff)) << 40LL) | (((HEXAGON_Vect64)((b4) & 0xff)) << 32LL) | \\\n" " (((HEXAGON_Vect64)((b3) & 0xff)) << 24LL) | (((HEXAGON_Vect64)((b2) & 0xff)) << 16LL) | \\\n" " (((HEXAGON_Vect64)((b1) & 0xff)) << 8LL) | ((HEXAGON_Vect64)((b0) & 0xff)))\n" "\n" "#endif /* !__hexagon__ */\n" "\n" "#ifdef __cplusplus\n" "\n" "class HEXAGON_Vect64C {\n" "public:\n" " // Constructors\n" " HEXAGON_Vect64C(long long d = 0) : data(d) {};\n" " HEXAGON_Vect64C(int w1, int w0) : data(HEXAGON_V64_CREATE_W(w1, w0)) {};\n" " HEXAGON_Vect64C(short h3, short h2, short h1, short h0)\n" " : data(HEXAGON_V64_CREATE_H(h3, h2, h1, h0)) {};\n" " HEXAGON_Vect64C(signed char b7, signed char b6, signed char b5, signed char b4,\n" " signed char b3, signed char b2, signed char b1, signed char b0)\n" " : data(HEXAGON_V64_CREATE_B(b7, b6, b5, b4, b3, b2, b1, b0)) {};\n" " HEXAGON_Vect64C(const HEXAGON_Vect64C &v) : data(v.data) {};\n" "\n" " HEXAGON_Vect64C &operator=(const HEXAGON_Vect64C &v) {\n" " data = v.data;\n" " return *this;\n" " };\n" "\n" " operator long long() {\n" " return data;\n" " };\n" "\n" " // Extract doubleword methods\n" " long long D(void) {\n" " return HEXAGON_V64_GET_D(data);\n" " };\n" " unsigned long long UD(void) {\n" " return HEXAGON_V64_GET_UD(data);\n" " };\n" "\n" " // Extract word methods\n" " int W0(void) {\n" " return HEXAGON_V64_GET_W0(data);\n" " };\n" " int W1(void) {\n" " return HEXAGON_V64_GET_W1(data);\n" " };\n" " unsigned int UW0(void) {\n" " return HEXAGON_V64_GET_UW0(data);\n" " };\n" " unsigned int UW1(void) {\n" " return HEXAGON_V64_GET_UW1(data);\n" " };\n" "\n" " // Extract half word methods\n" " short H0(void) {\n" " return 
HEXAGON_V64_GET_H0(data);\n" " };\n" " short H1(void) {\n" " return HEXAGON_V64_GET_H1(data);\n" " };\n" " short H2(void) {\n" " return HEXAGON_V64_GET_H2(data);\n" " };\n" " short H3(void) {\n" " return HEXAGON_V64_GET_H3(data);\n" " };\n" " unsigned short UH0(void) {\n" " return HEXAGON_V64_GET_UH0(data);\n" " };\n" " unsigned short UH1(void) {\n" " return HEXAGON_V64_GET_UH1(data);\n" " };\n" " unsigned short UH2(void) {\n" " return HEXAGON_V64_GET_UH2(data);\n" " };\n" " unsigned short UH3(void) {\n" " return HEXAGON_V64_GET_UH3(data);\n" " };\n" "\n" " // Extract byte methods\n" " signed char B0(void) {\n" " return HEXAGON_V64_GET_B0(data);\n" " };\n" " signed char B1(void) {\n" " return HEXAGON_V64_GET_B1(data);\n" " };\n" " signed char B2(void) {\n" " return HEXAGON_V64_GET_B2(data);\n" " };\n" " signed char B3(void) {\n" " return HEXAGON_V64_GET_B3(data);\n" " };\n" " signed char B4(void) {\n" " return HEXAGON_V64_GET_B4(data);\n" " };\n" " signed char B5(void) {\n" " return HEXAGON_V64_GET_B5(data);\n" " };\n" " signed char B6(void) {\n" " return HEXAGON_V64_GET_B6(data);\n" " };\n" " signed char B7(void) {\n" " return HEXAGON_V64_GET_B7(data);\n" " };\n" " unsigned char UB0(void) {\n" " return HEXAGON_V64_GET_UB0(data);\n" " };\n" " unsigned char UB1(void) {\n" " return HEXAGON_V64_GET_UB1(data);\n" " };\n" " unsigned char UB2(void) {\n" " return HEXAGON_V64_GET_UB2(data);\n" " };\n" " unsigned char UB3(void) {\n" " return HEXAGON_V64_GET_UB3(data);\n" " };\n" " unsigned char UB4(void) {\n" " return HEXAGON_V64_GET_UB4(data);\n" " };\n" " unsigned char UB5(void) {\n" " return HEXAGON_V64_GET_UB5(data);\n" " };\n" " unsigned char UB6(void) {\n" " return HEXAGON_V64_GET_UB6(data);\n" " };\n" " unsigned char UB7(void) {\n" " return HEXAGON_V64_GET_UB7(data);\n" " };\n" "\n" " // NOTE: All set methods return a HEXAGON_Vect64C type\n" "\n" " // Set doubleword method\n" " HEXAGON_Vect64C D(long long d) {\n" " return HEXAGON_Vect64C(HEXAGON_V64_PUT_D(data, 
d));\n" " };\n" "\n" " // Set word methods\n" " HEXAGON_Vect64C W0(int w) {\n" " return HEXAGON_Vect64C(HEXAGON_V64_PUT_W0(data, w));\n" " };\n" " HEXAGON_Vect64C W1(int w) {\n" " return HEXAGON_Vect64C(HEXAGON_V64_PUT_W1(data, w));\n" " };\n" "\n" " // Set half word methods\n" " HEXAGON_Vect64C H0(short h) {\n" " return HEXAGON_Vect64C(HEXAGON_V64_PUT_H0(data, h));\n" " };\n" " HEXAGON_Vect64C H1(short h) {\n" " return HEXAGON_Vect64C(HEXAGON_V64_PUT_H1(data, h));\n" " };\n" " HEXAGON_Vect64C H2(short h) {\n" " return HEXAGON_Vect64C(HEXAGON_V64_PUT_H2(data, h));\n" " };\n" " HEXAGON_Vect64C H3(short h) {\n" " return HEXAGON_Vect64C(HEXAGON_V64_PUT_H3(data, h));\n" " };\n" "\n" " // Set byte methods\n" " HEXAGON_Vect64C B0(signed char b) {\n" " return HEXAGON_Vect64C(HEXAGON_V64_PUT_B0(data, b));\n" " };\n" " HEXAGON_Vect64C B1(signed char b) {\n" " return HEXAGON_Vect64C(HEXAGON_V64_PUT_B1(data, b));\n" " };\n" " HEXAGON_Vect64C B2(signed char b) {\n" " return HEXAGON_Vect64C(HEXAGON_V64_PUT_B2(data, b));\n" " };\n" " HEXAGON_Vect64C B3(signed char b) {\n" " return HEXAGON_Vect64C(HEXAGON_V64_PUT_B3(data, b));\n" " };\n" " HEXAGON_Vect64C B4(signed char b) {\n" " return HEXAGON_Vect64C(HEXAGON_V64_PUT_B4(data, b));\n" " };\n" " HEXAGON_Vect64C B5(signed char b) {\n" " return HEXAGON_Vect64C(HEXAGON_V64_PUT_B5(data, b));\n" " };\n" " HEXAGON_Vect64C B6(signed char b) {\n" " return HEXAGON_Vect64C(HEXAGON_V64_PUT_B6(data, b));\n" " };\n" " HEXAGON_Vect64C B7(signed char b) {\n" " return HEXAGON_Vect64C(HEXAGON_V64_PUT_B7(data, b));\n" " };\n" "\n" "private:\n" " long long data;\n" "};\n" "\n" "#endif /* __cplusplus */\n" "\n" "/* 32 Bit Vectors */\n" "\n" "typedef int HEXAGON_Vect32;\n" "\n" "/* Extract word macros */\n" "\n" "#define HEXAGON_V32_GET_W(v) (v)\n" "#define HEXAGON_V32_GET_UW(v) ((unsigned int)(v))\n" "\n" "/* Extract half word macros */\n" "\n" "#define HEXAGON_V32_GET_H0(v) \\\n" " __extension__({ \\\n" " union { \\\n" " int w; \\\n" " short h[2]; 
\\\n" " } _HEXAGON_V32_internal_union; \\\n" " _HEXAGON_V32_internal_union.w = (v); \\\n" " _HEXAGON_V32_internal_union.h[0]; \\\n" " })\n" "#define HEXAGON_V32_GET_H1(v) \\\n" " __extension__({ \\\n" " union { \\\n" " int w; \\\n" " short h[2]; \\\n" " } _HEXAGON_V32_internal_union; \\\n" " _HEXAGON_V32_internal_union.w = (v); \\\n" " _HEXAGON_V32_internal_union.h[1]; \\\n" " })\n" "#define HEXAGON_V32_GET_UH0(v) \\\n" " __extension__({ \\\n" " union { \\\n" " int w; \\\n" " unsigned short uh[2]; \\\n" " } _HEXAGON_V32_internal_union; \\\n" " _HEXAGON_V32_internal_union.w = (v); \\\n" " _HEXAGON_V32_internal_union.uh[0]; \\\n" " })\n" "#define HEXAGON_V32_GET_UH1(v) \\\n" " __extension__({ \\\n" " union { \\\n" " int w; \\\n" " unsigned short uh[2]; \\\n" " } _HEXAGON_V32_internal_union; \\\n" " _HEXAGON_V32_internal_union.w = (v); \\\n" " _HEXAGON_V32_internal_union.uh[1]; \\\n" " })\n" "\n" "/* Extract byte macros */\n" "\n" "#define HEXAGON_V32_GET_B0(v) \\\n" " __extension__({ \\\n" " union { \\\n" " int w; \\\n" " signed char b[4]; \\\n" " } _HEXAGON_V32_internal_union; \\\n" " _HEXAGON_V32_internal_union.w = (v); \\\n" " _HEXAGON_V32_internal_union.b[0]; \\\n" " })\n" "#define HEXAGON_V32_GET_B1(v) \\\n" " __extension__({ \\\n" " union { \\\n" " int w; \\\n" " signed char b[4]; \\\n" " } _HEXAGON_V32_internal_union; \\\n" " _HEXAGON_V32_internal_union.w = (v); \\\n" " _HEXAGON_V32_internal_union.b[1]; \\\n" " })\n" "#define HEXAGON_V32_GET_B2(v) \\\n" " __extension__({ \\\n" " union { \\\n" " int w; \\\n" " signed char b[4]; \\\n" " } _HEXAGON_V32_internal_union; \\\n" " _HEXAGON_V32_internal_union.w = (v); \\\n" " _HEXAGON_V32_internal_union.b[2]; \\\n" " })\n" "#define HEXAGON_V32_GET_B3(v) \\\n" " __extension__({ \\\n" " union { \\\n" " int w; \\\n" " signed char b[4]; \\\n" " } _HEXAGON_V32_internal_union; \\\n" " _HEXAGON_V32_internal_union.w = (v); \\\n" " _HEXAGON_V32_internal_union.b[3]; \\\n" " })\n" "#define HEXAGON_V32_GET_UB0(v) \\\n" " 
__extension__({ \\\n" " union { \\\n" " int w; \\\n" " unsigned char ub[4]; \\\n" " } _HEXAGON_V32_internal_union; \\\n" " _HEXAGON_V32_internal_union.w = (v); \\\n" " _HEXAGON_V32_internal_union.ub[0]; \\\n" " })\n" "#define HEXAGON_V32_GET_UB1(v) \\\n" " __extension__({ \\\n" " union { \\\n" " int w; \\\n" " unsigned char ub[4]; \\\n" " } _HEXAGON_V32_internal_union; \\\n" " _HEXAGON_V32_internal_union.w = (v); \\\n" " _HEXAGON_V32_internal_union.ub[1]; \\\n" " })\n" "#define HEXAGON_V32_GET_UB2(v) \\\n" " __extension__({ \\\n" " union { \\\n" " int w; \\\n" " unsigned char ub[4]; \\\n" " } _HEXAGON_V32_internal_union; \\\n" " _HEXAGON_V32_internal_union.w = (v); \\\n" " _HEXAGON_V32_internal_union.ub[2]; \\\n" " })\n" "#define HEXAGON_V32_GET_UB3(v) \\\n" " __extension__({ \\\n" " union { \\\n" " int w; \\\n" " unsigned char ub[4]; \\\n" " } _HEXAGON_V32_internal_union; \\\n" " _HEXAGON_V32_internal_union.w = (v); \\\n" " _HEXAGON_V32_internal_union.ub[3]; \\\n" " })\n" "\n" "/* NOTE: All set macros return a HEXAGON_Vect32 type */\n" "\n" "/* Set word macro */\n" "\n" "#define HEXAGON_V32_PUT_W(v, new) (new)\n" "\n" "/* Set half word macros */\n" "\n" "#ifdef __hexagon__\n" "\n" "#define HEXAGON_V32_PUT_H0(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " int w; \\\n" " short h[2]; \\\n" " } _HEXAGON_V32_internal_union; \\\n" " _HEXAGON_V32_internal_union.w = (v); \\\n" " _HEXAGON_V32_internal_union.h[0] = (new); \\\n" " _HEXAGON_V32_internal_union.w; \\\n" " })\n" "#define HEXAGON_V32_PUT_H1(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " int w; \\\n" " short h[2]; \\\n" " } _HEXAGON_V32_internal_union; \\\n" " _HEXAGON_V32_internal_union.w = (v); \\\n" " _HEXAGON_V32_internal_union.h[1] = (new); \\\n" " _HEXAGON_V32_internal_union.w; \\\n" " })\n" "\n" "#else /* !__hexagon__ */\n" "\n" "#define HEXAGON_V32_PUT_H0(v, new) \\\n" " (((v) & 0xffff0000) | ((HEXAGON_Vect32)((unsigned short)(new))))\n" "#define HEXAGON_V32_PUT_H1(v, new) (((v) & 
0x0000ffff) | (((HEXAGON_Vect32)(new)) << 16))\n" "\n" "#endif /* !__hexagon__ */\n" "\n" "/* Set byte macros */\n" "\n" "#ifdef __hexagon__\n" "\n" "#define HEXAGON_V32_PUT_B0(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " int w; \\\n" " char b[4]; \\\n" " } _HEXAGON_V32_internal_union; \\\n" " _HEXAGON_V32_internal_union.w = (v); \\\n" " _HEXAGON_V32_internal_union.b[0] = (new); \\\n" " _HEXAGON_V32_internal_union.w; \\\n" " })\n" "#define HEXAGON_V32_PUT_B1(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " int w; \\\n" " char b[4]; \\\n" " } _HEXAGON_V32_internal_union; \\\n" " _HEXAGON_V32_internal_union.w = (v); \\\n" " _HEXAGON_V32_internal_union.b[1] = (new); \\\n" " _HEXAGON_V32_internal_union.w; \\\n" " })\n" "#define HEXAGON_V32_PUT_B2(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " int w; \\\n" " char b[4]; \\\n" " } _HEXAGON_V32_internal_union; \\\n" " _HEXAGON_V32_internal_union.w = (v); \\\n" " _HEXAGON_V32_internal_union.b[2] = (new); \\\n" " _HEXAGON_V32_internal_union.w; \\\n" " })\n" "#define HEXAGON_V32_PUT_B3(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " int w; \\\n" " char b[4]; \\\n" " } _HEXAGON_V32_internal_union; \\\n" " _HEXAGON_V32_internal_union.w = (v); \\\n" " _HEXAGON_V32_internal_union.b[3] = (new); \\\n" " _HEXAGON_V32_internal_union.w; \\\n" " })\n" "\n" "#else /* !__hexagon__ */\n" "\n" "#define HEXAGON_V32_PUT_B0(v, new) \\\n" " (((v) & 0xffffff00) | ((HEXAGON_Vect32)((unsigned char)(new))))\n" "#define HEXAGON_V32_PUT_B1(v, new) \\\n" " (((v) & 0xffff00ff) | (((HEXAGON_Vect32)((unsigned char)(new))) << 8))\n" "#define HEXAGON_V32_PUT_B2(v, new) \\\n" " (((v) & 0xff00ffff) | (((HEXAGON_Vect32)((unsigned char)(new))) << 16))\n" "#define HEXAGON_V32_PUT_B3(v, new) (((v) & 0x00ffffff) | (((HEXAGON_Vect32)(new)) << 24))\n" "\n" "#endif /* !__hexagon__ */\n" "\n" "/* NOTE: All create macros return a HEXAGON_Vect32 type */\n" "\n" "/* Create from a word */\n" "\n" "#define HEXAGON_V32_CREATE_W(w) (w)\n" 
"\n" "/* Create from half words */\n" "\n" "#ifdef __hexagon__\n" "\n" "#define HEXAGON_V32_CREATE_H(h1, h0) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " short h[2]; \\\n" " } _HEXAGON_V32_internal_union; \\\n" " _HEXAGON_V32_internal_union.h[0] = (h0); \\\n" " _HEXAGON_V32_internal_union.h[1] = (h1); \\\n" " _HEXAGON_V32_internal_union.d; \\\n" " })\n" "\n" "#else /* !__hexagon__ */\n" "\n" "#define HEXAGON_V32_CREATE_H(h1, h0) \\\n" " ((((HEXAGON_Vect32)(h1)) << 16) | ((HEXAGON_Vect32)((h0) & 0xffff)))\n" "\n" "#endif /* !__hexagon__ */\n" "\n" "/* Create from bytes */\n" "#ifdef __hexagon__\n" "\n" "#define HEXAGON_V32_CREATE_B(b3, b2, b1, b0) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " char b[4]; \\\n" " } _HEXAGON_V32_internal_union; \\\n" " _HEXAGON_V32_internal_union.b[0] = (b0); \\\n" " _HEXAGON_V32_internal_union.b[1] = (b1); \\\n" " _HEXAGON_V32_internal_union.b[2] = (b2); \\\n" " _HEXAGON_V32_internal_union.b[3] = (b3); \\\n" " _HEXAGON_V32_internal_union.d; \\\n" " })\n" "\n" "#else /* !__hexagon__ */\n" "\n" "#define HEXAGON_V32_CREATE_B(b3, b2, b1, b0) \\\n" " ((((HEXAGON_Vect32)(b3)) << 24) | (((HEXAGON_Vect32)((b2) & 0xff)) << 16) | \\\n" " (((HEXAGON_Vect32)((b1) & 0xff)) << 8) | ((HEXAGON_Vect32)((b0) & 0xff)))\n" "\n" "#endif /* !__hexagon__ */\n" "\n" "#ifdef __cplusplus\n" "\n" "class HEXAGON_Vect32C {\n" "public:\n" " // Constructors\n" " HEXAGON_Vect32C(int w = 0) : data(w) {};\n" " HEXAGON_Vect32C(short h1, short h0) : data(HEXAGON_V32_CREATE_H(h1, h0)) {};\n" " HEXAGON_Vect32C(signed char b3, signed char b2, signed char b1, signed char b0)\n" " : data(HEXAGON_V32_CREATE_B(b3, b2, b1, b0)) {};\n" " HEXAGON_Vect32C(const HEXAGON_Vect32C &v) : data(v.data) {};\n" "\n" " HEXAGON_Vect32C &operator=(const HEXAGON_Vect32C &v) {\n" " data = v.data;\n" " return *this;\n" " };\n" "\n" " operator int() {\n" " return data;\n" " };\n" "\n" " // Extract word methods\n" " int W(void) {\n" " return 
HEXAGON_V32_GET_W(data);\n" " };\n" " unsigned int UW(void) {\n" " return HEXAGON_V32_GET_UW(data);\n" " };\n" "\n" " // Extract half word methods\n" " short H0(void) {\n" " return HEXAGON_V32_GET_H0(data);\n" " };\n" " short H1(void) {\n" " return HEXAGON_V32_GET_H1(data);\n" " };\n" " unsigned short UH0(void) {\n" " return HEXAGON_V32_GET_UH0(data);\n" " };\n" " unsigned short UH1(void) {\n" " return HEXAGON_V32_GET_UH1(data);\n" " };\n" "\n" " // Extract byte methods\n" " signed char B0(void) {\n" " return HEXAGON_V32_GET_B0(data);\n" " };\n" " signed char B1(void) {\n" " return HEXAGON_V32_GET_B1(data);\n" " };\n" " signed char B2(void) {\n" " return HEXAGON_V32_GET_B2(data);\n" " };\n" " signed char B3(void) {\n" " return HEXAGON_V32_GET_B3(data);\n" " };\n" " unsigned char UB0(void) {\n" " return HEXAGON_V32_GET_UB0(data);\n" " };\n" " unsigned char UB1(void) {\n" " return HEXAGON_V32_GET_UB1(data);\n" " };\n" " unsigned char UB2(void) {\n" " return HEXAGON_V32_GET_UB2(data);\n" " };\n" " unsigned char UB3(void) {\n" " return HEXAGON_V32_GET_UB3(data);\n" " };\n" "\n" " // NOTE: All set methods return a HEXAGON_Vect32C type\n" "\n" " // Set word method\n" " HEXAGON_Vect32C W(int w) {\n" " return HEXAGON_Vect32C(HEXAGON_V32_PUT_W(data, w));\n" " };\n" "\n" " // Set half word methods\n" " HEXAGON_Vect32C H0(short h) {\n" " return HEXAGON_Vect32C(HEXAGON_V32_PUT_H0(data, h));\n" " };\n" " HEXAGON_Vect32C H1(short h) {\n" " return HEXAGON_Vect32C(HEXAGON_V32_PUT_H1(data, h));\n" " };\n" "\n" " // Set byte methods\n" " HEXAGON_Vect32C B0(signed char b) {\n" " return HEXAGON_Vect32C(HEXAGON_V32_PUT_B0(data, b));\n" " };\n" " HEXAGON_Vect32C B1(signed char b) {\n" " return HEXAGON_Vect32C(HEXAGON_V32_PUT_B1(data, b));\n" " };\n" " HEXAGON_Vect32C B2(signed char b) {\n" " return HEXAGON_Vect32C(HEXAGON_V32_PUT_B2(data, b));\n" " };\n" " HEXAGON_Vect32C B3(signed char b) {\n" " return HEXAGON_Vect32C(HEXAGON_V32_PUT_B3(data, b));\n" " };\n" "\n" "private:\n" " int 
data;\n" "};\n" "\n" "#endif /* __cplusplus */\n" "\n" "// V65 Vector types\n" "#if __HVX_ARCH__ >= 65\n" "#if defined __HVX__ && (__HVX_LENGTH__ == 128)\n" " typedef long HEXAGON_VecPred128 __attribute__((__vector_size__(128)))\n" " __attribute__((aligned(128)));\n" "\n" " typedef long HEXAGON_Vect1024 __attribute__((__vector_size__(128)))\n" " __attribute__((aligned(128)));\n" "\n" " typedef long HEXAGON_Vect2048 __attribute__((__vector_size__(256)))\n" " __attribute__((aligned(256)));\n" "\n" " typedef long HEXAGON_UVect1024 __attribute__((__vector_size__(128)))\n" " __attribute__((aligned(4)));\n" "\n" " typedef long HEXAGON_UVect2048 __attribute__((__vector_size__(256)))\n" " __attribute__((aligned(4)));\n" "\n" " #define HVX_VectorPred HEXAGON_VecPred128\n" " #define HVX_Vector HEXAGON_Vect1024\n" " #define HVX_VectorPair HEXAGON_Vect2048\n" " #define HVX_UVector HEXAGON_UVect1024\n" " #define HVX_UVectorPair HEXAGON_UVect2048\n" "#else /* defined __HVX__ && (__HVX_LENGTH__ == 128) */\n" "#if defined __HVX__ && (__HVX_LENGTH__ == 64)\n" " typedef long HEXAGON_VecPred64 __attribute__((__vector_size__(64)))\n" " __attribute__((aligned(64)));\n" "\n" " typedef long HEXAGON_Vect512 __attribute__((__vector_size__(64)))\n" " __attribute__((aligned(64)));\n" "\n" " typedef long HEXAGON_Vect1024 __attribute__((__vector_size__(128)))\n" " __attribute__((aligned(128)));\n" "\n" " typedef long HEXAGON_UVect512 __attribute__((__vector_size__(64)))\n" " __attribute__((aligned(4)));\n" "\n" " typedef long HEXAGON_UVect1024 __attribute__((__vector_size__(128)))\n" " __attribute__((aligned(4)));\n" "\n" " #define HVX_VectorPred HEXAGON_VecPred64\n" " #define HVX_Vector HEXAGON_Vect512\n" " #define HVX_VectorPair HEXAGON_Vect1024\n" " #define HVX_UVector HEXAGON_UVect512\n" " #define HVX_UVectorPair HEXAGON_UVect1024\n" "#endif /* defined __HVX__ && (__HVX_LENGTH__ == 64) */\n" "#endif /* defined __HVX__ && (__HVX_LENGTH__ == 128) */\n" "#endif /* __HVX_ARCH__ >= 65 */\n" 
"\n" "/* Predicates */\n" "\n" "typedef int HEXAGON_Pred;\n" "\n" "/***\n" " *** backward compatibility aliases\n" " ***/\n" "\n" "/* Old names */\n" "#define Q6Vect Q6Vect64\n" "#define Q6V_GET_D Q6V64_GET_D\n" "#define Q6V_GET_UD Q6V64_GET_UD\n" "#define Q6V_GET_W0 Q6V64_GET_W0\n" "#define Q6V_GET_W1 Q6V64_GET_W1\n" "#define Q6V_GET_UW0 Q6V64_GET_UW0\n" "#define Q6V_GET_UW1 Q6V64_GET_UW1\n" "#define Q6V_GET_H0 Q6V64_GET_H0\n" "#define Q6V_GET_H1 Q6V64_GET_H1\n" "#define Q6V_GET_H2 Q6V64_GET_H2\n" "#define Q6V_GET_H3 Q6V64_GET_H3\n" "#define Q6V_GET_UH0 Q6V64_GET_UH0\n" "#define Q6V_GET_UH1 Q6V64_GET_UH1\n" "#define Q6V_GET_UH2 Q6V64_GET_UH2\n" "#define Q6V_GET_UH3 Q6V64_GET_UH3\n" "#define Q6V_GET_B0 Q6V64_GET_B0\n" "#define Q6V_GET_B1 Q6V64_GET_B1\n" "#define Q6V_GET_B2 Q6V64_GET_B2\n" "#define Q6V_GET_B3 Q6V64_GET_B3\n" "#define Q6V_GET_B4 Q6V64_GET_B4\n" "#define Q6V_GET_B5 Q6V64_GET_B5\n" "#define Q6V_GET_B6 Q6V64_GET_B6\n" "#define Q6V_GET_B7 Q6V64_GET_B7\n" "#define Q6V_GET_UB0 Q6V64_GET_UB0\n" "#define Q6V_GET_UB1 Q6V64_GET_UB1\n" "#define Q6V_GET_UB2 Q6V64_GET_UB2\n" "#define Q6V_GET_UB3 Q6V64_GET_UB3\n" "#define Q6V_GET_UB4 Q6V64_GET_UB4\n" "#define Q6V_GET_UB5 Q6V64_GET_UB5\n" "#define Q6V_GET_UB6 Q6V64_GET_UB6\n" "#define Q6V_GET_UB7 Q6V64_GET_UB7\n" "#define Q6V_PUT_D Q6V64_PUT_D\n" "#define Q6V_PUT_W0 Q6V64_PUT_W0\n" "#define Q6V_PUT_W1 Q6V64_PUT_W1\n" "#define Q6V_PUT_H0 Q6V64_PUT_H0\n" "#define Q6V_PUT_H1 Q6V64_PUT_H1\n" "#define Q6V_PUT_H2 Q6V64_PUT_H2\n" "#define Q6V_PUT_H3 Q6V64_PUT_H3\n" "#define Q6V_PUT_B0 Q6V64_PUT_B0\n" "#define Q6V_PUT_B1 Q6V64_PUT_B1\n" "#define Q6V_PUT_B2 Q6V64_PUT_B2\n" "#define Q6V_PUT_B3 Q6V64_PUT_B3\n" "#define Q6V_PUT_B4 Q6V64_PUT_B4\n" "#define Q6V_PUT_B5 Q6V64_PUT_B5\n" "#define Q6V_PUT_B6 Q6V64_PUT_B6\n" "#define Q6V_PUT_B7 Q6V64_PUT_B7\n" "#define Q6V_CREATE_D Q6V64_CREATE_D\n" "#define Q6V_CREATE_W Q6V64_CREATE_W\n" "#define Q6V_CREATE_H Q6V64_CREATE_H\n" "#define Q6V_CREATE_B Q6V64_CREATE_B\n" "\n" "#ifdef 
__cplusplus\n" "#define Q6VectC Q6Vect64C\n" "#endif /* __cplusplus */\n" "\n" "/* 64 Bit Vectors */\n" "\n" "typedef long long __attribute__((__may_alias__)) Q6Vect64;\n" "\n" "/* Extract doubleword macros */\n" "\n" "#define Q6V64_GET_D(v) (v)\n" "#define Q6V64_GET_UD(v) ((unsigned long long)(v))\n" "\n" "/* Extract word macros */\n" "\n" "#define Q6V64_GET_W0(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " int w[2]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.d = (v); \\\n" " _Q6V64_internal_union.w[0]; \\\n" " })\n" "#define Q6V64_GET_W1(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " int w[2]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.d = (v); \\\n" " _Q6V64_internal_union.w[1]; \\\n" " })\n" "#define Q6V64_GET_UW0(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " unsigned int uw[2]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.d = (v); \\\n" " _Q6V64_internal_union.uw[0]; \\\n" " })\n" "#define Q6V64_GET_UW1(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " unsigned int uw[2]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.d = (v); \\\n" " _Q6V64_internal_union.uw[1]; \\\n" " })\n" "\n" "/* Extract half word macros */\n" "\n" "#define Q6V64_GET_H0(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " short h[4]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.d = (v); \\\n" " _Q6V64_internal_union.h[0]; \\\n" " })\n" "#define Q6V64_GET_H1(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " short h[4]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.d = (v); \\\n" " _Q6V64_internal_union.h[1]; \\\n" " })\n" "#define Q6V64_GET_H2(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " short h[4]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.d = (v); \\\n" " _Q6V64_internal_union.h[2]; \\\n" " })\n" 
"#define Q6V64_GET_H3(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " short h[4]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.d = (v); \\\n" " _Q6V64_internal_union.h[3]; \\\n" " })\n" "#define Q6V64_GET_UH0(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " unsigned short uh[4]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.d = (v); \\\n" " _Q6V64_internal_union.uh[0]; \\\n" " })\n" "#define Q6V64_GET_UH1(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " unsigned short uh[4]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.d = (v); \\\n" " _Q6V64_internal_union.uh[1]; \\\n" " })\n" "#define Q6V64_GET_UH2(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " unsigned short uh[4]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.d = (v); \\\n" " _Q6V64_internal_union.uh[2]; \\\n" " })\n" "#define Q6V64_GET_UH3(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " unsigned short uh[4]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.d = (v); \\\n" " _Q6V64_internal_union.uh[3]; \\\n" " })\n" "\n" "/* Extract byte macros */\n" "\n" "#define Q6V64_GET_B0(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " signed char b[8]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.d = (v); \\\n" " _Q6V64_internal_union.b[0]; \\\n" " })\n" "#define Q6V64_GET_B1(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " signed char b[8]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.d = (v); \\\n" " _Q6V64_internal_union.b[1]; \\\n" " })\n" "#define Q6V64_GET_B2(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " signed char b[8]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.d = (v); \\\n" " _Q6V64_internal_union.b[2]; \\\n" " })\n" "#define Q6V64_GET_B3(v) \\\n" " __extension__({ \\\n" " union { 
\\\n" " long long d; \\\n" " signed char b[8]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.d = (v); \\\n" " _Q6V64_internal_union.b[3]; \\\n" " })\n" "#define Q6V64_GET_B4(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " signed char b[8]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.d = (v); \\\n" " _Q6V64_internal_union.b[4]; \\\n" " })\n" "#define Q6V64_GET_B5(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " signed char b[8]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.d = (v); \\\n" " _Q6V64_internal_union.b[5]; \\\n" " })\n" "#define Q6V64_GET_B6(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " signed char b[8]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.d = (v); \\\n" " _Q6V64_internal_union.b[6]; \\\n" " })\n" "#define Q6V64_GET_B7(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " signed char b[8]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.d = (v); \\\n" " _Q6V64_internal_union.b[7]; \\\n" " })\n" "#define Q6V64_GET_UB0(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " unsigned char ub[8]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.d = (v); \\\n" " _Q6V64_internal_union.ub[0]; \\\n" " })\n" "#define Q6V64_GET_UB1(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " unsigned char ub[8]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.d = (v); \\\n" " _Q6V64_internal_union.ub[1]; \\\n" " })\n" "#define Q6V64_GET_UB2(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " unsigned char ub[8]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.d = (v); \\\n" " _Q6V64_internal_union.ub[2]; \\\n" " })\n" "#define Q6V64_GET_UB3(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " unsigned char ub[8]; \\\n" " } _Q6V64_internal_union; \\\n" " 
_Q6V64_internal_union.d = (v); \\\n" " _Q6V64_internal_union.ub[3]; \\\n" " })\n" "#define Q6V64_GET_UB4(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " unsigned char ub[8]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.d = (v); \\\n" " _Q6V64_internal_union.ub[4]; \\\n" " })\n" "#define Q6V64_GET_UB5(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " unsigned char ub[8]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.d = (v); \\\n" " _Q6V64_internal_union.ub[5]; \\\n" " })\n" "#define Q6V64_GET_UB6(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " unsigned char ub[8]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.d = (v); \\\n" " _Q6V64_internal_union.ub[6]; \\\n" " })\n" "#define Q6V64_GET_UB7(v) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " unsigned char ub[8]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.d = (v); \\\n" " _Q6V64_internal_union.ub[7]; \\\n" " })\n" "\n" "/* NOTE: All set macros return a Q6Vect64 type */\n" "\n" "/* Set doubleword macro */\n" "\n" "#define Q6V64_PUT_D(v, new) (new)\n" "\n" "/* Set word macros */\n" "\n" "#ifdef __qdsp6__\n" "\n" "#define Q6V64_PUT_W0(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " int w[2]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.d = (v); \\\n" " _Q6V64_internal_union.w[0] = (new); \\\n" " _Q6V64_internal_union.d; \\\n" " })\n" "#define Q6V64_PUT_W1(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " int w[2]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.d = (v); \\\n" " _Q6V64_internal_union.w[1] = (new); \\\n" " _Q6V64_internal_union.d; \\\n" " })\n" "\n" "#else /* !__qdsp6__ */\n" "\n" "#define Q6V64_PUT_W0(v, new) \\\n" " (((v) & 0xffffffff00000000LL) | ((Q6Vect64)((unsigned int)(new))))\n" "#define Q6V64_PUT_W1(v, new) \\\n" " (((v) & 0x00000000ffffffffLL) | 
(((Q6Vect64)(new)) << 32LL))\n" "\n" "#endif /* !__qdsp6__ */\n" "\n" "/* Set half word macros */\n" "\n" "#ifdef __qdsp6__\n" "\n" "#define Q6V64_PUT_H0(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " short h[4]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.d = (v); \\\n" " _Q6V64_internal_union.h[0] = (new); \\\n" " _Q6V64_internal_union.d; \\\n" " })\n" "#define Q6V64_PUT_H1(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " short h[4]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.d = (v); \\\n" " _Q6V64_internal_union.h[1] = (new); \\\n" " _Q6V64_internal_union.d; \\\n" " })\n" "#define Q6V64_PUT_H2(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " short h[4]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.d = (v); \\\n" " _Q6V64_internal_union.h[2] = (new); \\\n" " _Q6V64_internal_union.d; \\\n" " })\n" "#define Q6V64_PUT_H3(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " short h[4]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.d = (v); \\\n" " _Q6V64_internal_union.h[3] = (new); \\\n" " _Q6V64_internal_union.d; \\\n" " })\n" "\n" "#else /* !__qdsp6__ */\n" "\n" "#define Q6V64_PUT_H0(v, new) \\\n" " (((v) & 0xffffffffffff0000LL) | ((Q6Vect64)((unsigned short)(new))))\n" "#define Q6V64_PUT_H1(v, new) \\\n" " (((v) & 0xffffffff0000ffffLL) | (((Q6Vect64)((unsigned short)(new))) << 16LL))\n" "#define Q6V64_PUT_H2(v, new) \\\n" " (((v) & 0xffff0000ffffffffLL) | (((Q6Vect64)((unsigned short)(new))) << 32LL))\n" "#define Q6V64_PUT_H3(v, new) \\\n" " (((v) & 0x0000ffffffffffffLL) | (((Q6Vect64)(new)) << 48LL))\n" "\n" "#endif /* !__qdsp6__ */\n" "\n" "/* Set byte macros */\n" "\n" "#ifdef __qdsp6__\n" "\n" "#define Q6V64_PUT_B0(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " char b[8]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.d = (v); \\\n" 
" _Q6V64_internal_union.b[0] = (new); \\\n" " _Q6V64_internal_union.d; \\\n" " })\n" "#define Q6V64_PUT_B1(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " char b[8]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.d = (v); \\\n" " _Q6V64_internal_union.b[1] = (new); \\\n" " _Q6V64_internal_union.d; \\\n" " })\n" "#define Q6V64_PUT_B2(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " char b[8]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.d = (v); \\\n" " _Q6V64_internal_union.b[2] = (new); \\\n" " _Q6V64_internal_union.d; \\\n" " })\n" "#define Q6V64_PUT_B3(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " char b[8]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.d = (v); \\\n" " _Q6V64_internal_union.b[3] = (new); \\\n" " _Q6V64_internal_union.d; \\\n" " })\n" "#define Q6V64_PUT_B4(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " char b[8]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.d = (v); \\\n" " _Q6V64_internal_union.b[4] = (new); \\\n" " _Q6V64_internal_union.d; \\\n" " })\n" "#define Q6V64_PUT_B5(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " char b[8]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.d = (v); \\\n" " _Q6V64_internal_union.b[5] = (new); \\\n" " _Q6V64_internal_union.d; \\\n" " })\n" "#define Q6V64_PUT_B6(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " char b[8]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.d = (v); \\\n" " _Q6V64_internal_union.b[6] = (new); \\\n" " _Q6V64_internal_union.d; \\\n" " })\n" "#define Q6V64_PUT_B7(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " char b[8]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.d = (v); \\\n" " _Q6V64_internal_union.b[7] = (new); \\\n" " _Q6V64_internal_union.d; \\\n" " })\n" 
"\n" "#else /* !__qdsp6__ */\n" "\n" "#define Q6V64_PUT_B0(v, new) \\\n" " (((v) & 0xffffffffffffff00LL) | ((Q6Vect64)((unsigned char)(new))))\n" "#define Q6V64_PUT_B1(v, new) \\\n" " (((v) & 0xffffffffffff00ffLL) | (((Q6Vect64)((unsigned char)(new))) << 8LL))\n" "#define Q6V64_PUT_B2(v, new) \\\n" " (((v) & 0xffffffffff00ffffLL) | (((Q6Vect64)((unsigned char)(new))) << 16LL))\n" "#define Q6V64_PUT_B3(v, new) \\\n" " (((v) & 0xffffffff00ffffffLL) | (((Q6Vect64)((unsigned char)(new))) << 24LL))\n" "#define Q6V64_PUT_B4(v, new) \\\n" " (((v) & 0xffffff00ffffffffLL) | (((Q6Vect64)((unsigned char)(new))) << 32LL))\n" "#define Q6V64_PUT_B5(v, new) \\\n" " (((v) & 0xffff00ffffffffffLL) | (((Q6Vect64)((unsigned char)(new))) << 40LL))\n" "#define Q6V64_PUT_B6(v, new) \\\n" " (((v) & 0xff00ffffffffffffLL) | (((Q6Vect64)((unsigned char)(new))) << 48LL))\n" "#define Q6V64_PUT_B7(v, new) \\\n" " (((v) & 0x00ffffffffffffffLL) | (((Q6Vect64)(new)) << 56LL))\n" "\n" "#endif /* !__qdsp6__ */\n" "\n" "/* NOTE: All create macros return a Q6Vect64 type */\n" "\n" "/* Create from a doubleword */\n" "\n" "#define Q6V64_CREATE_D(d) (d)\n" "\n" "/* Create from words */\n" "\n" "#ifdef __qdsp6__\n" "\n" "#define Q6V64_CREATE_W(w1, w0) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " int w[2]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.w[0] = (w0); \\\n" " _Q6V64_internal_union.w[1] = (w1); \\\n" " _Q6V64_internal_union.d; \\\n" " })\n" "\n" "#else /* !__qdsp6__ */\n" "\n" "#define Q6V64_CREATE_W(w1, w0) \\\n" " ((((Q6Vect64)(w1)) << 32LL) | ((Q6Vect64)((w0) & 0xffffffff)))\n" "\n" "#endif /* !__qdsp6__ */\n" "\n" "/* Create from half words */\n" "\n" "#ifdef __qdsp6__\n" "\n" "#define Q6V64_CREATE_H(h3, h2, h1, h0) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " short h[4]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.h[0] = (h0); \\\n" " _Q6V64_internal_union.h[1] = (h1); \\\n" " _Q6V64_internal_union.h[2] = 
(h2); \\\n" " _Q6V64_internal_union.h[3] = (h3); \\\n" " _Q6V64_internal_union.d; \\\n" " })\n" "\n" "#else /* !__qdsp6__ */\n" "\n" "#define Q6V64_CREATE_H(h3, h2, h1, h0) \\\n" " ((((Q6Vect64)(h3)) << 48LL) | (((Q6Vect64)((h2) & 0xffff)) << 32LL) | \\\n" " (((Q6Vect64)((h1) & 0xffff)) << 16LL) | ((Q6Vect64)((h0) & 0xffff)))\n" "\n" "#endif /* !__qdsp6__ */\n" "\n" "/* Create from bytes */\n" "\n" "#ifdef __qdsp6__\n" "\n" "#define Q6V64_CREATE_B(b7, b6, b5, b4, b3, b2, b1, b0) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " char b[8]; \\\n" " } _Q6V64_internal_union; \\\n" " _Q6V64_internal_union.b[0] = (b0); \\\n" " _Q6V64_internal_union.b[1] = (b1); \\\n" " _Q6V64_internal_union.b[2] = (b2); \\\n" " _Q6V64_internal_union.b[3] = (b3); \\\n" " _Q6V64_internal_union.b[4] = (b4); \\\n" " _Q6V64_internal_union.b[5] = (b5); \\\n" " _Q6V64_internal_union.b[6] = (b6); \\\n" " _Q6V64_internal_union.b[7] = (b7); \\\n" " _Q6V64_internal_union.d; \\\n" " })\n" "\n" "#else /* !__qdsp6__ */\n" "\n" "#define Q6V64_CREATE_B(b7, b6, b5, b4, b3, b2, b1, b0) \\\n" " ((((Q6Vect64)(b7)) << 56LL) | (((Q6Vect64)((b6) & 0xff)) << 48LL) | \\\n" " (((Q6Vect64)((b5) & 0xff)) << 40LL) | (((Q6Vect64)((b4) & 0xff)) << 32LL) | \\\n" " (((Q6Vect64)((b3) & 0xff)) << 24LL) | (((Q6Vect64)((b2) & 0xff)) << 16LL) | \\\n" " (((Q6Vect64)((b1) & 0xff)) << 8LL) | ((Q6Vect64)((b0) & 0xff)))\n" "\n" "#endif /* !__qdsp6__ */\n" "\n" "#ifdef __cplusplus\n" "\n" "class Q6Vect64C {\n" "public:\n" " // Constructors\n" " Q6Vect64C(long long d = 0) : data(d) {};\n" " Q6Vect64C(int w1, int w0) : data(Q6V64_CREATE_W(w1, w0)) {};\n" " Q6Vect64C(short h3, short h2, short h1, short h0)\n" " : data(Q6V64_CREATE_H(h3, h2, h1, h0)) {};\n" " Q6Vect64C(signed char b7, signed char b6, signed char b5, signed char b4,\n" " signed char b3, signed char b2, signed char b1, signed char b0)\n" " : data(Q6V64_CREATE_B(b7, b6, b5, b4, b3, b2, b1, b0)) {};\n" " Q6Vect64C(const Q6Vect64C &v) : data(v.data) 
{};\n" "\n" " Q6Vect64C &operator=(const Q6Vect64C &v) {\n" " data = v.data;\n" " return *this;\n" " };\n" "\n" " operator long long() {\n" " return data;\n" " };\n" "\n" " // Extract doubleword methods\n" " long long D(void) {\n" " return Q6V64_GET_D(data);\n" " };\n" " unsigned long long UD(void) {\n" " return Q6V64_GET_UD(data);\n" " };\n" "\n" " // Extract word methods\n" " int W0(void) {\n" " return Q6V64_GET_W0(data);\n" " };\n" " int W1(void) {\n" " return Q6V64_GET_W1(data);\n" " };\n" " unsigned int UW0(void) {\n" " return Q6V64_GET_UW0(data);\n" " };\n" " unsigned int UW1(void) {\n" " return Q6V64_GET_UW1(data);\n" " };\n" "\n" " // Extract half word methods\n" " short H0(void) {\n" " return Q6V64_GET_H0(data);\n" " };\n" " short H1(void) {\n" " return Q6V64_GET_H1(data);\n" " };\n" " short H2(void) {\n" " return Q6V64_GET_H2(data);\n" " };\n" " short H3(void) {\n" " return Q6V64_GET_H3(data);\n" " };\n" " unsigned short UH0(void) {\n" " return Q6V64_GET_UH0(data);\n" " };\n" " unsigned short UH1(void) {\n" " return Q6V64_GET_UH1(data);\n" " };\n" " unsigned short UH2(void) {\n" " return Q6V64_GET_UH2(data);\n" " };\n" " unsigned short UH3(void) {\n" " return Q6V64_GET_UH3(data);\n" " };\n" "\n" " // Extract byte methods\n" " signed char B0(void) {\n" " return Q6V64_GET_B0(data);\n" " };\n" " signed char B1(void) {\n" " return Q6V64_GET_B1(data);\n" " };\n" " signed char B2(void) {\n" " return Q6V64_GET_B2(data);\n" " };\n" " signed char B3(void) {\n" " return Q6V64_GET_B3(data);\n" " };\n" " signed char B4(void) {\n" " return Q6V64_GET_B4(data);\n" " };\n" " signed char B5(void) {\n" " return Q6V64_GET_B5(data);\n" " };\n" " signed char B6(void) {\n" " return Q6V64_GET_B6(data);\n" " };\n" " signed char B7(void) {\n" " return Q6V64_GET_B7(data);\n" " };\n" " unsigned char UB0(void) {\n" " return Q6V64_GET_UB0(data);\n" " };\n" " unsigned char UB1(void) {\n" " return Q6V64_GET_UB1(data);\n" " };\n" " unsigned char UB2(void) {\n" " return 
Q6V64_GET_UB2(data);\n" " };\n" " unsigned char UB3(void) {\n" " return Q6V64_GET_UB3(data);\n" " };\n" " unsigned char UB4(void) {\n" " return Q6V64_GET_UB4(data);\n" " };\n" " unsigned char UB5(void) {\n" " return Q6V64_GET_UB5(data);\n" " };\n" " unsigned char UB6(void) {\n" " return Q6V64_GET_UB6(data);\n" " };\n" " unsigned char UB7(void) {\n" " return Q6V64_GET_UB7(data);\n" " };\n" "\n" " // NOTE: All set methods return a Q6Vect64C type\n" "\n" " // Set doubleword method\n" " Q6Vect64C D(long long d) {\n" " return Q6Vect64C(Q6V64_PUT_D(data, d));\n" " };\n" "\n" " // Set word methods\n" " Q6Vect64C W0(int w) {\n" " return Q6Vect64C(Q6V64_PUT_W0(data, w));\n" " };\n" " Q6Vect64C W1(int w) {\n" " return Q6Vect64C(Q6V64_PUT_W1(data, w));\n" " };\n" "\n" " // Set half word methods\n" " Q6Vect64C H0(short h) {\n" " return Q6Vect64C(Q6V64_PUT_H0(data, h));\n" " };\n" " Q6Vect64C H1(short h) {\n" " return Q6Vect64C(Q6V64_PUT_H1(data, h));\n" " };\n" " Q6Vect64C H2(short h) {\n" " return Q6Vect64C(Q6V64_PUT_H2(data, h));\n" " };\n" " Q6Vect64C H3(short h) {\n" " return Q6Vect64C(Q6V64_PUT_H3(data, h));\n" " };\n" "\n" " // Set byte methods\n" " Q6Vect64C B0(signed char b) {\n" " return Q6Vect64C(Q6V64_PUT_B0(data, b));\n" " };\n" " Q6Vect64C B1(signed char b) {\n" " return Q6Vect64C(Q6V64_PUT_B1(data, b));\n" " };\n" " Q6Vect64C B2(signed char b) {\n" " return Q6Vect64C(Q6V64_PUT_B2(data, b));\n" " };\n" " Q6Vect64C B3(signed char b) {\n" " return Q6Vect64C(Q6V64_PUT_B3(data, b));\n" " };\n" " Q6Vect64C B4(signed char b) {\n" " return Q6Vect64C(Q6V64_PUT_B4(data, b));\n" " };\n" " Q6Vect64C B5(signed char b) {\n" " return Q6Vect64C(Q6V64_PUT_B5(data, b));\n" " };\n" " Q6Vect64C B6(signed char b) {\n" " return Q6Vect64C(Q6V64_PUT_B6(data, b));\n" " };\n" " Q6Vect64C B7(signed char b) {\n" " return Q6Vect64C(Q6V64_PUT_B7(data, b));\n" " };\n" "\n" "private:\n" " long long data;\n" "};\n" "\n" "#endif /* __cplusplus */\n" "\n" "/* 32 Bit Vectors */\n" "\n" "typedef int 
Q6Vect32;\n" "\n" "/* Extract word macros */\n" "\n" "#define Q6V32_GET_W(v) (v)\n" "#define Q6V32_GET_UW(v) ((unsigned int)(v))\n" "\n" "/* Extract half word macros */\n" "\n" "#define Q6V32_GET_H0(v) \\\n" " __extension__({ \\\n" " union { \\\n" " int w; \\\n" " short h[2]; \\\n" " } _Q6V32_internal_union; \\\n" " _Q6V32_internal_union.w = (v); \\\n" " _Q6V32_internal_union.h[0]; \\\n" " })\n" "#define Q6V32_GET_H1(v) \\\n" " __extension__({ \\\n" " union { \\\n" " int w; \\\n" " short h[2]; \\\n" " } _Q6V32_internal_union; \\\n" " _Q6V32_internal_union.w = (v); \\\n" " _Q6V32_internal_union.h[1]; \\\n" " })\n" "#define Q6V32_GET_UH0(v) \\\n" " __extension__({ \\\n" " union { \\\n" " int w; \\\n" " unsigned short uh[2]; \\\n" " } _Q6V32_internal_union; \\\n" " _Q6V32_internal_union.w = (v); \\\n" " _Q6V32_internal_union.uh[0]; \\\n" " })\n" "#define Q6V32_GET_UH1(v) \\\n" " __extension__({ \\\n" " union { \\\n" " int w; \\\n" " unsigned short uh[2]; \\\n" " } _Q6V32_internal_union; \\\n" " _Q6V32_internal_union.w = (v); \\\n" " _Q6V32_internal_union.uh[1]; \\\n" " })\n" "\n" "/* Extract byte macros */\n" "\n" "#define Q6V32_GET_B0(v) \\\n" " __extension__({ \\\n" " union { \\\n" " int w; \\\n" " signed char b[4]; \\\n" " } _Q6V32_internal_union; \\\n" " _Q6V32_internal_union.w = (v); \\\n" " _Q6V32_internal_union.b[0]; \\\n" " })\n" "#define Q6V32_GET_B1(v) \\\n" " __extension__({ \\\n" " union { \\\n" " int w; \\\n" " signed char b[4]; \\\n" " } _Q6V32_internal_union; \\\n" " _Q6V32_internal_union.w = (v); \\\n" " _Q6V32_internal_union.b[1]; \\\n" " })\n" "#define Q6V32_GET_B2(v) \\\n" " __extension__({ \\\n" " union { \\\n" " int w; \\\n" " signed char b[4]; \\\n" " } _Q6V32_internal_union; \\\n" " _Q6V32_internal_union.w = (v); \\\n" " _Q6V32_internal_union.b[2]; \\\n" " })\n" "#define Q6V32_GET_B3(v) \\\n" " __extension__({ \\\n" " union { \\\n" " int w; \\\n" " signed char b[4]; \\\n" " } _Q6V32_internal_union; \\\n" " _Q6V32_internal_union.w = (v); \\\n" " 
_Q6V32_internal_union.b[3]; \\\n" " })\n" "#define Q6V32_GET_UB0(v) \\\n" " __extension__({ \\\n" " union { \\\n" " int w; \\\n" " unsigned char ub[4]; \\\n" " } _Q6V32_internal_union; \\\n" " _Q6V32_internal_union.w = (v); \\\n" " _Q6V32_internal_union.ub[0]; \\\n" " })\n" "#define Q6V32_GET_UB1(v) \\\n" " __extension__({ \\\n" " union { \\\n" " int w; \\\n" " unsigned char ub[4]; \\\n" " } _Q6V32_internal_union; \\\n" " _Q6V32_internal_union.w = (v); \\\n" " _Q6V32_internal_union.ub[1]; \\\n" " })\n" "#define Q6V32_GET_UB2(v) \\\n" " __extension__({ \\\n" " union { \\\n" " int w; \\\n" " unsigned char ub[4]; \\\n" " } _Q6V32_internal_union; \\\n" " _Q6V32_internal_union.w = (v); \\\n" " _Q6V32_internal_union.ub[2]; \\\n" " })\n" "#define Q6V32_GET_UB3(v) \\\n" " __extension__({ \\\n" " union { \\\n" " int w; \\\n" " unsigned char ub[4]; \\\n" " } _Q6V32_internal_union; \\\n" " _Q6V32_internal_union.w = (v); \\\n" " _Q6V32_internal_union.ub[3]; \\\n" " })\n" "\n" "/* NOTE: All set macros return a Q6Vect32 type */\n" "\n" "/* Set word macro */\n" "\n" "#define Q6V32_PUT_W(v, new) (new)\n" "\n" "/* Set half word macros */\n" "\n" "#ifdef __qdsp6__\n" "\n" "#define Q6V32_PUT_H0(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " int w; \\\n" " short h[2]; \\\n" " } _Q6V32_internal_union; \\\n" " _Q6V32_internal_union.w = (v); \\\n" " _Q6V32_internal_union.h[0] = (new); \\\n" " _Q6V32_internal_union.w; \\\n" " })\n" "#define Q6V32_PUT_H1(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " int w; \\\n" " short h[2]; \\\n" " } _Q6V32_internal_union; \\\n" " _Q6V32_internal_union.w = (v); \\\n" " _Q6V32_internal_union.h[1] = (new); \\\n" " _Q6V32_internal_union.w; \\\n" " })\n" "\n" "#else /* !__qdsp6__ */\n" "\n" "#define Q6V32_PUT_H0(v, new) \\\n" " (((v) & 0xffff0000) | ((Q6Vect32)((unsigned short)(new))))\n" "#define Q6V32_PUT_H1(v, new) (((v) & 0x0000ffff) | (((Q6Vect32)(new)) << 16))\n" "\n" "#endif /* !__qdsp6__ */\n" "\n" "/* Set byte macros */\n" "\n" 
"#ifdef __qdsp6__\n" "\n" "#define Q6V32_PUT_B0(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " int w; \\\n" " char b[4]; \\\n" " } _Q6V32_internal_union; \\\n" " _Q6V32_internal_union.w = (v); \\\n" " _Q6V32_internal_union.b[0] = (new); \\\n" " _Q6V32_internal_union.w; \\\n" " })\n" "#define Q6V32_PUT_B1(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " int w; \\\n" " char b[4]; \\\n" " } _Q6V32_internal_union; \\\n" " _Q6V32_internal_union.w = (v); \\\n" " _Q6V32_internal_union.b[1] = (new); \\\n" " _Q6V32_internal_union.w; \\\n" " })\n" "#define Q6V32_PUT_B2(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " int w; \\\n" " char b[4]; \\\n" " } _Q6V32_internal_union; \\\n" " _Q6V32_internal_union.w = (v); \\\n" " _Q6V32_internal_union.b[2] = (new); \\\n" " _Q6V32_internal_union.w; \\\n" " })\n" "#define Q6V32_PUT_B3(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " int w; \\\n" " char b[4]; \\\n" " } _Q6V32_internal_union; \\\n" " _Q6V32_internal_union.w = (v); \\\n" " _Q6V32_internal_union.b[3] = (new); \\\n" " _Q6V32_internal_union.w; \\\n" " })\n" "\n" "#else /* !__qdsp6__ */\n" "\n" "#define Q6V32_PUT_B0(v, new) \\\n" " (((v) & 0xffffff00) | ((Q6Vect32)((unsigned char)(new))))\n" "#define Q6V32_PUT_B1(v, new) \\\n" " (((v) & 0xffff00ff) | (((Q6Vect32)((unsigned char)(new))) << 8))\n" "#define Q6V32_PUT_B2(v, new) \\\n" " (((v) & 0xff00ffff) | (((Q6Vect32)((unsigned char)(new))) << 16))\n" "#define Q6V32_PUT_B3(v, new) (((v) & 0x00ffffff) | (((Q6Vect32)(new)) << 24))\n" "\n" "#endif /* !__qdsp6__ */\n" "\n" "/* NOTE: All create macros return a Q6Vect32 type */\n" "\n" "/* Create from a word */\n" "\n" "#define Q6V32_CREATE_W(w) (w)\n" "\n" "/* Create from half words */\n" "\n" "#ifdef __qdsp6__\n" "\n" "#define Q6V32_CREATE_H(h1, h0) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " short h[2]; \\\n" " } _Q6V32_internal_union; \\\n" " _Q6V32_internal_union.h[0] = (h0); \\\n" " _Q6V32_internal_union.h[1] = (h1); \\\n" 
" _Q6V32_internal_union.d; \\\n" " })\n" "\n" "#else /* !__qdsp6__ */\n" "\n" "#define Q6V32_CREATE_H(h1, h0) \\\n" " ((((Q6Vect32)(h1)) << 16) | ((Q6Vect32)((h0) & 0xffff)))\n" "\n" "#endif /* !__qdsp6__ */\n" "\n" "/* Create from bytes */\n" "#ifdef __qdsp6__\n" "\n" "#define Q6V32_CREATE_B(b3, b2, b1, b0) \\\n" " __extension__({ \\\n" " union { \\\n" " long long d; \\\n" " char b[4]; \\\n" " } _Q6V32_internal_union; \\\n" " _Q6V32_internal_union.b[0] = (b0); \\\n" " _Q6V32_internal_union.b[1] = (b1); \\\n" " _Q6V32_internal_union.b[2] = (b2); \\\n" " _Q6V32_internal_union.b[3] = (b3); \\\n" " _Q6V32_internal_union.d; \\\n" " })\n" "\n" "#else /* !__qdsp6__ */\n" "\n" "#define Q6V32_CREATE_B(b3, b2, b1, b0) \\\n" " ((((Q6Vect32)(b3)) << 24) | (((Q6Vect32)((b2) & 0xff)) << 16) | \\\n" " (((Q6Vect32)((b1) & 0xff)) << 8) | ((Q6Vect32)((b0) & 0xff)))\n" "\n" "#endif /* !__qdsp6__ */\n" "\n" "#ifdef __cplusplus\n" "\n" "class Q6Vect32C {\n" "public:\n" " // Constructors\n" " Q6Vect32C(int w = 0) : data(w) {};\n" " Q6Vect32C(short h1, short h0) : data(Q6V32_CREATE_H(h1, h0)) {};\n" " Q6Vect32C(signed char b3, signed char b2, signed char b1, signed char b0)\n" " : data(Q6V32_CREATE_B(b3, b2, b1, b0)) {};\n" " Q6Vect32C(const Q6Vect32C &v) : data(v.data) {};\n" "\n" " Q6Vect32C &operator=(const Q6Vect32C &v) {\n" " data = v.data;\n" " return *this;\n" " };\n" "\n" " operator int() {\n" " return data;\n" " };\n" "\n" " // Extract word methods\n" " int W(void) {\n" " return Q6V32_GET_W(data);\n" " };\n" " unsigned int UW(void) {\n" " return Q6V32_GET_UW(data);\n" " };\n" "\n" " // Extract half word methods\n" " short H0(void) {\n" " return Q6V32_GET_H0(data);\n" " };\n" " short H1(void) {\n" " return Q6V32_GET_H1(data);\n" " };\n" " unsigned short UH0(void) {\n" " return Q6V32_GET_UH0(data);\n" " };\n" " unsigned short UH1(void) {\n" " return Q6V32_GET_UH1(data);\n" " };\n" "\n" " // Extract byte methods\n" " signed char B0(void) {\n" " return Q6V32_GET_B0(data);\n" " 
};\n" " signed char B1(void) {\n" " return Q6V32_GET_B1(data);\n" " };\n" " signed char B2(void) {\n" " return Q6V32_GET_B2(data);\n" " };\n" " signed char B3(void) {\n" " return Q6V32_GET_B3(data);\n" " };\n" " unsigned char UB0(void) {\n" " return Q6V32_GET_UB0(data);\n" " };\n" " unsigned char UB1(void) {\n" " return Q6V32_GET_UB1(data);\n" " };\n" " unsigned char UB2(void) {\n" " return Q6V32_GET_UB2(data);\n" " };\n" " unsigned char UB3(void) {\n" " return Q6V32_GET_UB3(data);\n" " };\n" "\n" " // NOTE: All set methods return a Q6Vect32C type\n" "\n" " // Set word method\n" " Q6Vect32C W(int w) {\n" " return Q6Vect32C(Q6V32_PUT_W(data, w));\n" " };\n" "\n" " // Set half word methods\n" " Q6Vect32C H0(short h) {\n" " return Q6Vect32C(Q6V32_PUT_H0(data, h));\n" " };\n" " Q6Vect32C H1(short h) {\n" " return Q6Vect32C(Q6V32_PUT_H1(data, h));\n" " };\n" "\n" " // Set byte methods\n" " Q6Vect32C B0(signed char b) {\n" " return Q6Vect32C(Q6V32_PUT_B0(data, b));\n" " };\n" " Q6Vect32C B1(signed char b) {\n" " return Q6Vect32C(Q6V32_PUT_B1(data, b));\n" " };\n" " Q6Vect32C B2(signed char b) {\n" " return Q6Vect32C(Q6V32_PUT_B2(data, b));\n" " };\n" " Q6Vect32C B3(signed char b) {\n" " return Q6Vect32C(Q6V32_PUT_B3(data, b));\n" " };\n" "\n" "private:\n" " int data;\n" "};\n" "\n" "#endif /* __cplusplus */\n" "\n" "// V65 Vector types\n" "#if __HVX_ARCH__ >= 65\n" "#if defined __HVX__ && (__HVX_LENGTH__ == 128)\n" "typedef long Q6VecPred128 __attribute__((__vector_size__(128)))\n" " __attribute__((aligned(128)));\n" "\n" "typedef long Q6Vect1024 __attribute__((__vector_size__(128)))\n" " __attribute__((aligned(128)));\n" "\n" "typedef long Q6Vect2048 __attribute__((__vector_size__(256)))\n" " __attribute__((aligned(256)));\n" "\n" "#else /* defined __HVX__ && (__HVX_LENGTH__ == 128) */\n" "#if defined __HVX__ && (__HVX_LENGTH__ == 64)\n" "typedef long Q6VecPred64 __attribute__((__vector_size__(64)))\n" " __attribute__((aligned(64)));\n" "\n" "typedef long Q6Vect512 
__attribute__((__vector_size__(64)))\n" " __attribute__((aligned(64)));\n" "\n" "typedef long Q6Vect1024 __attribute__((__vector_size__(128)))\n" " __attribute__((aligned(128)));\n" "\n" "#endif /* defined __HVX__ && (__HVX_LENGTH__ == 64) */\n" "#endif /* defined __HVX__ && (__HVX_LENGTH__ == 128) */\n" "#endif /* __HVX_ARCH__ >= 65 */\n" "\n" "/* Predicates */\n" "\n" "typedef int Q6Pred;\n" "\n" "\n" "#ifdef __HVX__\n" "\n" "// Extract HVX VectorPair macro.\n" "#define HEXAGON_HVX_GET_W(v) (v)\n" "\n" "// Extract HVX Vector macros.\n" "#define HEXAGON_HVX_GET_V0(v) \\\n" " __extension__({ \\\n" " union { \\\n" " HVX_VectorPair W; \\\n" " HVX_Vector V[2]; \\\n" " } _HEXAGON_HVX_internal_union; \\\n" " _HEXAGON_HVX_internal_union.W = (v); \\\n" " _HEXAGON_HVX_internal_union.V[0]; \\\n" " })\n" "#define HEXAGON_HVX_GET_V1(v) \\\n" " __extension__({ \\\n" " union { \\\n" " HVX_VectorPair W; \\\n" " HVX_Vector V[2]; \\\n" " } _HEXAGON_HVX_internal_union; \\\n" " _HEXAGON_HVX_internal_union.W = (v); \\\n" " _HEXAGON_HVX_internal_union.V[1]; \\\n" " })\n" "#define HEXAGON_HVX_GET_P(v) \\\n" " __extension__({ \\\n" " union { \\\n" " HVX_VectorPair W; \\\n" " HVX_VectorPred P[2]; \\\n" " } _HEXAGON_HVX_internal_union; \\\n" " _HEXAGON_HVX_internal_union.W = (v); \\\n" " _HEXAGON_HVX_internal_union.P[0]; \\\n" " })\n" "\n" "// Set HVX VectorPair macro.\n" "#define HEXAGON_HVX_PUT_W(v, new) (new)\n" "\n" "// Set HVX Vector macros.\n" "#define HEXAGON_HVX_PUT_V0(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " HVX_VectorPair W; \\\n" " HVX_Vector V[2]; \\\n" " } _HEXAGON_HVX_internal_union; \\\n" " _HEXAGON_HVX_internal_union.W = (v); \\\n" " _HEXAGON_HVX_internal_union.V[0] = (new); \\\n" " _HEXAGON_HVX_internal_union.W; \\\n" " })\n" "\n" "#define HEXAGON_HVX_PUT_V1(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " HVX_VectorPair W; \\\n" " HVX_Vector V[2]; \\\n" " } _HEXAGON_HVX_internal_union; \\\n" " _HEXAGON_HVX_internal_union.W = (v); \\\n" " 
_HEXAGON_HVX_internal_union.V[1] = (new); \\\n" " _HEXAGON_HVX_internal_union.W; \\\n" " })\n" "\n" "#define HEXAGON_HVX_PUT_P(v, new) \\\n" " __extension__({ \\\n" " union { \\\n" " HVX_VectorPair W; \\\n" " HVX_VectorPred P[2]; \\\n" " } _HEXAGON_HVX_internal_union; \\\n" " _HEXAGON_HVX_internal_union.W = (v); \\\n" " _HEXAGON_HVX_internal_union.P[0] = (new); \\\n" " _HEXAGON_HVX_internal_union.W; \\\n" " })\n" "\n" "\n" "#define HEXAGON_HVX_CREATE_W(v1, v0) \\\n" " __extension__({ \\\n" " union { \\\n" " HVX_VectorPair W; \\\n" " HVX_Vector V[2]; \\\n" " } _HEXAGON_HVX_internal_union; \\\n" " _HEXAGON_HVX_internal_union.V[0] = (v0); \\\n" " _HEXAGON_HVX_internal_union.V[1] = (v1); \\\n" " _HEXAGON_HVX_internal_union.W; \\\n" " })\n" "\n" "#ifdef __cplusplus\n" "\n" "class HVX_Vect {\n" "public:\n" " // Constructors.\n" " // Default.\n" " HVX_Vect() : data(Q6_W_vcombine_VV(Q6_V_vzero(), Q6_V_vzero())){};\n" "\n" " // Custom constructors.\n" " HVX_Vect(HVX_VectorPair W) : data(W){};\n" " HVX_Vect(HVX_Vector v1, HVX_Vector v0) : data(HEXAGON_HVX_CREATE_W(v1, v0)){};\n" "\n" " // Copy constructor.\n" " HVX_Vect(const HVX_Vect &W) = default;\n" "\n" " // Move constructor.\n" " HVX_Vect(HVX_Vect &&W) = default;\n" "\n" " // Assignment operator.\n" " HVX_Vect &operator=(const HVX_Vect &W) = default;\n" "\n" " operator HVX_VectorPair() { return data; };\n" "\n" " // Extract VectorPair method.\n" " HVX_VectorPair W(void) { return HEXAGON_HVX_GET_W(data); };\n" "\n" " // Extract Vector methods.\n" " HVX_Vector V0(void) { return HEXAGON_HVX_GET_V0(data); };\n" " HVX_Vector V1(void) { return HEXAGON_HVX_GET_V1(data); };\n" " HVX_VectorPred P(void) { return HEXAGON_HVX_GET_P(data); };\n" "\n" " // NOTE: All set methods return a HVX_Vect type.\n" " // Set HVX VectorPair method.\n" " HVX_Vect W(HVX_VectorPair w) { return HVX_Vect(HEXAGON_HVX_PUT_W(data, w)); };\n" "\n" " // Set HVX Vector methods.\n" " HVX_Vect V0(HVX_Vector v) { return HVX_Vect(HEXAGON_HVX_PUT_V0(data, v)); 
};\n" " HVX_Vect V1(HVX_Vector v) { return HVX_Vect(HEXAGON_HVX_PUT_V1(data, v)); };\n" " HVX_Vect P(HVX_VectorPred p) { return HVX_Vect(HEXAGON_HVX_PUT_P(data, p)); };\n" "\n" "private:\n" " HVX_VectorPair data;\n" "};\n" "\n" "#endif /* __cplusplus */\n" "#endif /* __HVX__ */\n" "\n" "#define HEXAGON_UDMA_DM0_STATUS_IDLE 0x00000000\n" "#define HEXAGON_UDMA_DM0_STATUS_RUN 0x00000001\n" "#define HEXAGON_UDMA_DM0_STATUS_ERROR 0x00000002\n" "#define HEXAGON_UDMA_DESC_DSTATE_INCOMPLETE 0\n" "#define HEXAGON_UDMA_DESC_DSTATE_COMPLETE 1\n" "#define HEXAGON_UDMA_DESC_ORDER_NOORDER 0\n" "#define HEXAGON_UDMA_DESC_ORDER_ORDER 1\n" "#define HEXAGON_UDMA_DESC_BYPASS_OFF 0\n" "#define HEXAGON_UDMA_DESC_BYPASS_ON 1\n" "#define HEXAGON_UDMA_DESC_COMP_NONE 0\n" "#define HEXAGON_UDMA_DESC_COMP_DLBC 1\n" "#define HEXAGON_UDMA_DESC_DESCTYPE_TYPE0 0\n" "#define HEXAGON_UDMA_DESC_DESCTYPE_TYPE1 1\n" "\n" "typedef struct hexagon_udma_descriptor_type0_s\n" "{\n" " void *next;\n" " unsigned int length:24;\n" " unsigned int desctype:2;\n" " unsigned int dstcomp:1;\n" " unsigned int srccomp:1;\n" " unsigned int dstbypass:1;\n" " unsigned int srcbypass:1;\n" " unsigned int order:1;\n" " unsigned int dstate:1;\n" " void *src;\n" " void *dst;\n" "} hexagon_udma_descriptor_type0_t;\n" "\n" "typedef struct hexagon_udma_descriptor_type1_s\n" "{\n" " void *next;\n" " unsigned int length:24;\n" " unsigned int desctype:2;\n" " unsigned int dstcomp:1;\n" " unsigned int srccomp:1;\n" " unsigned int dstbypass:1;\n" " unsigned int srcbypass:1;\n" " unsigned int order:1;\n" " unsigned int dstate:1;\n" " void *src;\n" " void *dst;\n" " unsigned int allocation:28;\n" " unsigned int padding:4;\n" " unsigned int roiwidth:16;\n" " unsigned int roiheight:16;\n" " unsigned int srcstride:16;\n" " unsigned int dststride:16;\n" " unsigned int srcwidthoffset:16;\n" " unsigned int dstwidthoffset:16;\n" "} hexagon_udma_descriptor_type1_t;\n" "\n" "#endif /* !HEXAGON_TYPES_H */\n" "" } , { "/builtins/hresetintrin.h" 
, "/*===---------------- hresetintrin.h - HRESET intrinsics -------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "#ifndef __X86GPRINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __HRESETINTRIN_H\n" "#define __HRESETINTRIN_H\n" "\n" "#if __has_extension(gnu_asm)\n" "\n" "/* Define the default attributes for the functions in this file. */\n" "#define __DEFAULT_FN_ATTRS \\\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"hreset\")))\n" "\n" "/// Provides a hint to the processor to selectively reset the prediction\n" "/// history of the current logical processor specified by a 32-bit integer\n" "/// value \\a __eax.\n" "///\n" "/// This intrinsic corresponds to the HRESET instruction.\n" "///\n" "/// \\code{.operation}\n" "/// IF __eax == 0\n" "/// // nop\n" "/// ELSE\n" "/// FOR i := 0 to 31\n" "/// IF __eax[i]\n" "/// ResetPredictionFeature(i)\n" "/// FI\n" "/// ENDFOR\n" "/// FI\n" "/// \\endcode\n" "static __inline void __DEFAULT_FN_ATTRS\n" "_hreset(int __eax)\n" "{\n" " __asm__ (\"hreset $0\" :: \"a\"(__eax));\n" "}\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "\n" "#endif /* __has_extension(gnu_asm) */\n" "\n" "#endif /* __HRESETINTRIN_H */\n" "" } , { "/builtins/htmintrin.h" , "/*===---- htmintrin.h - Standard header for PowerPC HTM ---------------===*\\\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" "\\*===----------------------------------------------------------------------===*/\n" "\n" "#ifndef __HTMINTRIN_H\n" "#define __HTMINTRIN_H\n" "\n" 
"#ifndef __HTM__\n" "#error \"HTM instruction set not enabled\"\n" "#endif\n" "\n" "#ifdef __powerpc__\n" "\n" "#include \n" "\n" "typedef uint64_t texasr_t;\n" "typedef uint32_t texasru_t;\n" "typedef uint32_t texasrl_t;\n" "typedef uintptr_t tfiar_t;\n" "typedef uintptr_t tfhar_t;\n" "\n" "#define _HTM_STATE(CR0) ((CR0 >> 1) & 0x3)\n" "#define _HTM_NONTRANSACTIONAL 0x0\n" "#define _HTM_SUSPENDED 0x1\n" "#define _HTM_TRANSACTIONAL 0x2\n" "\n" "#define _TEXASR_EXTRACT_BITS(TEXASR,BITNUM,SIZE) \\\n" " (((TEXASR) >> (63-(BITNUM))) & ((1<<(SIZE))-1))\n" "#define _TEXASRU_EXTRACT_BITS(TEXASR,BITNUM,SIZE) \\\n" " (((TEXASR) >> (31-(BITNUM))) & ((1<<(SIZE))-1))\n" "\n" "#define _TEXASR_FAILURE_CODE(TEXASR) \\\n" " _TEXASR_EXTRACT_BITS(TEXASR, 7, 8)\n" "#define _TEXASRU_FAILURE_CODE(TEXASRU) \\\n" " _TEXASRU_EXTRACT_BITS(TEXASRU, 7, 8)\n" "\n" "#define _TEXASR_FAILURE_PERSISTENT(TEXASR) \\\n" " _TEXASR_EXTRACT_BITS(TEXASR, 7, 1)\n" "#define _TEXASRU_FAILURE_PERSISTENT(TEXASRU) \\\n" " _TEXASRU_EXTRACT_BITS(TEXASRU, 7, 1)\n" "\n" "#define _TEXASR_DISALLOWED(TEXASR) \\\n" " _TEXASR_EXTRACT_BITS(TEXASR, 8, 1)\n" "#define _TEXASRU_DISALLOWED(TEXASRU) \\\n" " _TEXASRU_EXTRACT_BITS(TEXASRU, 8, 1)\n" "\n" "#define _TEXASR_NESTING_OVERFLOW(TEXASR) \\\n" " _TEXASR_EXTRACT_BITS(TEXASR, 9, 1)\n" "#define _TEXASRU_NESTING_OVERFLOW(TEXASRU) \\\n" " _TEXASRU_EXTRACT_BITS(TEXASRU, 9, 1)\n" "\n" "#define _TEXASR_FOOTPRINT_OVERFLOW(TEXASR) \\\n" " _TEXASR_EXTRACT_BITS(TEXASR, 10, 1)\n" "#define _TEXASRU_FOOTPRINT_OVERFLOW(TEXASRU) \\\n" " _TEXASRU_EXTRACT_BITS(TEXASRU, 10, 1)\n" "\n" "#define _TEXASR_SELF_INDUCED_CONFLICT(TEXASR) \\\n" " _TEXASR_EXTRACT_BITS(TEXASR, 11, 1)\n" "#define _TEXASRU_SELF_INDUCED_CONFLICT(TEXASRU) \\\n" " _TEXASRU_EXTRACT_BITS(TEXASRU, 11, 1)\n" "\n" "#define _TEXASR_NON_TRANSACTIONAL_CONFLICT(TEXASR) \\\n" " _TEXASR_EXTRACT_BITS(TEXASR, 12, 1)\n" "#define _TEXASRU_NON_TRANSACTIONAL_CONFLICT(TEXASRU) \\\n" " _TEXASRU_EXTRACT_BITS(TEXASRU, 12, 1)\n" "\n" "#define 
_TEXASR_TRANSACTION_CONFLICT(TEXASR) \\\n" " _TEXASR_EXTRACT_BITS(TEXASR, 13, 1)\n" "#define _TEXASRU_TRANSACTION_CONFLICT(TEXASRU) \\\n" " _TEXASRU_EXTRACT_BITS(TEXASRU, 13, 1)\n" "\n" "#define _TEXASR_TRANSLATION_INVALIDATION_CONFLICT(TEXASR) \\\n" " _TEXASR_EXTRACT_BITS(TEXASR, 14, 1)\n" "#define _TEXASRU_TRANSLATION_INVALIDATION_CONFLICT(TEXASRU) \\\n" " _TEXASRU_EXTRACT_BITS(TEXASRU, 14, 1)\n" "\n" "#define _TEXASR_IMPLEMENTAION_SPECIFIC(TEXASR) \\\n" " _TEXASR_EXTRACT_BITS(TEXASR, 15, 1)\n" "#define _TEXASRU_IMPLEMENTAION_SPECIFIC(TEXASRU) \\\n" " _TEXASRU_EXTRACT_BITS(TEXASRU, 15, 1)\n" "\n" "#define _TEXASR_INSTRUCTION_FETCH_CONFLICT(TEXASR) \\\n" " _TEXASR_EXTRACT_BITS(TEXASR, 16, 1)\n" "#define _TEXASRU_INSTRUCTION_FETCH_CONFLICT(TEXASRU) \\\n" " _TEXASRU_EXTRACT_BITS(TEXASRU, 16, 1)\n" "\n" "#define _TEXASR_ABORT(TEXASR) \\\n" " _TEXASR_EXTRACT_BITS(TEXASR, 31, 1)\n" "#define _TEXASRU_ABORT(TEXASRU) \\\n" " _TEXASRU_EXTRACT_BITS(TEXASRU, 31, 1)\n" "\n" "\n" "#define _TEXASR_SUSPENDED(TEXASR) \\\n" " _TEXASR_EXTRACT_BITS(TEXASR, 32, 1)\n" "\n" "#define _TEXASR_PRIVILEGE(TEXASR) \\\n" " _TEXASR_EXTRACT_BITS(TEXASR, 35, 2)\n" "\n" "#define _TEXASR_FAILURE_SUMMARY(TEXASR) \\\n" " _TEXASR_EXTRACT_BITS(TEXASR, 36, 1)\n" "\n" "#define _TEXASR_TFIAR_EXACT(TEXASR) \\\n" " _TEXASR_EXTRACT_BITS(TEXASR, 37, 1)\n" "\n" "#define _TEXASR_ROT(TEXASR) \\\n" " _TEXASR_EXTRACT_BITS(TEXASR, 38, 1)\n" "\n" "#define _TEXASR_TRANSACTION_LEVEL(TEXASR) \\\n" " _TEXASR_EXTRACT_BITS(TEXASR, 63, 12)\n" "\n" "#endif /* __powerpc */\n" "\n" "#ifdef __s390__\n" "\n" "/* Condition codes generated by tbegin */\n" "#define _HTM_TBEGIN_STARTED 0\n" "#define _HTM_TBEGIN_INDETERMINATE 1\n" "#define _HTM_TBEGIN_TRANSIENT 2\n" "#define _HTM_TBEGIN_PERSISTENT 3\n" "\n" "/* The abort codes below this threshold are reserved for machine use. */\n" "#define _HTM_FIRST_USER_ABORT_CODE 256\n" "\n" "/* The transaction diagnostic block is it is defined in the Principles\n" " of Operation chapter 5-91. 
*/\n" "\n" "struct __htm_tdb {\n" " unsigned char format; /* 0 */\n" " unsigned char flags;\n" " unsigned char reserved1[4];\n" " unsigned short nesting_depth;\n" " unsigned long long abort_code; /* 8 */\n" " unsigned long long conflict_token; /* 16 */\n" " unsigned long long atia; /* 24 */\n" " unsigned char eaid; /* 32 */\n" " unsigned char dxc;\n" " unsigned char reserved2[2];\n" " unsigned int program_int_id;\n" " unsigned long long exception_id; /* 40 */\n" " unsigned long long bea; /* 48 */\n" " unsigned char reserved3[72]; /* 56 */\n" " unsigned long long gprs[16]; /* 128 */\n" "} __attribute__((__packed__, __aligned__ (8)));\n" "\n" "\n" "/* Helper intrinsics to retry tbegin in case of transient failure. */\n" "\n" "static __inline int __attribute__((__always_inline__, __nodebug__))\n" "__builtin_tbegin_retry_null (int __retry)\n" "{\n" " int cc, i = 0;\n" "\n" " while ((cc = __builtin_tbegin(0)) == _HTM_TBEGIN_TRANSIENT\n" " && i++ < __retry)\n" " __builtin_tx_assist(i);\n" "\n" " return cc;\n" "}\n" "\n" "static __inline int __attribute__((__always_inline__, __nodebug__))\n" "__builtin_tbegin_retry_tdb (void *__tdb, int __retry)\n" "{\n" " int cc, i = 0;\n" "\n" " while ((cc = __builtin_tbegin(__tdb)) == _HTM_TBEGIN_TRANSIENT\n" " && i++ < __retry)\n" " __builtin_tx_assist(i);\n" "\n" " return cc;\n" "}\n" "\n" "#define __builtin_tbegin_retry(tdb, retry) \\\n" " (__builtin_constant_p(tdb == 0) && tdb == 0 ? 
\\\n" " __builtin_tbegin_retry_null(retry) : \\\n" " __builtin_tbegin_retry_tdb(tdb, retry))\n" "\n" "static __inline int __attribute__((__always_inline__, __nodebug__))\n" "__builtin_tbegin_retry_nofloat_null (int __retry)\n" "{\n" " int cc, i = 0;\n" "\n" " while ((cc = __builtin_tbegin_nofloat(0)) == _HTM_TBEGIN_TRANSIENT\n" " && i++ < __retry)\n" " __builtin_tx_assist(i);\n" "\n" " return cc;\n" "}\n" "\n" "static __inline int __attribute__((__always_inline__, __nodebug__))\n" "__builtin_tbegin_retry_nofloat_tdb (void *__tdb, int __retry)\n" "{\n" " int cc, i = 0;\n" "\n" " while ((cc = __builtin_tbegin_nofloat(__tdb)) == _HTM_TBEGIN_TRANSIENT\n" " && i++ < __retry)\n" " __builtin_tx_assist(i);\n" "\n" " return cc;\n" "}\n" "\n" "#define __builtin_tbegin_retry_nofloat(tdb, retry) \\\n" " (__builtin_constant_p(tdb == 0) && tdb == 0 ? \\\n" " __builtin_tbegin_retry_nofloat_null(retry) : \\\n" " __builtin_tbegin_retry_nofloat_tdb(tdb, retry))\n" "\n" "#endif /* __s390__ */\n" "\n" "#endif /* __HTMINTRIN_H */\n" "" } , { "/builtins/htmxlintrin.h" , "/*===---- htmxlintrin.h - XL compiler HTM execution intrinsics-------------===*\\\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" "\\*===----------------------------------------------------------------------===*/\n" "\n" "#ifndef __HTMXLINTRIN_H\n" "#define __HTMXLINTRIN_H\n" "\n" "#ifndef __HTM__\n" "#error \"HTM instruction set not enabled\"\n" "#endif\n" "\n" "#include \n" "\n" "#ifdef __powerpc__\n" "\n" "#ifdef __cplusplus\n" "extern \"C\" {\n" "#endif\n" "\n" "#define _TEXASR_PTR(TM_BUF) ((texasr_t *)((char *)(TM_BUF) + 0))\n" "#define _TEXASRU_PTR(TM_BUF) ((texasru_t *)((char *)(TM_BUF) + 0))\n" "#define _TEXASRL_PTR(TM_BUF) ((texasrl_t *)((char *)(TM_BUF) + 4))\n" "#define _TFIAR_PTR(TM_BUF) ((tfiar_t *)((char *)(TM_BUF) + 8))\n" 
"\n" "typedef char TM_buff_type[16];\n" "\n" "/* This macro can be used to determine whether a transaction was successfully\n" " started from the __TM_begin() and __TM_simple_begin() intrinsic functions\n" " below. */\n" "#define _HTM_TBEGIN_STARTED 1\n" "\n" "extern __inline long\n" "__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))\n" "__TM_simple_begin (void)\n" "{\n" " if (__builtin_expect (__builtin_tbegin (0), 1))\n" " return _HTM_TBEGIN_STARTED;\n" " return 0;\n" "}\n" "\n" "extern __inline long\n" "__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))\n" "__TM_begin (void* const __TM_buff)\n" "{\n" " *_TEXASRL_PTR (__TM_buff) = 0;\n" " if (__builtin_expect (__builtin_tbegin (0), 1))\n" " return _HTM_TBEGIN_STARTED;\n" "#ifdef __powerpc64__\n" " *_TEXASR_PTR (__TM_buff) = __builtin_get_texasr ();\n" "#else\n" " *_TEXASRU_PTR (__TM_buff) = __builtin_get_texasru ();\n" " *_TEXASRL_PTR (__TM_buff) = __builtin_get_texasr ();\n" "#endif\n" " *_TFIAR_PTR (__TM_buff) = __builtin_get_tfiar ();\n" " return 0;\n" "}\n" "\n" "extern __inline long\n" "__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))\n" "__TM_end (void)\n" "{\n" " if (__builtin_expect (__builtin_tend (0), 1))\n" " return 1;\n" " return 0;\n" "}\n" "\n" "extern __inline void\n" "__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))\n" "__TM_abort (void)\n" "{\n" " __builtin_tabort (0);\n" "}\n" "\n" "extern __inline void\n" "__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))\n" "__TM_named_abort (unsigned char const __code)\n" "{\n" " __builtin_tabort (__code);\n" "}\n" "\n" "extern __inline void\n" "__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))\n" "__TM_resume (void)\n" "{\n" " __builtin_tresume ();\n" "}\n" "\n" "extern __inline void\n" "__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))\n" "__TM_suspend (void)\n" "{\n" " __builtin_tsuspend ();\n" "}\n" "\n" "extern __inline long\n" 
"__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))\n" "__TM_is_user_abort (void* const __TM_buff)\n" "{\n" " texasru_t texasru = *_TEXASRU_PTR (__TM_buff);\n" " return _TEXASRU_ABORT (texasru);\n" "}\n" "\n" "extern __inline long\n" "__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))\n" "__TM_is_named_user_abort (void* const __TM_buff, unsigned char *__code)\n" "{\n" " texasru_t texasru = *_TEXASRU_PTR (__TM_buff);\n" "\n" " *__code = _TEXASRU_FAILURE_CODE (texasru);\n" " return _TEXASRU_ABORT (texasru);\n" "}\n" "\n" "extern __inline long\n" "__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))\n" "__TM_is_illegal (void* const __TM_buff)\n" "{\n" " texasru_t texasru = *_TEXASRU_PTR (__TM_buff);\n" " return _TEXASRU_DISALLOWED (texasru);\n" "}\n" "\n" "extern __inline long\n" "__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))\n" "__TM_is_footprint_exceeded (void* const __TM_buff)\n" "{\n" " texasru_t texasru = *_TEXASRU_PTR (__TM_buff);\n" " return _TEXASRU_FOOTPRINT_OVERFLOW (texasru);\n" "}\n" "\n" "extern __inline long\n" "__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))\n" "__TM_nesting_depth (void* const __TM_buff)\n" "{\n" " texasrl_t texasrl;\n" "\n" " if (_HTM_STATE (__builtin_ttest ()) == _HTM_NONTRANSACTIONAL)\n" " {\n" " texasrl = *_TEXASRL_PTR (__TM_buff);\n" " if (!_TEXASR_FAILURE_SUMMARY (texasrl))\n" " texasrl = 0;\n" " }\n" " else\n" " texasrl = (texasrl_t) __builtin_get_texasr ();\n" "\n" " return _TEXASR_TRANSACTION_LEVEL (texasrl);\n" "}\n" "\n" "extern __inline long\n" "__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))\n" "__TM_is_nested_too_deep(void* const __TM_buff)\n" "{\n" " texasru_t texasru = *_TEXASRU_PTR (__TM_buff);\n" " return _TEXASRU_NESTING_OVERFLOW (texasru);\n" "}\n" "\n" "extern __inline long\n" "__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))\n" "__TM_is_conflict(void* const __TM_buff)\n" "{\n" " 
texasru_t texasru = *_TEXASRU_PTR (__TM_buff);\n" " /* Return TEXASR bits 11 (Self-Induced Conflict) through\n" " 14 (Translation Invalidation Conflict). */\n" " return (_TEXASRU_EXTRACT_BITS (texasru, 14, 4)) ? 1 : 0;\n" "}\n" "\n" "extern __inline long\n" "__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))\n" "__TM_is_failure_persistent(void* const __TM_buff)\n" "{\n" " texasru_t texasru = *_TEXASRU_PTR (__TM_buff);\n" " return _TEXASRU_FAILURE_PERSISTENT (texasru);\n" "}\n" "\n" "extern __inline long\n" "__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))\n" "__TM_failure_address(void* const __TM_buff)\n" "{\n" " return *_TFIAR_PTR (__TM_buff);\n" "}\n" "\n" "extern __inline long long\n" "__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))\n" "__TM_failure_code(void* const __TM_buff)\n" "{\n" " return *_TEXASR_PTR (__TM_buff);\n" "}\n" "\n" "#ifdef __cplusplus\n" "}\n" "#endif\n" "\n" "#endif /* __powerpc__ */\n" "\n" "#ifdef __s390__\n" "\n" "#include \n" "\n" "/* These intrinsics are being made available for compatibility with\n" " the IBM XL compiler. For documentation please see the \"z/OS XL\n" " C/C++ Programming Guide\" publicly available on the web. 
*/\n" "\n" "static __inline long __attribute__((__always_inline__, __nodebug__))\n" "__TM_simple_begin ()\n" "{\n" " return __builtin_tbegin_nofloat (0);\n" "}\n" "\n" "static __inline long __attribute__((__always_inline__, __nodebug__))\n" "__TM_begin (void* const __tdb)\n" "{\n" " return __builtin_tbegin_nofloat (__tdb);\n" "}\n" "\n" "static __inline long __attribute__((__always_inline__, __nodebug__))\n" "__TM_end ()\n" "{\n" " return __builtin_tend ();\n" "}\n" "\n" "static __inline void __attribute__((__always_inline__))\n" "__TM_abort ()\n" "{\n" " return __builtin_tabort (_HTM_FIRST_USER_ABORT_CODE);\n" "}\n" "\n" "static __inline void __attribute__((__always_inline__, __nodebug__))\n" "__TM_named_abort (unsigned char const __code)\n" "{\n" " return __builtin_tabort ((int)_HTM_FIRST_USER_ABORT_CODE + __code);\n" "}\n" "\n" "static __inline void __attribute__((__always_inline__, __nodebug__))\n" "__TM_non_transactional_store (void* const __addr, long long const __value)\n" "{\n" " __builtin_non_tx_store ((uint64_t*)__addr, (uint64_t)__value);\n" "}\n" "\n" "static __inline long __attribute__((__always_inline__, __nodebug__))\n" "__TM_nesting_depth (void* const __tdb_ptr)\n" "{\n" " int depth = __builtin_tx_nesting_depth ();\n" " struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr;\n" "\n" " if (depth != 0)\n" " return depth;\n" "\n" " if (tdb->format != 1)\n" " return 0;\n" " return tdb->nesting_depth;\n" "}\n" "\n" "/* Transaction failure diagnostics */\n" "\n" "static __inline long __attribute__((__always_inline__, __nodebug__))\n" "__TM_is_user_abort (void* const __tdb_ptr)\n" "{\n" " struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr;\n" "\n" " if (tdb->format != 1)\n" " return 0;\n" "\n" " return !!(tdb->abort_code >= _HTM_FIRST_USER_ABORT_CODE);\n" "}\n" "\n" "static __inline long __attribute__((__always_inline__, __nodebug__))\n" "__TM_is_named_user_abort (void* const __tdb_ptr, unsigned char* __code)\n" "{\n" " struct __htm_tdb *tdb = (struct 
__htm_tdb*)__tdb_ptr;\n" "\n" " if (tdb->format != 1)\n" " return 0;\n" "\n" " if (tdb->abort_code >= _HTM_FIRST_USER_ABORT_CODE)\n" " {\n" " *__code = tdb->abort_code - _HTM_FIRST_USER_ABORT_CODE;\n" " return 1;\n" " }\n" " return 0;\n" "}\n" "\n" "static __inline long __attribute__((__always_inline__, __nodebug__))\n" "__TM_is_illegal (void* const __tdb_ptr)\n" "{\n" " struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr;\n" "\n" " return (tdb->format == 1\n" " && (tdb->abort_code == 4 /* unfiltered program interruption */\n" " || tdb->abort_code == 11 /* restricted instruction */));\n" "}\n" "\n" "static __inline long __attribute__((__always_inline__, __nodebug__))\n" "__TM_is_footprint_exceeded (void* const __tdb_ptr)\n" "{\n" " struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr;\n" "\n" " return (tdb->format == 1\n" " && (tdb->abort_code == 7 /* fetch overflow */\n" " || tdb->abort_code == 8 /* store overflow */));\n" "}\n" "\n" "static __inline long __attribute__((__always_inline__, __nodebug__))\n" "__TM_is_nested_too_deep (void* const __tdb_ptr)\n" "{\n" " struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr;\n" "\n" " return tdb->format == 1 && tdb->abort_code == 13; /* depth exceeded */\n" "}\n" "\n" "static __inline long __attribute__((__always_inline__, __nodebug__))\n" "__TM_is_conflict (void* const __tdb_ptr)\n" "{\n" " struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr;\n" "\n" " return (tdb->format == 1\n" " && (tdb->abort_code == 9 /* fetch conflict */\n" " || tdb->abort_code == 10 /* store conflict */));\n" "}\n" "\n" "static __inline long __attribute__((__always_inline__, __nodebug__))\n" "__TM_is_failure_persistent (long const __result)\n" "{\n" " return __result == _HTM_TBEGIN_PERSISTENT;\n" "}\n" "\n" "static __inline long __attribute__((__always_inline__, __nodebug__))\n" "__TM_failure_address (void* const __tdb_ptr)\n" "{\n" " struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr;\n" " return tdb->atia;\n" "}\n" "\n" "static __inline long 
__attribute__((__always_inline__, __nodebug__))\n" "__TM_failure_code (void* const __tdb_ptr)\n" "{\n" " struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr;\n" "\n" " return tdb->abort_code;\n" "}\n" "\n" "#endif /* __s390__ */\n" "\n" "#endif /* __HTMXLINTRIN_H */\n" "" } , { "/builtins/hvx_hexagon_protos.h" , "//===----------------------------------------------------------------------===//\n" "//\n" "// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" "// See https://llvm.org/LICENSE.txt for license information.\n" "// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" "//\n" "//===----------------------------------------------------------------------===//\n" "// Automatically generated file, do not edit!\n" "//===----------------------------------------------------------------------===//\n" "\n" "\n" "#ifndef _HVX_HEXAGON_PROTOS_H_\n" "#define _HVX_HEXAGON_PROTOS_H_ 1\n" "\n" "#ifdef __HVX__\n" "#if __HVX_LENGTH__ == 128\n" "#define __BUILTIN_VECTOR_WRAP(a) a ## _128B\n" "#else\n" "#define __BUILTIN_VECTOR_WRAP(a) a\n" "#endif\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Rd32=vextract(Vu32,Rs32)\n" " C Intrinsic Prototype: Word32 Q6_R_vextract_VR(HVX_Vector Vu, Word32 Rs)\n" " Instruction Type: LD\n" " Execution Slots: SLOT0\n" " ========================================================================== */\n" "\n" "#define Q6_R_vextract_VR(Vu,Rs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_extractw)(Vu,Rs)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32=hi(Vss32)\n" " C Intrinsic Prototype: HVX_Vector Q6_V_hi_W(HVX_VectorPair Vss)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define 
Q6_V_hi_W(Vss) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_hi)(Vss)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32=lo(Vss32)\n" " C Intrinsic Prototype: HVX_Vector Q6_V_lo_W(HVX_VectorPair Vss)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_V_lo_W(Vss) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lo)(Vss)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32=vsplat(Rt32)\n" " C Intrinsic Prototype: HVX_Vector Q6_V_vsplat_R(Word32 Rt)\n" " Instruction Type: CVI_VX_LATE\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_V_vsplat_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplatw)(Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qd4=and(Qs4,Qt4)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_and_QQ(HVX_VectorPred Qs, HVX_VectorPred Qt)\n" " Instruction Type: CVI_VA_DV\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_and_QQ(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qd4=and(Qs4,!Qt4)\n" " C Intrinsic Prototype: HVX_VectorPred 
Q6_Q_and_QQn(HVX_VectorPred Qs, HVX_VectorPred Qt)\n" " Instruction Type: CVI_VA_DV\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_and_QQn(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_and_n)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qd4=not(Qs4)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_not_Q(HVX_VectorPred Qs)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_not_Q(Qs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_not)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1))),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qd4=or(Qs4,Qt4)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_or_QQ(HVX_VectorPred Qs, HVX_VectorPred Qt)\n" " Instruction Type: CVI_VA_DV\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_or_QQ(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qd4=or(Qs4,!Qt4)\n" " C Intrinsic 
Prototype: HVX_VectorPred Q6_Q_or_QQn(HVX_VectorPred Qs, HVX_VectorPred Qt)\n" " Instruction Type: CVI_VA_DV\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_or_QQn(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_or_n)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qd4=vsetq(Rt32)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vsetq_R(Word32 Rt)\n" " Instruction Type: CVI_VP\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vsetq_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_scalar2)(Rt)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qd4=xor(Qs4,Qt4)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_xor_QQ(HVX_VectorPred Qs, HVX_VectorPred Qt)\n" " Instruction Type: CVI_VA_DV\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_xor_QQ(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: if (!Qv4) vmem(Rt32+#s4)=Vs32\n" " C Intrinsic Prototype: void 
Q6_vmem_QnRIV(HVX_VectorPred Qv, HVX_Vector* Rt, HVX_Vector Vs)\n" " Instruction Type: CVI_VM_ST\n" " Execution Slots: SLOT0\n" " ========================================================================== */\n" "\n" "#define Q6_vmem_QnRIV(Qv,Rt,Vs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nqpred_ai)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Rt,Vs)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: if (!Qv4) vmem(Rt32+#s4):nt=Vs32\n" " C Intrinsic Prototype: void Q6_vmem_QnRIV_nt(HVX_VectorPred Qv, HVX_Vector* Rt, HVX_Vector Vs)\n" " Instruction Type: CVI_VM_ST\n" " Execution Slots: SLOT0\n" " ========================================================================== */\n" "\n" "#define Q6_vmem_QnRIV_nt(Qv,Rt,Vs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nt_nqpred_ai)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Rt,Vs)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: if (Qv4) vmem(Rt32+#s4):nt=Vs32\n" " C Intrinsic Prototype: void Q6_vmem_QRIV_nt(HVX_VectorPred Qv, HVX_Vector* Rt, HVX_Vector Vs)\n" " Instruction Type: CVI_VM_ST\n" " Execution Slots: SLOT0\n" " ========================================================================== */\n" "\n" "#define Q6_vmem_QRIV_nt(Qv,Rt,Vs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nt_qpred_ai)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Rt,Vs)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: if (Qv4) vmem(Rt32+#s4)=Vs32\n" " C Intrinsic Prototype: void Q6_vmem_QRIV(HVX_VectorPred Qv, HVX_Vector* Rt, HVX_Vector Vs)\n" " Instruction Type: CVI_VM_ST\n" " Execution Slots: SLOT0\n" " 
========================================================================== */\n" "\n" "#define Q6_vmem_QRIV(Qv,Rt,Vs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_qpred_ai)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Rt,Vs)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.uh=vabsdiff(Vu32.h,Vv32.h)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vuh_vabsdiff_VhVh(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vuh_vabsdiff_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffh)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.ub=vabsdiff(Vu32.ub,Vv32.ub)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vub_vabsdiff_VubVub(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vub_vabsdiff_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffub)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.uh=vabsdiff(Vu32.uh,Vv32.uh)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vuh_vabsdiff_VuhVuh(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vuh_vabsdiff_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffuh)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* 
==========================================================================\n" " Assembly Syntax: Vd32.uw=vabsdiff(Vu32.w,Vv32.w)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vuw_vabsdiff_VwVw(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vuw_vabsdiff_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffw)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.h=vabs(Vu32.h)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vabs_Vh(HVX_Vector Vu)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vabs_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsh)(Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.h=vabs(Vu32.h):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vabs_Vh_sat(HVX_Vector Vu)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vabs_Vh_sat(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsh_sat)(Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.w=vabs(Vu32.w)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vabs_Vw(HVX_Vector Vu)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vabs_Vw(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsw)(Vu)\n" "#endif /* 
__HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.w=vabs(Vu32.w):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vabs_Vw_sat(HVX_Vector Vu)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vabs_Vw_sat(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsw_sat)(Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.b=vadd(Vu32.b,Vv32.b)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vb_vadd_VbVb(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vb_vadd_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddb)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.b=vadd(Vuu32.b,Vvv32.b)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wb_vadd_WbWb(HVX_VectorPair Vuu, HVX_VectorPair Vvv)\n" " Instruction Type: CVI_VA_DV\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Wb_vadd_WbWb(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddb_dv)(Vuu,Vvv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: if (!Qv4) Vx32.b+=Vu32.b\n" " C Intrinsic Prototype: HVX_Vector Q6_Vb_condacc_QnVbVb(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " 
========================================================================== */\n" "\n" "#define Q6_Vb_condacc_QnVbVb(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: if (Qv4) Vx32.b+=Vu32.b\n" " C Intrinsic Prototype: HVX_Vector Q6_Vb_condacc_QVbVb(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vb_condacc_QVbVb(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.h=vadd(Vu32.h,Vv32.h)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vadd_VhVh(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vadd_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddh)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.h=vadd(Vuu32.h,Vvv32.h)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vadd_WhWh(HVX_VectorPair Vuu, HVX_VectorPair Vvv)\n" " Instruction Type: CVI_VA_DV\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Wh_vadd_WhWh(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddh_dv)(Vuu,Vvv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" 
"\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: if (!Qv4) Vx32.h+=Vu32.h\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_condacc_QnVhVh(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_condacc_QnVhVh(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: if (Qv4) Vx32.h+=Vu32.h\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_condacc_QVhVh(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_condacc_QVhVh(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.h=vadd(Vu32.h,Vv32.h):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vadd_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vadd_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhsat)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.h=vadd(Vuu32.h,Vvv32.h):sat\n" " C Intrinsic Prototype: HVX_VectorPair 
Q6_Wh_vadd_WhWh_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)\n" " Instruction Type: CVI_VA_DV\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Wh_vadd_WhWh_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhsat_dv)(Vuu,Vvv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.w=vadd(Vu32.h,Vv32.h)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vadd_VhVh(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Ww_vadd_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhw)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.h=vadd(Vu32.ub,Vv32.ub)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vadd_VubVub(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Wh_vadd_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubh)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.ub=vadd(Vu32.ub,Vv32.ub):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vub_vadd_VubVub_sat(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vub_vadd_VubVub_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubsat)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" 
"\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.ub=vadd(Vuu32.ub,Vvv32.ub):sat\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wub_vadd_WubWub_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)\n" " Instruction Type: CVI_VA_DV\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Wub_vadd_WubWub_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubsat_dv)(Vuu,Vvv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.uh=vadd(Vu32.uh,Vv32.uh):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vuh_vadd_VuhVuh_sat(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vuh_vadd_VuhVuh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhsat)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.uh=vadd(Vuu32.uh,Vvv32.uh):sat\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vadd_WuhWuh_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)\n" " Instruction Type: CVI_VA_DV\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Wuh_vadd_WuhWuh_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhsat_dv)(Vuu,Vvv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.w=vadd(Vu32.uh,Vv32.uh)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vadd_VuhVuh(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: 
CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Ww_vadd_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhw)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.w=vadd(Vu32.w,Vv32.w)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vadd_VwVw(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vadd_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddw)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.w=vadd(Vuu32.w,Vvv32.w)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vadd_WwWw(HVX_VectorPair Vuu, HVX_VectorPair Vvv)\n" " Instruction Type: CVI_VA_DV\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Ww_vadd_WwWw(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddw_dv)(Vuu,Vvv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: if (!Qv4) Vx32.w+=Vu32.w\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_condacc_QnVwVw(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_condacc_QnVwVw(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ 
>= 60\n" "/* ==========================================================================\n" " Assembly Syntax: if (Qv4) Vx32.w+=Vu32.w\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_condacc_QVwVw(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_condacc_QVwVw(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.w=vadd(Vu32.w,Vv32.w):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vadd_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vadd_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwsat)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.w=vadd(Vuu32.w,Vvv32.w):sat\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vadd_WwWw_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)\n" " Instruction Type: CVI_VA_DV\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Ww_vadd_WwWw_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwsat_dv)(Vuu,Vvv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32=valign(Vu32,Vv32,Rt8)\n" " C Intrinsic Prototype: HVX_Vector Q6_V_valign_VVR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)\n" " Instruction Type: 
CVI_VP\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_V_valign_VVR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_valignb)(Vu,Vv,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32=valign(Vu32,Vv32,#u3)\n" " C Intrinsic Prototype: HVX_Vector Q6_V_valign_VVI(HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3)\n" " Instruction Type: CVI_VP\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_V_valign_VVI(Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_valignbi)(Vu,Vv,Iu3)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32=vand(Vu32,Vv32)\n" " C Intrinsic Prototype: HVX_Vector Q6_V_vand_VV(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_V_vand_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vand)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32=vand(Qu4,Rt32)\n" " C Intrinsic Prototype: HVX_Vector Q6_V_vand_QR(HVX_VectorPred Qu, Word32 Rt)\n" " Instruction Type: CVI_VX_LATE\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_V_vand_QR(Qu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qu),-1),Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* 
==========================================================================\n" " Assembly Syntax: Vx32|=vand(Qu4,Rt32)\n" " C Intrinsic Prototype: HVX_Vector Q6_V_vandor_VQR(HVX_Vector Vx, HVX_VectorPred Qu, Word32 Rt)\n" " Instruction Type: CVI_VX_LATE\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_V_vandor_VQR(Vx,Qu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt_acc)(Vx,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qu),-1),Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qd4=vand(Vu32,Rt32)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vand_VR(HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VX_LATE\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vand_VR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)(Vu,Rt)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qx4|=vand(Vu32,Rt32)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vandor_QVR(HVX_VectorPred Qx, HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VX_LATE\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vandor_QVR(Qx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt_acc)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Rt)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.h=vasl(Vu32.h,Rt32)\n" " C 
Intrinsic Prototype: HVX_Vector Q6_Vh_vasl_VhR(HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vasl_VhR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslh)(Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.h=vasl(Vu32.h,Vv32.h)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vasl_VhVh(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vasl_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslhv)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.w=vasl(Vu32.w,Rt32)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vasl_VwR(HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vasl_VwR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslw)(Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vx32.w+=vasl(Vu32.w,Rt32)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vaslacc_VwVwR(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vaslacc_VwVwR(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslw_acc)(Vx,Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" 
"/* ==========================================================================\n" " Assembly Syntax: Vd32.w=vasl(Vu32.w,Vv32.w)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vasl_VwVw(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vasl_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslwv)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.h=vasr(Vu32.h,Rt32)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vasr_VhR(HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vasr_VhR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrh)(Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.b=vasr(Vu32.h,Vv32.h,Rt8):rnd:sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vb_vasr_VhVhR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vb_vasr_VhVhR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhbrndsat)(Vu,Vv,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.ub=vasr(Vu32.h,Vv32.h,Rt8):rnd:sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_VhVhR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " 
========================================================================== */\n" "\n" "#define Q6_Vub_vasr_VhVhR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhubrndsat)(Vu,Vv,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.ub=vasr(Vu32.h,Vv32.h,Rt8):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_VhVhR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vub_vasr_VhVhR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhubsat)(Vu,Vv,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.h=vasr(Vu32.h,Vv32.h)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vasr_VhVh(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vasr_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhv)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.w=vasr(Vu32.w,Rt32)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vasr_VwR(HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vasr_VwR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrw)(Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly 
Syntax: Vx32.w+=vasr(Vu32.w,Rt32)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vasracc_VwVwR(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vasracc_VwVwR(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrw_acc)(Vx,Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.h=vasr(Vu32.w,Vv32.w,Rt8)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vasr_VwVwR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vasr_VwVwR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwh)(Vu,Vv,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.h=vasr(Vu32.w,Vv32.w,Rt8):rnd:sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vasr_VwVwR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vasr_VwVwR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwhrndsat)(Vu,Vv,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.h=vasr(Vu32.w,Vv32.w,Rt8):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vasr_VwVwR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" 
"#define Q6_Vh_vasr_VwVwR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwhsat)(Vu,Vv,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.uh=vasr(Vu32.w,Vv32.w,Rt8):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_VwVwR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vuh_vasr_VwVwR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwuhsat)(Vu,Vv,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.w=vasr(Vu32.w,Vv32.w)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vasr_VwVw(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vasr_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwv)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32=Vu32\n" " C Intrinsic Prototype: HVX_Vector Q6_V_equals_V(HVX_Vector Vu)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_V_equals_V(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassign)(Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32=Vuu32\n" " C Intrinsic Prototype: HVX_VectorPair Q6_W_equals_W(HVX_VectorPair Vuu)\n" " Instruction Type: CVI_VA_DV\n" 
" Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_W_equals_W(Vuu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassignp)(Vuu)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.h=vavg(Vu32.h,Vv32.h)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vavg_VhVh(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vavg_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgh)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.h=vavg(Vu32.h,Vv32.h):rnd\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vavg_VhVh_rnd(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vavg_VhVh_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavghrnd)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.ub=vavg(Vu32.ub,Vv32.ub)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vub_vavg_VubVub(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vub_vavg_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgub)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: 
Vd32.ub=vavg(Vu32.ub,Vv32.ub):rnd\n" " C Intrinsic Prototype: HVX_Vector Q6_Vub_vavg_VubVub_rnd(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vub_vavg_VubVub_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgubrnd)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.uh=vavg(Vu32.uh,Vv32.uh)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vuh_vavg_VuhVuh(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vuh_vavg_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguh)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.uh=vavg(Vu32.uh,Vv32.uh):rnd\n" " C Intrinsic Prototype: HVX_Vector Q6_Vuh_vavg_VuhVuh_rnd(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vuh_vavg_VuhVuh_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguhrnd)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.w=vavg(Vu32.w,Vv32.w)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vavg_VwVw(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vavg_VwVw(Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgw)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.w=vavg(Vu32.w,Vv32.w):rnd\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vavg_VwVw_rnd(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vavg_VwVw_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgwrnd)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.uh=vcl0(Vu32.uh)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vuh_vcl0_Vuh(HVX_Vector Vu)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vuh_vcl0_Vuh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcl0h)(Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.uw=vcl0(Vu32.uw)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vuw_vcl0_Vuw(HVX_Vector Vu)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vuw_vcl0_Vuw(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcl0w)(Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32=vcombine(Vu32,Vv32)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_W_vcombine_VV(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA_DV\n" " Execution Slots: SLOT0123\n" " 
========================================================================== */\n" "\n" "#define Q6_W_vcombine_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcombine)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32=#0\n" " C Intrinsic Prototype: HVX_Vector Q6_V_vzero()\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_V_vzero() __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vd0)()\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.b=vdeal(Vu32.b)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vb_vdeal_Vb(HVX_Vector Vu)\n" " Instruction Type: CVI_VP\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vb_vdeal_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealb)(Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.b=vdeale(Vu32.b,Vv32.b)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vb_vdeale_VbVb(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VP\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vb_vdeale_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealb4w)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.h=vdeal(Vu32.h)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vdeal_Vh(HVX_Vector Vu)\n" " Instruction Type: CVI_VP\n" " Execution Slots: 
SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vdeal_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealh)(Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32=vdeal(Vu32,Vv32,Rt8)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_W_vdeal_VVR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)\n" " Instruction Type: CVI_VP_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_W_vdeal_VVR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealvdd)(Vu,Vv,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32=vdelta(Vu32,Vv32)\n" " C Intrinsic Prototype: HVX_Vector Q6_V_vdelta_VV(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VP\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_V_vdelta_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdelta)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.h=vdmpy(Vu32.ub,Rt32.b)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vdmpy_VubRb(HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vdmpy_VubRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus)(Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: 
Vx32.h+=vdmpy(Vu32.ub,Rt32.b)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vdmpyacc_VhVubRb(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vdmpyacc_VhVubRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_acc)(Vx,Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.h=vdmpy(Vuu32.ub,Rt32.b)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vdmpy_WubRb(HVX_VectorPair Vuu, Word32 Rt)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Wh_vdmpy_WubRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_dv)(Vuu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vxx32.h+=vdmpy(Vuu32.ub,Rt32.b)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vdmpyacc_WhWubRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Wh_vdmpyacc_WhWubRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_dv_acc)(Vxx,Vuu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.w=vdmpy(Vu32.h,Rt32.b)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhRb(HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define 
Q6_Vw_vdmpy_VhRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb)(Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vx32.w+=vdmpy(Vu32.h,Rt32.b)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwVhRb(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vdmpyacc_VwVhRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_acc)(Vx,Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.w=vdmpy(Vuu32.h,Rt32.b)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vdmpy_WhRb(HVX_VectorPair Vuu, Word32 Rt)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Ww_vdmpy_WhRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_dv)(Vuu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vxx32.w+=vdmpy(Vuu32.h,Rt32.b)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vdmpyacc_WwWhRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Ww_vdmpyacc_WwWhRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_dv_acc)(Vxx,Vuu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.w=vdmpy(Vuu32.h,Rt32.h):sat\n" 
" C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_WhRh_sat(HVX_VectorPair Vuu, Word32 Rt)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vdmpy_WhRh_sat(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhisat)(Vuu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vx32.w+=vdmpy(Vuu32.h,Rt32.h):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwWhRh_sat(HVX_Vector Vx, HVX_VectorPair Vuu, Word32 Rt)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vdmpyacc_VwWhRh_sat(Vx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhisat_acc)(Vx,Vuu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.w=vdmpy(Vu32.h,Rt32.h):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhRh_sat(HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vdmpy_VhRh_sat(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsat)(Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vx32.w+=vdmpy(Vu32.h,Rt32.h):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwVhRh_sat(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vdmpyacc_VwVhRh_sat(Vx,Vu,Rt) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsat_acc)(Vx,Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.w=vdmpy(Vuu32.h,Rt32.uh,#1):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_WhRuh_sat(HVX_VectorPair Vuu, Word32 Rt)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vdmpy_WhRuh_sat(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsuisat)(Vuu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vx32.w+=vdmpy(Vuu32.h,Rt32.uh,#1):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwWhRuh_sat(HVX_Vector Vx, HVX_VectorPair Vuu, Word32 Rt)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vdmpyacc_VwWhRuh_sat(Vx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsuisat_acc)(Vx,Vuu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.w=vdmpy(Vu32.h,Rt32.uh):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhRuh_sat(HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vdmpy_VhRuh_sat(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsusat)(Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vx32.w+=vdmpy(Vu32.h,Rt32.uh):sat\n" " C 
Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwVhRuh_sat(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vdmpyacc_VwVhRuh_sat(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsusat_acc)(Vx,Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.w=vdmpy(Vu32.h,Vv32.h):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vdmpy_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhvsat)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vx32.w+=vdmpy(Vu32.h,Vv32.h):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwVhVh_sat(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vdmpyacc_VwVhVh_sat(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhvsat_acc)(Vx,Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.uw=vdsad(Vuu32.uh,Rt32.uh)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vdsad_WuhRuh(HVX_VectorPair Vuu, Word32 Rt)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Wuw_vdsad_WuhRuh(Vuu,Rt) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdsaduh)(Vuu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vxx32.uw+=vdsad(Vuu32.uh,Rt32.uh)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vdsadacc_WuwWuhRuh(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Wuw_vdsadacc_WuwWuhRuh(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdsaduh_acc)(Vxx,Vuu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qd4=vcmp.eq(Vu32.b,Vv32.b)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eq_VbVb(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vcmp_eq_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb)(Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qx4&=vcmp.eq(Vu32.b,Vv32.b)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqand_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vcmp_eqand_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if 
__HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qx4|=vcmp.eq(Vu32.b,Vv32.b)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqor_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vcmp_eqor_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qx4^=vcmp.eq(Vu32.b,Vv32.b)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqxacc_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vcmp_eqxacc_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qd4=vcmp.eq(Vu32.h,Vv32.h)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eq_VhVh(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vcmp_eq_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh)(Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" 
"/* ==========================================================================\n" " Assembly Syntax: Qx4&=vcmp.eq(Vu32.h,Vv32.h)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqand_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vcmp_eqand_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qx4|=vcmp.eq(Vu32.h,Vv32.h)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqor_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vcmp_eqor_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qx4^=vcmp.eq(Vu32.h,Vv32.h)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqxacc_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vcmp_eqxacc_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)\n" 
"#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qd4=vcmp.eq(Vu32.w,Vv32.w)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eq_VwVw(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vcmp_eq_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw)(Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qx4&=vcmp.eq(Vu32.w,Vv32.w)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqand_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vcmp_eqand_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qx4|=vcmp.eq(Vu32.w,Vv32.w)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqor_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vcmp_eqor_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)\n" "#endif /* 
__HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qx4^=vcmp.eq(Vu32.w,Vv32.w)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqxacc_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vcmp_eqxacc_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qd4=vcmp.gt(Vu32.b,Vv32.b)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VbVb(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vcmp_gt_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb)(Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qx4&=vcmp.gt(Vu32.b,Vv32.b)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vcmp_gtand_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 
60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qx4|=vcmp.gt(Vu32.b,Vv32.b)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vcmp_gtor_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qx4^=vcmp.gt(Vu32.b,Vv32.b)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vcmp_gtxacc_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qd4=vcmp.gt(Vu32.h,Vv32.h)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VhVh(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vcmp_gt_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth)(Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if 
__HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qx4&=vcmp.gt(Vu32.h,Vv32.h)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vcmp_gtand_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qx4|=vcmp.gt(Vu32.h,Vv32.h)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vcmp_gtor_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qx4^=vcmp.gt(Vu32.h,Vv32.h)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vcmp_gtxacc_QVhVh(Qx,Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qd4=vcmp.gt(Vu32.ub,Vv32.ub)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VubVub(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vcmp_gt_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub)(Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qx4&=vcmp.gt(Vu32.ub,Vv32.ub)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVubVub(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vcmp_gtand_QVubVub(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qx4|=vcmp.gt(Vu32.ub,Vv32.ub)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVubVub(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vcmp_gtor_QVubVub(Qx,Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qx4^=vcmp.gt(Vu32.ub,Vv32.ub)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVubVub(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vcmp_gtxacc_QVubVub(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qd4=vcmp.gt(Vu32.uh,Vv32.uh)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VuhVuh(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vcmp_gt_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh)(Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qx4&=vcmp.gt(Vu32.uh,Vv32.uh)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVuhVuh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vcmp_gtand_QVuhVuh(Qx,Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qx4|=vcmp.gt(Vu32.uh,Vv32.uh)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVuhVuh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vcmp_gtor_QVuhVuh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qx4^=vcmp.gt(Vu32.uh,Vv32.uh)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVuhVuh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vcmp_gtxacc_QVuhVuh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qd4=vcmp.gt(Vu32.uw,Vv32.uw)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VuwVuw(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " 
========================================================================== */\n" "\n" "#define Q6_Q_vcmp_gt_VuwVuw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw)(Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qx4&=vcmp.gt(Vu32.uw,Vv32.uw)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVuwVuw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vcmp_gtand_QVuwVuw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qx4|=vcmp.gt(Vu32.uw,Vv32.uw)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVuwVuw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vcmp_gtor_QVuwVuw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qx4^=vcmp.gt(Vu32.uw,Vv32.uw)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVuwVuw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution 
Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vcmp_gtxacc_QVuwVuw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qd4=vcmp.gt(Vu32.w,Vv32.w)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VwVw(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vcmp_gt_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw)(Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qx4&=vcmp.gt(Vu32.w,Vv32.w)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vcmp_gtand_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qx4|=vcmp.gt(Vu32.w,Vv32.w)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" 
" ========================================================================== */\n" "\n" "#define Q6_Q_vcmp_gtor_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Qx4^=vcmp.gt(Vu32.w,Vv32.w)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vcmp_gtxacc_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vx32.w=vinsert(Rt32)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vinsert_VwR(HVX_Vector Vx, Word32 Rt)\n" " Instruction Type: CVI_VX_LATE\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vinsert_VwR(Vx,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vinsertwr)(Vx,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32=vlalign(Vu32,Vv32,Rt8)\n" " C Intrinsic Prototype: HVX_Vector Q6_V_vlalign_VVR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)\n" " Instruction Type: CVI_VP\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define 
Q6_V_vlalign_VVR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlalignb)(Vu,Vv,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32=vlalign(Vu32,Vv32,#u3)\n" " C Intrinsic Prototype: HVX_Vector Q6_V_vlalign_VVI(HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3)\n" " Instruction Type: CVI_VP\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_V_vlalign_VVI(Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlalignbi)(Vu,Vv,Iu3)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.uh=vlsr(Vu32.uh,Rt32)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vuh_vlsr_VuhR(HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vuh_vlsr_VuhR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrh)(Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.h=vlsr(Vu32.h,Vv32.h)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vlsr_VhVh(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vlsr_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrhv)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.uw=vlsr(Vu32.uw,Rt32)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vuw_vlsr_VuwR(HVX_Vector Vu, Word32 Rt)\n" " 
Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vuw_vlsr_VuwR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrw)(Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.w=vlsr(Vu32.w,Vv32.w)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vlsr_VwVw(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vlsr_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrwv)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.b=vlut32(Vu32.b,Vv32.b,Rt8)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vb_vlut32_VbVbR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)\n" " Instruction Type: CVI_VP\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vb_vlut32_VbVbR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb)(Vu,Vv,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vx32.b|=vlut32(Vu32.b,Vv32.b,Rt8)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vb_vlut32or_VbVbVbR(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)\n" " Instruction Type: CVI_VP_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vb_vlut32or_VbVbVbR(Vx,Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_oracc)(Vx,Vu,Vv,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 
60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.h=vlut16(Vu32.b,Vv32.h,Rt8)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vlut16_VbVhR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)\n" " Instruction Type: CVI_VP_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Wh_vlut16_VbVhR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh)(Vu,Vv,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vxx32.h|=vlut16(Vu32.b,Vv32.h,Rt8)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vlut16or_WhVbVhR(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)\n" " Instruction Type: CVI_VP_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Wh_vlut16or_WhVbVhR(Vxx,Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_oracc)(Vxx,Vu,Vv,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.h=vmax(Vu32.h,Vv32.h)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vmax_VhVh(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vmax_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxh)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.ub=vmax(Vu32.ub,Vv32.ub)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vub_vmax_VubVub(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" 
" ========================================================================== */\n" "\n" "#define Q6_Vub_vmax_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxub)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.uh=vmax(Vu32.uh,Vv32.uh)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vuh_vmax_VuhVuh(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vuh_vmax_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxuh)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.w=vmax(Vu32.w,Vv32.w)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vmax_VwVw(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vmax_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxw)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.h=vmin(Vu32.h,Vv32.h)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vmin_VhVh(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vmin_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminh)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.ub=vmin(Vu32.ub,Vv32.ub)\n" " C 
Intrinsic Prototype: HVX_Vector Q6_Vub_vmin_VubVub(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vub_vmin_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminub)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.uh=vmin(Vu32.uh,Vv32.uh)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vuh_vmin_VuhVuh(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vuh_vmin_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminuh)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.w=vmin(Vu32.w,Vv32.w)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vmin_VwVw(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vmin_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminw)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.h=vmpa(Vuu32.ub,Rt32.b)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpa_WubRb(HVX_VectorPair Vuu, Word32 Rt)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Wh_vmpa_WubRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabus)(Vuu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if 
__HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vxx32.h+=vmpa(Vuu32.ub,Rt32.b)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpaacc_WhWubRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Wh_vmpaacc_WhWubRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabus_acc)(Vxx,Vuu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.h=vmpa(Vuu32.ub,Vvv32.b)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpa_WubWb(HVX_VectorPair Vuu, HVX_VectorPair Vvv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Wh_vmpa_WubWb(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabusv)(Vuu,Vvv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.h=vmpa(Vuu32.ub,Vvv32.ub)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpa_WubWub(HVX_VectorPair Vuu, HVX_VectorPair Vvv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Wh_vmpa_WubWub(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuuv)(Vuu,Vvv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.w=vmpa(Vuu32.h,Rt32.b)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpa_WhRb(HVX_VectorPair Vuu, Word32 Rt)\n" " Instruction Type: CVI_VX_DV\n" " Execution 
Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Ww_vmpa_WhRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahb)(Vuu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vxx32.w+=vmpa(Vuu32.h,Rt32.b)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpaacc_WwWhRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Ww_vmpaacc_WwWhRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahb_acc)(Vxx,Vuu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.h=vmpy(Vu32.ub,Rt32.b)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpy_VubRb(HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Wh_vmpy_VubRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybus)(Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vxx32.h+=vmpy(Vu32.ub,Rt32.b)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpyacc_WhVubRb(HVX_VectorPair Vxx, HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Wh_vmpyacc_WhVubRb(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybus_acc)(Vxx,Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* 
==========================================================================\n" " Assembly Syntax: Vdd32.h=vmpy(Vu32.ub,Vv32.b)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpy_VubVb(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Wh_vmpy_VubVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybusv)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vxx32.h+=vmpy(Vu32.ub,Vv32.b)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpyacc_WhVubVb(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Wh_vmpyacc_WhVubVb(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybusv_acc)(Vxx,Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.h=vmpy(Vu32.b,Vv32.b)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpy_VbVb(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Wh_vmpy_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybv)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vxx32.h+=vmpy(Vu32.b,Vv32.b)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpyacc_WhVbVb(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " 
========================================================================== */\n" "\n" "#define Q6_Wh_vmpyacc_WhVbVb(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybv_acc)(Vxx,Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.w=vmpye(Vu32.w,Vv32.uh)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpye_VwVuh(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vmpye_VwVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyewuh)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.w=vmpy(Vu32.h,Rt32.h)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpy_VhRh(HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Ww_vmpy_VhRh(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyh)(Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vxx32.w+=vmpy(Vu32.h,Rt32.h):sat\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpyacc_WwVhRh_sat(HVX_VectorPair Vxx, HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Ww_vmpyacc_WwVhRh_sat(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhsat_acc)(Vxx,Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* 
==========================================================================\n" " Assembly Syntax: Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:rnd:sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpy_VhRh_s1_rnd_sat(HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vmpy_VhRh_s1_rnd_sat(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhsrs)(Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpy_VhRh_s1_sat(HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vmpy_VhRh_s1_sat(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhss)(Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.w=vmpy(Vu32.h,Vv32.uh)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpy_VhVuh(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Ww_vmpy_VhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhus)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vxx32.w+=vmpy(Vu32.h,Vv32.uh)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpyacc_WwVhVuh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " 
========================================================================== */\n" "\n" "#define Q6_Ww_vmpyacc_WwVhVuh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhus_acc)(Vxx,Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.w=vmpy(Vu32.h,Vv32.h)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpy_VhVh(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Ww_vmpy_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhv)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vxx32.w+=vmpy(Vu32.h,Vv32.h)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpyacc_WwVhVh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Ww_vmpyacc_WwVhVh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhv_acc)(Vxx,Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.h=vmpy(Vu32.h,Vv32.h):<<1:rnd:sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpy_VhVh_s1_rnd_sat(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vmpy_VhVh_s1_rnd_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhvsrs)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* 
==========================================================================\n" " Assembly Syntax: Vd32.w=vmpyieo(Vu32.h,Vv32.h)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyieo_VhVh(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vmpyieo_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyieoh)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vx32.w+=vmpyie(Vu32.w,Vv32.h)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyieacc_VwVwVh(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vmpyieacc_VwVwVh(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewh_acc)(Vx,Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.w=vmpyie(Vu32.w,Vv32.uh)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyie_VwVuh(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vmpyie_VwVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewuh)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vx32.w+=vmpyie(Vu32.w,Vv32.uh)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyieacc_VwVwVuh(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " 
========================================================================== */\n" "\n" "#define Q6_Vw_vmpyieacc_VwVwVuh(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewuh_acc)(Vx,Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.h=vmpyi(Vu32.h,Vv32.h)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpyi_VhVh(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vmpyi_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyih)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vx32.h+=vmpyi(Vu32.h,Vv32.h)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpyiacc_VhVhVh(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vmpyiacc_VhVhVh(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyih_acc)(Vx,Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.h=vmpyi(Vu32.h,Rt32.b)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpyi_VhRb(HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vmpyi_VhRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyihb)(Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " 
Assembly Syntax: Vx32.h+=vmpyi(Vu32.h,Rt32.b)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpyiacc_VhVhRb(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vmpyiacc_VhVhRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyihb_acc)(Vx,Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.w=vmpyio(Vu32.w,Vv32.h)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyio_VwVh(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vmpyio_VwVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiowh)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.w=vmpyi(Vu32.w,Rt32.b)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyi_VwRb(HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vmpyi_VwRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwb)(Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vx32.w+=vmpyi(Vu32.w,Rt32.b)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyiacc_VwVwRb(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vmpyiacc_VwVwRb(Vx,Vu,Rt) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwb_acc)(Vx,Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.w=vmpyi(Vu32.w,Rt32.h)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyi_VwRh(HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vmpyi_VwRh(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwh)(Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vx32.w+=vmpyi(Vu32.w,Rt32.h)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyiacc_VwVwRh(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vmpyiacc_VwVwRh(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwh_acc)(Vx,Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.w=vmpyo(Vu32.w,Vv32.h):<<1:sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyo_VwVh_s1_sat(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vmpyo_VwVh_s1_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.w=vmpyo(Vu32.w,Vv32.h):<<1:rnd:sat\n" " C Intrinsic Prototype: HVX_Vector 
Q6_Vw_vmpyo_VwVh_s1_rnd_sat(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vmpyo_VwVh_s1_rnd_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_rnd)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vx32.w+=vmpyo(Vu32.w,Vv32.h):<<1:rnd:sat:shift\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyoacc_VwVwVh_s1_rnd_sat_shift(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vmpyoacc_VwVwVh_s1_rnd_sat_shift(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_rnd_sacc)(Vx,Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vx32.w+=vmpyo(Vu32.w,Vv32.h):<<1:sat:shift\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyoacc_VwVwVh_s1_sat_shift(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vmpyoacc_VwVwVh_s1_sat_shift(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_sacc)(Vx,Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.uh=vmpy(Vu32.ub,Rt32.ub)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vmpy_VubRub(HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== 
*/\n" "\n" "#define Q6_Wuh_vmpy_VubRub(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyub)(Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vxx32.uh+=vmpy(Vu32.ub,Rt32.ub)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vmpyacc_WuhVubRub(HVX_VectorPair Vxx, HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Wuh_vmpyacc_WuhVubRub(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyub_acc)(Vxx,Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.uh=vmpy(Vu32.ub,Vv32.ub)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vmpy_VubVub(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Wuh_vmpy_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyubv)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vxx32.uh+=vmpy(Vu32.ub,Vv32.ub)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vmpyacc_WuhVubVub(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Wuh_vmpyacc_WuhVubVub(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyubv_acc)(Vxx,Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly 
Syntax: Vdd32.uw=vmpy(Vu32.uh,Rt32.uh)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vmpy_VuhRuh(HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Wuw_vmpy_VuhRuh(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuh)(Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vxx32.uw+=vmpy(Vu32.uh,Rt32.uh)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vmpyacc_WuwVuhRuh(HVX_VectorPair Vxx, HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Wuw_vmpyacc_WuwVuhRuh(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuh_acc)(Vxx,Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.uw=vmpy(Vu32.uh,Vv32.uh)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vmpy_VuhVuh(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Wuw_vmpy_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhv)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vxx32.uw+=vmpy(Vu32.uh,Vv32.uh)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vmpyacc_WuwVuhVuh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define 
Q6_Wuw_vmpyacc_WuwVuhVuh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhv_acc)(Vxx,Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32=vmux(Qt4,Vu32,Vv32)\n" " C Intrinsic Prototype: HVX_Vector Q6_V_vmux_QVV(HVX_VectorPred Qt, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_V_vmux_QVV(Qt,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmux)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1),Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.h=vnavg(Vu32.h,Vv32.h)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vnavg_VhVh(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vnavg_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgh)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.b=vnavg(Vu32.ub,Vv32.ub)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vb_vnavg_VubVub(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vb_vnavg_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgub)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.w=vnavg(Vu32.w,Vv32.w)\n" " C 
Intrinsic Prototype: HVX_Vector Q6_Vw_vnavg_VwVw(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vnavg_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgw)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.h=vnormamt(Vu32.h)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vnormamt_Vh(HVX_Vector Vu)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vnormamt_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnormamth)(Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.w=vnormamt(Vu32.w)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vnormamt_Vw(HVX_Vector Vu)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vnormamt_Vw(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnormamtw)(Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32=vnot(Vu32)\n" " C Intrinsic Prototype: HVX_Vector Q6_V_vnot_V(HVX_Vector Vu)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_V_vnot_V(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnot)(Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " 
Assembly Syntax: Vd32=vor(Vu32,Vv32)\n" " C Intrinsic Prototype: HVX_Vector Q6_V_vor_VV(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_V_vor_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vor)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.b=vpacke(Vu32.h,Vv32.h)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vb_vpacke_VhVh(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VP\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vb_vpacke_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackeb)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.h=vpacke(Vu32.w,Vv32.w)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vpacke_VwVw(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VP\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vpacke_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackeh)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.b=vpack(Vu32.h,Vv32.h):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vb_vpack_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VP\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vb_vpack_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackhb_sat)(Vu,Vv)\n" "#endif /* 
__HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.ub=vpack(Vu32.h,Vv32.h):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vub_vpack_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VP\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vub_vpack_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackhub_sat)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.b=vpacko(Vu32.h,Vv32.h)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vb_vpacko_VhVh(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VP\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vb_vpacko_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackob)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.h=vpacko(Vu32.w,Vv32.w)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vpacko_VwVw(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VP\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vpacko_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackoh)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.h=vpack(Vu32.w,Vv32.w):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vpack_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VP\n" " Execution Slots: SLOT0123\n" " 
========================================================================== */\n" "\n" "#define Q6_Vh_vpack_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackwh_sat)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.uh=vpack(Vu32.w,Vv32.w):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vuh_vpack_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VP\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vuh_vpack_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackwuh_sat)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.h=vpopcount(Vu32.h)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vpopcount_Vh(HVX_Vector Vu)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vpopcount_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpopcounth)(Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32=vrdelta(Vu32,Vv32)\n" " C Intrinsic Prototype: HVX_Vector Q6_V_vrdelta_VV(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VP\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_V_vrdelta_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrdelta)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.w=vrmpy(Vu32.ub,Rt32.b)\n" " 
C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpy_VubRb(HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vrmpy_VubRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybus)(Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vx32.w+=vrmpy(Vu32.ub,Rt32.b)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpyacc_VwVubRb(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vrmpyacc_VwVubRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybus_acc)(Vx,Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.w=vrmpy(Vuu32.ub,Rt32.b,#u1)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vrmpy_WubRbI(HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Ww_vrmpy_WubRbI(Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusi)(Vuu,Rt,Iu1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vxx32.w+=vrmpy(Vuu32.ub,Rt32.b,#u1)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vrmpyacc_WwWubRbI(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define 
Q6_Ww_vrmpyacc_WwWubRbI(Vxx,Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusi_acc)(Vxx,Vuu,Rt,Iu1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.w=vrmpy(Vu32.ub,Vv32.b)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpy_VubVb(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vrmpy_VubVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusv)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vx32.w+=vrmpy(Vu32.ub,Vv32.b)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpyacc_VwVubVb(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vrmpyacc_VwVubVb(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusv_acc)(Vx,Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.w=vrmpy(Vu32.b,Vv32.b)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpy_VbVb(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vrmpy_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybv)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vx32.w+=vrmpy(Vu32.b,Vv32.b)\n" " C Intrinsic Prototype: 
HVX_Vector Q6_Vw_vrmpyacc_VwVbVb(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vrmpyacc_VwVbVb(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybv_acc)(Vx,Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.uw=vrmpy(Vu32.ub,Rt32.ub)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vuw_vrmpy_VubRub(HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vuw_vrmpy_VubRub(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyub)(Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vx32.uw+=vrmpy(Vu32.ub,Rt32.ub)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vuw_vrmpyacc_VuwVubRub(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vuw_vrmpyacc_VuwVubRub(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyub_acc)(Vx,Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.uw=vrmpy(Vuu32.ub,Rt32.ub,#u1)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vrmpy_WubRubI(HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Wuw_vrmpy_WubRubI(Vuu,Rt,Iu1) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubi)(Vuu,Rt,Iu1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vxx32.uw+=vrmpy(Vuu32.ub,Rt32.ub,#u1)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vrmpyacc_WuwWubRubI(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Wuw_vrmpyacc_WuwWubRubI(Vxx,Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubi_acc)(Vxx,Vuu,Rt,Iu1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.uw=vrmpy(Vu32.ub,Vv32.ub)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vuw_vrmpy_VubVub(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vuw_vrmpy_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubv)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vx32.uw+=vrmpy(Vu32.ub,Vv32.ub)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vuw_vrmpyacc_VuwVubVub(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vuw_vrmpyacc_VuwVubVub(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubv_acc)(Vx,Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: 
Vd32=vror(Vu32,Rt32)\n" " C Intrinsic Prototype: HVX_Vector Q6_V_vror_VR(HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VP\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_V_vror_VR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vror)(Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.b=vround(Vu32.h,Vv32.h):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vb_vround_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vb_vround_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundhb)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.ub=vround(Vu32.h,Vv32.h):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vub_vround_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vub_vround_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundhub)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.h=vround(Vu32.w,Vv32.w):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vround_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vround_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundwh)(Vu,Vv)\n" 
"#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.uh=vround(Vu32.w,Vv32.w):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vuh_vround_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vuh_vround_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundwuh)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.uw=vrsad(Vuu32.ub,Rt32.ub,#u1)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vrsad_WubRubI(HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Wuw_vrsad_WubRubI(Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrsadubi)(Vuu,Rt,Iu1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vxx32.uw+=vrsad(Vuu32.ub,Rt32.ub,#u1)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vrsadacc_WuwWubRubI(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Wuw_vrsadacc_WuwWubRubI(Vxx,Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrsadubi_acc)(Vxx,Vuu,Rt,Iu1)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.ub=vsat(Vu32.h,Vv32.h)\n" " C Intrinsic Prototype: 
HVX_Vector Q6_Vub_vsat_VhVh(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vub_vsat_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsathub)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.h=vsat(Vu32.w,Vv32.w)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vsat_VwVw(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vsat_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatwh)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.h=vsxt(Vu32.b)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vsxt_Vb(HVX_Vector Vu)\n" " Instruction Type: CVI_VA_DV\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Wh_vsxt_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsb)(Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.w=vsxt(Vu32.h)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vsxt_Vh(HVX_Vector Vu)\n" " Instruction Type: CVI_VA_DV\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Ww_vsxt_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsh)(Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" 
" Assembly Syntax: Vd32.h=vshuffe(Vu32.h,Vv32.h)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vshuffe_VhVh(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vshuffe_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufeh)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.b=vshuff(Vu32.b)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vb_vshuff_Vb(HVX_Vector Vu)\n" " Instruction Type: CVI_VP\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vb_vshuff_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffb)(Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.b=vshuffe(Vu32.b,Vv32.b)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vb_vshuffe_VbVb(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vb_vshuffe_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffeb)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.h=vshuff(Vu32.h)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vshuff_Vh(HVX_Vector Vu)\n" " Instruction Type: CVI_VP\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vshuff_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffh)(Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if 
__HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.b=vshuffo(Vu32.b,Vv32.b)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vb_vshuffo_VbVb(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vb_vshuffo_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffob)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32=vshuff(Vu32,Vv32,Rt8)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_W_vshuff_VVR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)\n" " Instruction Type: CVI_VP_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_W_vshuff_VVR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffvdd)(Vu,Vv,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.b=vshuffoe(Vu32.b,Vv32.b)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wb_vshuffoe_VbVb(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA_DV\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Wb_vshuffoe_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoeb)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.h=vshuffoe(Vu32.h,Vv32.h)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vshuffoe_VhVh(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA_DV\n" " Execution Slots: SLOT0123\n" " 
========================================================================== */\n" "\n" "#define Q6_Wh_vshuffoe_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoeh)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.h=vshuffo(Vu32.h,Vv32.h)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vshuffo_VhVh(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vshuffo_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoh)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.b=vsub(Vu32.b,Vv32.b)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vb_vsub_VbVb(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vb_vsub_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubb)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.b=vsub(Vuu32.b,Vvv32.b)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wb_vsub_WbWb(HVX_VectorPair Vuu, HVX_VectorPair Vvv)\n" " Instruction Type: CVI_VA_DV\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Wb_vsub_WbWb(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubb_dv)(Vuu,Vvv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: if (!Qv4) 
Vx32.b-=Vu32.b\n" " C Intrinsic Prototype: HVX_Vector Q6_Vb_condnac_QnVbVb(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vb_condnac_QnVbVb(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: if (Qv4) Vx32.b-=Vu32.b\n" " C Intrinsic Prototype: HVX_Vector Q6_Vb_condnac_QVbVb(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vb_condnac_QVbVb(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.h=vsub(Vu32.h,Vv32.h)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vsub_VhVh(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vsub_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubh)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.h=vsub(Vuu32.h,Vvv32.h)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vsub_WhWh(HVX_VectorPair Vuu, HVX_VectorPair Vvv)\n" " Instruction Type: CVI_VA_DV\n" " Execution Slots: SLOT0123\n" " 
========================================================================== */\n" "\n" "#define Q6_Wh_vsub_WhWh(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubh_dv)(Vuu,Vvv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: if (!Qv4) Vx32.h-=Vu32.h\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_condnac_QnVhVh(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_condnac_QnVhVh(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: if (Qv4) Vx32.h-=Vu32.h\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_condnac_QVhVh(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_condnac_QVhVh(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.h=vsub(Vu32.h,Vv32.h):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vsub_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vsub_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhsat)(Vu,Vv)\n" "#endif /* 
__HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.h=vsub(Vuu32.h,Vvv32.h):sat\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vsub_WhWh_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)\n" " Instruction Type: CVI_VA_DV\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Wh_vsub_WhWh_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhsat_dv)(Vuu,Vvv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.w=vsub(Vu32.h,Vv32.h)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vsub_VhVh(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Ww_vsub_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhw)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.h=vsub(Vu32.ub,Vv32.ub)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vsub_VubVub(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Wh_vsub_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububh)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.ub=vsub(Vu32.ub,Vv32.ub):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vub_vsub_VubVub_sat(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: 
SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vub_vsub_VubVub_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububsat)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.ub=vsub(Vuu32.ub,Vvv32.ub):sat\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wub_vsub_WubWub_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)\n" " Instruction Type: CVI_VA_DV\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Wub_vsub_WubWub_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububsat_dv)(Vuu,Vvv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.uh=vsub(Vu32.uh,Vv32.uh):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vuh_vsub_VuhVuh_sat(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vuh_vsub_VuhVuh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhsat)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.uh=vsub(Vuu32.uh,Vvv32.uh):sat\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vsub_WuhWuh_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)\n" " Instruction Type: CVI_VA_DV\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Wuh_vsub_WuhWuh_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhsat_dv)(Vuu,Vvv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* 
==========================================================================\n" " Assembly Syntax: Vdd32.w=vsub(Vu32.uh,Vv32.uh)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vsub_VuhVuh(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Ww_vsub_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhw)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.w=vsub(Vu32.w,Vv32.w)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vsub_VwVw(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vsub_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubw)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.w=vsub(Vuu32.w,Vvv32.w)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vsub_WwWw(HVX_VectorPair Vuu, HVX_VectorPair Vvv)\n" " Instruction Type: CVI_VA_DV\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Ww_vsub_WwWw(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubw_dv)(Vuu,Vvv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: if (!Qv4) Vx32.w-=Vu32.w\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_condnac_QnVwVw(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " 
========================================================================== */\n" "\n" "#define Q6_Vw_condnac_QnVwVw(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: if (Qv4) Vx32.w-=Vu32.w\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_condnac_QVwVw(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_condnac_QVwVw(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.w=vsub(Vu32.w,Vv32.w):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vsub_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vsub_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwsat)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.w=vsub(Vuu32.w,Vvv32.w):sat\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vsub_WwWw_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)\n" " Instruction Type: CVI_VA_DV\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Ww_vsub_WwWw_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwsat_dv)(Vuu,Vvv)\n" "#endif /* 
__HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32=vswap(Qt4,Vu32,Vv32)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_W_vswap_QVV(HVX_VectorPred Qt, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA_DV\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_W_vswap_QVV(Qt,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vswap)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1),Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.h=vtmpy(Vuu32.b,Rt32.b)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vtmpy_WbRb(HVX_VectorPair Vuu, Word32 Rt)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Wh_vtmpy_WbRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyb)(Vuu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vxx32.h+=vtmpy(Vuu32.b,Rt32.b)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vtmpyacc_WhWbRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Wh_vtmpyacc_WhWbRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyb_acc)(Vxx,Vuu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.h=vtmpy(Vuu32.ub,Rt32.b)\n" " C Intrinsic Prototype: HVX_VectorPair 
Q6_Wh_vtmpy_WubRb(HVX_VectorPair Vuu, Word32 Rt)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Wh_vtmpy_WubRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpybus)(Vuu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vxx32.h+=vtmpy(Vuu32.ub,Rt32.b)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vtmpyacc_WhWubRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Wh_vtmpyacc_WhWubRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpybus_acc)(Vxx,Vuu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.w=vtmpy(Vuu32.h,Rt32.b)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vtmpy_WhRb(HVX_VectorPair Vuu, Word32 Rt)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Ww_vtmpy_WhRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyhb)(Vuu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vxx32.w+=vtmpy(Vuu32.h,Rt32.b)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vtmpyacc_WwWhRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Ww_vtmpyacc_WwWhRb(Vxx,Vuu,Rt) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyhb_acc)(Vxx,Vuu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.h=vunpack(Vu32.b)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vunpack_Vb(HVX_Vector Vu)\n" " Instruction Type: CVI_VP_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Wh_vunpack_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackb)(Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.w=vunpack(Vu32.h)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vunpack_Vh(HVX_Vector Vu)\n" " Instruction Type: CVI_VP_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Ww_vunpack_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackh)(Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vxx32.h|=vunpacko(Vu32.b)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vunpackoor_WhVb(HVX_VectorPair Vxx, HVX_Vector Vu)\n" " Instruction Type: CVI_VP_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Wh_vunpackoor_WhVb(Vxx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackob)(Vxx,Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vxx32.w|=vunpacko(Vu32.h)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vunpackoor_WwVh(HVX_VectorPair Vxx, HVX_Vector Vu)\n" " Instruction Type: 
CVI_VP_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Ww_vunpackoor_WwVh(Vxx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackoh)(Vxx,Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.uh=vunpack(Vu32.ub)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vunpack_Vub(HVX_Vector Vu)\n" " Instruction Type: CVI_VP_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Wuh_vunpack_Vub(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackub)(Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.uw=vunpack(Vu32.uh)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vunpack_Vuh(HVX_Vector Vu)\n" " Instruction Type: CVI_VP_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Wuw_vunpack_Vuh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackuh)(Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32=vxor(Vu32,Vv32)\n" " C Intrinsic Prototype: HVX_Vector Q6_V_vxor_VV(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_V_vxor_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vxor)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: 
Vdd32.uh=vzxt(Vu32.ub)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vzxt_Vub(HVX_Vector Vu)\n" " Instruction Type: CVI_VA_DV\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Wuh_vzxt_Vub(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vzb)(Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 60\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.uw=vzxt(Vu32.uh)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vzxt_Vuh(HVX_Vector Vu)\n" " Instruction Type: CVI_VA_DV\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Wuw_vzxt_Vuh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vzh)(Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 60 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.b=vsplat(Rt32)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vb_vsplat_R(Word32 Rt)\n" " Instruction Type: CVI_VX_LATE\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vb_vsplat_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplatb)(Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.h=vsplat(Rt32)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vsplat_R(Word32 Rt)\n" " Instruction Type: CVI_VX_LATE\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vsplat_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplath)(Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* 
==========================================================================\n" " Assembly Syntax: Qd4=vsetq2(Rt32)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vsetq2_R(Word32 Rt)\n" " Instruction Type: CVI_VP\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vsetq2_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_scalar2v2)(Rt)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Qd4.b=vshuffe(Qs4.h,Qt4.h)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Qb_vshuffe_QhQh(HVX_VectorPred Qs, HVX_VectorPred Qt)\n" " Instruction Type: CVI_VA_DV\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Qb_vshuffe_QhQh(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_shuffeqh)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Qd4.h=vshuffe(Qs4.w,Qt4.w)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Qh_vshuffe_QwQw(HVX_VectorPred Qs, HVX_VectorPred Qt)\n" " Instruction Type: CVI_VA_DV\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Qh_vshuffe_QwQw(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_shuffeqw)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if 
__HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.b=vadd(Vu32.b,Vv32.b):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vb_vadd_VbVb_sat(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vb_vadd_VbVb_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbsat)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.b=vadd(Vuu32.b,Vvv32.b):sat\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wb_vadd_WbWb_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)\n" " Instruction Type: CVI_VA_DV\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Wb_vadd_WbWb_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbsat_dv)(Vuu,Vvv)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.w=vadd(Vu32.w,Vv32.w,Qx4):carry\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vadd_VwVwQ_carry(HVX_Vector Vu, HVX_Vector Vv, HVX_VectorPred* Qx)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vadd_VwVwQ_carry(Vu,Vv,Qx) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddcarry)(Vu,Vv,Qx)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.h=vadd(vclb(Vu32.h),Vv32.h)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vadd_vclb_VhVh(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VS\n" " Execution Slots: 
SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vadd_vclb_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddclbh)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.w=vadd(vclb(Vu32.w),Vv32.w)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vadd_vclb_VwVw(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vadd_vclb_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddclbw)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vxx32.w+=vadd(Vu32.h,Vv32.h)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vaddacc_WwVhVh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Ww_vaddacc_WwVhVh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhw_acc)(Vxx,Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vxx32.h+=vadd(Vu32.ub,Vv32.ub)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vaddacc_WhVubVub(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Wh_vaddacc_WhVubVub(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubh_acc)(Vxx,Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* 
==========================================================================\n" " Assembly Syntax: Vd32.ub=vadd(Vu32.ub,Vv32.b):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vub_vadd_VubVb_sat(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vub_vadd_VubVb_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddububb_sat)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vxx32.w+=vadd(Vu32.uh,Vv32.uh)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vaddacc_WwVuhVuh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Ww_vaddacc_WwVuhVuh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhw_acc)(Vxx,Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.uw=vadd(Vu32.uw,Vv32.uw):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vuw_vadd_VuwVuw_sat(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vuw_vadd_VuwVuw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduwsat)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.uw=vadd(Vuu32.uw,Vvv32.uw):sat\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vadd_WuwWuw_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)\n" " Instruction Type: CVI_VA_DV\n" " Execution Slots: 
SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Wuw_vadd_WuwWuw_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduwsat_dv)(Vuu,Vvv)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32=vand(!Qu4,Rt32)\n" " C Intrinsic Prototype: HVX_Vector Q6_V_vand_QnR(HVX_VectorPred Qu, Word32 Rt)\n" " Instruction Type: CVI_VX_LATE\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_V_vand_QnR(Qu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandnqrt)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qu),-1),Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vx32|=vand(!Qu4,Rt32)\n" " C Intrinsic Prototype: HVX_Vector Q6_V_vandor_VQnR(HVX_Vector Vx, HVX_VectorPred Qu, Word32 Rt)\n" " Instruction Type: CVI_VX_LATE\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_V_vandor_VQnR(Vx,Qu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandnqrt_acc)(Vx,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qu),-1),Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32=vand(!Qv4,Vu32)\n" " C Intrinsic Prototype: HVX_Vector Q6_V_vand_QnV(HVX_VectorPred Qv, HVX_Vector Vu)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_V_vand_QnV(Qv,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvnqv)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vu)\n" 
"#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32=vand(Qv4,Vu32)\n" " C Intrinsic Prototype: HVX_Vector Q6_V_vand_QV(HVX_VectorPred Qv, HVX_Vector Vu)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_V_vand_QV(Qv,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvqv)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.b=vasr(Vu32.h,Vv32.h,Rt8):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vb_vasr_VhVhR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vb_vasr_VhVhR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhbsat)(Vu,Vv,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.uh=vasr(Vu32.uw,Vv32.uw,Rt8):rnd:sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_VuwVuwR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vuh_vasr_VuwVuwR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruwuhrndsat)(Vu,Vv,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.uh=vasr(Vu32.w,Vv32.w,Rt8):rnd:sat\n" " C Intrinsic Prototype: HVX_Vector 
Q6_Vuh_vasr_VwVwR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vuh_vasr_VwVwR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwuhrndsat)(Vu,Vv,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.ub=vlsr(Vu32.ub,Rt32)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vub_vlsr_VubR(HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vub_vlsr_VubR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrb)(Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.b=vlut32(Vu32.b,Vv32.b,Rt8):nomatch\n" " C Intrinsic Prototype: HVX_Vector Q6_Vb_vlut32_VbVbR_nomatch(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)\n" " Instruction Type: CVI_VP\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vb_vlut32_VbVbR_nomatch(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_nm)(Vu,Vv,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vx32.b|=vlut32(Vu32.b,Vv32.b,#u3)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vb_vlut32or_VbVbVbI(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3)\n" " Instruction Type: CVI_VP_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vb_vlut32or_VbVbVbI(Vx,Vu,Vv,Iu3) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_oracci)(Vx,Vu,Vv,Iu3)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.b=vlut32(Vu32.b,Vv32.b,#u3)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vb_vlut32_VbVbI(HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3)\n" " Instruction Type: CVI_VP\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vb_vlut32_VbVbI(Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvbi)(Vu,Vv,Iu3)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.h=vlut16(Vu32.b,Vv32.h,Rt8):nomatch\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vlut16_VbVhR_nomatch(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)\n" " Instruction Type: CVI_VP_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Wh_vlut16_VbVhR_nomatch(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_nm)(Vu,Vv,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vxx32.h|=vlut16(Vu32.b,Vv32.h,#u3)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vlut16or_WhVbVhI(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3)\n" " Instruction Type: CVI_VP_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Wh_vlut16or_WhVbVhI(Vxx,Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_oracci)(Vxx,Vu,Vv,Iu3)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* 
==========================================================================\n" " Assembly Syntax: Vdd32.h=vlut16(Vu32.b,Vv32.h,#u3)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vlut16_VbVhI(HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3)\n" " Instruction Type: CVI_VP_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Wh_vlut16_VbVhI(Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwhi)(Vu,Vv,Iu3)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.b=vmax(Vu32.b,Vv32.b)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vb_vmax_VbVb(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vb_vmax_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxb)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.b=vmin(Vu32.b,Vv32.b)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vb_vmin_VbVb(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vb_vmin_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminb)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.w=vmpa(Vuu32.uh,Rt32.b)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpa_WuhRb(HVX_VectorPair Vuu, Word32 Rt)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" 
"\n" "#define Q6_Ww_vmpa_WuhRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhb)(Vuu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vxx32.w+=vmpa(Vuu32.uh,Rt32.b)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpaacc_WwWuhRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Ww_vmpaacc_WwWuhRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhb_acc)(Vxx,Vuu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32=vmpye(Vu32.w,Vv32.uh)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_W_vmpye_VwVuh(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_W_vmpye_VwVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyewuh_64)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.w=vmpyi(Vu32.w,Rt32.ub)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyi_VwRub(HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vmpyi_VwRub(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwub)(Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vx32.w+=vmpyi(Vu32.w,Rt32.ub)\n" " C Intrinsic Prototype: 
HVX_Vector Q6_Vw_vmpyiacc_VwVwRub(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vmpyiacc_VwVwRub(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwub_acc)(Vx,Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vxx32+=vmpyo(Vu32.w,Vv32.h)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_W_vmpyoacc_WVwVh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_W_vmpyoacc_WVwVh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_64_acc)(Vxx,Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.ub=vround(Vu32.uh,Vv32.uh):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vub_vround_VuhVuh_sat(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vub_vround_VuhVuh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrounduhub)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.uh=vround(Vu32.uw,Vv32.uw):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vuh_vround_VuwVuw_sat(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vuh_vround_VuwVuw_sat(Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrounduwuh)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.uh=vsat(Vu32.uw,Vv32.uw)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vuh_vsat_VuwVuw(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vuh_vsat_VuwVuw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatuwuh)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.b=vsub(Vu32.b,Vv32.b):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vb_vsub_VbVb_sat(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vb_vsub_VbVb_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbsat)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.b=vsub(Vuu32.b,Vvv32.b):sat\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wb_vsub_WbWb_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)\n" " Instruction Type: CVI_VA_DV\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Wb_vsub_WbWb_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbsat_dv)(Vuu,Vvv)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.w=vsub(Vu32.w,Vv32.w,Qx4):carry\n" " C Intrinsic Prototype: HVX_Vector 
Q6_Vw_vsub_VwVwQ_carry(HVX_Vector Vu, HVX_Vector Vv, HVX_VectorPred* Qx)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vsub_VwVwQ_carry(Vu,Vv,Qx) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubcarry)(Vu,Vv,Qx)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.ub=vsub(Vu32.ub,Vv32.b):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vub_vsub_VubVb_sat(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vub_vsub_VubVb_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubububb_sat)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.uw=vsub(Vu32.uw,Vv32.uw):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vuw_vsub_VuwVuw_sat(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vuw_vsub_VuwVuw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuwsat)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 62\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.uw=vsub(Vuu32.uw,Vvv32.uw):sat\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vsub_WuwWuw_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)\n" " Instruction Type: CVI_VA_DV\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Wuw_vsub_WuwWuw_sat(Vuu,Vvv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuwsat_dv)(Vuu,Vvv)\n" "#endif /* __HEXAGON_ARCH___ >= 62 */\n" "\n" "#if __HVX_ARCH__ >= 65\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.b=vabs(Vu32.b)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vb_vabs_Vb(HVX_Vector Vu)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vb_vabs_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsb)(Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 65 */\n" "\n" "#if __HVX_ARCH__ >= 65\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.b=vabs(Vu32.b):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vb_vabs_Vb_sat(HVX_Vector Vu)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vb_vabs_Vb_sat(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsb_sat)(Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 65 */\n" "\n" "#if __HVX_ARCH__ >= 65\n" "/* ==========================================================================\n" " Assembly Syntax: Vx32.h+=vasl(Vu32.h,Rt32)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vaslacc_VhVhR(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vaslacc_VhVhR(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslh_acc)(Vx,Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 65 */\n" "\n" "#if __HVX_ARCH__ >= 65\n" "/* ==========================================================================\n" " Assembly Syntax: Vx32.h+=vasr(Vu32.h,Rt32)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vasracc_VhVhR(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VS\n" " Execution Slots: 
SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vasracc_VhVhR(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrh_acc)(Vx,Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 65 */\n" "\n" "#if __HVX_ARCH__ >= 65\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.ub=vasr(Vu32.uh,Vv32.uh,Rt8):rnd:sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_VuhVuhR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vub_vasr_VuhVuhR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruhubrndsat)(Vu,Vv,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 65 */\n" "\n" "#if __HVX_ARCH__ >= 65\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.ub=vasr(Vu32.uh,Vv32.uh,Rt8):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_VuhVuhR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vub_vasr_VuhVuhR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruhubsat)(Vu,Vv,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 65 */\n" "\n" "#if __HVX_ARCH__ >= 65\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.uh=vasr(Vu32.uw,Vv32.uw,Rt8):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_VuwVuwR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vuh_vasr_VuwVuwR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruwuhsat)(Vu,Vv,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 65 
*/\n" "\n" "#if __HVX_ARCH__ >= 65\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.b=vavg(Vu32.b,Vv32.b)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vb_vavg_VbVb(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vb_vavg_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgb)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 65 */\n" "\n" "#if __HVX_ARCH__ >= 65\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.b=vavg(Vu32.b,Vv32.b):rnd\n" " C Intrinsic Prototype: HVX_Vector Q6_Vb_vavg_VbVb_rnd(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vb_vavg_VbVb_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgbrnd)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 65 */\n" "\n" "#if __HVX_ARCH__ >= 65\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.uw=vavg(Vu32.uw,Vv32.uw)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vuw_vavg_VuwVuw(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vuw_vavg_VuwVuw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguw)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 65 */\n" "\n" "#if __HVX_ARCH__ >= 65\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.uw=vavg(Vu32.uw,Vv32.uw):rnd\n" " C Intrinsic Prototype: HVX_Vector Q6_Vuw_vavg_VuwVuw_rnd(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " 
========================================================================== */\n" "\n" "#define Q6_Vuw_vavg_VuwVuw_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguwrnd)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 65 */\n" "\n" "#if __HVX_ARCH__ >= 65\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32=#0\n" " C Intrinsic Prototype: HVX_VectorPair Q6_W_vzero()\n" " Instruction Type: MAPPING\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_W_vzero() __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdd0)()\n" "#endif /* __HEXAGON_ARCH___ >= 65 */\n" "\n" "#if __HVX_ARCH__ >= 65\n" "/* ==========================================================================\n" " Assembly Syntax: vtmp.h=vgather(Rt32,Mu2,Vv32.h).h\n" " C Intrinsic Prototype: void Q6_vgather_ARMVh(HVX_Vector* Rs, Word32 Rt, Word32 Mu, HVX_Vector Vv)\n" " Instruction Type: CVI_GATHER\n" " Execution Slots: SLOT01\n" " ========================================================================== */\n" "\n" "#define Q6_vgather_ARMVh(Rs,Rt,Mu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermh)(Rs,Rt,Mu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 65 */\n" "\n" "#if __HVX_ARCH__ >= 65\n" "/* ==========================================================================\n" " Assembly Syntax: if (Qs4) vtmp.h=vgather(Rt32,Mu2,Vv32.h).h\n" " C Intrinsic Prototype: void Q6_vgather_AQRMVh(HVX_Vector* Rs, HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_Vector Vv)\n" " Instruction Type: CVI_GATHER\n" " Execution Slots: SLOT01\n" " ========================================================================== */\n" "\n" "#define Q6_vgather_AQRMVh(Rs,Qs,Rt,Mu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhq)(Rs,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 65 */\n" "\n" "#if __HVX_ARCH__ >= 65\n" "/* 
==========================================================================\n" " Assembly Syntax: vtmp.h=vgather(Rt32,Mu2,Vvv32.w).h\n" " C Intrinsic Prototype: void Q6_vgather_ARMWw(HVX_Vector* Rs, Word32 Rt, Word32 Mu, HVX_VectorPair Vvv)\n" " Instruction Type: CVI_GATHER_DV\n" " Execution Slots: SLOT01\n" " ========================================================================== */\n" "\n" "#define Q6_vgather_ARMWw(Rs,Rt,Mu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhw)(Rs,Rt,Mu,Vvv)\n" "#endif /* __HEXAGON_ARCH___ >= 65 */\n" "\n" "#if __HVX_ARCH__ >= 65\n" "/* ==========================================================================\n" " Assembly Syntax: if (Qs4) vtmp.h=vgather(Rt32,Mu2,Vvv32.w).h\n" " C Intrinsic Prototype: void Q6_vgather_AQRMWw(HVX_Vector* Rs, HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_VectorPair Vvv)\n" " Instruction Type: CVI_GATHER_DV\n" " Execution Slots: SLOT01\n" " ========================================================================== */\n" "\n" "#define Q6_vgather_AQRMWw(Rs,Qs,Rt,Mu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhwq)(Rs,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vvv)\n" "#endif /* __HEXAGON_ARCH___ >= 65 */\n" "\n" "#if __HVX_ARCH__ >= 65\n" "/* ==========================================================================\n" " Assembly Syntax: vtmp.w=vgather(Rt32,Mu2,Vv32.w).w\n" " C Intrinsic Prototype: void Q6_vgather_ARMVw(HVX_Vector* Rs, Word32 Rt, Word32 Mu, HVX_Vector Vv)\n" " Instruction Type: CVI_GATHER\n" " Execution Slots: SLOT01\n" " ========================================================================== */\n" "\n" "#define Q6_vgather_ARMVw(Rs,Rt,Mu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermw)(Rs,Rt,Mu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 65 */\n" "\n" "#if __HVX_ARCH__ >= 65\n" "/* ==========================================================================\n" " Assembly Syntax: if (Qs4) vtmp.w=vgather(Rt32,Mu2,Vv32.w).w\n" " C Intrinsic 
Prototype: void Q6_vgather_AQRMVw(HVX_Vector* Rs, HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_Vector Vv)\n" " Instruction Type: CVI_GATHER\n" " Execution Slots: SLOT01\n" " ========================================================================== */\n" "\n" "#define Q6_vgather_AQRMVw(Rs,Qs,Rt,Mu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermwq)(Rs,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 65 */\n" "\n" "#if __HVX_ARCH__ >= 65\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.h=vlut4(Vu32.uh,Rtt32.h)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vlut4_VuhPh(HVX_Vector Vu, Word64 Rtt)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT2\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vlut4_VuhPh(Vu,Rtt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlut4)(Vu,Rtt)\n" "#endif /* __HEXAGON_ARCH___ >= 65 */\n" "\n" "#if __HVX_ARCH__ >= 65\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.h=vmpa(Vuu32.ub,Rt32.ub)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpa_WubRub(HVX_VectorPair Vuu, Word32 Rt)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Wh_vmpa_WubRub(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuu)(Vuu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 65 */\n" "\n" "#if __HVX_ARCH__ >= 65\n" "/* ==========================================================================\n" " Assembly Syntax: Vxx32.h+=vmpa(Vuu32.ub,Rt32.ub)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpaacc_WhWubRub(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" 
"#define Q6_Wh_vmpaacc_WhWubRub(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuu_acc)(Vxx,Vuu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 65 */\n" "\n" "#if __HVX_ARCH__ >= 65\n" "/* ==========================================================================\n" " Assembly Syntax: Vx32.h=vmpa(Vx32.h,Vu32.h,Rtt32.h):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpa_VhVhVhPh_sat(HVX_Vector Vx, HVX_Vector Vu, Word64 Rtt)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT2\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vmpa_VhVhVhPh_sat(Vx,Vu,Rtt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahhsat)(Vx,Vu,Rtt)\n" "#endif /* __HEXAGON_ARCH___ >= 65 */\n" "\n" "#if __HVX_ARCH__ >= 65\n" "/* ==========================================================================\n" " Assembly Syntax: Vx32.h=vmpa(Vx32.h,Vu32.uh,Rtt32.uh):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpa_VhVhVuhPuh_sat(HVX_Vector Vx, HVX_Vector Vu, Word64 Rtt)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT2\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vmpa_VhVhVuhPuh_sat(Vx,Vu,Rtt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhuhsat)(Vx,Vu,Rtt)\n" "#endif /* __HEXAGON_ARCH___ >= 65 */\n" "\n" "#if __HVX_ARCH__ >= 65\n" "/* ==========================================================================\n" " Assembly Syntax: Vx32.h=vmps(Vx32.h,Vu32.uh,Rtt32.uh):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vmps_VhVhVuhPuh_sat(HVX_Vector Vx, HVX_Vector Vu, Word64 Rtt)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT2\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vmps_VhVhVuhPuh_sat(Vx,Vu,Rtt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpsuhuhsat)(Vx,Vu,Rtt)\n" "#endif /* __HEXAGON_ARCH___ >= 65 */\n" "\n" "#if __HVX_ARCH__ >= 65\n" "/* 
==========================================================================\n" " Assembly Syntax: Vxx32.w+=vmpy(Vu32.h,Rt32.h)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpyacc_WwVhRh(HVX_VectorPair Vxx, HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Ww_vmpyacc_WwVhRh(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyh_acc)(Vxx,Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 65 */\n" "\n" "#if __HVX_ARCH__ >= 65\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.uw=vmpye(Vu32.uh,Rt32.uh)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vuw_vmpye_VuhRuh(HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vuw_vmpye_VuhRuh(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhe)(Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 65 */\n" "\n" "#if __HVX_ARCH__ >= 65\n" "/* ==========================================================================\n" " Assembly Syntax: Vx32.uw+=vmpye(Vu32.uh,Rt32.uh)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vuw_vmpyeacc_VuwVuhRuh(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vuw_vmpyeacc_VuwVuhRuh(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhe_acc)(Vx,Vu,Rt)\n" "#endif /* __HEXAGON_ARCH___ >= 65 */\n" "\n" "#if __HVX_ARCH__ >= 65\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.b=vnavg(Vu32.b,Vv32.b)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vb_vnavg_VbVb(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " 
========================================================================== */\n" "\n" "#define Q6_Vb_vnavg_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgb)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 65 */\n" "\n" "#if __HVX_ARCH__ >= 65\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.b=prefixsum(Qv4)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vb_prefixsum_Q(HVX_VectorPred Qv)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vb_prefixsum_Q(Qv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqb)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1))\n" "#endif /* __HEXAGON_ARCH___ >= 65 */\n" "\n" "#if __HVX_ARCH__ >= 65\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.h=prefixsum(Qv4)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_prefixsum_Q(HVX_VectorPred Qv)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_prefixsum_Q(Qv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqh)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1))\n" "#endif /* __HEXAGON_ARCH___ >= 65 */\n" "\n" "#if __HVX_ARCH__ >= 65\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.w=prefixsum(Qv4)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_prefixsum_Q(HVX_VectorPred Qv)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_prefixsum_Q(Qv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqw)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1))\n" "#endif /* __HEXAGON_ARCH___ >= 65 */\n" "\n" "#if __HVX_ARCH__ >= 65\n" "/* 
==========================================================================\n" " Assembly Syntax: vscatter(Rt32,Mu2,Vv32.h).h=Vw32\n" " C Intrinsic Prototype: void Q6_vscatter_RMVhV(Word32 Rt, Word32 Mu, HVX_Vector Vv, HVX_Vector Vw)\n" " Instruction Type: CVI_SCATTER\n" " Execution Slots: SLOT0\n" " ========================================================================== */\n" "\n" "#define Q6_vscatter_RMVhV(Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermh)(Rt,Mu,Vv,Vw)\n" "#endif /* __HEXAGON_ARCH___ >= 65 */\n" "\n" "#if __HVX_ARCH__ >= 65\n" "/* ==========================================================================\n" " Assembly Syntax: vscatter(Rt32,Mu2,Vv32.h).h+=Vw32\n" " C Intrinsic Prototype: void Q6_vscatteracc_RMVhV(Word32 Rt, Word32 Mu, HVX_Vector Vv, HVX_Vector Vw)\n" " Instruction Type: CVI_SCATTER\n" " Execution Slots: SLOT0\n" " ========================================================================== */\n" "\n" "#define Q6_vscatteracc_RMVhV(Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermh_add)(Rt,Mu,Vv,Vw)\n" "#endif /* __HEXAGON_ARCH___ >= 65 */\n" "\n" "#if __HVX_ARCH__ >= 65\n" "/* ==========================================================================\n" " Assembly Syntax: if (Qs4) vscatter(Rt32,Mu2,Vv32.h).h=Vw32\n" " C Intrinsic Prototype: void Q6_vscatter_QRMVhV(HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_Vector Vv, HVX_Vector Vw)\n" " Instruction Type: CVI_SCATTER\n" " Execution Slots: SLOT0\n" " ========================================================================== */\n" "\n" "#define Q6_vscatter_QRMVhV(Qs,Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vv,Vw)\n" "#endif /* __HEXAGON_ARCH___ >= 65 */\n" "\n" "#if __HVX_ARCH__ >= 65\n" "/* ==========================================================================\n" " Assembly Syntax: vscatter(Rt32,Mu2,Vvv32.w).h=Vw32\n" " C Intrinsic Prototype: void 
Q6_vscatter_RMWwV(Word32 Rt, Word32 Mu, HVX_VectorPair Vvv, HVX_Vector Vw)\n" " Instruction Type: CVI_SCATTER_DV\n" " Execution Slots: SLOT0\n" " ========================================================================== */\n" "\n" "#define Q6_vscatter_RMWwV(Rt,Mu,Vvv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhw)(Rt,Mu,Vvv,Vw)\n" "#endif /* __HEXAGON_ARCH___ >= 65 */\n" "\n" "#if __HVX_ARCH__ >= 65\n" "/* ==========================================================================\n" " Assembly Syntax: vscatter(Rt32,Mu2,Vvv32.w).h+=Vw32\n" " C Intrinsic Prototype: void Q6_vscatteracc_RMWwV(Word32 Rt, Word32 Mu, HVX_VectorPair Vvv, HVX_Vector Vw)\n" " Instruction Type: CVI_SCATTER_DV\n" " Execution Slots: SLOT0\n" " ========================================================================== */\n" "\n" "#define Q6_vscatteracc_RMWwV(Rt,Mu,Vvv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhw_add)(Rt,Mu,Vvv,Vw)\n" "#endif /* __HEXAGON_ARCH___ >= 65 */\n" "\n" "#if __HVX_ARCH__ >= 65\n" "/* ==========================================================================\n" " Assembly Syntax: if (Qs4) vscatter(Rt32,Mu2,Vvv32.w).h=Vw32\n" " C Intrinsic Prototype: void Q6_vscatter_QRMWwV(HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_VectorPair Vvv, HVX_Vector Vw)\n" " Instruction Type: CVI_SCATTER_DV\n" " Execution Slots: SLOT0\n" " ========================================================================== */\n" "\n" "#define Q6_vscatter_QRMWwV(Qs,Rt,Mu,Vvv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhwq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vvv,Vw)\n" "#endif /* __HEXAGON_ARCH___ >= 65 */\n" "\n" "#if __HVX_ARCH__ >= 65\n" "/* ==========================================================================\n" " Assembly Syntax: vscatter(Rt32,Mu2,Vv32.w).w=Vw32\n" " C Intrinsic Prototype: void Q6_vscatter_RMVwV(Word32 Rt, Word32 Mu, HVX_Vector Vv, HVX_Vector Vw)\n" " Instruction Type: CVI_SCATTER\n" " Execution Slots: 
SLOT0\n" " ========================================================================== */\n" "\n" "#define Q6_vscatter_RMVwV(Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermw)(Rt,Mu,Vv,Vw)\n" "#endif /* __HEXAGON_ARCH___ >= 65 */\n" "\n" "#if __HVX_ARCH__ >= 65\n" "/* ==========================================================================\n" " Assembly Syntax: vscatter(Rt32,Mu2,Vv32.w).w+=Vw32\n" " C Intrinsic Prototype: void Q6_vscatteracc_RMVwV(Word32 Rt, Word32 Mu, HVX_Vector Vv, HVX_Vector Vw)\n" " Instruction Type: CVI_SCATTER\n" " Execution Slots: SLOT0\n" " ========================================================================== */\n" "\n" "#define Q6_vscatteracc_RMVwV(Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermw_add)(Rt,Mu,Vv,Vw)\n" "#endif /* __HEXAGON_ARCH___ >= 65 */\n" "\n" "#if __HVX_ARCH__ >= 65\n" "/* ==========================================================================\n" " Assembly Syntax: if (Qs4) vscatter(Rt32,Mu2,Vv32.w).w=Vw32\n" " C Intrinsic Prototype: void Q6_vscatter_QRMVwV(HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_Vector Vv, HVX_Vector Vw)\n" " Instruction Type: CVI_SCATTER\n" " Execution Slots: SLOT0\n" " ========================================================================== */\n" "\n" "#define Q6_vscatter_QRMVwV(Qs,Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermwq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vv,Vw)\n" "#endif /* __HEXAGON_ARCH___ >= 65 */\n" "\n" "#if __HVX_ARCH__ >= 66\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.w=vadd(Vu32.w,Vv32.w,Qs4):carry:sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vadd_VwVwQ_carry_sat(HVX_Vector Vu, HVX_Vector Vv, HVX_VectorPred Qs)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vadd_VwVwQ_carry_sat(Vu,Vv,Qs) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddcarrysat)(Vu,Vv,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1))\n" "#endif /* __HEXAGON_ARCH___ >= 66 */\n" "\n" "#if __HVX_ARCH__ >= 66\n" "/* ==========================================================================\n" " Assembly Syntax: Vxx32.w=vasrinto(Vu32.w,Vv32.w)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vasrinto_WwVwVw(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VP_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Ww_vasrinto_WwVwVw(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasr_into)(Vxx,Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 66 */\n" "\n" "#if __HVX_ARCH__ >= 66\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.uw=vrotr(Vu32.uw,Vv32.uw)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vuw_vrotr_VuwVuw(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vuw_vrotr_VuwVuw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrotr)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 66 */\n" "\n" "#if __HVX_ARCH__ >= 66\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.w=vsatdw(Vu32.w,Vv32.w)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vsatdw_VwVw(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vsatdw_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatdw)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 66 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: 
Vdd32.w=v6mpy(Vuu32.ub,Vvv32.b,#u2):h\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Ww_v6mpy_WubWbI_h(HVX_VectorPair Vuu, HVX_VectorPair Vvv, Word32 Iu2)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Ww_v6mpy_WubWbI_h(Vuu,Vvv,Iu2) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyhubs10)(Vuu,Vvv,Iu2)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vxx32.w+=v6mpy(Vuu32.ub,Vvv32.b,#u2):h\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Ww_v6mpyacc_WwWubWbI_h(HVX_VectorPair Vxx, HVX_VectorPair Vuu, HVX_VectorPair Vvv, Word32 Iu2)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Ww_v6mpyacc_WwWubWbI_h(Vxx,Vuu,Vvv,Iu2) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyhubs10_vxx)(Vxx,Vuu,Vvv,Iu2)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.w=v6mpy(Vuu32.ub,Vvv32.b,#u2):v\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Ww_v6mpy_WubWbI_v(HVX_VectorPair Vuu, HVX_VectorPair Vvv, Word32 Iu2)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Ww_v6mpy_WubWbI_v(Vuu,Vvv,Iu2) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyvubs10)(Vuu,Vvv,Iu2)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vxx32.w+=v6mpy(Vuu32.ub,Vvv32.b,#u2):v\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Ww_v6mpyacc_WwWubWbI_v(HVX_VectorPair Vxx, HVX_VectorPair Vuu, HVX_VectorPair Vvv, 
Word32 Iu2)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Ww_v6mpyacc_WwWubWbI_v(Vxx,Vuu,Vvv,Iu2) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyvubs10_vxx)(Vxx,Vuu,Vvv,Iu2)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.hf=vabs(Vu32.hf)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vhf_vabs_Vhf(HVX_Vector Vu)\n" " Instruction Type: CVI_VX_LATE\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vhf_vabs_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabs_hf)(Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.sf=vabs(Vu32.sf)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vsf_vabs_Vsf(HVX_Vector Vu)\n" " Instruction Type: CVI_VX_LATE\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vsf_vabs_Vsf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabs_sf)(Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.qf16=vadd(Vu32.hf,Vv32.hf)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vadd_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vqf16_vadd_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_hf)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* 
==========================================================================\n" " Assembly Syntax: Vd32.hf=vadd(Vu32.hf,Vv32.hf)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vhf_vadd_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vhf_vadd_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_hf_hf)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.qf16=vadd(Vu32.qf16,Vv32.qf16)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vadd_Vqf16Vqf16(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vqf16_vadd_Vqf16Vqf16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_qf16)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.qf16=vadd(Vu32.qf16,Vv32.hf)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vadd_Vqf16Vhf(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vqf16_vadd_Vqf16Vhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_qf16_mix)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.qf32=vadd(Vu32.qf32,Vv32.qf32)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vadd_Vqf32Vqf32(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " 
========================================================================== */\n" "\n" "#define Q6_Vqf32_vadd_Vqf32Vqf32(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_qf32)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.qf32=vadd(Vu32.qf32,Vv32.sf)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vadd_Vqf32Vsf(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vqf32_vadd_Vqf32Vsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_qf32_mix)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.qf32=vadd(Vu32.sf,Vv32.sf)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vadd_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vqf32_vadd_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_sf)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.sf=vadd(Vu32.hf,Vv32.hf)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vadd_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Wsf_vadd_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_sf_hf)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* 
==========================================================================\n" " Assembly Syntax: Vd32.sf=vadd(Vu32.sf,Vv32.sf)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vsf_vadd_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vsf_vadd_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_sf_sf)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.w=vfmv(Vu32.w)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vw_vfmv_Vw(HVX_Vector Vu)\n" " Instruction Type: CVI_VX_LATE\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vw_vfmv_Vw(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassign_fp)(Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.hf=Vu32.qf16\n" " C Intrinsic Prototype: HVX_Vector Q6_Vhf_equals_Vqf16(HVX_Vector Vu)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vhf_equals_Vqf16(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_hf_qf16)(Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.hf=Vuu32.qf32\n" " C Intrinsic Prototype: HVX_Vector Q6_Vhf_equals_Wqf32(HVX_VectorPair Vuu)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vhf_equals_Wqf32(Vuu) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_hf_qf32)(Vuu)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.sf=Vu32.qf32\n" " C Intrinsic Prototype: HVX_Vector Q6_Vsf_equals_Vqf32(HVX_Vector Vu)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vsf_equals_Vqf32(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_sf_qf32)(Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.b=vcvt(Vu32.hf,Vv32.hf)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vb_vcvt_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vb_vcvt_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_b_hf)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.h=vcvt(Vu32.hf)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vh_vcvt_Vhf(HVX_Vector Vu)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vh_vcvt_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_h_hf)(Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.hf=vcvt(Vu32.b)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Whf_vcvt_Vb(HVX_Vector Vu)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " 
========================================================================== */\n" "\n" "#define Q6_Whf_vcvt_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_b)(Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.hf=vcvt(Vu32.h)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vhf_vcvt_Vh(HVX_Vector Vu)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vhf_vcvt_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_h)(Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.hf=vcvt(Vu32.sf,Vv32.sf)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vhf_vcvt_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vhf_vcvt_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_sf)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.hf=vcvt(Vu32.ub)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Whf_vcvt_Vub(HVX_Vector Vu)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Whf_vcvt_Vub(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_ub)(Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.hf=vcvt(Vu32.uh)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vhf_vcvt_Vuh(HVX_Vector 
Vu)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vhf_vcvt_Vuh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_uh)(Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.sf=vcvt(Vu32.hf)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vcvt_Vhf(HVX_Vector Vu)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Wsf_vcvt_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_sf_hf)(Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.ub=vcvt(Vu32.hf,Vv32.hf)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vub_vcvt_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vub_vcvt_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_ub_hf)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.uh=vcvt(Vu32.hf)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vuh_vcvt_Vhf(HVX_Vector Vu)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vuh_vcvt_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_uh_hf)(Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: 
Vd32.sf=vdmpy(Vu32.hf,Vv32.hf)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vsf_vdmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vsf_vdmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpy_sf_hf)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vx32.sf+=vdmpy(Vu32.hf,Vv32.hf)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vsf_vdmpyacc_VsfVhfVhf(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vsf_vdmpyacc_VsfVhfVhf(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpy_sf_hf_acc)(Vx,Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.hf=vfmax(Vu32.hf,Vv32.hf)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vhf_vfmax_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_LATE\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vhf_vfmax_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfmax_hf)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.sf=vfmax(Vu32.sf,Vv32.sf)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vsf_vfmax_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_LATE\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vsf_vfmax_VsfVsf(Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfmax_sf)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.hf=vfmin(Vu32.hf,Vv32.hf)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vhf_vfmin_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_LATE\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vhf_vfmin_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfmin_hf)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.sf=vfmin(Vu32.sf,Vv32.sf)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vsf_vfmin_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_LATE\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vsf_vfmin_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfmin_sf)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.hf=vfneg(Vu32.hf)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vhf_vfneg_Vhf(HVX_Vector Vu)\n" " Instruction Type: CVI_VX_LATE\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vhf_vfneg_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfneg_hf)(Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.sf=vfneg(Vu32.sf)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vsf_vfneg_Vsf(HVX_Vector Vu)\n" " Instruction Type: CVI_VX_LATE\n" " Execution Slots: 
SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vsf_vfneg_Vsf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfneg_sf)(Vu)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Qd4=vcmp.gt(Vu32.hf,Vv32.hf)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vcmp_gt_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgthf)(Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Qx4&=vcmp.gt(Vu32.hf,Vv32.hf)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVhfVhf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vcmp_gtand_QVhfVhf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgthf_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Qx4|=vcmp.gt(Vu32.hf,Vv32.hf)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVhfVhf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define 
Q6_Q_vcmp_gtor_QVhfVhf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgthf_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Qx4^=vcmp.gt(Vu32.hf,Vv32.hf)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVhfVhf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vcmp_gtxacc_QVhfVhf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgthf_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Qd4=vcmp.gt(Vu32.sf,Vv32.sf)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vcmp_gt_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtsf)(Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Qx4&=vcmp.gt(Vu32.sf,Vv32.sf)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVsfVsf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define 
Q6_Q_vcmp_gtand_QVsfVsf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtsf_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Qx4|=vcmp.gt(Vu32.sf,Vv32.sf)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVsfVsf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vcmp_gtor_QVsfVsf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtsf_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Qx4^=vcmp.gt(Vu32.sf,Vv32.sf)\n" " C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVsfVsf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Q_vcmp_gtxacc_QVsfVsf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtsf_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.hf=vmax(Vu32.hf,Vv32.hf)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vhf_vmax_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " 
========================================================================== */\n" "\n" "#define Q6_Vhf_vmax_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmax_hf)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.sf=vmax(Vu32.sf,Vv32.sf)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vsf_vmax_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vsf_vmax_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmax_sf)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.hf=vmin(Vu32.hf,Vv32.hf)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vhf_vmin_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vhf_vmin_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmin_hf)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.sf=vmin(Vu32.sf,Vv32.sf)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vsf_vmin_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VA\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vsf_vmin_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmin_sf)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: 
Vd32.hf=vmpy(Vu32.hf,Vv32.hf)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vhf_vmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vhf_vmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_hf_hf)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vx32.hf+=vmpy(Vu32.hf,Vv32.hf)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vhf_vmpyacc_VhfVhfVhf(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vhf_vmpyacc_VhfVhfVhf(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_hf_hf_acc)(Vx,Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.qf16=vmpy(Vu32.qf16,Vv32.qf16)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vmpy_Vqf16Vqf16(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vqf16_vmpy_Vqf16Vqf16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf16)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.qf16=vmpy(Vu32.hf,Vv32.hf)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vqf16_vmpy_VhfVhf(Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf16_hf)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.qf16=vmpy(Vu32.qf16,Vv32.hf)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vmpy_Vqf16Vhf(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vqf16_vmpy_Vqf16Vhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf16_mix_hf)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.qf32=vmpy(Vu32.qf32,Vv32.qf32)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vmpy_Vqf32Vqf32(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vqf32_vmpy_Vqf32Vqf32(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.qf32=vmpy(Vu32.hf,Vv32.hf)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wqf32_vmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Wqf32_vmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32_hf)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.qf32=vmpy(Vu32.qf16,Vv32.hf)\n" " C Intrinsic Prototype: 
HVX_VectorPair Q6_Wqf32_vmpy_Vqf16Vhf(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Wqf32_vmpy_Vqf16Vhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32_mix_hf)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.qf32=vmpy(Vu32.qf16,Vv32.qf16)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wqf32_vmpy_Vqf16Vqf16(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Wqf32_vmpy_Vqf16Vqf16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32_qf16)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.qf32=vmpy(Vu32.sf,Vv32.sf)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vmpy_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vqf32_vmpy_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32_sf)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.sf=vmpy(Vu32.hf,Vv32.hf)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Wsf_vmpy_VhfVhf(Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_sf_hf)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vxx32.sf+=vmpy(Vu32.hf,Vv32.hf)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vmpyacc_WsfVhfVhf(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Wsf_vmpyacc_WsfVhfVhf(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_sf_hf_acc)(Vxx,Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.sf=vmpy(Vu32.sf,Vv32.sf)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vsf_vmpy_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vsf_vmpy_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_sf_sf)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.qf16=vsub(Vu32.hf,Vv32.hf)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vsub_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vqf16_vsub_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_hf)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.hf=vsub(Vu32.hf,Vv32.hf)\n" " C Intrinsic Prototype: HVX_Vector 
Q6_Vhf_vsub_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vhf_vsub_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_hf_hf)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.qf16=vsub(Vu32.qf16,Vv32.qf16)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vsub_Vqf16Vqf16(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vqf16_vsub_Vqf16Vqf16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_qf16)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.qf16=vsub(Vu32.qf16,Vv32.hf)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vsub_Vqf16Vhf(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vqf16_vsub_Vqf16Vhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_qf16_mix)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.qf32=vsub(Vu32.qf32,Vv32.qf32)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vsub_Vqf32Vqf32(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vqf32_vsub_Vqf32Vqf32(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_qf32)(Vu,Vv)\n" "#endif /* 
__HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.qf32=vsub(Vu32.qf32,Vv32.sf)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vsub_Vqf32Vsf(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vqf32_vsub_Vqf32Vsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_qf32_mix)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.qf32=vsub(Vu32.sf,Vv32.sf)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vsub_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vqf32_vsub_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_sf)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vdd32.sf=vsub(Vu32.hf,Vv32.hf)\n" " C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vsub_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX_DV\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Wsf_vsub_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_sf_hf)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 68\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.sf=vsub(Vu32.sf,Vv32.sf)\n" " C Intrinsic Prototype: HVX_Vector Q6_Vsf_vsub_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" 
" ========================================================================== */\n" "\n" "#define Q6_Vsf_vsub_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_sf_sf)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 68 */\n" "\n" "#if __HVX_ARCH__ >= 69\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.ub=vasr(Vuu32.uh,Vv32.ub):rnd:sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_WuhVub_rnd_sat(HVX_VectorPair Vuu, HVX_Vector Vv)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vub_vasr_WuhVub_rnd_sat(Vuu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrvuhubrndsat)(Vuu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 69 */\n" "\n" "#if __HVX_ARCH__ >= 69\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.ub=vasr(Vuu32.uh,Vv32.ub):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_WuhVub_sat(HVX_VectorPair Vuu, HVX_Vector Vv)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vub_vasr_WuhVub_sat(Vuu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrvuhubsat)(Vuu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 69 */\n" "\n" "#if __HVX_ARCH__ >= 69\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.uh=vasr(Vuu32.w,Vv32.uh):rnd:sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_WwVuh_rnd_sat(HVX_VectorPair Vuu, HVX_Vector Vv)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vuh_vasr_WwVuh_rnd_sat(Vuu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrvwuhrndsat)(Vuu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 69 */\n" "\n" "#if __HVX_ARCH__ >= 69\n" "/* 
==========================================================================\n" " Assembly Syntax: Vd32.uh=vasr(Vuu32.w,Vv32.uh):sat\n" " C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_WwVuh_sat(HVX_VectorPair Vuu, HVX_Vector Vv)\n" " Instruction Type: CVI_VS\n" " Execution Slots: SLOT0123\n" " ========================================================================== */\n" "\n" "#define Q6_Vuh_vasr_WwVuh_sat(Vuu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrvwuhsat)(Vuu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 69 */\n" "\n" "#if __HVX_ARCH__ >= 69\n" "/* ==========================================================================\n" " Assembly Syntax: Vd32.uh=vmpy(Vu32.uh,Vv32.uh):>>16\n" " C Intrinsic Prototype: HVX_Vector Q6_Vuh_vmpy_VuhVuh_rs16(HVX_Vector Vu, HVX_Vector Vv)\n" " Instruction Type: CVI_VX\n" " Execution Slots: SLOT23\n" " ========================================================================== */\n" "\n" "#define Q6_Vuh_vmpy_VuhVuh_rs16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhvs)(Vu,Vv)\n" "#endif /* __HEXAGON_ARCH___ >= 69 */\n" "\n" "#endif /* __HVX__ */\n" "\n" "#endif\n" "" } , { "/builtins/ia32intrin.h" , "/* ===-------- ia32intrin.h ---------------------------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __X86INTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __IA32INTRIN_H\n" "#define __IA32INTRIN_H\n" "\n" "/* Define the default attributes for the functions in this file. 
*/\n" "#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))\n" "#define __DEFAULT_FN_ATTRS_CRC32 __attribute__((__always_inline__, __nodebug__, __target__(\"crc32\")))\n" "\n" "#if defined(__cplusplus) && (__cplusplus >= 201103L)\n" "#define __DEFAULT_FN_ATTRS_CAST __attribute__((__always_inline__)) constexpr\n" "#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS constexpr\n" "#else\n" "#define __DEFAULT_FN_ATTRS_CAST __attribute__((__always_inline__))\n" "#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS\n" "#endif\n" "\n" "/** Find the first set bit starting from the lsb. Result is undefined if\n" " * input is 0.\n" " *\n" " * \\headerfile \n" " *\n" " * This intrinsic corresponds to the BSF instruction or the\n" " * TZCNT instruction.\n" " *\n" " * \\param __A\n" " * A 32-bit integer operand.\n" " * \\returns A 32-bit integer containing the bit number.\n" " */\n" "static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR\n" "__bsfd(int __A) {\n" " return __builtin_ctz((unsigned int)__A);\n" "}\n" "\n" "/** Find the first set bit starting from the msb. Result is undefined if\n" " * input is 0.\n" " *\n" " * \\headerfile \n" " *\n" " * This intrinsic corresponds to the BSR instruction or the\n" " * LZCNT instruction and an XOR .\n" " *\n" " * \\param __A\n" " * A 32-bit integer operand.\n" " * \\returns A 32-bit integer containing the bit number.\n" " */\n" "static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR\n" "__bsrd(int __A) {\n" " return 31 - __builtin_clz((unsigned int)__A);\n" "}\n" "\n" "/** Swaps the bytes in the input. 
Converting little endian to big endian or\n" " * vice versa.\n" " *\n" " * \\headerfile \n" " *\n" " * This intrinsic corresponds to the BSWAP instruction.\n" " *\n" " * \\param __A\n" " * A 32-bit integer operand.\n" " * \\returns A 32-bit integer containing the swapped bytes.\n" " */\n" "static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR\n" "__bswapd(int __A) {\n" " return (int)__builtin_bswap32((unsigned int)__A);\n" "}\n" "\n" "static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR\n" "_bswap(int __A) {\n" " return (int)__builtin_bswap32((unsigned int)__A);\n" "}\n" "\n" "#define _bit_scan_forward(A) __bsfd((A))\n" "#define _bit_scan_reverse(A) __bsrd((A))\n" "\n" "#ifdef __x86_64__\n" "/** Find the first set bit starting from the lsb. Result is undefined if\n" " * input is 0.\n" " *\n" " * \\headerfile \n" " *\n" " * This intrinsic corresponds to the BSF instruction or the\n" " * TZCNT instruction.\n" " *\n" " * \\param __A\n" " * A 64-bit integer operand.\n" " * \\returns A 32-bit integer containing the bit number.\n" " */\n" "static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR\n" "__bsfq(long long __A) {\n" " return (long long)__builtin_ctzll((unsigned long long)__A);\n" "}\n" "\n" "/** Find the first set bit starting from the msb. Result is undefined if\n" " * input is 0.\n" " *\n" " * \\headerfile \n" " *\n" " * This intrinsic corresponds to the BSR instruction or the\n" " * LZCNT instruction and an XOR .\n" " *\n" " * \\param __A\n" " * A 64-bit integer operand.\n" " * \\returns A 32-bit integer containing the bit number.\n" " */\n" "static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR\n" "__bsrq(long long __A) {\n" " return 63 - __builtin_clzll((unsigned long long)__A);\n" "}\n" "\n" "/** Swaps the bytes in the input. 
Converting little endian to big endian or\n" " * vice versa.\n" " *\n" " * \\headerfile \n" " *\n" " * This intrinsic corresponds to the BSWAP instruction.\n" " *\n" " * \\param __A\n" " * A 64-bit integer operand.\n" " * \\returns A 64-bit integer containing the swapped bytes.\n" " */\n" "static __inline__ long long __DEFAULT_FN_ATTRS_CONSTEXPR\n" "__bswapq(long long __A) {\n" " return (long long)__builtin_bswap64((unsigned long long)__A);\n" "}\n" "\n" "#define _bswap64(A) __bswapq((A))\n" "#endif\n" "\n" "/** Counts the number of bits in the source operand having a value of 1.\n" " *\n" " * \\headerfile \n" " *\n" " * This intrinsic corresponds to the POPCNT instruction or a\n" " * a sequence of arithmetic and logic ops to calculate it.\n" " *\n" " * \\param __A\n" " * An unsigned 32-bit integer operand.\n" " * \\returns A 32-bit integer containing the number of bits with value 1 in the\n" " * source operand.\n" " */\n" "static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR\n" "__popcntd(unsigned int __A)\n" "{\n" " return __builtin_popcount(__A);\n" "}\n" "\n" "#define _popcnt32(A) __popcntd((A))\n" "\n" "#ifdef __x86_64__\n" "/** Counts the number of bits in the source operand having a value of 1.\n" " *\n" " * \\headerfile \n" " *\n" " * This intrinsic corresponds to the POPCNT instruction or a\n" " * a sequence of arithmetic and logic ops to calculate it.\n" " *\n" " * \\param __A\n" " * An unsigned 64-bit integer operand.\n" " * \\returns A 64-bit integer containing the number of bits with value 1 in the\n" " * source operand.\n" " */\n" "static __inline__ long long __DEFAULT_FN_ATTRS_CONSTEXPR\n" "__popcntq(unsigned long long __A)\n" "{\n" " return __builtin_popcountll(__A);\n" "}\n" "\n" "#define _popcnt64(A) __popcntq((A))\n" "#endif /* __x86_64__ */\n" "\n" "#ifdef __x86_64__\n" "static __inline__ unsigned long long __DEFAULT_FN_ATTRS\n" "__readeflags(void)\n" "{\n" " return __builtin_ia32_readeflags_u64();\n" "}\n" "\n" "static __inline__ void 
__DEFAULT_FN_ATTRS\n" "__writeeflags(unsigned long long __f)\n" "{\n" " __builtin_ia32_writeeflags_u64(__f);\n" "}\n" "\n" "#else /* !__x86_64__ */\n" "static __inline__ unsigned int __DEFAULT_FN_ATTRS\n" "__readeflags(void)\n" "{\n" " return __builtin_ia32_readeflags_u32();\n" "}\n" "\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "__writeeflags(unsigned int __f)\n" "{\n" " __builtin_ia32_writeeflags_u32(__f);\n" "}\n" "#endif /* !__x86_64__ */\n" "\n" "/** Cast a 32-bit float value to a 32-bit unsigned integer value\n" " *\n" " * \\headerfile \n" " * This intrinsic corresponds to the VMOVD / MOVD instruction in x86_64,\n" " * and corresponds to the VMOVL / MOVL instruction in ia32.\n" " *\n" " * \\param __A\n" " * A 32-bit float value.\n" " * \\returns a 32-bit unsigned integer containing the converted value.\n" " */\n" "static __inline__ unsigned int __DEFAULT_FN_ATTRS_CAST\n" "_castf32_u32(float __A) {\n" " return __builtin_bit_cast(unsigned int, __A);\n" "}\n" "\n" "/** Cast a 64-bit float value to a 64-bit unsigned integer value\n" " *\n" " * \\headerfile \n" " * This intrinsic corresponds to the VMOVQ / MOVQ instruction in x86_64,\n" " * and corresponds to the VMOVL / MOVL instruction in ia32.\n" " *\n" " * \\param __A\n" " * A 64-bit float value.\n" " * \\returns a 64-bit unsigned integer containing the converted value.\n" " */\n" "static __inline__ unsigned long long __DEFAULT_FN_ATTRS_CAST\n" "_castf64_u64(double __A) {\n" " return __builtin_bit_cast(unsigned long long, __A);\n" "}\n" "\n" "/** Cast a 32-bit unsigned integer value to a 32-bit float value\n" " *\n" " * \\headerfile \n" " * This intrinsic corresponds to the VMOVQ / MOVQ instruction in x86_64,\n" " * and corresponds to the FLDS instruction in ia32.\n" " *\n" " * \\param __A\n" " * A 32-bit unsigned integer value.\n" " * \\returns a 32-bit float value containing the converted value.\n" " */\n" "static __inline__ float __DEFAULT_FN_ATTRS_CAST\n" "_castu32_f32(unsigned int __A) {\n" " return 
__builtin_bit_cast(float, __A);\n" "}\n" "\n" "/** Cast a 64-bit unsigned integer value to a 64-bit float value\n" " *\n" " * \\headerfile \n" " * This intrinsic corresponds to the VMOVQ / MOVQ instruction in x86_64,\n" " * and corresponds to the FLDL instruction in ia32.\n" " *\n" " * \\param __A\n" " * A 64-bit unsigned integer value.\n" " * \\returns a 64-bit float value containing the converted value.\n" " */\n" "static __inline__ double __DEFAULT_FN_ATTRS_CAST\n" "_castu64_f64(unsigned long long __A) {\n" " return __builtin_bit_cast(double, __A);\n" "}\n" "\n" "/** Adds the unsigned integer operand to the CRC-32C checksum of the\n" " * unsigned char operand.\n" " *\n" " * \\headerfile \n" " *\n" " * This intrinsic corresponds to the CRC32B instruction.\n" " *\n" " * \\param __C\n" " * An unsigned integer operand to add to the CRC-32C checksum of operand\n" " * \\a __D.\n" " * \\param __D\n" " * An unsigned 8-bit integer operand used to compute the CRC-32C checksum.\n" " * \\returns The result of adding operand \\a __C to the CRC-32C checksum of\n" " * operand \\a __D.\n" " */\n" "static __inline__ unsigned int __DEFAULT_FN_ATTRS_CRC32\n" "__crc32b(unsigned int __C, unsigned char __D)\n" "{\n" " return __builtin_ia32_crc32qi(__C, __D);\n" "}\n" "\n" "/** Adds the unsigned integer operand to the CRC-32C checksum of the\n" " * unsigned short operand.\n" " *\n" " * \\headerfile \n" " *\n" " * This intrinsic corresponds to the CRC32W instruction.\n" " *\n" " * \\param __C\n" " * An unsigned integer operand to add to the CRC-32C checksum of operand\n" " * \\a __D.\n" " * \\param __D\n" " * An unsigned 16-bit integer operand used to compute the CRC-32C checksum.\n" " * \\returns The result of adding operand \\a __C to the CRC-32C checksum of\n" " * operand \\a __D.\n" " */\n" "static __inline__ unsigned int __DEFAULT_FN_ATTRS_CRC32\n" "__crc32w(unsigned int __C, unsigned short __D)\n" "{\n" " return __builtin_ia32_crc32hi(__C, __D);\n" "}\n" "\n" "/** Adds the 
unsigned integer operand to the CRC-32C checksum of the\n" " * second unsigned integer operand.\n" " *\n" " * \\headerfile \n" " *\n" " * This intrinsic corresponds to the CRC32D instruction.\n" " *\n" " * \\param __C\n" " * An unsigned integer operand to add to the CRC-32C checksum of operand\n" " * \\a __D.\n" " * \\param __D\n" " * An unsigned 32-bit integer operand used to compute the CRC-32C checksum.\n" " * \\returns The result of adding operand \\a __C to the CRC-32C checksum of\n" " * operand \\a __D.\n" " */\n" "static __inline__ unsigned int __DEFAULT_FN_ATTRS_CRC32\n" "__crc32d(unsigned int __C, unsigned int __D)\n" "{\n" " return __builtin_ia32_crc32si(__C, __D);\n" "}\n" "\n" "#ifdef __x86_64__\n" "/** Adds the unsigned integer operand to the CRC-32C checksum of the\n" " * unsigned 64-bit integer operand.\n" " *\n" " * \\headerfile \n" " *\n" " * This intrinsic corresponds to the CRC32Q instruction.\n" " *\n" " * \\param __C\n" " * An unsigned integer operand to add to the CRC-32C checksum of operand\n" " * \\a __D.\n" " * \\param __D\n" " * An unsigned 64-bit integer operand used to compute the CRC-32C checksum.\n" " * \\returns The result of adding operand \\a __C to the CRC-32C checksum of\n" " * operand \\a __D.\n" " */\n" "static __inline__ unsigned long long __DEFAULT_FN_ATTRS_CRC32\n" "__crc32q(unsigned long long __C, unsigned long long __D)\n" "{\n" " return __builtin_ia32_crc32di(__C, __D);\n" "}\n" "#endif /* __x86_64__ */\n" "\n" "static __inline__ unsigned long long __DEFAULT_FN_ATTRS\n" "__rdpmc(int __A) {\n" " return __builtin_ia32_rdpmc(__A);\n" "}\n" "\n" "/* __rdtscp */\n" "static __inline__ unsigned long long __DEFAULT_FN_ATTRS\n" "__rdtscp(unsigned int *__A) {\n" " return __builtin_ia32_rdtscp(__A);\n" "}\n" "\n" "#define _rdtsc() __rdtsc()\n" "\n" "#define _rdpmc(A) __rdpmc(A)\n" "\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_wbinvd(void) {\n" " __builtin_ia32_wbinvd();\n" "}\n" "\n" "static __inline__ unsigned char 
__DEFAULT_FN_ATTRS_CONSTEXPR\n" "__rolb(unsigned char __X, int __C) {\n" " return __builtin_rotateleft8(__X, __C);\n" "}\n" "\n" "static __inline__ unsigned char __DEFAULT_FN_ATTRS_CONSTEXPR\n" "__rorb(unsigned char __X, int __C) {\n" " return __builtin_rotateright8(__X, __C);\n" "}\n" "\n" "static __inline__ unsigned short __DEFAULT_FN_ATTRS_CONSTEXPR\n" "__rolw(unsigned short __X, int __C) {\n" " return __builtin_rotateleft16(__X, __C);\n" "}\n" "\n" "static __inline__ unsigned short __DEFAULT_FN_ATTRS_CONSTEXPR\n" "__rorw(unsigned short __X, int __C) {\n" " return __builtin_rotateright16(__X, __C);\n" "}\n" "\n" "static __inline__ unsigned int __DEFAULT_FN_ATTRS_CONSTEXPR\n" "__rold(unsigned int __X, int __C) {\n" " return __builtin_rotateleft32(__X, (unsigned int)__C);\n" "}\n" "\n" "static __inline__ unsigned int __DEFAULT_FN_ATTRS_CONSTEXPR\n" "__rord(unsigned int __X, int __C) {\n" " return __builtin_rotateright32(__X, (unsigned int)__C);\n" "}\n" "\n" "#ifdef __x86_64__\n" "static __inline__ unsigned long long __DEFAULT_FN_ATTRS_CONSTEXPR\n" "__rolq(unsigned long long __X, int __C) {\n" " return __builtin_rotateleft64(__X, (unsigned long long)__C);\n" "}\n" "\n" "static __inline__ unsigned long long __DEFAULT_FN_ATTRS_CONSTEXPR\n" "__rorq(unsigned long long __X, int __C) {\n" " return __builtin_rotateright64(__X, (unsigned long long)__C);\n" "}\n" "#endif /* __x86_64__ */\n" "\n" "#ifndef _MSC_VER\n" "/* These are already provided as builtins for MSVC. */\n" "/* Select the correct function based on the size of long. */\n" "#ifdef __LP64__\n" "#define _lrotl(a,b) __rolq((a), (b))\n" "#define _lrotr(a,b) __rorq((a), (b))\n" "#else\n" "#define _lrotl(a,b) __rold((a), (b))\n" "#define _lrotr(a,b) __rord((a), (b))\n" "#endif\n" "#define _rotl(a,b) __rold((a), (b))\n" "#define _rotr(a,b) __rord((a), (b))\n" "#endif // _MSC_VER\n" "\n" "/* These are not builtins so need to be provided in all modes. 
*/\n" "#define _rotwl(a,b) __rolw((a), (b))\n" "#define _rotwr(a,b) __rorw((a), (b))\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "#undef __DEFAULT_FN_ATTRS_CAST\n" "#undef __DEFAULT_FN_ATTRS_CRC32\n" "#undef __DEFAULT_FN_ATTRS_CONSTEXPR\n" "\n" "#endif /* __IA32INTRIN_H */\n" "" } , { "/builtins/immintrin.h" , "/*===---- immintrin.h - Intel intrinsics -----------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __IMMINTRIN_H\n" "#define __IMMINTRIN_H\n" "\n" "#if !defined(__i386__) && !defined(__x86_64__)\n" "#error \"This header is only meant to be used on x86 and x64 architecture\"\n" "#endif\n" "\n" "#include \n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__MMX__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__SSE__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__SSE2__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__SSE3__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__SSSE3__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " (defined(__SSE4_2__) || defined(__SSE4_1__))\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " (defined(__AES__) || defined(__PCLMUL__))\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || 
\\\n" " defined(__CLFLUSHOPT__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__CLWB__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__AVX__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__AVX2__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__F16C__)\n" "#include \n" "#endif\n" "\n" "/* No feature check desired due to internal checks */\n" "#include \n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__BMI2__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__LZCNT__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__POPCNT__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__FMA__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__AVX512F__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__AVX512VL__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__AVX512BW__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__AVX512BITALG__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__AVX512CD__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " 
defined(__AVX512VPOPCNTDQ__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " (defined(__AVX512VL__) && defined(__AVX512VPOPCNTDQ__))\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__AVX512VNNI__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " (defined(__AVX512VL__) && defined(__AVX512VNNI__))\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__AVXVNNI__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__AVX512DQ__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " (defined(__AVX512VL__) && defined(__AVX512BITALG__))\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " (defined(__AVX512VL__) && defined(__AVX512BW__))\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " (defined(__AVX512VL__) && defined(__AVX512CD__))\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " (defined(__AVX512VL__) && defined(__AVX512DQ__))\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__AVX512ER__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__AVX512IFMA__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " (defined(__AVX512IFMA__) && defined(__AVX512VL__))\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || 
\\\n" " defined(__AVXIFMA__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__AVX512VBMI__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " (defined(__AVX512VBMI__) && defined(__AVX512VL__))\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__AVX512VBMI2__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " (defined(__AVX512VBMI2__) && defined(__AVX512VL__))\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__AVX512PF__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__AVX512FP16__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " (defined(__AVX512VL__) && defined(__AVX512FP16__))\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__AVX512BF16__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " (defined(__AVX512VL__) && defined(__AVX512BF16__))\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__PKU__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__VPCLMULQDQ__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__VAES__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__GFNI__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || 
defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__AVXVNNIINT8__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__AVXNECONVERT__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__SHA512__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__SM3__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__SM4__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__AVXVNNIINT16__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__RDPID__)\n" "/// Returns the value of the IA32_TSC_AUX MSR (0xc0000103).\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the RDPID instruction.\n" "static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__(\"rdpid\")))\n" "_rdpid_u32(void) {\n" " return __builtin_ia32_rdpid();\n" "}\n" "#endif // __RDPID__\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__RDRND__)\n" "/// Returns a 16-bit hardware-generated random value.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the RDRAND instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a 16-bit memory location to place the random value.\n" "/// \\returns 1 if the value was successfully generated, 0 otherwise.\n" "static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__(\"rdrnd\")))\n" "_rdrand16_step(unsigned short *__p)\n" "{\n" " return (int)__builtin_ia32_rdrand16_step(__p);\n" "}\n" "\n" "/// Returns a 32-bit hardware-generated random value.\n" "///\n" "/// \\headerfile \n" 
"///\n" "/// This intrinsic corresponds to the RDRAND instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a 32-bit memory location to place the random value.\n" "/// \\returns 1 if the value was successfully generated, 0 otherwise.\n" "static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__(\"rdrnd\")))\n" "_rdrand32_step(unsigned int *__p)\n" "{\n" " return (int)__builtin_ia32_rdrand32_step(__p);\n" "}\n" "\n" "/// Returns a 64-bit hardware-generated random value.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the RDRAND instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a 64-bit memory location to place the random value.\n" "/// \\returns 1 if the value was successfully generated, 0 otherwise.\n" "static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__(\"rdrnd\")))\n" "_rdrand64_step(unsigned long long *__p)\n" "{\n" "#ifdef __x86_64__\n" " return (int)__builtin_ia32_rdrand64_step(__p);\n" "#else\n" " // We need to emulate the functionality of 64-bit rdrand with 2 32-bit\n" " // rdrand instructions.\n" " unsigned int __lo, __hi;\n" " unsigned int __res_lo = __builtin_ia32_rdrand32_step(&__lo);\n" " unsigned int __res_hi = __builtin_ia32_rdrand32_step(&__hi);\n" " if (__res_lo && __res_hi) {\n" " *__p = ((unsigned long long)__hi << 32) | (unsigned long long)__lo;\n" " return 1;\n" " } else {\n" " *__p = 0;\n" " return 0;\n" " }\n" "#endif\n" "}\n" "#endif /* __RDRND__ */\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__FSGSBASE__)\n" "#ifdef __x86_64__\n" "/// Reads the FS base register.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the RDFSBASE instruction.\n" "///\n" "/// \\returns The lower 32 bits of the FS base register.\n" "static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__(\"fsgsbase\")))\n" "_readfsbase_u32(void)\n" "{\n" " return 
__builtin_ia32_rdfsbase32();\n" "}\n" "\n" "/// Reads the FS base register.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the RDFSBASE instruction.\n" "///\n" "/// \\returns The contents of the FS base register.\n" "static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__, __target__(\"fsgsbase\")))\n" "_readfsbase_u64(void)\n" "{\n" " return __builtin_ia32_rdfsbase64();\n" "}\n" "\n" "/// Reads the GS base register.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the RDGSBASE instruction.\n" "///\n" "/// \\returns The lower 32 bits of the GS base register.\n" "static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__(\"fsgsbase\")))\n" "_readgsbase_u32(void)\n" "{\n" " return __builtin_ia32_rdgsbase32();\n" "}\n" "\n" "/// Reads the GS base register.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the RDGSBASE instruction.\n" "///\n" "/// \\returns The contents of the GS base register.\n" "static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__, __target__(\"fsgsbase\")))\n" "_readgsbase_u64(void)\n" "{\n" " return __builtin_ia32_rdgsbase64();\n" "}\n" "\n" "/// Modifies the FS base register.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the WRFSBASE instruction.\n" "///\n" "/// \\param __V\n" "/// Value to use for the lower 32 bits of the FS base register.\n" "static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__(\"fsgsbase\")))\n" "_writefsbase_u32(unsigned int __V)\n" "{\n" " __builtin_ia32_wrfsbase32(__V);\n" "}\n" "\n" "/// Modifies the FS base register.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the WRFSBASE instruction.\n" "///\n" "/// \\param __V\n" "/// Value to use for the FS base register.\n" "static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__(\"fsgsbase\")))\n" 
"_writefsbase_u64(unsigned long long __V)\n" "{\n" " __builtin_ia32_wrfsbase64(__V);\n" "}\n" "\n" "/// Modifies the GS base register.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the WRGSBASE instruction.\n" "///\n" "/// \\param __V\n" "/// Value to use for the lower 32 bits of the GS base register.\n" "static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__(\"fsgsbase\")))\n" "_writegsbase_u32(unsigned int __V)\n" "{\n" " __builtin_ia32_wrgsbase32(__V);\n" "}\n" "\n" "/// Modifies the GS base register.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the WRFSBASE instruction.\n" "///\n" "/// \\param __V\n" "/// Value to use for GS base register.\n" "static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__(\"fsgsbase\")))\n" "_writegsbase_u64(unsigned long long __V)\n" "{\n" " __builtin_ia32_wrgsbase64(__V);\n" "}\n" "\n" "#endif\n" "#endif /* __FSGSBASE__ */\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__MOVBE__)\n" "\n" "/* The structs used below are to force the load/store to be unaligned. This\n" " * is accomplished with the __packed__ attribute. 
The __may_alias__ prevents\n" " * tbaa metadata from being generated based on the struct and the type of the\n" " * field inside of it.\n" " */\n" "\n" "static __inline__ short __attribute__((__always_inline__, __nodebug__, __target__(\"movbe\")))\n" "_loadbe_i16(void const * __P) {\n" " struct __loadu_i16 {\n" " unsigned short __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " return (short)__builtin_bswap16(((const struct __loadu_i16*)__P)->__v);\n" "}\n" "\n" "static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__(\"movbe\")))\n" "_storebe_i16(void * __P, short __D) {\n" " struct __storeu_i16 {\n" " unsigned short __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " ((struct __storeu_i16*)__P)->__v = __builtin_bswap16((unsigned short)__D);\n" "}\n" "\n" "static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__(\"movbe\")))\n" "_loadbe_i32(void const * __P) {\n" " struct __loadu_i32 {\n" " unsigned int __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " return (int)__builtin_bswap32(((const struct __loadu_i32*)__P)->__v);\n" "}\n" "\n" "static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__(\"movbe\")))\n" "_storebe_i32(void * __P, int __D) {\n" " struct __storeu_i32 {\n" " unsigned int __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " ((struct __storeu_i32*)__P)->__v = __builtin_bswap32((unsigned int)__D);\n" "}\n" "\n" "#ifdef __x86_64__\n" "static __inline__ long long __attribute__((__always_inline__, __nodebug__, __target__(\"movbe\")))\n" "_loadbe_i64(void const * __P) {\n" " struct __loadu_i64 {\n" " unsigned long long __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " return (long long)__builtin_bswap64(((const struct __loadu_i64*)__P)->__v);\n" "}\n" "\n" "static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__(\"movbe\")))\n" "_storebe_i64(void * __P, long long __D) {\n" " struct __storeu_i64 {\n" " unsigned 
long long __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " ((struct __storeu_i64*)__P)->__v = __builtin_bswap64((unsigned long long)__D);\n" "}\n" "#endif\n" "#endif /* __MOVBE */\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__RTM__)\n" "#include \n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__SHA__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__FXSR__)\n" "#include \n" "#endif\n" "\n" "/* No feature check desired due to internal MSC_VER checks */\n" "#include \n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__XSAVEOPT__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__XSAVEC__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__XSAVES__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__SHSTK__)\n" "#include \n" "#endif\n" "\n" "/* Some intrinsics inside adxintrin.h are available only on processors with ADX,\n" " * whereas others are also available at all times. 
*/\n" "#include \n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__RDSEED__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__WBNOINVD__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__CLDEMOTE__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__WAITPKG__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__MOVDIRI__) || defined(__MOVDIR64B__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__PCONFIG__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__SGX__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__PTWRITE__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__INVPCID__)\n" "#include \n" "#endif\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__AMX_FP16__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__KL__) || defined(__WIDEKL__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__AMX_TILE__) || defined(__AMX_INT8__) || defined(__AMX_BF16__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__AMX_COMPLEX__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " 
defined(__AVX512VP2INTERSECT__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " (defined(__AVX512VL__) && defined(__AVX512VP2INTERSECT__))\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__ENQCMD__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__SERIALIZE__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__TSXLDTRK__)\n" "#include \n" "#endif\n" "\n" "#if defined(_MSC_VER) && __has_extension(gnu_asm)\n" "/* Define the default attributes for these intrinsics */\n" "#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))\n" "#ifdef __cplusplus\n" "extern \"C\" {\n" "#endif\n" "/*----------------------------------------------------------------------------*\\\n" "|* Interlocked Exchange HLE\n" "\\*----------------------------------------------------------------------------*/\n" "#if defined(__i386__) || defined(__x86_64__)\n" "static __inline__ long __DEFAULT_FN_ATTRS\n" "_InterlockedExchange_HLEAcquire(long volatile *_Target, long _Value) {\n" " __asm__ __volatile__(\".byte 0xf2 ; lock ; xchg {%0, %1|%1, %0}\"\n" " : \"+r\" (_Value), \"+m\" (*_Target) :: \"memory\");\n" " return _Value;\n" "}\n" "static __inline__ long __DEFAULT_FN_ATTRS\n" "_InterlockedExchange_HLERelease(long volatile *_Target, long _Value) {\n" " __asm__ __volatile__(\".byte 0xf3 ; lock ; xchg {%0, %1|%1, %0}\"\n" " : \"+r\" (_Value), \"+m\" (*_Target) :: \"memory\");\n" " return _Value;\n" "}\n" "#endif\n" "#if defined(__x86_64__)\n" "static __inline__ __int64 __DEFAULT_FN_ATTRS\n" "_InterlockedExchange64_HLEAcquire(__int64 volatile *_Target, __int64 _Value) {\n" " __asm__ __volatile__(\".byte 0xf2 ; lock ; xchg {%0, %1|%1, %0}\"\n" " : \"+r\" (_Value), \"+m\" (*_Target) :: 
\"memory\");\n" " return _Value;\n" "}\n" "static __inline__ __int64 __DEFAULT_FN_ATTRS\n" "_InterlockedExchange64_HLERelease(__int64 volatile *_Target, __int64 _Value) {\n" " __asm__ __volatile__(\".byte 0xf3 ; lock ; xchg {%0, %1|%1, %0}\"\n" " : \"+r\" (_Value), \"+m\" (*_Target) :: \"memory\");\n" " return _Value;\n" "}\n" "#endif\n" "/*----------------------------------------------------------------------------*\\\n" "|* Interlocked Compare Exchange HLE\n" "\\*----------------------------------------------------------------------------*/\n" "#if defined(__i386__) || defined(__x86_64__)\n" "static __inline__ long __DEFAULT_FN_ATTRS\n" "_InterlockedCompareExchange_HLEAcquire(long volatile *_Destination,\n" " long _Exchange, long _Comparand) {\n" " __asm__ __volatile__(\".byte 0xf2 ; lock ; cmpxchg {%2, %1|%1, %2}\"\n" " : \"+a\" (_Comparand), \"+m\" (*_Destination)\n" " : \"r\" (_Exchange) : \"memory\");\n" " return _Comparand;\n" "}\n" "static __inline__ long __DEFAULT_FN_ATTRS\n" "_InterlockedCompareExchange_HLERelease(long volatile *_Destination,\n" " long _Exchange, long _Comparand) {\n" " __asm__ __volatile__(\".byte 0xf3 ; lock ; cmpxchg {%2, %1|%1, %2}\"\n" " : \"+a\" (_Comparand), \"+m\" (*_Destination)\n" " : \"r\" (_Exchange) : \"memory\");\n" " return _Comparand;\n" "}\n" "#endif\n" "#if defined(__x86_64__)\n" "static __inline__ __int64 __DEFAULT_FN_ATTRS\n" "_InterlockedCompareExchange64_HLEAcquire(__int64 volatile *_Destination,\n" " __int64 _Exchange, __int64 _Comparand) {\n" " __asm__ __volatile__(\".byte 0xf2 ; lock ; cmpxchg {%2, %1|%1, %2}\"\n" " : \"+a\" (_Comparand), \"+m\" (*_Destination)\n" " : \"r\" (_Exchange) : \"memory\");\n" " return _Comparand;\n" "}\n" "static __inline__ __int64 __DEFAULT_FN_ATTRS\n" "_InterlockedCompareExchange64_HLERelease(__int64 volatile *_Destination,\n" " __int64 _Exchange, __int64 _Comparand) {\n" " __asm__ __volatile__(\".byte 0xf3 ; lock ; cmpxchg {%2, %1|%1, %2}\"\n" " : \"+a\" (_Comparand), \"+m\" 
(*_Destination)\n" " : \"r\" (_Exchange) : \"memory\");\n" " return _Comparand;\n" "}\n" "#endif\n" "#ifdef __cplusplus\n" "}\n" "#endif\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "\n" "#endif /* defined(_MSC_VER) && __has_extension(gnu_asm) */\n" "\n" "#endif /* __IMMINTRIN_H */\n" "" } , { "/builtins/intrin.h" , "/* ===-------- intrin.h ---------------------------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "/* Only include this if we're compiling for the windows platform. */\n" "#ifndef _MSC_VER\n" "#include_next \n" "#else\n" "\n" "#ifndef __INTRIN_H\n" "#define __INTRIN_H\n" "\n" "/* First include the standard intrinsics. */\n" "#if defined(__i386__) || defined(__x86_64__)\n" "#include \n" "#endif\n" "\n" "#if defined(__arm__)\n" "#include \n" "#endif\n" "\n" "#if defined(__aarch64__)\n" "#include \n" "#endif\n" "\n" "/* For the definition of jmp_buf. */\n" "#if __STDC_HOSTED__\n" "#include \n" "#endif\n" "\n" "/* Define the default attributes for the functions in this file. */\n" "#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))\n" "\n" "#if __x86_64__\n" "#define __LPTRINT_TYPE__ __int64\n" "#else\n" "#define __LPTRINT_TYPE__ long\n" "#endif\n" "\n" "#ifdef __cplusplus\n" "extern \"C\" {\n" "#endif\n" "\n" "#if defined(__MMX__)\n" "/* And the random ones that aren't in those files. */\n" "__m64 _m_from_float(float);\n" "float _m_to_float(__m64);\n" "#endif\n" "\n" "/* Other assorted instruction intrinsics. 
*/\n" "void __addfsbyte(unsigned long, unsigned char);\n" "void __addfsdword(unsigned long, unsigned long);\n" "void __addfsword(unsigned long, unsigned short);\n" "void __code_seg(const char *);\n" "void __cpuid(int[4], int);\n" "void __cpuidex(int[4], int, int);\n" "__int64 __emul(int, int);\n" "unsigned __int64 __emulu(unsigned int, unsigned int);\n" "unsigned int __getcallerseflags(void);\n" "void __halt(void);\n" "unsigned char __inbyte(unsigned short);\n" "void __inbytestring(unsigned short, unsigned char *, unsigned long);\n" "void __incfsbyte(unsigned long);\n" "void __incfsdword(unsigned long);\n" "void __incfsword(unsigned long);\n" "unsigned long __indword(unsigned short);\n" "void __indwordstring(unsigned short, unsigned long *, unsigned long);\n" "void __int2c(void);\n" "void __invlpg(void *);\n" "unsigned short __inword(unsigned short);\n" "void __inwordstring(unsigned short, unsigned short *, unsigned long);\n" "void __lidt(void *);\n" "unsigned __int64 __ll_lshift(unsigned __int64, int);\n" "__int64 __ll_rshift(__int64, int);\n" "void __movsb(unsigned char *, unsigned char const *, size_t);\n" "void __movsd(unsigned long *, unsigned long const *, size_t);\n" "void __movsw(unsigned short *, unsigned short const *, size_t);\n" "void __nop(void);\n" "void __nvreg_restore_fence(void);\n" "void __nvreg_save_fence(void);\n" "void __outbyte(unsigned short, unsigned char);\n" "void __outbytestring(unsigned short, unsigned char *, unsigned long);\n" "void __outdword(unsigned short, unsigned long);\n" "void __outdwordstring(unsigned short, unsigned long *, unsigned long);\n" "void __outword(unsigned short, unsigned short);\n" "void __outwordstring(unsigned short, unsigned short *, unsigned long);\n" "unsigned long __readcr0(void);\n" "unsigned long __readcr2(void);\n" "unsigned __LPTRINT_TYPE__ __readcr3(void);\n" "unsigned long __readcr4(void);\n" "unsigned long __readcr8(void);\n" "unsigned int __readdr(unsigned int);\n" "#ifdef __i386__\n" "unsigned char 
__readfsbyte(unsigned long);\n" "unsigned short __readfsword(unsigned long);\n" "unsigned long __readfsdword(unsigned long);\n" "unsigned __int64 __readfsqword(unsigned long);\n" "#endif\n" "unsigned __int64 __readmsr(unsigned long);\n" "unsigned __int64 __readpmc(unsigned long);\n" "unsigned long __segmentlimit(unsigned long);\n" "void __sidt(void *);\n" "void __stosb(unsigned char *, unsigned char, size_t);\n" "void __stosd(unsigned long *, unsigned long, size_t);\n" "void __stosw(unsigned short *, unsigned short, size_t);\n" "void __svm_clgi(void);\n" "void __svm_invlpga(void *, int);\n" "void __svm_skinit(int);\n" "void __svm_stgi(void);\n" "void __svm_vmload(size_t);\n" "void __svm_vmrun(size_t);\n" "void __svm_vmsave(size_t);\n" "void __ud2(void);\n" "unsigned __int64 __ull_rshift(unsigned __int64, int);\n" "void __vmx_off(void);\n" "void __vmx_vmptrst(unsigned __int64 *);\n" "void __wbinvd(void);\n" "void __writecr0(unsigned int);\n" "void __writecr3(unsigned __INTPTR_TYPE__);\n" "void __writecr4(unsigned int);\n" "void __writecr8(unsigned int);\n" "void __writedr(unsigned int, unsigned int);\n" "void __writefsbyte(unsigned long, unsigned char);\n" "void __writefsdword(unsigned long, unsigned long);\n" "void __writefsqword(unsigned long, unsigned __int64);\n" "void __writefsword(unsigned long, unsigned short);\n" "void __writemsr(unsigned long, unsigned __int64);\n" "void *_AddressOfReturnAddress(void);\n" "unsigned char _BitScanForward(unsigned long *_Index, unsigned long _Mask);\n" "unsigned char _BitScanReverse(unsigned long *_Index, unsigned long _Mask);\n" "unsigned char _bittest(long const *, long);\n" "unsigned char _bittestandcomplement(long *, long);\n" "unsigned char _bittestandreset(long *, long);\n" "unsigned char _bittestandset(long *, long);\n" "void __cdecl _disable(void);\n" "void __cdecl _enable(void);\n" "long _InterlockedAddLargeStatistic(__int64 volatile *_Addend, long _Value);\n" "unsigned char _interlockedbittestandreset(long volatile 
*, long);\n" "unsigned char _interlockedbittestandset(long volatile *, long);\n" "void *_InterlockedCompareExchangePointer_HLEAcquire(void *volatile *, void *,\n" " void *);\n" "void *_InterlockedCompareExchangePointer_HLERelease(void *volatile *, void *,\n" " void *);\n" "long _InterlockedExchangeAdd_HLEAcquire(long volatile *, long);\n" "long _InterlockedExchangeAdd_HLERelease(long volatile *, long);\n" "__int64 _InterlockedExchangeAdd64_HLEAcquire(__int64 volatile *, __int64);\n" "__int64 _InterlockedExchangeAdd64_HLERelease(__int64 volatile *, __int64);\n" "void _ReadBarrier(void);\n" "void _ReadWriteBarrier(void);\n" "unsigned int _rorx_u32(unsigned int, const unsigned int);\n" "int _sarx_i32(int, unsigned int);\n" "#if __STDC_HOSTED__\n" "int __cdecl _setjmp(jmp_buf);\n" "#endif\n" "unsigned int _shlx_u32(unsigned int, unsigned int);\n" "unsigned int _shrx_u32(unsigned int, unsigned int);\n" "void _Store_HLERelease(long volatile *, long);\n" "void _Store64_HLERelease(__int64 volatile *, __int64);\n" "void _StorePointer_HLERelease(void *volatile *, void *);\n" "void _WriteBarrier(void);\n" "unsigned __int32 xbegin(void);\n" "void _xend(void);\n" "\n" "/* These additional intrinsics are turned on in x64/amd64/x86_64 mode. 
*/\n" "#ifdef __x86_64__\n" "void __addgsbyte(unsigned long, unsigned char);\n" "void __addgsdword(unsigned long, unsigned long);\n" "void __addgsqword(unsigned long, unsigned __int64);\n" "void __addgsword(unsigned long, unsigned short);\n" "void __faststorefence(void);\n" "void __incgsbyte(unsigned long);\n" "void __incgsdword(unsigned long);\n" "void __incgsqword(unsigned long);\n" "void __incgsword(unsigned long);\n" "void __movsq(unsigned long long *, unsigned long long const *, size_t);\n" "unsigned char __readgsbyte(unsigned long);\n" "unsigned long __readgsdword(unsigned long);\n" "unsigned __int64 __readgsqword(unsigned long);\n" "unsigned short __readgsword(unsigned long);\n" "unsigned __int64 __shiftleft128(unsigned __int64 _LowPart,\n" " unsigned __int64 _HighPart,\n" " unsigned char _Shift);\n" "unsigned __int64 __shiftright128(unsigned __int64 _LowPart,\n" " unsigned __int64 _HighPart,\n" " unsigned char _Shift);\n" "void __stosq(unsigned __int64 *, unsigned __int64, size_t);\n" "unsigned char __vmx_on(unsigned __int64 *);\n" "unsigned char __vmx_vmclear(unsigned __int64 *);\n" "unsigned char __vmx_vmlaunch(void);\n" "unsigned char __vmx_vmptrld(unsigned __int64 *);\n" "unsigned char __vmx_vmread(size_t, size_t *);\n" "unsigned char __vmx_vmresume(void);\n" "unsigned char __vmx_vmwrite(size_t, size_t);\n" "void __writegsbyte(unsigned long, unsigned char);\n" "void __writegsdword(unsigned long, unsigned long);\n" "void __writegsqword(unsigned long, unsigned __int64);\n" "void __writegsword(unsigned long, unsigned short);\n" "unsigned char _bittest64(__int64 const *, __int64);\n" "unsigned char _bittestandcomplement64(__int64 *, __int64);\n" "unsigned char _bittestandreset64(__int64 *, __int64);\n" "unsigned char _bittestandset64(__int64 *, __int64);\n" "long _InterlockedAnd_np(long volatile *_Value, long _Mask);\n" "short _InterlockedAnd16_np(short volatile *_Value, short _Mask);\n" "__int64 _InterlockedAnd64_np(__int64 volatile *_Value, __int64 
_Mask);\n" "char _InterlockedAnd8_np(char volatile *_Value, char _Mask);\n" "unsigned char _interlockedbittestandreset64(__int64 volatile *, __int64);\n" "unsigned char _interlockedbittestandset64(__int64 volatile *, __int64);\n" "long _InterlockedCompareExchange_np(long volatile *_Destination, long _Exchange,\n" " long _Comparand);\n" "unsigned char _InterlockedCompareExchange128_np(__int64 volatile *_Destination,\n" " __int64 _ExchangeHigh,\n" " __int64 _ExchangeLow,\n" " __int64 *_ComparandResult);\n" "short _InterlockedCompareExchange16_np(short volatile *_Destination,\n" " short _Exchange, short _Comparand);\n" "__int64 _InterlockedCompareExchange64_np(__int64 volatile *_Destination,\n" " __int64 _Exchange, __int64 _Comparand);\n" "void *_InterlockedCompareExchangePointer_np(void *volatile *_Destination,\n" " void *_Exchange, void *_Comparand);\n" "long _InterlockedOr_np(long volatile *_Value, long _Mask);\n" "short _InterlockedOr16_np(short volatile *_Value, short _Mask);\n" "__int64 _InterlockedOr64_np(__int64 volatile *_Value, __int64 _Mask);\n" "char _InterlockedOr8_np(char volatile *_Value, char _Mask);\n" "long _InterlockedXor_np(long volatile *_Value, long _Mask);\n" "short _InterlockedXor16_np(short volatile *_Value, short _Mask);\n" "__int64 _InterlockedXor64_np(__int64 volatile *_Value, __int64 _Mask);\n" "char _InterlockedXor8_np(char volatile *_Value, char _Mask);\n" "unsigned __int64 _rorx_u64(unsigned __int64, const unsigned int);\n" "__int64 _sarx_i64(__int64, unsigned int);\n" "unsigned __int64 _shlx_u64(unsigned __int64, unsigned int);\n" "unsigned __int64 _shrx_u64(unsigned __int64, unsigned int);\n" "__int64 __mulh(__int64, __int64);\n" "unsigned __int64 __umulh(unsigned __int64, unsigned __int64);\n" "__int64 _mul128(__int64, __int64, __int64*);\n" "unsigned __int64 _umul128(unsigned __int64,\n" " unsigned __int64,\n" " unsigned __int64*);\n" "\n" "#endif /* __x86_64__ */\n" "\n" "#if defined(__x86_64__) || defined(__arm__) || 
defined(__aarch64__)\n" "\n" "unsigned char _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask);\n" "unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask);\n" "\n" "#endif\n" "\n" "#if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)\n" "__int64 _InterlockedDecrement64(__int64 volatile *_Addend);\n" "__int64 _InterlockedExchange64(__int64 volatile *_Target, __int64 _Value);\n" "__int64 _InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value);\n" "__int64 _InterlockedExchangeSub64(__int64 volatile *_Subend, __int64 _Value);\n" "__int64 _InterlockedIncrement64(__int64 volatile *_Addend);\n" "__int64 _InterlockedOr64(__int64 volatile *_Value, __int64 _Mask);\n" "__int64 _InterlockedXor64(__int64 volatile *_Value, __int64 _Mask);\n" "__int64 _InterlockedAnd64(__int64 volatile *_Value, __int64 _Mask);\n" "\n" "#endif\n" "\n" "/*----------------------------------------------------------------------------*\\\n" "|* Interlocked Exchange Add\n" "\\*----------------------------------------------------------------------------*/\n" "#if defined(__arm__) || defined(__aarch64__)\n" "char _InterlockedExchangeAdd8_acq(char volatile *_Addend, char _Value);\n" "char _InterlockedExchangeAdd8_nf(char volatile *_Addend, char _Value);\n" "char _InterlockedExchangeAdd8_rel(char volatile *_Addend, char _Value);\n" "short _InterlockedExchangeAdd16_acq(short volatile *_Addend, short _Value);\n" "short _InterlockedExchangeAdd16_nf(short volatile *_Addend, short _Value);\n" "short _InterlockedExchangeAdd16_rel(short volatile *_Addend, short _Value);\n" "long _InterlockedExchangeAdd_acq(long volatile *_Addend, long _Value);\n" "long _InterlockedExchangeAdd_nf(long volatile *_Addend, long _Value);\n" "long _InterlockedExchangeAdd_rel(long volatile *_Addend, long _Value);\n" "__int64 _InterlockedExchangeAdd64_acq(__int64 volatile *_Addend, __int64 _Value);\n" "__int64 _InterlockedExchangeAdd64_nf(__int64 
volatile *_Addend, __int64 _Value);\n" "__int64 _InterlockedExchangeAdd64_rel(__int64 volatile *_Addend, __int64 _Value);\n" "#endif\n" "/*----------------------------------------------------------------------------*\\\n" "|* Interlocked Increment\n" "\\*----------------------------------------------------------------------------*/\n" "#if defined(__arm__) || defined(__aarch64__)\n" "short _InterlockedIncrement16_acq(short volatile *_Value);\n" "short _InterlockedIncrement16_nf(short volatile *_Value);\n" "short _InterlockedIncrement16_rel(short volatile *_Value);\n" "long _InterlockedIncrement_acq(long volatile *_Value);\n" "long _InterlockedIncrement_nf(long volatile *_Value);\n" "long _InterlockedIncrement_rel(long volatile *_Value);\n" "__int64 _InterlockedIncrement64_acq(__int64 volatile *_Value);\n" "__int64 _InterlockedIncrement64_nf(__int64 volatile *_Value);\n" "__int64 _InterlockedIncrement64_rel(__int64 volatile *_Value);\n" "#endif\n" "/*----------------------------------------------------------------------------*\\\n" "|* Interlocked Decrement\n" "\\*----------------------------------------------------------------------------*/\n" "#if defined(__arm__) || defined(__aarch64__)\n" "short _InterlockedDecrement16_acq(short volatile *_Value);\n" "short _InterlockedDecrement16_nf(short volatile *_Value);\n" "short _InterlockedDecrement16_rel(short volatile *_Value);\n" "long _InterlockedDecrement_acq(long volatile *_Value);\n" "long _InterlockedDecrement_nf(long volatile *_Value);\n" "long _InterlockedDecrement_rel(long volatile *_Value);\n" "__int64 _InterlockedDecrement64_acq(__int64 volatile *_Value);\n" "__int64 _InterlockedDecrement64_nf(__int64 volatile *_Value);\n" "__int64 _InterlockedDecrement64_rel(__int64 volatile *_Value);\n" "#endif\n" "/*----------------------------------------------------------------------------*\\\n" "|* Interlocked And\n" "\\*----------------------------------------------------------------------------*/\n" "#if 
defined(__arm__) || defined(__aarch64__)\n" "char _InterlockedAnd8_acq(char volatile *_Value, char _Mask);\n" "char _InterlockedAnd8_nf(char volatile *_Value, char _Mask);\n" "char _InterlockedAnd8_rel(char volatile *_Value, char _Mask);\n" "short _InterlockedAnd16_acq(short volatile *_Value, short _Mask);\n" "short _InterlockedAnd16_nf(short volatile *_Value, short _Mask);\n" "short _InterlockedAnd16_rel(short volatile *_Value, short _Mask);\n" "long _InterlockedAnd_acq(long volatile *_Value, long _Mask);\n" "long _InterlockedAnd_nf(long volatile *_Value, long _Mask);\n" "long _InterlockedAnd_rel(long volatile *_Value, long _Mask);\n" "__int64 _InterlockedAnd64_acq(__int64 volatile *_Value, __int64 _Mask);\n" "__int64 _InterlockedAnd64_nf(__int64 volatile *_Value, __int64 _Mask);\n" "__int64 _InterlockedAnd64_rel(__int64 volatile *_Value, __int64 _Mask);\n" "#endif\n" "/*----------------------------------------------------------------------------*\\\n" "|* Bit Counting and Testing\n" "\\*----------------------------------------------------------------------------*/\n" "#if defined(__arm__) || defined(__aarch64__)\n" "unsigned char _interlockedbittestandset_acq(long volatile *_BitBase,\n" " long _BitPos);\n" "unsigned char _interlockedbittestandset_nf(long volatile *_BitBase,\n" " long _BitPos);\n" "unsigned char _interlockedbittestandset_rel(long volatile *_BitBase,\n" " long _BitPos);\n" "unsigned char _interlockedbittestandreset_acq(long volatile *_BitBase,\n" " long _BitPos);\n" "unsigned char _interlockedbittestandreset_nf(long volatile *_BitBase,\n" " long _BitPos);\n" "unsigned char _interlockedbittestandreset_rel(long volatile *_BitBase,\n" " long _BitPos);\n" "#endif\n" "/*----------------------------------------------------------------------------*\\\n" "|* Interlocked Or\n" "\\*----------------------------------------------------------------------------*/\n" "#if defined(__arm__) || defined(__aarch64__)\n" "char _InterlockedOr8_acq(char volatile *_Value, 
char _Mask);\n" "char _InterlockedOr8_nf(char volatile *_Value, char _Mask);\n" "char _InterlockedOr8_rel(char volatile *_Value, char _Mask);\n" "short _InterlockedOr16_acq(short volatile *_Value, short _Mask);\n" "short _InterlockedOr16_nf(short volatile *_Value, short _Mask);\n" "short _InterlockedOr16_rel(short volatile *_Value, short _Mask);\n" "long _InterlockedOr_acq(long volatile *_Value, long _Mask);\n" "long _InterlockedOr_nf(long volatile *_Value, long _Mask);\n" "long _InterlockedOr_rel(long volatile *_Value, long _Mask);\n" "__int64 _InterlockedOr64_acq(__int64 volatile *_Value, __int64 _Mask);\n" "__int64 _InterlockedOr64_nf(__int64 volatile *_Value, __int64 _Mask);\n" "__int64 _InterlockedOr64_rel(__int64 volatile *_Value, __int64 _Mask);\n" "#endif\n" "/*----------------------------------------------------------------------------*\\\n" "|* Interlocked Xor\n" "\\*----------------------------------------------------------------------------*/\n" "#if defined(__arm__) || defined(__aarch64__)\n" "char _InterlockedXor8_acq(char volatile *_Value, char _Mask);\n" "char _InterlockedXor8_nf(char volatile *_Value, char _Mask);\n" "char _InterlockedXor8_rel(char volatile *_Value, char _Mask);\n" "short _InterlockedXor16_acq(short volatile *_Value, short _Mask);\n" "short _InterlockedXor16_nf(short volatile *_Value, short _Mask);\n" "short _InterlockedXor16_rel(short volatile *_Value, short _Mask);\n" "long _InterlockedXor_acq(long volatile *_Value, long _Mask);\n" "long _InterlockedXor_nf(long volatile *_Value, long _Mask);\n" "long _InterlockedXor_rel(long volatile *_Value, long _Mask);\n" "__int64 _InterlockedXor64_acq(__int64 volatile *_Value, __int64 _Mask);\n" "__int64 _InterlockedXor64_nf(__int64 volatile *_Value, __int64 _Mask);\n" "__int64 _InterlockedXor64_rel(__int64 volatile *_Value, __int64 _Mask);\n" "#endif\n" "/*----------------------------------------------------------------------------*\\\n" "|* Interlocked Exchange\n" 
"\\*----------------------------------------------------------------------------*/\n" "#if defined(__arm__) || defined(__aarch64__)\n" "char _InterlockedExchange8_acq(char volatile *_Target, char _Value);\n" "char _InterlockedExchange8_nf(char volatile *_Target, char _Value);\n" "char _InterlockedExchange8_rel(char volatile *_Target, char _Value);\n" "short _InterlockedExchange16_acq(short volatile *_Target, short _Value);\n" "short _InterlockedExchange16_nf(short volatile *_Target, short _Value);\n" "short _InterlockedExchange16_rel(short volatile *_Target, short _Value);\n" "long _InterlockedExchange_acq(long volatile *_Target, long _Value);\n" "long _InterlockedExchange_nf(long volatile *_Target, long _Value);\n" "long _InterlockedExchange_rel(long volatile *_Target, long _Value);\n" "__int64 _InterlockedExchange64_acq(__int64 volatile *_Target, __int64 _Value);\n" "__int64 _InterlockedExchange64_nf(__int64 volatile *_Target, __int64 _Value);\n" "__int64 _InterlockedExchange64_rel(__int64 volatile *_Target, __int64 _Value);\n" "#endif\n" "/*----------------------------------------------------------------------------*\\\n" "|* Interlocked Compare Exchange\n" "\\*----------------------------------------------------------------------------*/\n" "#if defined(__arm__) || defined(__aarch64__)\n" "char _InterlockedCompareExchange8_acq(char volatile *_Destination,\n" " char _Exchange, char _Comparand);\n" "char _InterlockedCompareExchange8_nf(char volatile *_Destination,\n" " char _Exchange, char _Comparand);\n" "char _InterlockedCompareExchange8_rel(char volatile *_Destination,\n" " char _Exchange, char _Comparand);\n" "short _InterlockedCompareExchange16_acq(short volatile *_Destination,\n" " short _Exchange, short _Comparand);\n" "short _InterlockedCompareExchange16_nf(short volatile *_Destination,\n" " short _Exchange, short _Comparand);\n" "short _InterlockedCompareExchange16_rel(short volatile *_Destination,\n" " short _Exchange, short _Comparand);\n" "long 
_InterlockedCompareExchange_acq(long volatile *_Destination,\n" " long _Exchange, long _Comparand);\n" "long _InterlockedCompareExchange_nf(long volatile *_Destination,\n" " long _Exchange, long _Comparand);\n" "long _InterlockedCompareExchange_rel(long volatile *_Destination,\n" " long _Exchange, long _Comparand);\n" "__int64 _InterlockedCompareExchange64_acq(__int64 volatile *_Destination,\n" " __int64 _Exchange, __int64 _Comparand);\n" "__int64 _InterlockedCompareExchange64_nf(__int64 volatile *_Destination,\n" " __int64 _Exchange, __int64 _Comparand);\n" "__int64 _InterlockedCompareExchange64_rel(__int64 volatile *_Destination,\n" " __int64 _Exchange, __int64 _Comparand);\n" "#endif\n" "#if defined(__x86_64__) || defined(__aarch64__)\n" "unsigned char _InterlockedCompareExchange128(__int64 volatile *_Destination,\n" " __int64 _ExchangeHigh,\n" " __int64 _ExchangeLow,\n" " __int64 *_ComparandResult);\n" "#endif\n" "#if defined(__aarch64__)\n" "unsigned char _InterlockedCompareExchange128_acq(__int64 volatile *_Destination,\n" " __int64 _ExchangeHigh,\n" " __int64 _ExchangeLow,\n" " __int64 *_ComparandResult);\n" "unsigned char _InterlockedCompareExchange128_nf(__int64 volatile *_Destination,\n" " __int64 _ExchangeHigh,\n" " __int64 _ExchangeLow,\n" " __int64 *_ComparandResult);\n" "unsigned char _InterlockedCompareExchange128_rel(__int64 volatile *_Destination,\n" " __int64 _ExchangeHigh,\n" " __int64 _ExchangeLow,\n" " __int64 *_ComparandResult);\n" "#endif\n" "\n" "/*----------------------------------------------------------------------------*\\\n" "|* movs, stos\n" "\\*----------------------------------------------------------------------------*/\n" "#if defined(__i386__) || defined(__x86_64__)\n" "static __inline__ void __DEFAULT_FN_ATTRS __movsb(unsigned char *__dst,\n" " unsigned char const *__src,\n" " size_t __n) {\n" "#if defined(__x86_64__)\n" " __asm__ __volatile__(\"rep movsb\"\n" " : \"+D\"(__dst), \"+S\"(__src), \"+c\"(__n)\n" " :\n" " : 
\"memory\");\n" "#else\n" " __asm__ __volatile__(\"xchg {%%esi, %1|%1, esi}\\n\"\n" " \"rep movsb\\n\"\n" " \"xchg {%%esi, %1|%1, esi}\"\n" " : \"+D\"(__dst), \"+r\"(__src), \"+c\"(__n)\n" " :\n" " : \"memory\");\n" "#endif\n" "}\n" "static __inline__ void __DEFAULT_FN_ATTRS __movsd(unsigned long *__dst,\n" " unsigned long const *__src,\n" " size_t __n) {\n" "#if defined(__x86_64__)\n" " __asm__ __volatile__(\"rep movs{l|d}\"\n" " : \"+D\"(__dst), \"+S\"(__src), \"+c\"(__n)\n" " :\n" " : \"memory\");\n" "#else\n" " __asm__ __volatile__(\"xchg {%%esi, %1|%1, esi}\\n\"\n" " \"rep movs{l|d}\\n\"\n" " \"xchg {%%esi, %1|%1, esi}\"\n" " : \"+D\"(__dst), \"+r\"(__src), \"+c\"(__n)\n" " :\n" " : \"memory\");\n" "#endif\n" "}\n" "static __inline__ void __DEFAULT_FN_ATTRS __movsw(unsigned short *__dst,\n" " unsigned short const *__src,\n" " size_t __n) {\n" "#if defined(__x86_64__)\n" " __asm__ __volatile__(\"rep movsw\"\n" " : \"+D\"(__dst), \"+S\"(__src), \"+c\"(__n)\n" " :\n" " : \"memory\");\n" "#else\n" " __asm__ __volatile__(\"xchg {%%esi, %1|%1, esi}\\n\"\n" " \"rep movsw\\n\"\n" " \"xchg {%%esi, %1|%1, esi}\"\n" " : \"+D\"(__dst), \"+r\"(__src), \"+c\"(__n)\n" " :\n" " : \"memory\");\n" "#endif\n" "}\n" "static __inline__ void __DEFAULT_FN_ATTRS __stosd(unsigned long *__dst,\n" " unsigned long __x,\n" " size_t __n) {\n" " __asm__ __volatile__(\"rep stos{l|d}\"\n" " : \"+D\"(__dst), \"+c\"(__n)\n" " : \"a\"(__x)\n" " : \"memory\");\n" "}\n" "static __inline__ void __DEFAULT_FN_ATTRS __stosw(unsigned short *__dst,\n" " unsigned short __x,\n" " size_t __n) {\n" " __asm__ __volatile__(\"rep stosw\"\n" " : \"+D\"(__dst), \"+c\"(__n)\n" " : \"a\"(__x)\n" " : \"memory\");\n" "}\n" "#endif\n" "#ifdef __x86_64__\n" "static __inline__ void __DEFAULT_FN_ATTRS __movsq(\n" " unsigned long long *__dst, unsigned long long const *__src, size_t __n) {\n" " __asm__ __volatile__(\"rep movsq\"\n" " : \"+D\"(__dst), \"+S\"(__src), \"+c\"(__n)\n" " :\n" " : \"memory\");\n" "}\n" "static 
__inline__ void __DEFAULT_FN_ATTRS __stosq(unsigned __int64 *__dst,\n" " unsigned __int64 __x,\n" " size_t __n) {\n" " __asm__ __volatile__(\"rep stosq\" : \"+D\"(__dst), \"+c\"(__n) : \"a\"(__x)\n" " : \"memory\");\n" "}\n" "#endif\n" "\n" "/*----------------------------------------------------------------------------*\\\n" "|* Misc\n" "\\*----------------------------------------------------------------------------*/\n" "#if defined(__i386__) || defined(__x86_64__)\n" "static __inline__ void __DEFAULT_FN_ATTRS __halt(void) {\n" " __asm__ volatile(\"hlt\");\n" "}\n" "#endif\n" "\n" "#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__)\n" "static __inline__ void __DEFAULT_FN_ATTRS __nop(void) {\n" " __asm__ volatile(\"nop\");\n" "}\n" "#endif\n" "\n" "/*----------------------------------------------------------------------------*\\\n" "|* MS AArch64 specific\n" "\\*----------------------------------------------------------------------------*/\n" "#if defined(__aarch64__)\n" "unsigned __int64 __getReg(int);\n" "long _InterlockedAdd(long volatile *Addend, long Value);\n" "__int64 _ReadStatusReg(int);\n" "void _WriteStatusReg(int, __int64);\n" "\n" "unsigned short __cdecl _byteswap_ushort(unsigned short val);\n" "unsigned long __cdecl _byteswap_ulong (unsigned long val);\n" "unsigned __int64 __cdecl _byteswap_uint64(unsigned __int64 val);\n" "\n" "__int64 __mulh(__int64 __a, __int64 __b);\n" "unsigned __int64 __umulh(unsigned __int64 __a, unsigned __int64 __b);\n" "\n" "void __break(int);\n" "\n" "void __writex18byte(unsigned long offset, unsigned char data);\n" "void __writex18word(unsigned long offset, unsigned short data);\n" "void __writex18dword(unsigned long offset, unsigned long data);\n" "void __writex18qword(unsigned long offset, unsigned __int64 data);\n" "\n" "unsigned char __readx18byte(unsigned long offset);\n" "unsigned short __readx18word(unsigned long offset);\n" "unsigned long __readx18dword(unsigned long offset);\n" "unsigned __int64 
__readx18qword(unsigned long offset);\n" "#endif\n" "\n" "/*----------------------------------------------------------------------------*\\\n" "|* Privileged intrinsics\n" "\\*----------------------------------------------------------------------------*/\n" "#if defined(__i386__) || defined(__x86_64__)\n" "static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS\n" "__readmsr(unsigned long __register) {\n" " // Loads the contents of a 64-bit model specific register (MSR) specified in\n" " // the ECX register into registers EDX:EAX. The EDX register is loaded with\n" " // the high-order 32 bits of the MSR and the EAX register is loaded with the\n" " // low-order 32 bits. If less than 64 bits are implemented in the MSR being\n" " // read, the values returned to EDX:EAX in unimplemented bit locations are\n" " // undefined.\n" " unsigned long __edx;\n" " unsigned long __eax;\n" " __asm__ (\"rdmsr\" : \"=d\"(__edx), \"=a\"(__eax) : \"c\"(__register));\n" " return (((unsigned __int64)__edx) << 32) | (unsigned __int64)__eax;\n" "}\n" "#endif\n" "\n" "static __inline__ unsigned __LPTRINT_TYPE__ __DEFAULT_FN_ATTRS __readcr3(void) {\n" " unsigned __LPTRINT_TYPE__ __cr3_val;\n" " __asm__ __volatile__(\n" " \"mov {%%cr3, %0|%0, cr3}\"\n" " : \"=r\"(__cr3_val)\n" " :\n" " : \"memory\");\n" " return __cr3_val;\n" "}\n" "\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "__writecr3(unsigned __INTPTR_TYPE__ __cr3_val) {\n" " __asm__ (\"mov {%0, %%cr3|cr3, %0}\" : : \"r\"(__cr3_val) : \"memory\");\n" "}\n" "\n" "#ifdef __cplusplus\n" "}\n" "#endif\n" "\n" "#undef __LPTRINT_TYPE__\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "\n" "#endif /* __INTRIN_H */\n" "#endif /* _MSC_VER */\n" "" } , { "/builtins/inttypes.h" , "/*===---- inttypes.h - Standard header for integer printf macros ----------===*\\\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: 
Apache-2.0 WITH LLVM-exception\n" " *\n" "\\*===----------------------------------------------------------------------===*/\n" "\n" "#if !defined(__CLANG_INTTYPES_H) || !defined(_INTTYPES_H)\n" "// AIX system headers need inttypes.h to be re-enterable while _STD_TYPES_T\n" "// is defined until an inclusion of it without _STD_TYPES_T occurs, in which\n" "// case the header guard macro is defined.\n" "#if !defined(_AIX) || !defined(_STD_TYPES_T)\n" "#define __CLANG_INTTYPES_H\n" "#endif\n" "\n" "#if defined(_MSC_VER) && _MSC_VER < 1800\n" "#error MSVC does not have inttypes.h prior to Visual Studio 2013\n" "#endif\n" "\n" "#include_next \n" "\n" "#if defined(_MSC_VER) && _MSC_VER < 1900\n" "/* MSVC headers define int32_t as int, but PRIx32 as \"lx\" instead of \"x\".\n" " * This triggers format warnings, so fix it up here. */\n" "#undef PRId32\n" "#undef PRIdLEAST32\n" "#undef PRIdFAST32\n" "#undef PRIi32\n" "#undef PRIiLEAST32\n" "#undef PRIiFAST32\n" "#undef PRIo32\n" "#undef PRIoLEAST32\n" "#undef PRIoFAST32\n" "#undef PRIu32\n" "#undef PRIuLEAST32\n" "#undef PRIuFAST32\n" "#undef PRIx32\n" "#undef PRIxLEAST32\n" "#undef PRIxFAST32\n" "#undef PRIX32\n" "#undef PRIXLEAST32\n" "#undef PRIXFAST32\n" "\n" "#undef SCNd32\n" "#undef SCNdLEAST32\n" "#undef SCNdFAST32\n" "#undef SCNi32\n" "#undef SCNiLEAST32\n" "#undef SCNiFAST32\n" "#undef SCNo32\n" "#undef SCNoLEAST32\n" "#undef SCNoFAST32\n" "#undef SCNu32\n" "#undef SCNuLEAST32\n" "#undef SCNuFAST32\n" "#undef SCNx32\n" "#undef SCNxLEAST32\n" "#undef SCNxFAST32\n" "\n" "#define PRId32 \"d\"\n" "#define PRIdLEAST32 \"d\"\n" "#define PRIdFAST32 \"d\"\n" "#define PRIi32 \"i\"\n" "#define PRIiLEAST32 \"i\"\n" "#define PRIiFAST32 \"i\"\n" "#define PRIo32 \"o\"\n" "#define PRIoLEAST32 \"o\"\n" "#define PRIoFAST32 \"o\"\n" "#define PRIu32 \"u\"\n" "#define PRIuLEAST32 \"u\"\n" "#define PRIuFAST32 \"u\"\n" "#define PRIx32 \"x\"\n" "#define PRIxLEAST32 \"x\"\n" "#define PRIxFAST32 \"x\"\n" "#define PRIX32 \"X\"\n" "#define 
PRIXLEAST32 \"X\"\n" "#define PRIXFAST32 \"X\"\n" "\n" "#define SCNd32 \"d\"\n" "#define SCNdLEAST32 \"d\"\n" "#define SCNdFAST32 \"d\"\n" "#define SCNi32 \"i\"\n" "#define SCNiLEAST32 \"i\"\n" "#define SCNiFAST32 \"i\"\n" "#define SCNo32 \"o\"\n" "#define SCNoLEAST32 \"o\"\n" "#define SCNoFAST32 \"o\"\n" "#define SCNu32 \"u\"\n" "#define SCNuLEAST32 \"u\"\n" "#define SCNuFAST32 \"u\"\n" "#define SCNx32 \"x\"\n" "#define SCNxLEAST32 \"x\"\n" "#define SCNxFAST32 \"x\"\n" "#endif\n" "\n" "#endif /* __CLANG_INTTYPES_H */\n" "" } , { "/builtins/invpcidintrin.h" , "/*===------------- invpcidintrin.h - INVPCID intrinsic ---------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __INVPCIDINTRIN_H\n" "#define __INVPCIDINTRIN_H\n" "\n" "static __inline__ void\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"invpcid\")))\n" "_invpcid(unsigned int __type, void *__descriptor) {\n" " __builtin_ia32_invpcid(__type, __descriptor);\n" "}\n" "\n" "#endif /* __INVPCIDINTRIN_H */\n" "" } , { "/builtins/iso646.h" , "/*===---- iso646.h - Standard header for alternate spellings of operators---===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __ISO646_H\n" "#define __ISO646_H\n" "\n" "#ifndef __cplusplus\n" "#define and &&\n" "#define and_eq &=\n" "#define bitand &\n" "#define bitor |\n" "#define 
compl ~\n" "#define not !\n" "#define not_eq !=\n" "#define or ||\n" "#define or_eq |=\n" "#define xor ^\n" "#define xor_eq ^=\n" "#endif\n" "\n" "#endif /* __ISO646_H */\n" "" } , { "/builtins/keylockerintrin.h" , "/*===----------------- keylockerintrin.h - KL Intrinsics -------------------===\n" " *\n" " * Permission is hereby granted, free of charge, to any person obtaining a copy\n" " * of this software and associated documentation files (the \"Software\"), to deal\n" " * in the Software without restriction, including without limitation the rights\n" " * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n" " * copies of the Software, and to permit persons to whom the Software is\n" " * furnished to do so, subject to the following conditions:\n" " *\n" " * The above copyright notice and this permission notice shall be included in\n" " * all copies or substantial portions of the Software.\n" " *\n" " * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n" " * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n" " * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n" " * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n" " * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n" " * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n" " * THE SOFTWARE.\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef _KEYLOCKERINTRIN_H\n" "#define _KEYLOCKERINTRIN_H\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__KL__)\n" "\n" "/* Define the default attributes for the functions in this file. 
*/\n" "#define __DEFAULT_FN_ATTRS \\\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"kl\"),\\\n" " __min_vector_width__(128)))\n" "\n" "/// Load internal wrapping key from __intkey, __enkey_lo and __enkey_hi. __ctl\n" "/// will assigned to EAX, whch specifies the KeySource and whether backing up\n" "/// the key is permitted. The 256-bit encryption key is loaded from the two\n" "/// explicit operands (__enkey_lo and __enkey_hi). The 128-bit integrity key is\n" "/// loaded from the implicit operand XMM0 which assigned by __intkey.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the LOADIWKEY instructions.\n" "///\n" "/// \\code{.operation}\n" "/// IF CPL > 0 // LOADKWKEY only allowed at ring 0 (supervisor mode)\n" "/// GP (0)\n" "/// FI\n" "/// IF “LOADIWKEY exiting” VM execution control set\n" "/// VMexit\n" "/// FI\n" "/// IF __ctl[4:1] > 1 // Reserved KeySource encoding used\n" "/// GP (0)\n" "/// FI\n" "/// IF __ctl[31:5] != 0 // Reserved bit in __ctl is set\n" "/// GP (0)\n" "/// FI\n" "/// IF __ctl[0] AND (CPUID.19H.ECX[0] == 0) // NoBackup is not supported on this part\n" "/// GP (0)\n" "/// FI\n" "/// IF (__ctl[4:1] == 1) AND (CPUID.19H.ECX[1] == 0) // KeySource of 1 is not supported on this part\n" "/// GP (0)\n" "/// FI\n" "/// IF (__ctl[4:1] == 0) // KeySource of 0.\n" "/// IWKey.Encryption Key[127:0] := __enkey_hi[127:0]:\n" "/// IWKey.Encryption Key[255:128] := __enkey_lo[127:0]\n" "/// IWKey.IntegrityKey[127:0] := __intkey[127:0]\n" "/// IWKey.NoBackup := __ctl[0]\n" "/// IWKey.KeySource := __ctl[4:1]\n" "/// ZF := 0\n" "/// ELSE // KeySource of 1. 
See RDSEED definition for details of randomness\n" "/// IF HW_NRND_GEN.ready == 1 // Full-entropy random data from RDSEED was received\n" "/// IWKey.Encryption Key[127:0] := __enkey_hi[127:0] XOR HW_NRND_GEN.data[127:0]\n" "/// IWKey.Encryption Key[255:128] := __enkey_lo[127:0] XOR HW_NRND_GEN.data[255:128]\n" "/// IWKey.Encryption Key[255:0] := __enkey_hi[127:0]:__enkey_lo[127:0] XOR HW_NRND_GEN.data[255:0]\n" "/// IWKey.IntegrityKey[127:0] := __intkey[127:0] XOR HW_NRND_GEN.data[383:256]\n" "/// IWKey.NoBackup := __ctl[0]\n" "/// IWKey.KeySource := __ctl[4:1]\n" "/// ZF := 0\n" "/// ELSE // Random data was not returned from RDSEED. IWKey was not loaded\n" "/// ZF := 1\n" "/// FI\n" "/// FI\n" "/// dst := ZF\n" "/// OF := 0\n" "/// SF := 0\n" "/// AF := 0\n" "/// PF := 0\n" "/// CF := 0\n" "/// \\endcode\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_mm_loadiwkey (unsigned int __ctl, __m128i __intkey,\n" " __m128i __enkey_lo, __m128i __enkey_hi) {\n" " __builtin_ia32_loadiwkey (__intkey, __enkey_lo, __enkey_hi, __ctl);\n" "}\n" "\n" "/// Wrap a 128-bit AES key from __key into a key handle and output in\n" "/// ((__m128i*)__h) to ((__m128i*)__h) + 2 and a 32-bit value as return.\n" "/// The explicit source operand __htype specifies handle restrictions.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the ENCODEKEY128 instructions.\n" "///\n" "/// \\code{.operation}\n" "/// InputKey[127:0] := __key[127:0]\n" "/// KeyMetadata[2:0] := __htype[2:0]\n" "/// KeyMetadata[23:3] := 0 // Reserved for future usage\n" "/// KeyMetadata[27:24] := 0 // KeyType is AES-128 (value of 0)\n" "/// KeyMetadata[127:28] := 0 // Reserved for future usage\n" "/// Handle[383:0] := WrapKey128(InputKey[127:0], KeyMetadata[127:0],\n" "/// IWKey.Integrity Key[127:0], IWKey.Encryption Key[255:0])\n" "/// dst[0] := IWKey.NoBackup\n" "/// dst[4:1] := IWKey.KeySource[3:0]\n" "/// dst[31:5] := 0\n" "/// MEM[__h+127:__h] := Handle[127:0] // AAD\n" "/// 
MEM[__h+255:__h+128] := Handle[255:128] // Integrity Tag\n" "/// MEM[__h+383:__h+256] := Handle[383:256] // CipherText\n" "/// OF := 0\n" "/// SF := 0\n" "/// ZF := 0\n" "/// AF := 0\n" "/// PF := 0\n" "/// CF := 0\n" "/// \\endcode\n" "static __inline__ unsigned int __DEFAULT_FN_ATTRS\n" "_mm_encodekey128_u32(unsigned int __htype, __m128i __key, void *__h) {\n" " return __builtin_ia32_encodekey128_u32(__htype, (__v2di)__key, __h);\n" "}\n" "\n" "/// Wrap a 256-bit AES key from __key_hi:__key_lo into a key handle, then\n" "/// output handle in ((__m128i*)__h) to ((__m128i*)__h) + 3 and\n" "/// a 32-bit value as return.\n" "/// The explicit source operand __htype specifies handle restrictions.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the ENCODEKEY256 instructions.\n" "///\n" "/// \\code{.operation}\n" "/// InputKey[127:0] := __key_lo[127:0]\n" "/// InputKey[255:128] := __key_hi[255:128]\n" "/// KeyMetadata[2:0] := __htype[2:0]\n" "/// KeyMetadata[23:3] := 0 // Reserved for future usage\n" "/// KeyMetadata[27:24] := 1 // KeyType is AES-256 (value of 1)\n" "/// KeyMetadata[127:28] := 0 // Reserved for future usage\n" "/// Handle[511:0] := WrapKey256(InputKey[255:0], KeyMetadata[127:0],\n" "/// IWKey.Integrity Key[127:0], IWKey.Encryption Key[255:0])\n" "/// dst[0] := IWKey.NoBackup\n" "/// dst[4:1] := IWKey.KeySource[3:0]\n" "/// dst[31:5] := 0\n" "/// MEM[__h+127:__h] := Handle[127:0] // AAD\n" "/// MEM[__h+255:__h+128] := Handle[255:128] // Tag\n" "/// MEM[__h+383:__h+256] := Handle[383:256] // CipherText[127:0]\n" "/// MEM[__h+511:__h+384] := Handle[511:384] // CipherText[255:128]\n" "/// OF := 0\n" "/// SF := 0\n" "/// ZF := 0\n" "/// AF := 0\n" "/// PF := 0\n" "/// CF := 0\n" "/// \\endcode\n" "static __inline__ unsigned int __DEFAULT_FN_ATTRS\n" "_mm_encodekey256_u32(unsigned int __htype, __m128i __key_lo, __m128i __key_hi,\n" " void *__h) {\n" " return __builtin_ia32_encodekey256_u32(__htype, (__v2di)__key_lo,\n" " 
(__v2di)__key_hi, __h);\n" "}\n" "\n" "/// The AESENC128KL performs 10 rounds of AES to encrypt the __idata using\n" "/// the 128-bit key in the handle from the __h. It stores the result in the\n" "/// __odata. And return the affected ZF flag status.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the AESENC128KL instructions.\n" "///\n" "/// \\code{.operation}\n" "/// Handle[383:0] := MEM[__h+383:__h] // Load is not guaranteed to be atomic.\n" "/// IllegalHandle := ( HandleReservedBitSet (Handle[383:0]) ||\n" "/// (Handle[127:0] AND (CPL > 0)) ||\n" "/// Handle[383:256] ||\n" "/// HandleKeyType (Handle[383:0]) != HANDLE_KEY_TYPE_AES128 )\n" "/// IF (IllegalHandle)\n" "/// ZF := 1\n" "/// ELSE\n" "/// (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate384 (Handle[383:0], IWKey)\n" "/// IF (Authentic == 0)\n" "/// ZF := 1\n" "/// ELSE\n" "/// MEM[__odata+127:__odata] := AES128Encrypt (__idata[127:0], UnwrappedKey)\n" "/// ZF := 0\n" "/// FI\n" "/// FI\n" "/// dst := ZF\n" "/// OF := 0\n" "/// SF := 0\n" "/// AF := 0\n" "/// PF := 0\n" "/// CF := 0\n" "/// \\endcode\n" "static __inline__ unsigned char __DEFAULT_FN_ATTRS\n" "_mm_aesenc128kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {\n" " return __builtin_ia32_aesenc128kl_u8((__v2di *)__odata, (__v2di)__idata, __h);\n" "}\n" "\n" "/// The AESENC256KL performs 14 rounds of AES to encrypt the __idata using\n" "/// the 256-bit key in the handle from the __h. It stores the result in the\n" "/// __odata. 
And return the affected ZF flag status.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the AESENC256KL instructions.\n" "///\n" "/// \\code{.operation}\n" "/// Handle[511:0] := MEM[__h+511:__h] // Load is not guaranteed to be atomic.\n" "/// IllegalHandle := ( HandleReservedBitSet (Handle[511:0]) ||\n" "/// (Handle[127:0] AND (CPL > 0)) ||\n" "/// Handle[255:128] ||\n" "/// HandleKeyType (Handle[511:0]) != HANDLE_KEY_TYPE_AES256 )\n" "/// IF (IllegalHandle)\n" "/// ZF := 1\n" "/// MEM[__odata+127:__odata] := 0\n" "/// ELSE\n" "/// (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate512 (Handle[511:0], IWKey)\n" "/// IF (Authentic == 0)\n" "/// ZF := 1\n" "/// MEM[__odata+127:__odata] := 0\n" "/// ELSE\n" "/// MEM[__odata+127:__odata] := AES256Encrypt (__idata[127:0], UnwrappedKey)\n" "/// ZF := 0\n" "/// FI\n" "/// FI\n" "/// dst := ZF\n" "/// OF := 0\n" "/// SF := 0\n" "/// AF := 0\n" "/// PF := 0\n" "/// CF := 0\n" "/// \\endcode\n" "static __inline__ unsigned char __DEFAULT_FN_ATTRS\n" "_mm_aesenc256kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {\n" " return __builtin_ia32_aesenc256kl_u8((__v2di *)__odata, (__v2di)__idata, __h);\n" "}\n" "\n" "/// The AESDEC128KL performs 10 rounds of AES to decrypt the __idata using\n" "/// the 128-bit key in the handle from the __h. It stores the result in the\n" "/// __odata. 
And return the affected ZF flag status.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the AESDEC128KL instructions.\n" "///\n" "/// \\code{.operation}\n" "/// Handle[383:0] := MEM[__h+383:__h] // Load is not guaranteed to be atomic.\n" "/// IllegalHandle := (HandleReservedBitSet (Handle[383:0]) ||\n" "/// (Handle[127:0] AND (CPL > 0)) ||\n" "/// Handle[383:256] ||\n" "/// HandleKeyType (Handle[383:0]) != HANDLE_KEY_TYPE_AES128)\n" "/// IF (IllegalHandle)\n" "/// ZF := 1\n" "/// MEM[__odata+127:__odata] := 0\n" "/// ELSE\n" "/// (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate384 (Handle[383:0], IWKey)\n" "/// IF (Authentic == 0)\n" "/// ZF := 1\n" "/// MEM[__odata+127:__odata] := 0\n" "/// ELSE\n" "/// MEM[__odata+127:__odata] := AES128Decrypt (__idata[127:0], UnwrappedKey)\n" "/// ZF := 0\n" "/// FI\n" "/// FI\n" "/// dst := ZF\n" "/// OF := 0\n" "/// SF := 0\n" "/// AF := 0\n" "/// PF := 0\n" "/// CF := 0\n" "/// \\endcode\n" "static __inline__ unsigned char __DEFAULT_FN_ATTRS\n" "_mm_aesdec128kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {\n" " return __builtin_ia32_aesdec128kl_u8((__v2di *)__odata, (__v2di)__idata, __h);\n" "}\n" "\n" "/// The AESDEC256KL performs 10 rounds of AES to decrypt the __idata using\n" "/// the 256-bit key in the handle from the __h. It stores the result in the\n" "/// __odata. 
And return the affected ZF flag status.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the AESDEC256KL instructions.\n" "///\n" "/// \\code{.operation}\n" "/// Handle[511:0] := MEM[__h+511:__h]\n" "/// IllegalHandle := (HandleReservedBitSet (Handle[511:0]) ||\n" "/// (Handle[127:0] AND (CPL > 0)) ||\n" "/// Handle[383:256] ||\n" "/// HandleKeyType (Handle[511:0]) != HANDLE_KEY_TYPE_AES256)\n" "/// IF (IllegalHandle)\n" "/// ZF := 1\n" "/// MEM[__odata+127:__odata] := 0\n" "/// ELSE\n" "/// (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate512 (Handle[511:0], IWKey)\n" "/// IF (Authentic == 0)\n" "/// ZF := 1\n" "/// MEM[__odata+127:__odata] := 0\n" "/// ELSE\n" "/// MEM[__odata+127:__odata] := AES256Decrypt (__idata[127:0], UnwrappedKey)\n" "/// ZF := 0\n" "/// FI\n" "/// FI\n" "/// dst := ZF\n" "/// OF := 0\n" "/// SF := 0\n" "/// AF := 0\n" "/// PF := 0\n" "/// CF := 0\n" "/// \\endcode\n" "static __inline__ unsigned char __DEFAULT_FN_ATTRS\n" "_mm_aesdec256kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {\n" " return __builtin_ia32_aesdec256kl_u8((__v2di *)__odata, (__v2di)__idata, __h);\n" "}\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "\n" "#endif /* !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) \\\n" " || defined(__KL__) */\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__WIDEKL__)\n" "\n" "/* Define the default attributes for the functions in this file. */\n" "#define __DEFAULT_FN_ATTRS \\\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"kl,widekl\"),\\\n" " __min_vector_width__(128)))\n" "\n" "/// Encrypt __idata[0] to __idata[7] using 128-bit AES key indicated by handle\n" "/// at __h and store each resultant block back from __odata to __odata+7. 
And\n" "/// return the affected ZF flag status.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the AESENCWIDE128KL instructions.\n" "///\n" "/// \\code{.operation}\n" "/// Handle := MEM[__h+383:__h]\n" "/// IllegalHandle := ( HandleReservedBitSet (Handle[383:0]) ||\n" "/// (Handle[127:0] AND (CPL > 0)) ||\n" "/// Handle[255:128] ||\n" "/// HandleKeyType (Handle[383:0]) != HANDLE_KEY_TYPE_AES128 )\n" "/// IF (IllegalHandle)\n" "/// ZF := 1\n" "/// FOR i := 0 to 7\n" "/// __odata[i] := 0\n" "/// ENDFOR\n" "/// ELSE\n" "/// (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate384 (Handle[383:0], IWKey)\n" "/// IF Authentic == 0\n" "/// ZF := 1\n" "/// FOR i := 0 to 7\n" "/// __odata[i] := 0\n" "/// ENDFOR\n" "/// ELSE\n" "/// FOR i := 0 to 7\n" "/// __odata[i] := AES128Encrypt (__idata[i], UnwrappedKey)\n" "/// ENDFOR\n" "/// ZF := 0\n" "/// FI\n" "/// FI\n" "/// dst := ZF\n" "/// OF := 0\n" "/// SF := 0\n" "/// AF := 0\n" "/// PF := 0\n" "/// CF := 0\n" "/// \\endcode\n" "static __inline__ unsigned char __DEFAULT_FN_ATTRS\n" "_mm_aesencwide128kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {\n" " return __builtin_ia32_aesencwide128kl_u8((__v2di *)__odata,\n" " (const __v2di *)__idata, __h);\n" "}\n" "\n" "/// Encrypt __idata[0] to __idata[7] using 256-bit AES key indicated by handle\n" "/// at __h and store each resultant block back from __odata to __odata+7. 
And\n" "/// return the affected ZF flag status.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the AESENCWIDE256KL instructions.\n" "///\n" "/// \\code{.operation}\n" "/// Handle[511:0] := MEM[__h+511:__h]\n" "/// IllegalHandle := ( HandleReservedBitSet (Handle[511:0]) ||\n" "/// (Handle[127:0] AND (CPL > 0)) ||\n" "/// Handle[255:128] ||\n" "/// HandleKeyType (Handle[511:0]) != HANDLE_KEY_TYPE_AES512 )\n" "/// IF (IllegalHandle)\n" "/// ZF := 1\n" "/// FOR i := 0 to 7\n" "/// __odata[i] := 0\n" "/// ENDFOR\n" "/// ELSE\n" "/// (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate512 (Handle[511:0], IWKey)\n" "/// IF Authentic == 0\n" "/// ZF := 1\n" "/// FOR i := 0 to 7\n" "/// __odata[i] := 0\n" "/// ENDFOR\n" "/// ELSE\n" "/// FOR i := 0 to 7\n" "/// __odata[i] := AES256Encrypt (__idata[i], UnwrappedKey)\n" "/// ENDFOR\n" "/// ZF := 0\n" "/// FI\n" "/// FI\n" "/// dst := ZF\n" "/// OF := 0\n" "/// SF := 0\n" "/// AF := 0\n" "/// PF := 0\n" "/// CF := 0\n" "/// \\endcode\n" "static __inline__ unsigned char __DEFAULT_FN_ATTRS\n" "_mm_aesencwide256kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {\n" " return __builtin_ia32_aesencwide256kl_u8((__v2di *)__odata,\n" " (const __v2di *)__idata, __h);\n" "}\n" "\n" "/// Decrypt __idata[0] to __idata[7] using 128-bit AES key indicated by handle\n" "/// at __h and store each resultant block back from __odata to __odata+7. 
And\n" "/// return the affected ZF flag status.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the AESDECWIDE128KL instructions.\n" "///\n" "/// \\code{.operation}\n" "/// Handle[383:0] := MEM[__h+383:__h]\n" "/// IllegalHandle := ( HandleReservedBitSet (Handle[383:0]) ||\n" "/// (Handle[127:0] AND (CPL > 0)) ||\n" "/// Handle[255:128] ||\n" "/// HandleKeyType (Handle) != HANDLE_KEY_TYPE_AES128 )\n" "/// IF (IllegalHandle)\n" "/// ZF := 1\n" "/// FOR i := 0 to 7\n" "/// __odata[i] := 0\n" "/// ENDFOR\n" "/// ELSE\n" "/// (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate384 (Handle[383:0], IWKey)\n" "/// IF Authentic == 0\n" "/// ZF := 1\n" "/// FOR i := 0 to 7\n" "/// __odata[i] := 0\n" "/// ENDFOR\n" "/// ELSE\n" "/// FOR i := 0 to 7\n" "/// __odata[i] := AES128Decrypt (__idata[i], UnwrappedKey)\n" "/// ENDFOR\n" "/// ZF := 0\n" "/// FI\n" "/// FI\n" "/// dst := ZF\n" "/// OF := 0\n" "/// SF := 0\n" "/// AF := 0\n" "/// PF := 0\n" "/// CF := 0\n" "/// \\endcode\n" "static __inline__ unsigned char __DEFAULT_FN_ATTRS\n" "_mm_aesdecwide128kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {\n" " return __builtin_ia32_aesdecwide128kl_u8((__v2di *)__odata,\n" " (const __v2di *)__idata, __h);\n" "}\n" "\n" "/// Decrypt __idata[0] to __idata[7] using 256-bit AES key indicated by handle\n" "/// at __h and store each resultant block back from __odata to __odata+7. 
And\n" "/// return the affected ZF flag status.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the AESDECWIDE256KL instructions.\n" "///\n" "/// \\code{.operation}\n" "/// Handle[511:0] := MEM[__h+511:__h]\n" "/// IllegalHandle = ( HandleReservedBitSet (Handle[511:0]) ||\n" "/// (Handle[127:0] AND (CPL > 0)) ||\n" "/// Handle[255:128] ||\n" "/// HandleKeyType (Handle) != HANDLE_KEY_TYPE_AES512 )\n" "/// If (IllegalHandle)\n" "/// ZF := 1\n" "/// FOR i := 0 to 7\n" "/// __odata[i] := 0\n" "/// ENDFOR\n" "/// ELSE\n" "/// (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate512 (Handle[511:0], IWKey)\n" "/// IF Authentic == 0\n" "/// ZF := 1\n" "/// FOR i := 0 to 7\n" "/// __odata[i] := 0\n" "/// ENDFOR\n" "/// ELSE\n" "/// FOR i := 0 to 7\n" "/// __odata[i] := AES256Decrypt (__idata[i], UnwrappedKey)\n" "/// ENDFOR\n" "/// ZF := 0\n" "/// FI\n" "/// FI\n" "/// dst := ZF\n" "/// OF := 0\n" "/// SF := 0\n" "/// AF := 0\n" "/// PF := 0\n" "/// CF := 0\n" "/// \\endcode\n" "static __inline__ unsigned char __DEFAULT_FN_ATTRS\n" "_mm_aesdecwide256kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {\n" " return __builtin_ia32_aesdecwide256kl_u8((__v2di *)__odata,\n" " (const __v2di *)__idata, __h);\n" "}\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "\n" "#endif /* !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) \\\n" " || defined(__WIDEKL__) */\n" "\n" "#endif /* _KEYLOCKERINTRIN_H */\n" "" } , { "/builtins/larchintrin.h" , "/*===------------ larchintrin.h - LoongArch intrinsics ---------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef _LOONGARCH_BASE_INTRIN_H\n" "#define _LOONGARCH_BASE_INTRIN_H\n" "\n" "#ifdef 
__cplusplus\n" "extern \"C\" {\n" "#endif\n" "\n" "typedef struct rdtime {\n" " unsigned int value;\n" " unsigned int timeid;\n" "} __rdtime_t;\n" "\n" "#if __loongarch_grlen == 64\n" "typedef struct drdtime {\n" " unsigned long dvalue;\n" " unsigned long dtimeid;\n" "} __drdtime_t;\n" "\n" "extern __inline __drdtime_t\n" " __attribute__((__gnu_inline__, __always_inline__, __artificial__))\n" " __rdtime_d(void) {\n" " __drdtime_t __drdtime;\n" " __asm__ volatile(\n" " \"rdtime.d %[val], %[tid]\\n\\t\"\n" " : [val] \"=&r\"(__drdtime.dvalue), [tid] \"=&r\"(__drdtime.dtimeid));\n" " return __drdtime;\n" "}\n" "#endif\n" "\n" "extern __inline __rdtime_t\n" " __attribute__((__gnu_inline__, __always_inline__, __artificial__))\n" " __rdtimeh_w(void) {\n" " __rdtime_t __rdtime;\n" " __asm__ volatile(\"rdtimeh.w %[val], %[tid]\\n\\t\"\n" " : [val] \"=&r\"(__rdtime.value), [tid] \"=&r\"(__rdtime.timeid));\n" " return __rdtime;\n" "}\n" "\n" "extern __inline __rdtime_t\n" " __attribute__((__gnu_inline__, __always_inline__, __artificial__))\n" " __rdtimel_w(void) {\n" " __rdtime_t __rdtime;\n" " __asm__ volatile(\"rdtimel.w %[val], %[tid]\\n\\t\"\n" " : [val] \"=&r\"(__rdtime.value), [tid] \"=&r\"(__rdtime.timeid));\n" " return __rdtime;\n" "}\n" "\n" "#if __loongarch_grlen == 64\n" "extern __inline int\n" " __attribute__((__gnu_inline__, __always_inline__, __artificial__))\n" " __crc_w_b_w(char _1, int _2) {\n" " return (int)__builtin_loongarch_crc_w_b_w((char)_1, (int)_2);\n" "}\n" "\n" "extern __inline int\n" " __attribute__((__gnu_inline__, __always_inline__, __artificial__))\n" " __crc_w_h_w(short _1, int _2) {\n" " return (int)__builtin_loongarch_crc_w_h_w((short)_1, (int)_2);\n" "}\n" "\n" "extern __inline int\n" " __attribute__((__gnu_inline__, __always_inline__, __artificial__))\n" " __crc_w_w_w(int _1, int _2) {\n" " return (int)__builtin_loongarch_crc_w_w_w((int)_1, (int)_2);\n" "}\n" "\n" "extern __inline int\n" " __attribute__((__gnu_inline__, __always_inline__, 
__artificial__))\n" " __crc_w_d_w(long int _1, int _2) {\n" " return (int)__builtin_loongarch_crc_w_d_w((long int)_1, (int)_2);\n" "}\n" "\n" "extern __inline int\n" " __attribute__((__gnu_inline__, __always_inline__, __artificial__))\n" " __crcc_w_b_w(char _1, int _2) {\n" " return (int)__builtin_loongarch_crcc_w_b_w((char)_1, (int)_2);\n" "}\n" "\n" "extern __inline int\n" " __attribute__((__gnu_inline__, __always_inline__, __artificial__))\n" " __crcc_w_h_w(short _1, int _2) {\n" " return (int)__builtin_loongarch_crcc_w_h_w((short)_1, (int)_2);\n" "}\n" "\n" "extern __inline int\n" " __attribute__((__gnu_inline__, __always_inline__, __artificial__))\n" " __crcc_w_w_w(int _1, int _2) {\n" " return (int)__builtin_loongarch_crcc_w_w_w((int)_1, (int)_2);\n" "}\n" "\n" "extern __inline int\n" " __attribute__((__gnu_inline__, __always_inline__, __artificial__))\n" " __crcc_w_d_w(long int _1, int _2) {\n" " return (int)__builtin_loongarch_crcc_w_d_w((long int)_1, (int)_2);\n" "}\n" "#endif\n" "\n" "#define __break(/*ui15*/ _1) __builtin_loongarch_break((_1))\n" "\n" "#if __loongarch_grlen == 32\n" "#define __cacop_w(/*uimm5*/ _1, /*unsigned int*/ _2, /*simm12*/ _3) \\\n" " ((void)__builtin_loongarch_cacop_w((_1), (unsigned int)(_2), (_3)))\n" "#endif\n" "\n" "#if __loongarch_grlen == 64\n" "#define __cacop_d(/*uimm5*/ _1, /*unsigned long int*/ _2, /*simm12*/ _3) \\\n" " ((void)__builtin_loongarch_cacop_d((_1), (unsigned long int)(_2), (_3)))\n" "#endif\n" "\n" "#define __dbar(/*ui15*/ _1) __builtin_loongarch_dbar((_1))\n" "\n" "#define __ibar(/*ui15*/ _1) __builtin_loongarch_ibar((_1))\n" "\n" "#define __movfcsr2gr(/*ui5*/ _1) __builtin_loongarch_movfcsr2gr((_1));\n" "\n" "#define __movgr2fcsr(/*ui5*/ _1, _2) \\\n" " __builtin_loongarch_movgr2fcsr((_1), (unsigned int)_2);\n" "\n" "#define __syscall(/*ui15*/ _1) __builtin_loongarch_syscall((_1))\n" "\n" "#define __csrrd_w(/*ui14*/ _1) ((unsigned int)__builtin_loongarch_csrrd_w((_1)))\n" "\n" "#define 
__csrwr_w(/*unsigned int*/ _1, /*ui14*/ _2) \\\n" " ((unsigned int)__builtin_loongarch_csrwr_w((unsigned int)(_1), (_2)))\n" "\n" "#define __csrxchg_w(/*unsigned int*/ _1, /*unsigned int*/ _2, /*ui14*/ _3) \\\n" " ((unsigned int)__builtin_loongarch_csrxchg_w((unsigned int)(_1), \\\n" " (unsigned int)(_2), (_3)))\n" "\n" "#if __loongarch_grlen == 64\n" "#define __csrrd_d(/*ui14*/ _1) \\\n" " ((unsigned long int)__builtin_loongarch_csrrd_d((_1)))\n" "\n" "#define __csrwr_d(/*unsigned long int*/ _1, /*ui14*/ _2) \\\n" " ((unsigned long int)__builtin_loongarch_csrwr_d((unsigned long int)(_1), \\\n" " (_2)))\n" "\n" "#define __csrxchg_d(/*unsigned long int*/ _1, /*unsigned long int*/ _2, \\\n" " /*ui14*/ _3) \\\n" " ((unsigned long int)__builtin_loongarch_csrxchg_d( \\\n" " (unsigned long int)(_1), (unsigned long int)(_2), (_3)))\n" "#endif\n" "\n" "extern __inline unsigned char\n" " __attribute__((__gnu_inline__, __always_inline__, __artificial__))\n" " __iocsrrd_b(unsigned int _1) {\n" " return (unsigned char)__builtin_loongarch_iocsrrd_b((unsigned int)_1);\n" "}\n" "\n" "extern __inline unsigned char\n" " __attribute__((__gnu_inline__, __always_inline__, __artificial__))\n" " __iocsrrd_h(unsigned int _1) {\n" " return (unsigned short)__builtin_loongarch_iocsrrd_h((unsigned int)_1);\n" "}\n" "\n" "extern __inline unsigned int\n" " __attribute__((__gnu_inline__, __always_inline__, __artificial__))\n" " __iocsrrd_w(unsigned int _1) {\n" " return (unsigned int)__builtin_loongarch_iocsrrd_w((unsigned int)_1);\n" "}\n" "\n" "#if __loongarch_grlen == 64\n" "extern __inline unsigned long int\n" " __attribute__((__gnu_inline__, __always_inline__, __artificial__))\n" " __iocsrrd_d(unsigned int _1) {\n" " return (unsigned long int)__builtin_loongarch_iocsrrd_d((unsigned int)_1);\n" "}\n" "#endif\n" "\n" "extern __inline void\n" " __attribute__((__gnu_inline__, __always_inline__, __artificial__))\n" " __iocsrwr_b(unsigned char _1, unsigned int _2) {\n" " 
__builtin_loongarch_iocsrwr_b((unsigned char)_1, (unsigned int)_2);\n" "}\n" "\n" "extern __inline void\n" " __attribute__((__gnu_inline__, __always_inline__, __artificial__))\n" " __iocsrwr_h(unsigned short _1, unsigned int _2) {\n" " __builtin_loongarch_iocsrwr_h((unsigned short)_1, (unsigned int)_2);\n" "}\n" "\n" "extern __inline void\n" " __attribute__((__gnu_inline__, __always_inline__, __artificial__))\n" " __iocsrwr_w(unsigned int _1, unsigned int _2) {\n" " __builtin_loongarch_iocsrwr_w((unsigned int)_1, (unsigned int)_2);\n" "}\n" "\n" "extern __inline unsigned int\n" " __attribute__((__gnu_inline__, __always_inline__, __artificial__))\n" " __cpucfg(unsigned int _1) {\n" " return (unsigned int)__builtin_loongarch_cpucfg((unsigned int)_1);\n" "}\n" "\n" "#if __loongarch_grlen == 64\n" "extern __inline void\n" " __attribute__((__gnu_inline__, __always_inline__, __artificial__))\n" " __iocsrwr_d(unsigned long int _1, unsigned int _2) {\n" " __builtin_loongarch_iocsrwr_d((unsigned long int)_1, (unsigned int)_2);\n" "}\n" "\n" "extern __inline void\n" " __attribute__((__gnu_inline__, __always_inline__, __artificial__))\n" " __asrtgt_d(long int _1, long int _2) {\n" " __builtin_loongarch_asrtgt_d((long int)_1, (long int)_2);\n" "}\n" "\n" "extern __inline void\n" " __attribute__((__gnu_inline__, __always_inline__, __artificial__))\n" " __asrtle_d(long int _1, long int _2) {\n" " __builtin_loongarch_asrtle_d((long int)_1, (long int)_2);\n" "}\n" "#endif\n" "\n" "#if __loongarch_grlen == 64\n" "#define __lddir_d(/*long int*/ _1, /*ui5*/ _2) \\\n" " ((long int)__builtin_loongarch_lddir_d((long int)(_1), (_2)))\n" "\n" "#define __ldpte_d(/*long int*/ _1, /*ui5*/ _2) \\\n" " ((void)__builtin_loongarch_ldpte_d((long int)(_1), (_2)))\n" "#endif\n" "\n" "#ifdef __cplusplus\n" "}\n" "#endif\n" "#endif /* _LOONGARCH_BASE_INTRIN_H */\n" "" } , { "/builtins/limits.h" , "/*===---- limits.h - Standard header for integer sizes --------------------===*\\\n" " *\n" " * Part of 
the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" "\\*===----------------------------------------------------------------------===*/\n" "\n" "#if !defined(__CLANG_LIMITS_H) || !defined(_LIBC_LIMITS_H_)\n" "#define __CLANG_LIMITS_H\n" "\n" "/* The system's limits.h may, in turn, try to #include_next GCC's limits.h.\n" " Avert this #include_next madness. */\n" "#if defined __GNUC__ && !defined _GCC_LIMITS_H_\n" "#define _GCC_LIMITS_H_\n" "#endif\n" "\n" "/* System headers include a number of constants from POSIX in .\n" " Include it if we're hosted. */\n" "#if __STDC_HOSTED__ && __has_include_next()\n" "#include_next \n" "#endif\n" "\n" "/* Many system headers try to \"help us out\" by defining these. No really, we\n" " know how big each datatype is. */\n" "#undef SCHAR_MIN\n" "#undef SCHAR_MAX\n" "#undef UCHAR_MAX\n" "#undef SHRT_MIN\n" "#undef SHRT_MAX\n" "#undef USHRT_MAX\n" "#undef INT_MIN\n" "#undef INT_MAX\n" "#undef UINT_MAX\n" "#undef LONG_MIN\n" "#undef LONG_MAX\n" "#undef ULONG_MAX\n" "\n" "#undef CHAR_BIT\n" "#undef CHAR_MIN\n" "#undef CHAR_MAX\n" "\n" "/* C90/99 5.2.4.2.1 */\n" "#define SCHAR_MAX __SCHAR_MAX__\n" "#define SHRT_MAX __SHRT_MAX__\n" "#define INT_MAX __INT_MAX__\n" "#define LONG_MAX __LONG_MAX__\n" "\n" "#define SCHAR_MIN (-__SCHAR_MAX__-1)\n" "#define SHRT_MIN (-__SHRT_MAX__ -1)\n" "#define INT_MIN (-__INT_MAX__ -1)\n" "#define LONG_MIN (-__LONG_MAX__ -1L)\n" "\n" "#define UCHAR_MAX (__SCHAR_MAX__*2 +1)\n" "#if __SHRT_WIDTH__ < __INT_WIDTH__\n" "#define USHRT_MAX (__SHRT_MAX__ * 2 + 1)\n" "#else\n" "#define USHRT_MAX (__SHRT_MAX__ * 2U + 1U)\n" "#endif\n" "#define UINT_MAX (__INT_MAX__ *2U +1U)\n" "#define ULONG_MAX (__LONG_MAX__ *2UL+1UL)\n" "\n" "#ifndef MB_LEN_MAX\n" "#define MB_LEN_MAX 1\n" "#endif\n" "\n" "#define CHAR_BIT __CHAR_BIT__\n" "\n" "/* C2x 5.2.4.2.1 */\n" "/* FIXME: 
This is using the placeholder dates Clang produces for these macros\n" " in C2x mode; switch to the correct values once they've been published. */\n" "#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L\n" "#define BOOL_WIDTH __BOOL_WIDTH__\n" "#define CHAR_WIDTH CHAR_BIT\n" "#define SCHAR_WIDTH CHAR_BIT\n" "#define UCHAR_WIDTH CHAR_BIT\n" "#define USHRT_WIDTH __SHRT_WIDTH__\n" "#define SHRT_WIDTH __SHRT_WIDTH__\n" "#define UINT_WIDTH __INT_WIDTH__\n" "#define INT_WIDTH __INT_WIDTH__\n" "#define ULONG_WIDTH __LONG_WIDTH__\n" "#define LONG_WIDTH __LONG_WIDTH__\n" "#define ULLONG_WIDTH __LLONG_WIDTH__\n" "#define LLONG_WIDTH __LLONG_WIDTH__\n" "\n" "#define BITINT_MAXWIDTH __BITINT_MAXWIDTH__\n" "#endif\n" "\n" "#ifdef __CHAR_UNSIGNED__ /* -funsigned-char */\n" "#define CHAR_MIN 0\n" "#define CHAR_MAX UCHAR_MAX\n" "#else\n" "#define CHAR_MIN SCHAR_MIN\n" "#define CHAR_MAX __SCHAR_MAX__\n" "#endif\n" "\n" "/* C99 5.2.4.2.1: Added long long.\n" " C++11 18.3.3.2: same contents as the Standard C Library header .\n" " */\n" "#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || \\\n" " (defined(__cplusplus) && __cplusplus >= 201103L)\n" "\n" "#undef LLONG_MIN\n" "#undef LLONG_MAX\n" "#undef ULLONG_MAX\n" "\n" "#define LLONG_MAX __LONG_LONG_MAX__\n" "#define LLONG_MIN (-__LONG_LONG_MAX__-1LL)\n" "#define ULLONG_MAX (__LONG_LONG_MAX__*2ULL+1ULL)\n" "#endif\n" "\n" "/* LONG_LONG_MIN/LONG_LONG_MAX/ULONG_LONG_MAX are a GNU extension. It's too bad\n" " that we don't have something like #pragma poison that could be used to\n" " deprecate a macro - the code should just use LLONG_MAX and friends.\n" " */\n" "#if defined(__GNU_LIBRARY__) ? 
defined(__USE_GNU) : !defined(__STRICT_ANSI__)\n" "\n" "#undef LONG_LONG_MIN\n" "#undef LONG_LONG_MAX\n" "#undef ULONG_LONG_MAX\n" "\n" "#define LONG_LONG_MAX __LONG_LONG_MAX__\n" "#define LONG_LONG_MIN (-__LONG_LONG_MAX__-1LL)\n" "#define ULONG_LONG_MAX (__LONG_LONG_MAX__*2ULL+1ULL)\n" "#endif\n" "\n" "#endif /* __CLANG_LIMITS_H */\n" "" } , { "/builtins/lwpintrin.h" , "/*===---- lwpintrin.h - LWP intrinsics -------------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __X86INTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __LWPINTRIN_H\n" "#define __LWPINTRIN_H\n" "\n" "/* Define the default attributes for the functions in this file. */\n" "#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__(\"lwp\")))\n" "\n" "/// Parses the LWPCB at the specified address and enables\n" "/// profiling if valid.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the LLWPCB instruction.\n" "///\n" "/// \\param __addr\n" "/// Address to the new Lightweight Profiling Control Block (LWPCB). 
If the\n" "/// LWPCB is valid, writes the address into the LWP_CBADDR MSR and enables\n" "/// Lightweight Profiling.\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "__llwpcb (void *__addr)\n" "{\n" " __builtin_ia32_llwpcb(__addr);\n" "}\n" "\n" "/// Flushes the LWP state to memory and returns the address of the LWPCB.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the SLWPCB instruction.\n" "///\n" "/// \\return\n" "/// Address to the current Lightweight Profiling Control Block (LWPCB).\n" "/// If LWP is not currently enabled, returns NULL.\n" "static __inline__ void* __DEFAULT_FN_ATTRS\n" "__slwpcb (void)\n" "{\n" " return __builtin_ia32_slwpcb();\n" "}\n" "\n" "/// Inserts programmed event record into the LWP event ring buffer\n" "/// and advances the ring buffer pointer.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the LWPINS instruction.\n" "///\n" "/// \\param DATA2\n" "/// A 32-bit value is zero-extended and inserted into the 64-bit Data2 field.\n" "/// \\param DATA1\n" "/// A 32-bit value is inserted into the 32-bit Data1 field.\n" "/// \\param FLAGS\n" "/// A 32-bit immediate value is inserted into the 32-bit Flags field.\n" "/// \\returns If the ring buffer is full and LWP is running in Synchronized Mode,\n" "/// the event record overwrites the last record in the buffer, the MissedEvents\n" "/// counter in the LWPCB is incremented, the head pointer is not advanced, and\n" "/// 1 is returned. Otherwise 0 is returned.\n" "#define __lwpins32(DATA2, DATA1, FLAGS) \\\n" " (__builtin_ia32_lwpins32((unsigned int) (DATA2), (unsigned int) (DATA1), \\\n" " (unsigned int) (FLAGS)))\n" "\n" "/// Decrements the LWP programmed value sample event counter. 
If the result is\n" "/// negative, inserts an event record into the LWP event ring buffer in memory\n" "/// and advances the ring buffer pointer.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the LWPVAL instruction.\n" "///\n" "/// \\param DATA2\n" "/// A 32-bit value is zero-extended and inserted into the 64-bit Data2 field.\n" "/// \\param DATA1\n" "/// A 32-bit value is inserted into the 32-bit Data1 field.\n" "/// \\param FLAGS\n" "/// A 32-bit immediate value is inserted into the 32-bit Flags field.\n" "#define __lwpval32(DATA2, DATA1, FLAGS) \\\n" " (__builtin_ia32_lwpval32((unsigned int) (DATA2), (unsigned int) (DATA1), \\\n" " (unsigned int) (FLAGS)))\n" "\n" "#ifdef __x86_64__\n" "\n" "/// Inserts programmed event record into the LWP event ring buffer\n" "/// and advances the ring buffer pointer.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the LWPINS instruction.\n" "///\n" "/// \\param DATA2\n" "/// A 64-bit value is inserted into the 64-bit Data2 field.\n" "/// \\param DATA1\n" "/// A 32-bit value is inserted into the 32-bit Data1 field.\n" "/// \\param FLAGS\n" "/// A 32-bit immediate value is inserted into the 32-bit Flags field.\n" "/// \\returns If the ring buffer is full and LWP is running in Synchronized Mode,\n" "/// the event record overwrites the last record in the buffer, the MissedEvents\n" "/// counter in the LWPCB is incremented, the head pointer is not advanced, and\n" "/// 1 is returned. Otherwise 0 is returned.\n" "#define __lwpins64(DATA2, DATA1, FLAGS) \\\n" " (__builtin_ia32_lwpins64((unsigned long long) (DATA2), (unsigned int) (DATA1), \\\n" " (unsigned int) (FLAGS)))\n" "\n" "/// Decrements the LWP programmed value sample event counter. 
If the result is\n" "/// negative, inserts an event record into the LWP event ring buffer in memory\n" "/// and advances the ring buffer pointer.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the LWPVAL instruction.\n" "///\n" "/// \\param DATA2\n" "/// A 64-bit value is and inserted into the 64-bit Data2 field.\n" "/// \\param DATA1\n" "/// A 32-bit value is inserted into the 32-bit Data1 field.\n" "/// \\param FLAGS\n" "/// A 32-bit immediate value is inserted into the 32-bit Flags field.\n" "#define __lwpval64(DATA2, DATA1, FLAGS) \\\n" " (__builtin_ia32_lwpval64((unsigned long long) (DATA2), (unsigned int) (DATA1), \\\n" " (unsigned int) (FLAGS)))\n" "\n" "#endif\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "\n" "#endif /* __LWPINTRIN_H */\n" "" } , { "/builtins/lzcntintrin.h" , "/*===---- lzcntintrin.h - LZCNT intrinsics ---------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __LZCNTINTRIN_H\n" "#define __LZCNTINTRIN_H\n" "\n" "/* Define the default attributes for the functions in this file. 
*/\n" "#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__(\"lzcnt\")))\n" "\n" "#ifndef _MSC_VER\n" "/// Counts the number of leading zero bits in the operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c LZCNT instruction.\n" "///\n" "/// \\param __X\n" "/// An unsigned 16-bit integer whose leading zeros are to be counted.\n" "/// \\returns An unsigned 16-bit integer containing the number of leading zero\n" "/// bits in the operand.\n" "#define __lzcnt16(X) __builtin_ia32_lzcnt_u16((unsigned short)(X))\n" "#endif // _MSC_VER\n" "\n" "/// Counts the number of leading zero bits in the operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c LZCNT instruction.\n" "///\n" "/// \\param __X\n" "/// An unsigned 32-bit integer whose leading zeros are to be counted.\n" "/// \\returns An unsigned 32-bit integer containing the number of leading zero\n" "/// bits in the operand.\n" "/// \\see _lzcnt_u32\n" "static __inline__ unsigned int __DEFAULT_FN_ATTRS\n" "__lzcnt32(unsigned int __X)\n" "{\n" " return __builtin_ia32_lzcnt_u32(__X);\n" "}\n" "\n" "/// Counts the number of leading zero bits in the operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c LZCNT instruction.\n" "///\n" "/// \\param __X\n" "/// An unsigned 32-bit integer whose leading zeros are to be counted.\n" "/// \\returns An unsigned 32-bit integer containing the number of leading zero\n" "/// bits in the operand.\n" "/// \\see __lzcnt32\n" "static __inline__ unsigned int __DEFAULT_FN_ATTRS\n" "_lzcnt_u32(unsigned int __X)\n" "{\n" " return __builtin_ia32_lzcnt_u32(__X);\n" "}\n" "\n" "#ifdef __x86_64__\n" "#ifndef _MSC_VER\n" "/// Counts the number of leading zero bits in the operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c LZCNT instruction.\n" "///\n" "/// \\param __X\n" "/// An unsigned 64-bit integer whose leading 
zeros are to be counted.\n" "/// \\returns An unsigned 64-bit integer containing the number of leading zero\n" "/// bits in the operand.\n" "/// \\see _lzcnt_u64\n" "#define __lzcnt64(X) __builtin_ia32_lzcnt_u64((unsigned long long)(X))\n" "#endif // _MSC_VER\n" "\n" "/// Counts the number of leading zero bits in the operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c LZCNT instruction.\n" "///\n" "/// \\param __X\n" "/// An unsigned 64-bit integer whose leading zeros are to be counted.\n" "/// \\returns An unsigned 64-bit integer containing the number of leading zero\n" "/// bits in the operand.\n" "/// \\see __lzcnt64\n" "static __inline__ unsigned long long __DEFAULT_FN_ATTRS\n" "_lzcnt_u64(unsigned long long __X)\n" "{\n" " return __builtin_ia32_lzcnt_u64(__X);\n" "}\n" "#endif\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "\n" "#endif /* __LZCNTINTRIN_H */\n" "" } , { "/builtins/mm3dnow.h" , "/*===---- mm3dnow.h - 3DNow! intrinsics ------------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef _MM3DNOW_H_INCLUDED\n" "#define _MM3DNOW_H_INCLUDED\n" "\n" "#include \n" "#include \n" "\n" "typedef float __v2sf __attribute__((__vector_size__(8)));\n" "\n" "/* Define the default attributes for the functions in this file. 
*/\n" "#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__(\"3dnow\"), __min_vector_width__(64)))\n" "\n" "static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__(\"3dnow\")))\n" "_m_femms(void) {\n" " __builtin_ia32_femms();\n" "}\n" "\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_m_pavgusb(__m64 __m1, __m64 __m2) {\n" " return (__m64)__builtin_ia32_pavgusb((__v8qi)__m1, (__v8qi)__m2);\n" "}\n" "\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_m_pf2id(__m64 __m) {\n" " return (__m64)__builtin_ia32_pf2id((__v2sf)__m);\n" "}\n" "\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_m_pfacc(__m64 __m1, __m64 __m2) {\n" " return (__m64)__builtin_ia32_pfacc((__v2sf)__m1, (__v2sf)__m2);\n" "}\n" "\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_m_pfadd(__m64 __m1, __m64 __m2) {\n" " return (__m64)__builtin_ia32_pfadd((__v2sf)__m1, (__v2sf)__m2);\n" "}\n" "\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_m_pfcmpeq(__m64 __m1, __m64 __m2) {\n" " return (__m64)__builtin_ia32_pfcmpeq((__v2sf)__m1, (__v2sf)__m2);\n" "}\n" "\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_m_pfcmpge(__m64 __m1, __m64 __m2) {\n" " return (__m64)__builtin_ia32_pfcmpge((__v2sf)__m1, (__v2sf)__m2);\n" "}\n" "\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_m_pfcmpgt(__m64 __m1, __m64 __m2) {\n" " return (__m64)__builtin_ia32_pfcmpgt((__v2sf)__m1, (__v2sf)__m2);\n" "}\n" "\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_m_pfmax(__m64 __m1, __m64 __m2) {\n" " return (__m64)__builtin_ia32_pfmax((__v2sf)__m1, (__v2sf)__m2);\n" "}\n" "\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_m_pfmin(__m64 __m1, __m64 __m2) {\n" " return (__m64)__builtin_ia32_pfmin((__v2sf)__m1, (__v2sf)__m2);\n" "}\n" "\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_m_pfmul(__m64 __m1, __m64 __m2) {\n" " return (__m64)__builtin_ia32_pfmul((__v2sf)__m1, (__v2sf)__m2);\n" "}\n" "\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" 
"_m_pfrcp(__m64 __m) {\n" " return (__m64)__builtin_ia32_pfrcp((__v2sf)__m);\n" "}\n" "\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_m_pfrcpit1(__m64 __m1, __m64 __m2) {\n" " return (__m64)__builtin_ia32_pfrcpit1((__v2sf)__m1, (__v2sf)__m2);\n" "}\n" "\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_m_pfrcpit2(__m64 __m1, __m64 __m2) {\n" " return (__m64)__builtin_ia32_pfrcpit2((__v2sf)__m1, (__v2sf)__m2);\n" "}\n" "\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_m_pfrsqrt(__m64 __m) {\n" " return (__m64)__builtin_ia32_pfrsqrt((__v2sf)__m);\n" "}\n" "\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_m_pfrsqrtit1(__m64 __m1, __m64 __m2) {\n" " return (__m64)__builtin_ia32_pfrsqit1((__v2sf)__m1, (__v2sf)__m2);\n" "}\n" "\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_m_pfsub(__m64 __m1, __m64 __m2) {\n" " return (__m64)__builtin_ia32_pfsub((__v2sf)__m1, (__v2sf)__m2);\n" "}\n" "\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_m_pfsubr(__m64 __m1, __m64 __m2) {\n" " return (__m64)__builtin_ia32_pfsubr((__v2sf)__m1, (__v2sf)__m2);\n" "}\n" "\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_m_pi2fd(__m64 __m) {\n" " return (__m64)__builtin_ia32_pi2fd((__v2si)__m);\n" "}\n" "\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_m_pmulhrw(__m64 __m1, __m64 __m2) {\n" " return (__m64)__builtin_ia32_pmulhrw((__v4hi)__m1, (__v4hi)__m2);\n" "}\n" "\n" "/* Handle the 3dnowa instructions here. 
*/\n" "#undef __DEFAULT_FN_ATTRS\n" "#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__(\"3dnowa\"), __min_vector_width__(64)))\n" "\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_m_pf2iw(__m64 __m) {\n" " return (__m64)__builtin_ia32_pf2iw((__v2sf)__m);\n" "}\n" "\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_m_pfnacc(__m64 __m1, __m64 __m2) {\n" " return (__m64)__builtin_ia32_pfnacc((__v2sf)__m1, (__v2sf)__m2);\n" "}\n" "\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_m_pfpnacc(__m64 __m1, __m64 __m2) {\n" " return (__m64)__builtin_ia32_pfpnacc((__v2sf)__m1, (__v2sf)__m2);\n" "}\n" "\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_m_pi2fw(__m64 __m) {\n" " return (__m64)__builtin_ia32_pi2fw((__v2si)__m);\n" "}\n" "\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_m_pswapdsf(__m64 __m) {\n" " return (__m64)__builtin_ia32_pswapdsf((__v2sf)__m);\n" "}\n" "\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_m_pswapdsi(__m64 __m) {\n" " return (__m64)__builtin_ia32_pswapdsi((__v2si)__m);\n" "}\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "\n" "#endif\n" "" } , { "/builtins/mm_malloc.h" , "/*===---- mm_malloc.h - Allocating and Freeing Aligned Memory Blocks -------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __MM_MALLOC_H\n" "#define __MM_MALLOC_H\n" "\n" "#include \n" "\n" "#ifdef _WIN32\n" "#include \n" "#else\n" "#ifndef __cplusplus\n" "extern int posix_memalign(void **__memptr, size_t __alignment, size_t __size);\n" "#else\n" "// Some systems (e.g. those with GNU libc) declare posix_memalign with an\n" "// exception specifier. 
Via an \"egregious workaround\" in\n" "// Sema::CheckEquivalentExceptionSpec, Clang accepts the following as a valid\n" "// redeclaration of glibc's declaration.\n" "extern \"C\" int posix_memalign(void **__memptr, size_t __alignment, size_t __size);\n" "#endif\n" "#endif\n" "\n" "#if !(defined(_WIN32) && defined(_mm_malloc))\n" "static __inline__ void *__attribute__((__always_inline__, __nodebug__,\n" " __malloc__, __alloc_size__(1),\n" " __alloc_align__(2)))\n" "_mm_malloc(size_t __size, size_t __align) {\n" " if (__align == 1) {\n" " return malloc(__size);\n" " }\n" "\n" " if (!(__align & (__align - 1)) && __align < sizeof(void *))\n" " __align = sizeof(void *);\n" "\n" " void *__mallocedMemory;\n" "#if defined(__MINGW32__)\n" " __mallocedMemory = __mingw_aligned_malloc(__size, __align);\n" "#elif defined(_WIN32)\n" " __mallocedMemory = _aligned_malloc(__size, __align);\n" "#else\n" " if (posix_memalign(&__mallocedMemory, __align, __size))\n" " return 0;\n" "#endif\n" "\n" " return __mallocedMemory;\n" "}\n" "\n" "static __inline__ void __attribute__((__always_inline__, __nodebug__))\n" "_mm_free(void *__p)\n" "{\n" "#if defined(__MINGW32__)\n" " __mingw_aligned_free(__p);\n" "#elif defined(_WIN32)\n" " _aligned_free(__p);\n" "#else\n" " free(__p);\n" "#endif\n" "}\n" "#endif\n" "\n" "#endif /* __MM_MALLOC_H */\n" "" } , { "/builtins/mmintrin.h" , "/*===---- mmintrin.h - MMX intrinsics --------------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __MMINTRIN_H\n" "#define __MMINTRIN_H\n" "\n" "#if !defined(__i386__) && !defined(__x86_64__)\n" "#error \"This header is only meant to be used on x86 and x64 architecture\"\n" "#endif\n" "\n" "typedef long 
long __m64 __attribute__((__vector_size__(8), __aligned__(8)));\n" "\n" "typedef long long __v1di __attribute__((__vector_size__(8)));\n" "typedef int __v2si __attribute__((__vector_size__(8)));\n" "typedef short __v4hi __attribute__((__vector_size__(8)));\n" "typedef char __v8qi __attribute__((__vector_size__(8)));\n" "\n" "/* Define the default attributes for the functions in this file. */\n" "#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__(\"mmx\"), __min_vector_width__(64)))\n" "\n" "/// Clears the MMX state by setting the state of the x87 stack registers\n" "/// to empty.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the EMMS instruction.\n" "///\n" "static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__(\"mmx\")))\n" "_mm_empty(void)\n" "{\n" " __builtin_ia32_emms();\n" "}\n" "\n" "/// Constructs a 64-bit integer vector, setting the lower 32 bits to the\n" "/// value of the 32-bit integer parameter and setting the upper 32 bits to 0.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the MOVD instruction.\n" "///\n" "/// \\param __i\n" "/// A 32-bit integer value.\n" "/// \\returns A 64-bit integer vector. The lower 32 bits contain the value of the\n" "/// parameter. 
The upper 32 bits are set to 0.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_cvtsi32_si64(int __i)\n" "{\n" " return (__m64)__builtin_ia32_vec_init_v2si(__i, 0);\n" "}\n" "\n" "/// Returns the lower 32 bits of a 64-bit integer vector as a 32-bit\n" "/// signed integer.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the MOVD instruction.\n" "///\n" "/// \\param __m\n" "/// A 64-bit integer vector.\n" "/// \\returns A 32-bit signed integer value containing the lower 32 bits of the\n" "/// parameter.\n" "static __inline__ int __DEFAULT_FN_ATTRS\n" "_mm_cvtsi64_si32(__m64 __m)\n" "{\n" " return __builtin_ia32_vec_ext_v2si((__v2si)__m, 0);\n" "}\n" "\n" "/// Casts a 64-bit signed integer value into a 64-bit integer vector.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the MOVQ instruction.\n" "///\n" "/// \\param __i\n" "/// A 64-bit signed integer.\n" "/// \\returns A 64-bit integer vector containing the same bitwise pattern as the\n" "/// parameter.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_cvtsi64_m64(long long __i)\n" "{\n" " return (__m64)__i;\n" "}\n" "\n" "/// Casts a 64-bit integer vector into a 64-bit signed integer value.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the MOVQ instruction.\n" "///\n" "/// \\param __m\n" "/// A 64-bit integer vector.\n" "/// \\returns A 64-bit signed integer containing the same bitwise pattern as the\n" "/// parameter.\n" "static __inline__ long long __DEFAULT_FN_ATTRS\n" "_mm_cvtm64_si64(__m64 __m)\n" "{\n" " return (long long)__m;\n" "}\n" "\n" "/// Converts 16-bit signed integers from both 64-bit integer vector\n" "/// parameters of [4 x i16] into 8-bit signed integer values, and constructs\n" "/// a 64-bit integer vector of [8 x i8] as the result. Positive values\n" "/// greater than 0x7F are saturated to 0x7F. 
Negative values less than 0x80\n" "/// are saturated to 0x80.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PACKSSWB instruction.\n" "///\n" "/// \\param __m1\n" "/// A 64-bit integer vector of [4 x i16]. Each 16-bit element is treated as a\n" "/// 16-bit signed integer and is converted to an 8-bit signed integer with\n" "/// saturation. Positive values greater than 0x7F are saturated to 0x7F.\n" "/// Negative values less than 0x80 are saturated to 0x80. The converted\n" "/// [4 x i8] values are written to the lower 32 bits of the result.\n" "/// \\param __m2\n" "/// A 64-bit integer vector of [4 x i16]. Each 16-bit element is treated as a\n" "/// 16-bit signed integer and is converted to an 8-bit signed integer with\n" "/// saturation. Positive values greater than 0x7F are saturated to 0x7F.\n" "/// Negative values less than 0x80 are saturated to 0x80. The converted\n" "/// [4 x i8] values are written to the upper 32 bits of the result.\n" "/// \\returns A 64-bit integer vector of [8 x i8] containing the converted\n" "/// values.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_packs_pi16(__m64 __m1, __m64 __m2)\n" "{\n" " return (__m64)__builtin_ia32_packsswb((__v4hi)__m1, (__v4hi)__m2);\n" "}\n" "\n" "/// Converts 32-bit signed integers from both 64-bit integer vector\n" "/// parameters of [2 x i32] into 16-bit signed integer values, and constructs\n" "/// a 64-bit integer vector of [4 x i16] as the result. Positive values\n" "/// greater than 0x7FFF are saturated to 0x7FFF. Negative values less than\n" "/// 0x8000 are saturated to 0x8000.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PACKSSDW instruction.\n" "///\n" "/// \\param __m1\n" "/// A 64-bit integer vector of [2 x i32]. Each 32-bit element is treated as a\n" "/// 32-bit signed integer and is converted to a 16-bit signed integer with\n" "/// saturation. 
Positive values greater than 0x7FFF are saturated to 0x7FFF.\n" "/// Negative values less than 0x8000 are saturated to 0x8000. The converted\n" "/// [2 x i16] values are written to the lower 32 bits of the result.\n" "/// \\param __m2\n" "/// A 64-bit integer vector of [2 x i32]. Each 32-bit element is treated as a\n" "/// 32-bit signed integer and is converted to a 16-bit signed integer with\n" "/// saturation. Positive values greater than 0x7FFF are saturated to 0x7FFF.\n" "/// Negative values less than 0x8000 are saturated to 0x8000. The converted\n" "/// [2 x i16] values are written to the upper 32 bits of the result.\n" "/// \\returns A 64-bit integer vector of [4 x i16] containing the converted\n" "/// values.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_packs_pi32(__m64 __m1, __m64 __m2)\n" "{\n" " return (__m64)__builtin_ia32_packssdw((__v2si)__m1, (__v2si)__m2);\n" "}\n" "\n" "/// Converts 16-bit signed integers from both 64-bit integer vector\n" "/// parameters of [4 x i16] into 8-bit unsigned integer values, and\n" "/// constructs a 64-bit integer vector of [8 x i8] as the result. Values\n" "/// greater than 0xFF are saturated to 0xFF. Values less than 0 are saturated\n" "/// to 0.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PACKUSWB instruction.\n" "///\n" "/// \\param __m1\n" "/// A 64-bit integer vector of [4 x i16]. Each 16-bit element is treated as a\n" "/// 16-bit signed integer and is converted to an 8-bit unsigned integer with\n" "/// saturation. Values greater than 0xFF are saturated to 0xFF. Values less\n" "/// than 0 are saturated to 0. The converted [4 x i8] values are written to\n" "/// the lower 32 bits of the result.\n" "/// \\param __m2\n" "/// A 64-bit integer vector of [4 x i16]. Each 16-bit element is treated as a\n" "/// 16-bit signed integer and is converted to an 8-bit unsigned integer with\n" "/// saturation. Values greater than 0xFF are saturated to 0xFF. 
Values less\n" "/// than 0 are saturated to 0. The converted [4 x i8] values are written to\n" "/// the upper 32 bits of the result.\n" "/// \\returns A 64-bit integer vector of [8 x i8] containing the converted\n" "/// values.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_packs_pu16(__m64 __m1, __m64 __m2)\n" "{\n" " return (__m64)__builtin_ia32_packuswb((__v4hi)__m1, (__v4hi)__m2);\n" "}\n" "\n" "/// Unpacks the upper 32 bits from two 64-bit integer vectors of [8 x i8]\n" "/// and interleaves them into a 64-bit integer vector of [8 x i8].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PUNPCKHBW instruction.\n" "///\n" "/// \\param __m1\n" "/// A 64-bit integer vector of [8 x i8]. \\n\n" "/// Bits [39:32] are written to bits [7:0] of the result. \\n\n" "/// Bits [47:40] are written to bits [23:16] of the result. \\n\n" "/// Bits [55:48] are written to bits [39:32] of the result. \\n\n" "/// Bits [63:56] are written to bits [55:48] of the result.\n" "/// \\param __m2\n" "/// A 64-bit integer vector of [8 x i8].\n" "/// Bits [39:32] are written to bits [15:8] of the result. \\n\n" "/// Bits [47:40] are written to bits [31:24] of the result. \\n\n" "/// Bits [55:48] are written to bits [47:40] of the result. 
\\n\n" "/// Bits [63:56] are written to bits [63:56] of the result.\n" "/// \\returns A 64-bit integer vector of [8 x i8] containing the interleaved\n" "/// values.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_unpackhi_pi8(__m64 __m1, __m64 __m2)\n" "{\n" " return (__m64)__builtin_ia32_punpckhbw((__v8qi)__m1, (__v8qi)__m2);\n" "}\n" "\n" "/// Unpacks the upper 32 bits from two 64-bit integer vectors of\n" "/// [4 x i16] and interleaves them into a 64-bit integer vector of [4 x i16].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PUNPCKHWD instruction.\n" "///\n" "/// \\param __m1\n" "/// A 64-bit integer vector of [4 x i16].\n" "/// Bits [47:32] are written to bits [15:0] of the result. \\n\n" "/// Bits [63:48] are written to bits [47:32] of the result.\n" "/// \\param __m2\n" "/// A 64-bit integer vector of [4 x i16].\n" "/// Bits [47:32] are written to bits [31:16] of the result. \\n\n" "/// Bits [63:48] are written to bits [63:48] of the result.\n" "/// \\returns A 64-bit integer vector of [4 x i16] containing the interleaved\n" "/// values.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_unpackhi_pi16(__m64 __m1, __m64 __m2)\n" "{\n" " return (__m64)__builtin_ia32_punpckhwd((__v4hi)__m1, (__v4hi)__m2);\n" "}\n" "\n" "/// Unpacks the upper 32 bits from two 64-bit integer vectors of\n" "/// [2 x i32] and interleaves them into a 64-bit integer vector of [2 x i32].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PUNPCKHDQ instruction.\n" "///\n" "/// \\param __m1\n" "/// A 64-bit integer vector of [2 x i32]. The upper 32 bits are written to\n" "/// the lower 32 bits of the result.\n" "/// \\param __m2\n" "/// A 64-bit integer vector of [2 x i32]. 
The upper 32 bits are written to\n" "/// the upper 32 bits of the result.\n" "/// \\returns A 64-bit integer vector of [2 x i32] containing the interleaved\n" "/// values.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_unpackhi_pi32(__m64 __m1, __m64 __m2)\n" "{\n" " return (__m64)__builtin_ia32_punpckhdq((__v2si)__m1, (__v2si)__m2);\n" "}\n" "\n" "/// Unpacks the lower 32 bits from two 64-bit integer vectors of [8 x i8]\n" "/// and interleaves them into a 64-bit integer vector of [8 x i8].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PUNPCKLBW instruction.\n" "///\n" "/// \\param __m1\n" "/// A 64-bit integer vector of [8 x i8].\n" "/// Bits [7:0] are written to bits [7:0] of the result. \\n\n" "/// Bits [15:8] are written to bits [23:16] of the result. \\n\n" "/// Bits [23:16] are written to bits [39:32] of the result. \\n\n" "/// Bits [31:24] are written to bits [55:48] of the result.\n" "/// \\param __m2\n" "/// A 64-bit integer vector of [8 x i8].\n" "/// Bits [7:0] are written to bits [15:8] of the result. \\n\n" "/// Bits [15:8] are written to bits [31:24] of the result. \\n\n" "/// Bits [23:16] are written to bits [47:40] of the result. \\n\n" "/// Bits [31:24] are written to bits [63:56] of the result.\n" "/// \\returns A 64-bit integer vector of [8 x i8] containing the interleaved\n" "/// values.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_unpacklo_pi8(__m64 __m1, __m64 __m2)\n" "{\n" " return (__m64)__builtin_ia32_punpcklbw((__v8qi)__m1, (__v8qi)__m2);\n" "}\n" "\n" "/// Unpacks the lower 32 bits from two 64-bit integer vectors of\n" "/// [4 x i16] and interleaves them into a 64-bit integer vector of [4 x i16].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PUNPCKLWD instruction.\n" "///\n" "/// \\param __m1\n" "/// A 64-bit integer vector of [4 x i16].\n" "/// Bits [15:0] are written to bits [15:0] of the result. 
\\n\n" "/// Bits [31:16] are written to bits [47:32] of the result.\n" "/// \\param __m2\n" "/// A 64-bit integer vector of [4 x i16].\n" "/// Bits [15:0] are written to bits [31:16] of the result. \\n\n" "/// Bits [31:16] are written to bits [63:48] of the result.\n" "/// \\returns A 64-bit integer vector of [4 x i16] containing the interleaved\n" "/// values.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_unpacklo_pi16(__m64 __m1, __m64 __m2)\n" "{\n" " return (__m64)__builtin_ia32_punpcklwd((__v4hi)__m1, (__v4hi)__m2);\n" "}\n" "\n" "/// Unpacks the lower 32 bits from two 64-bit integer vectors of\n" "/// [2 x i32] and interleaves them into a 64-bit integer vector of [2 x i32].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PUNPCKLDQ instruction.\n" "///\n" "/// \\param __m1\n" "/// A 64-bit integer vector of [2 x i32]. The lower 32 bits are written to\n" "/// the lower 32 bits of the result.\n" "/// \\param __m2\n" "/// A 64-bit integer vector of [2 x i32]. The lower 32 bits are written to\n" "/// the upper 32 bits of the result.\n" "/// \\returns A 64-bit integer vector of [2 x i32] containing the interleaved\n" "/// values.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_unpacklo_pi32(__m64 __m1, __m64 __m2)\n" "{\n" " return (__m64)__builtin_ia32_punpckldq((__v2si)__m1, (__v2si)__m2);\n" "}\n" "\n" "/// Adds each 8-bit integer element of the first 64-bit integer vector\n" "/// of [8 x i8] to the corresponding 8-bit integer element of the second\n" "/// 64-bit integer vector of [8 x i8]. 
The lower 8 bits of the results are\n" "/// packed into a 64-bit integer vector of [8 x i8].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PADDB instruction.\n" "///\n" "/// \\param __m1\n" "/// A 64-bit integer vector of [8 x i8].\n" "/// \\param __m2\n" "/// A 64-bit integer vector of [8 x i8].\n" "/// \\returns A 64-bit integer vector of [8 x i8] containing the sums of both\n" "/// parameters.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_add_pi8(__m64 __m1, __m64 __m2)\n" "{\n" " return (__m64)__builtin_ia32_paddb((__v8qi)__m1, (__v8qi)__m2);\n" "}\n" "\n" "/// Adds each 16-bit integer element of the first 64-bit integer vector\n" "/// of [4 x i16] to the corresponding 16-bit integer element of the second\n" "/// 64-bit integer vector of [4 x i16]. The lower 16 bits of the results are\n" "/// packed into a 64-bit integer vector of [4 x i16].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PADDW instruction.\n" "///\n" "/// \\param __m1\n" "/// A 64-bit integer vector of [4 x i16].\n" "/// \\param __m2\n" "/// A 64-bit integer vector of [4 x i16].\n" "/// \\returns A 64-bit integer vector of [4 x i16] containing the sums of both\n" "/// parameters.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_add_pi16(__m64 __m1, __m64 __m2)\n" "{\n" " return (__m64)__builtin_ia32_paddw((__v4hi)__m1, (__v4hi)__m2);\n" "}\n" "\n" "/// Adds each 32-bit integer element of the first 64-bit integer vector\n" "/// of [2 x i32] to the corresponding 32-bit integer element of the second\n" "/// 64-bit integer vector of [2 x i32]. 
The lower 32 bits of the results are\n" "/// packed into a 64-bit integer vector of [2 x i32].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PADDD instruction.\n" "///\n" "/// \\param __m1\n" "/// A 64-bit integer vector of [2 x i32].\n" "/// \\param __m2\n" "/// A 64-bit integer vector of [2 x i32].\n" "/// \\returns A 64-bit integer vector of [2 x i32] containing the sums of both\n" "/// parameters.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_add_pi32(__m64 __m1, __m64 __m2)\n" "{\n" " return (__m64)__builtin_ia32_paddd((__v2si)__m1, (__v2si)__m2);\n" "}\n" "\n" "/// Adds each 8-bit signed integer element of the first 64-bit integer\n" "/// vector of [8 x i8] to the corresponding 8-bit signed integer element of\n" "/// the second 64-bit integer vector of [8 x i8]. Positive sums greater than\n" "/// 0x7F are saturated to 0x7F. Negative sums less than 0x80 are saturated to\n" "/// 0x80. The results are packed into a 64-bit integer vector of [8 x i8].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PADDSB instruction.\n" "///\n" "/// \\param __m1\n" "/// A 64-bit integer vector of [8 x i8].\n" "/// \\param __m2\n" "/// A 64-bit integer vector of [8 x i8].\n" "/// \\returns A 64-bit integer vector of [8 x i8] containing the saturated sums\n" "/// of both parameters.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_adds_pi8(__m64 __m1, __m64 __m2)\n" "{\n" " return (__m64)__builtin_ia32_paddsb((__v8qi)__m1, (__v8qi)__m2);\n" "}\n" "\n" "/// Adds each 16-bit signed integer element of the first 64-bit integer\n" "/// vector of [4 x i16] to the corresponding 16-bit signed integer element of\n" "/// the second 64-bit integer vector of [4 x i16]. Positive sums greater than\n" "/// 0x7FFF are saturated to 0x7FFF. Negative sums less than 0x8000 are\n" "/// saturated to 0x8000. 
The results are packed into a 64-bit integer vector\n" "/// of [4 x i16].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PADDSW instruction.\n" "///\n" "/// \\param __m1\n" "/// A 64-bit integer vector of [4 x i16].\n" "/// \\param __m2\n" "/// A 64-bit integer vector of [4 x i16].\n" "/// \\returns A 64-bit integer vector of [4 x i16] containing the saturated sums\n" "/// of both parameters.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_adds_pi16(__m64 __m1, __m64 __m2)\n" "{\n" " return (__m64)__builtin_ia32_paddsw((__v4hi)__m1, (__v4hi)__m2);\n" "}\n" "\n" "/// Adds each 8-bit unsigned integer element of the first 64-bit integer\n" "/// vector of [8 x i8] to the corresponding 8-bit unsigned integer element of\n" "/// the second 64-bit integer vector of [8 x i8]. Sums greater than 0xFF are\n" "/// saturated to 0xFF. The results are packed into a 64-bit integer vector of\n" "/// [8 x i8].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PADDUSB instruction.\n" "///\n" "/// \\param __m1\n" "/// A 64-bit integer vector of [8 x i8].\n" "/// \\param __m2\n" "/// A 64-bit integer vector of [8 x i8].\n" "/// \\returns A 64-bit integer vector of [8 x i8] containing the saturated\n" "/// unsigned sums of both parameters.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_adds_pu8(__m64 __m1, __m64 __m2)\n" "{\n" " return (__m64)__builtin_ia32_paddusb((__v8qi)__m1, (__v8qi)__m2);\n" "}\n" "\n" "/// Adds each 16-bit unsigned integer element of the first 64-bit integer\n" "/// vector of [4 x i16] to the corresponding 16-bit unsigned integer element\n" "/// of the second 64-bit integer vector of [4 x i16]. Sums greater than\n" "/// 0xFFFF are saturated to 0xFFFF. 
The results are packed into a 64-bit\n" "/// integer vector of [4 x i16].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PADDUSW instruction.\n" "///\n" "/// \\param __m1\n" "/// A 64-bit integer vector of [4 x i16].\n" "/// \\param __m2\n" "/// A 64-bit integer vector of [4 x i16].\n" "/// \\returns A 64-bit integer vector of [4 x i16] containing the saturated\n" "/// unsigned sums of both parameters.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_adds_pu16(__m64 __m1, __m64 __m2)\n" "{\n" " return (__m64)__builtin_ia32_paddusw((__v4hi)__m1, (__v4hi)__m2);\n" "}\n" "\n" "/// Subtracts each 8-bit integer element of the second 64-bit integer\n" "/// vector of [8 x i8] from the corresponding 8-bit integer element of the\n" "/// first 64-bit integer vector of [8 x i8]. The lower 8 bits of the results\n" "/// are packed into a 64-bit integer vector of [8 x i8].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PSUBB instruction.\n" "///\n" "/// \\param __m1\n" "/// A 64-bit integer vector of [8 x i8] containing the minuends.\n" "/// \\param __m2\n" "/// A 64-bit integer vector of [8 x i8] containing the subtrahends.\n" "/// \\returns A 64-bit integer vector of [8 x i8] containing the differences of\n" "/// both parameters.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_sub_pi8(__m64 __m1, __m64 __m2)\n" "{\n" " return (__m64)__builtin_ia32_psubb((__v8qi)__m1, (__v8qi)__m2);\n" "}\n" "\n" "/// Subtracts each 16-bit integer element of the second 64-bit integer\n" "/// vector of [4 x i16] from the corresponding 16-bit integer element of the\n" "/// first 64-bit integer vector of [4 x i16]. 
The lower 16 bits of the\n" "/// results are packed into a 64-bit integer vector of [4 x i16].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PSUBW instruction.\n" "///\n" "/// \\param __m1\n" "/// A 64-bit integer vector of [4 x i16] containing the minuends.\n" "/// \\param __m2\n" "/// A 64-bit integer vector of [4 x i16] containing the subtrahends.\n" "/// \\returns A 64-bit integer vector of [4 x i16] containing the differences of\n" "/// both parameters.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_sub_pi16(__m64 __m1, __m64 __m2)\n" "{\n" " return (__m64)__builtin_ia32_psubw((__v4hi)__m1, (__v4hi)__m2);\n" "}\n" "\n" "/// Subtracts each 32-bit integer element of the second 64-bit integer\n" "/// vector of [2 x i32] from the corresponding 32-bit integer element of the\n" "/// first 64-bit integer vector of [2 x i32]. The lower 32 bits of the\n" "/// results are packed into a 64-bit integer vector of [2 x i32].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PSUBD instruction.\n" "///\n" "/// \\param __m1\n" "/// A 64-bit integer vector of [2 x i32] containing the minuends.\n" "/// \\param __m2\n" "/// A 64-bit integer vector of [2 x i32] containing the subtrahends.\n" "/// \\returns A 64-bit integer vector of [2 x i32] containing the differences of\n" "/// both parameters.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_sub_pi32(__m64 __m1, __m64 __m2)\n" "{\n" " return (__m64)__builtin_ia32_psubd((__v2si)__m1, (__v2si)__m2);\n" "}\n" "\n" "/// Subtracts each 8-bit signed integer element of the second 64-bit\n" "/// integer vector of [8 x i8] from the corresponding 8-bit signed integer\n" "/// element of the first 64-bit integer vector of [8 x i8]. Positive results\n" "/// greater than 0x7F are saturated to 0x7F. Negative results less than 0x80\n" "/// are saturated to 0x80. 
The results are packed into a 64-bit integer\n" "/// vector of [8 x i8].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PSUBSB instruction.\n" "///\n" "/// \\param __m1\n" "/// A 64-bit integer vector of [8 x i8] containing the minuends.\n" "/// \\param __m2\n" "/// A 64-bit integer vector of [8 x i8] containing the subtrahends.\n" "/// \\returns A 64-bit integer vector of [8 x i8] containing the saturated\n" "/// differences of both parameters.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_subs_pi8(__m64 __m1, __m64 __m2)\n" "{\n" " return (__m64)__builtin_ia32_psubsb((__v8qi)__m1, (__v8qi)__m2);\n" "}\n" "\n" "/// Subtracts each 16-bit signed integer element of the second 64-bit\n" "/// integer vector of [4 x i16] from the corresponding 16-bit signed integer\n" "/// element of the first 64-bit integer vector of [4 x i16]. Positive results\n" "/// greater than 0x7FFF are saturated to 0x7FFF. Negative results less than\n" "/// 0x8000 are saturated to 0x8000. 
The results are packed into a 64-bit\n" "/// integer vector of [4 x i16].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PSUBSW instruction.\n" "///\n" "/// \\param __m1\n" "/// A 64-bit integer vector of [4 x i16] containing the minuends.\n" "/// \\param __m2\n" "/// A 64-bit integer vector of [4 x i16] containing the subtrahends.\n" "/// \\returns A 64-bit integer vector of [4 x i16] containing the saturated\n" "/// differences of both parameters.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_subs_pi16(__m64 __m1, __m64 __m2)\n" "{\n" " return (__m64)__builtin_ia32_psubsw((__v4hi)__m1, (__v4hi)__m2);\n" "}\n" "\n" "/// Subtracts each 8-bit unsigned integer element of the second 64-bit\n" "/// integer vector of [8 x i8] from the corresponding 8-bit unsigned integer\n" "/// element of the first 64-bit integer vector of [8 x i8].\n" "///\n" "/// If an element of the first vector is less than the corresponding element\n" "/// of the second vector, the result is saturated to 0. 
The results are\n" "/// packed into a 64-bit integer vector of [8 x i8].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PSUBUSB instruction.\n" "///\n" "/// \\param __m1\n" "/// A 64-bit integer vector of [8 x i8] containing the minuends.\n" "/// \\param __m2\n" "/// A 64-bit integer vector of [8 x i8] containing the subtrahends.\n" "/// \\returns A 64-bit integer vector of [8 x i8] containing the saturated\n" "/// differences of both parameters.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_subs_pu8(__m64 __m1, __m64 __m2)\n" "{\n" " return (__m64)__builtin_ia32_psubusb((__v8qi)__m1, (__v8qi)__m2);\n" "}\n" "\n" "/// Subtracts each 16-bit unsigned integer element of the second 64-bit\n" "/// integer vector of [4 x i16] from the corresponding 16-bit unsigned\n" "/// integer element of the first 64-bit integer vector of [4 x i16].\n" "///\n" "/// If an element of the first vector is less than the corresponding element\n" "/// of the second vector, the result is saturated to 0. The results are\n" "/// packed into a 64-bit integer vector of [4 x i16].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PSUBUSW instruction.\n" "///\n" "/// \\param __m1\n" "/// A 64-bit integer vector of [4 x i16] containing the minuends.\n" "/// \\param __m2\n" "/// A 64-bit integer vector of [4 x i16] containing the subtrahends.\n" "/// \\returns A 64-bit integer vector of [4 x i16] containing the saturated\n" "/// differences of both parameters.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_subs_pu16(__m64 __m1, __m64 __m2)\n" "{\n" " return (__m64)__builtin_ia32_psubusw((__v4hi)__m1, (__v4hi)__m2);\n" "}\n" "\n" "/// Multiplies each 16-bit signed integer element of the first 64-bit\n" "/// integer vector of [4 x i16] by the corresponding 16-bit signed integer\n" "/// element of the second 64-bit integer vector of [4 x i16] and get four\n" "/// 32-bit products. 
Adds adjacent pairs of products to get two 32-bit sums.\n" "/// The lower 32 bits of these two sums are packed into a 64-bit integer\n" "/// vector of [2 x i32].\n" "///\n" "/// For example, bits [15:0] of both parameters are multiplied, bits [31:16]\n" "/// of both parameters are multiplied, and the sum of both results is written\n" "/// to bits [31:0] of the result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PMADDWD instruction.\n" "///\n" "/// \\param __m1\n" "/// A 64-bit integer vector of [4 x i16].\n" "/// \\param __m2\n" "/// A 64-bit integer vector of [4 x i16].\n" "/// \\returns A 64-bit integer vector of [2 x i32] containing the sums of\n" "/// products of both parameters.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_madd_pi16(__m64 __m1, __m64 __m2)\n" "{\n" " return (__m64)__builtin_ia32_pmaddwd((__v4hi)__m1, (__v4hi)__m2);\n" "}\n" "\n" "/// Multiplies each 16-bit signed integer element of the first 64-bit\n" "/// integer vector of [4 x i16] by the corresponding 16-bit signed integer\n" "/// element of the second 64-bit integer vector of [4 x i16]. Packs the upper\n" "/// 16 bits of the 32-bit products into a 64-bit integer vector of [4 x i16].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PMULHW instruction.\n" "///\n" "/// \\param __m1\n" "/// A 64-bit integer vector of [4 x i16].\n" "/// \\param __m2\n" "/// A 64-bit integer vector of [4 x i16].\n" "/// \\returns A 64-bit integer vector of [4 x i16] containing the upper 16 bits\n" "/// of the products of both parameters.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_mulhi_pi16(__m64 __m1, __m64 __m2)\n" "{\n" " return (__m64)__builtin_ia32_pmulhw((__v4hi)__m1, (__v4hi)__m2);\n" "}\n" "\n" "/// Multiplies each 16-bit signed integer element of the first 64-bit\n" "/// integer vector of [4 x i16] by the corresponding 16-bit signed integer\n" "/// element of the second 64-bit integer vector of [4 x i16]. 
Packs the lower\n" "/// 16 bits of the 32-bit products into a 64-bit integer vector of [4 x i16].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PMULLW instruction.\n" "///\n" "/// \\param __m1\n" "/// A 64-bit integer vector of [4 x i16].\n" "/// \\param __m2\n" "/// A 64-bit integer vector of [4 x i16].\n" "/// \\returns A 64-bit integer vector of [4 x i16] containing the lower 16 bits\n" "/// of the products of both parameters.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_mullo_pi16(__m64 __m1, __m64 __m2)\n" "{\n" " return (__m64)__builtin_ia32_pmullw((__v4hi)__m1, (__v4hi)__m2);\n" "}\n" "\n" "/// Left-shifts each 16-bit signed integer element of the first\n" "/// parameter, which is a 64-bit integer vector of [4 x i16], by the number\n" "/// of bits specified by the second parameter, which is a 64-bit integer. The\n" "/// lower 16 bits of the results are packed into a 64-bit integer vector of\n" "/// [4 x i16].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PSLLW instruction.\n" "///\n" "/// \\param __m\n" "/// A 64-bit integer vector of [4 x i16].\n" "/// \\param __count\n" "/// A 64-bit integer vector interpreted as a single 64-bit integer.\n" "/// \\returns A 64-bit integer vector of [4 x i16] containing the left-shifted\n" "/// values. 
If \\a __count is greater or equal to 16, the result is set to all\n" "/// 0.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_sll_pi16(__m64 __m, __m64 __count)\n" "{\n" " return (__m64)__builtin_ia32_psllw((__v4hi)__m, __count);\n" "}\n" "\n" "/// Left-shifts each 16-bit signed integer element of a 64-bit integer\n" "/// vector of [4 x i16] by the number of bits specified by a 32-bit integer.\n" "/// The lower 16 bits of the results are packed into a 64-bit integer vector\n" "/// of [4 x i16].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PSLLW instruction.\n" "///\n" "/// \\param __m\n" "/// A 64-bit integer vector of [4 x i16].\n" "/// \\param __count\n" "/// A 32-bit integer value.\n" "/// \\returns A 64-bit integer vector of [4 x i16] containing the left-shifted\n" "/// values. If \\a __count is greater or equal to 16, the result is set to all\n" "/// 0.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_slli_pi16(__m64 __m, int __count)\n" "{\n" " return (__m64)__builtin_ia32_psllwi((__v4hi)__m, __count);\n" "}\n" "\n" "/// Left-shifts each 32-bit signed integer element of the first\n" "/// parameter, which is a 64-bit integer vector of [2 x i32], by the number\n" "/// of bits specified by the second parameter, which is a 64-bit integer. The\n" "/// lower 32 bits of the results are packed into a 64-bit integer vector of\n" "/// [2 x i32].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PSLLD instruction.\n" "///\n" "/// \\param __m\n" "/// A 64-bit integer vector of [2 x i32].\n" "/// \\param __count\n" "/// A 64-bit integer vector interpreted as a single 64-bit integer.\n" "/// \\returns A 64-bit integer vector of [2 x i32] containing the left-shifted\n" "/// values. 
If \\a __count is greater or equal to 32, the result is set to all\n" "/// 0.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_sll_pi32(__m64 __m, __m64 __count)\n" "{\n" " return (__m64)__builtin_ia32_pslld((__v2si)__m, __count);\n" "}\n" "\n" "/// Left-shifts each 32-bit signed integer element of a 64-bit integer\n" "/// vector of [2 x i32] by the number of bits specified by a 32-bit integer.\n" "/// The lower 32 bits of the results are packed into a 64-bit integer vector\n" "/// of [2 x i32].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PSLLD instruction.\n" "///\n" "/// \\param __m\n" "/// A 64-bit integer vector of [2 x i32].\n" "/// \\param __count\n" "/// A 32-bit integer value.\n" "/// \\returns A 64-bit integer vector of [2 x i32] containing the left-shifted\n" "/// values. If \\a __count is greater or equal to 32, the result is set to all\n" "/// 0.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_slli_pi32(__m64 __m, int __count)\n" "{\n" " return (__m64)__builtin_ia32_pslldi((__v2si)__m, __count);\n" "}\n" "\n" "/// Left-shifts the first 64-bit integer parameter by the number of bits\n" "/// specified by the second 64-bit integer parameter. The lower 64 bits of\n" "/// result are returned.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PSLLQ instruction.\n" "///\n" "/// \\param __m\n" "/// A 64-bit integer vector interpreted as a single 64-bit integer.\n" "/// \\param __count\n" "/// A 64-bit integer vector interpreted as a single 64-bit integer.\n" "/// \\returns A 64-bit integer vector containing the left-shifted value. 
If\n" "/// \\a __count is greater or equal to 64, the result is set to 0.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_sll_si64(__m64 __m, __m64 __count)\n" "{\n" " return (__m64)__builtin_ia32_psllq((__v1di)__m, __count);\n" "}\n" "\n" "/// Left-shifts the first parameter, which is a 64-bit integer, by the\n" "/// number of bits specified by the second parameter, which is a 32-bit\n" "/// integer. The lower 64 bits of result are returned.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PSLLQ instruction.\n" "///\n" "/// \\param __m\n" "/// A 64-bit integer vector interpreted as a single 64-bit integer.\n" "/// \\param __count\n" "/// A 32-bit integer value.\n" "/// \\returns A 64-bit integer vector containing the left-shifted value. If\n" "/// \\a __count is greater or equal to 64, the result is set to 0.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_slli_si64(__m64 __m, int __count)\n" "{\n" " return (__m64)__builtin_ia32_psllqi((__v1di)__m, __count);\n" "}\n" "\n" "/// Right-shifts each 16-bit integer element of the first parameter,\n" "/// which is a 64-bit integer vector of [4 x i16], by the number of bits\n" "/// specified by the second parameter, which is a 64-bit integer.\n" "///\n" "/// High-order bits are filled with the sign bit of the initial value of each\n" "/// 16-bit element. 
The 16-bit results are packed into a 64-bit integer\n" "/// vector of [4 x i16].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PSRAW instruction.\n" "///\n" "/// \\param __m\n" "/// A 64-bit integer vector of [4 x i16].\n" "/// \\param __count\n" "/// A 64-bit integer vector interpreted as a single 64-bit integer.\n" "/// \\returns A 64-bit integer vector of [4 x i16] containing the right-shifted\n" "/// values.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_sra_pi16(__m64 __m, __m64 __count)\n" "{\n" " return (__m64)__builtin_ia32_psraw((__v4hi)__m, __count);\n" "}\n" "\n" "/// Right-shifts each 16-bit integer element of a 64-bit integer vector\n" "/// of [4 x i16] by the number of bits specified by a 32-bit integer.\n" "///\n" "/// High-order bits are filled with the sign bit of the initial value of each\n" "/// 16-bit element. The 16-bit results are packed into a 64-bit integer\n" "/// vector of [4 x i16].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PSRAW instruction.\n" "///\n" "/// \\param __m\n" "/// A 64-bit integer vector of [4 x i16].\n" "/// \\param __count\n" "/// A 32-bit integer value.\n" "/// \\returns A 64-bit integer vector of [4 x i16] containing the right-shifted\n" "/// values.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_srai_pi16(__m64 __m, int __count)\n" "{\n" " return (__m64)__builtin_ia32_psrawi((__v4hi)__m, __count);\n" "}\n" "\n" "/// Right-shifts each 32-bit integer element of the first parameter,\n" "/// which is a 64-bit integer vector of [2 x i32], by the number of bits\n" "/// specified by the second parameter, which is a 64-bit integer.\n" "///\n" "/// High-order bits are filled with the sign bit of the initial value of each\n" "/// 32-bit element. 
The 32-bit results are packed into a 64-bit integer\n" "/// vector of [2 x i32].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PSRAD instruction.\n" "///\n" "/// \\param __m\n" "/// A 64-bit integer vector of [2 x i32].\n" "/// \\param __count\n" "/// A 64-bit integer vector interpreted as a single 64-bit integer.\n" "/// \\returns A 64-bit integer vector of [2 x i32] containing the right-shifted\n" "/// values.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_sra_pi32(__m64 __m, __m64 __count)\n" "{\n" " return (__m64)__builtin_ia32_psrad((__v2si)__m, __count);\n" "}\n" "\n" "/// Right-shifts each 32-bit integer element of a 64-bit integer vector\n" "/// of [2 x i32] by the number of bits specified by a 32-bit integer.\n" "///\n" "/// High-order bits are filled with the sign bit of the initial value of each\n" "/// 32-bit element. The 32-bit results are packed into a 64-bit integer\n" "/// vector of [2 x i32].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PSRAD instruction.\n" "///\n" "/// \\param __m\n" "/// A 64-bit integer vector of [2 x i32].\n" "/// \\param __count\n" "/// A 32-bit integer value.\n" "/// \\returns A 64-bit integer vector of [2 x i32] containing the right-shifted\n" "/// values.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_srai_pi32(__m64 __m, int __count)\n" "{\n" " return (__m64)__builtin_ia32_psradi((__v2si)__m, __count);\n" "}\n" "\n" "/// Right-shifts each 16-bit integer element of the first parameter,\n" "/// which is a 64-bit integer vector of [4 x i16], by the number of bits\n" "/// specified by the second parameter, which is a 64-bit integer.\n" "///\n" "/// High-order bits are cleared. 
The 16-bit results are packed into a 64-bit\n" "/// integer vector of [4 x i16].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PSRLW instruction.\n" "///\n" "/// \\param __m\n" "/// A 64-bit integer vector of [4 x i16].\n" "/// \\param __count\n" "/// A 64-bit integer vector interpreted as a single 64-bit integer.\n" "/// \\returns A 64-bit integer vector of [4 x i16] containing the right-shifted\n" "/// values.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_srl_pi16(__m64 __m, __m64 __count)\n" "{\n" " return (__m64)__builtin_ia32_psrlw((__v4hi)__m, __count);\n" "}\n" "\n" "/// Right-shifts each 16-bit integer element of a 64-bit integer vector\n" "/// of [4 x i16] by the number of bits specified by a 32-bit integer.\n" "///\n" "/// High-order bits are cleared. The 16-bit results are packed into a 64-bit\n" "/// integer vector of [4 x i16].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PSRLW instruction.\n" "///\n" "/// \\param __m\n" "/// A 64-bit integer vector of [4 x i16].\n" "/// \\param __count\n" "/// A 32-bit integer value.\n" "/// \\returns A 64-bit integer vector of [4 x i16] containing the right-shifted\n" "/// values.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_srli_pi16(__m64 __m, int __count)\n" "{\n" " return (__m64)__builtin_ia32_psrlwi((__v4hi)__m, __count);\n" "}\n" "\n" "/// Right-shifts each 32-bit integer element of the first parameter,\n" "/// which is a 64-bit integer vector of [2 x i32], by the number of bits\n" "/// specified by the second parameter, which is a 64-bit integer.\n" "///\n" "/// High-order bits are cleared. 
The 32-bit results are packed into a 64-bit\n" "/// integer vector of [2 x i32].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PSRLD instruction.\n" "///\n" "/// \\param __m\n" "/// A 64-bit integer vector of [2 x i32].\n" "/// \\param __count\n" "/// A 64-bit integer vector interpreted as a single 64-bit integer.\n" "/// \\returns A 64-bit integer vector of [2 x i32] containing the right-shifted\n" "/// values.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_srl_pi32(__m64 __m, __m64 __count)\n" "{\n" " return (__m64)__builtin_ia32_psrld((__v2si)__m, __count);\n" "}\n" "\n" "/// Right-shifts each 32-bit integer element of a 64-bit integer vector\n" "/// of [2 x i32] by the number of bits specified by a 32-bit integer.\n" "///\n" "/// High-order bits are cleared. The 32-bit results are packed into a 64-bit\n" "/// integer vector of [2 x i32].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PSRLD instruction.\n" "///\n" "/// \\param __m\n" "/// A 64-bit integer vector of [2 x i32].\n" "/// \\param __count\n" "/// A 32-bit integer value.\n" "/// \\returns A 64-bit integer vector of [2 x i32] containing the right-shifted\n" "/// values.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_srli_pi32(__m64 __m, int __count)\n" "{\n" " return (__m64)__builtin_ia32_psrldi((__v2si)__m, __count);\n" "}\n" "\n" "/// Right-shifts the first 64-bit integer parameter by the number of bits\n" "/// specified by the second 64-bit integer parameter.\n" "///\n" "/// High-order bits are cleared.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PSRLQ instruction.\n" "///\n" "/// \\param __m\n" "/// A 64-bit integer vector interpreted as a single 64-bit integer.\n" "/// \\param __count\n" "/// A 64-bit integer vector interpreted as a single 64-bit integer.\n" "/// \\returns A 64-bit integer vector containing the right-shifted value.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" 
"_mm_srl_si64(__m64 __m, __m64 __count)\n" "{\n" " return (__m64)__builtin_ia32_psrlq((__v1di)__m, __count);\n" "}\n" "\n" "/// Right-shifts the first parameter, which is a 64-bit integer, by the\n" "/// number of bits specified by the second parameter, which is a 32-bit\n" "/// integer.\n" "///\n" "/// High-order bits are cleared.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PSRLQ instruction.\n" "///\n" "/// \\param __m\n" "/// A 64-bit integer vector interpreted as a single 64-bit integer.\n" "/// \\param __count\n" "/// A 32-bit integer value.\n" "/// \\returns A 64-bit integer vector containing the right-shifted value.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_srli_si64(__m64 __m, int __count)\n" "{\n" " return (__m64)__builtin_ia32_psrlqi((__v1di)__m, __count);\n" "}\n" "\n" "/// Performs a bitwise AND of two 64-bit integer vectors.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PAND instruction.\n" "///\n" "/// \\param __m1\n" "/// A 64-bit integer vector.\n" "/// \\param __m2\n" "/// A 64-bit integer vector.\n" "/// \\returns A 64-bit integer vector containing the bitwise AND of both\n" "/// parameters.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_and_si64(__m64 __m1, __m64 __m2)\n" "{\n" " return __builtin_ia32_pand((__v1di)__m1, (__v1di)__m2);\n" "}\n" "\n" "/// Performs a bitwise NOT of the first 64-bit integer vector, and then\n" "/// performs a bitwise AND of the intermediate result and the second 64-bit\n" "/// integer vector.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PANDN instruction.\n" "///\n" "/// \\param __m1\n" "/// A 64-bit integer vector. 
The one's complement of this parameter is used\n" "/// in the bitwise AND.\n" "/// \\param __m2\n" "/// A 64-bit integer vector.\n" "/// \\returns A 64-bit integer vector containing the bitwise AND of the second\n" "/// parameter and the one's complement of the first parameter.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_andnot_si64(__m64 __m1, __m64 __m2)\n" "{\n" " return __builtin_ia32_pandn((__v1di)__m1, (__v1di)__m2);\n" "}\n" "\n" "/// Performs a bitwise OR of two 64-bit integer vectors.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the POR instruction.\n" "///\n" "/// \\param __m1\n" "/// A 64-bit integer vector.\n" "/// \\param __m2\n" "/// A 64-bit integer vector.\n" "/// \\returns A 64-bit integer vector containing the bitwise OR of both\n" "/// parameters.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_or_si64(__m64 __m1, __m64 __m2)\n" "{\n" " return __builtin_ia32_por((__v1di)__m1, (__v1di)__m2);\n" "}\n" "\n" "/// Performs a bitwise exclusive OR of two 64-bit integer vectors.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PXOR instruction.\n" "///\n" "/// \\param __m1\n" "/// A 64-bit integer vector.\n" "/// \\param __m2\n" "/// A 64-bit integer vector.\n" "/// \\returns A 64-bit integer vector containing the bitwise exclusive OR of both\n" "/// parameters.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_xor_si64(__m64 __m1, __m64 __m2)\n" "{\n" " return __builtin_ia32_pxor((__v1di)__m1, (__v1di)__m2);\n" "}\n" "\n" "/// Compares the 8-bit integer elements of two 64-bit integer vectors of\n" "/// [8 x i8] to determine if the element of the first vector is equal to the\n" "/// corresponding element of the second vector.\n" "///\n" "/// The comparison yields 0 for false, 0xFF for true.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PCMPEQB instruction.\n" "///\n" "/// \\param __m1\n" "/// A 64-bit integer vector of [8 x i8].\n" "/// 
\\param __m2\n" "/// A 64-bit integer vector of [8 x i8].\n" "/// \\returns A 64-bit integer vector of [8 x i8] containing the comparison\n" "/// results.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_cmpeq_pi8(__m64 __m1, __m64 __m2)\n" "{\n" " return (__m64)__builtin_ia32_pcmpeqb((__v8qi)__m1, (__v8qi)__m2);\n" "}\n" "\n" "/// Compares the 16-bit integer elements of two 64-bit integer vectors of\n" "/// [4 x i16] to determine if the element of the first vector is equal to the\n" "/// corresponding element of the second vector.\n" "///\n" "/// The comparison yields 0 for false, 0xFFFF for true.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PCMPEQW instruction.\n" "///\n" "/// \\param __m1\n" "/// A 64-bit integer vector of [4 x i16].\n" "/// \\param __m2\n" "/// A 64-bit integer vector of [4 x i16].\n" "/// \\returns A 64-bit integer vector of [4 x i16] containing the comparison\n" "/// results.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_cmpeq_pi16(__m64 __m1, __m64 __m2)\n" "{\n" " return (__m64)__builtin_ia32_pcmpeqw((__v4hi)__m1, (__v4hi)__m2);\n" "}\n" "\n" "/// Compares the 32-bit integer elements of two 64-bit integer vectors of\n" "/// [2 x i32] to determine if the element of the first vector is equal to the\n" "/// corresponding element of the second vector.\n" "///\n" "/// The comparison yields 0 for false, 0xFFFFFFFF for true.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PCMPEQD instruction.\n" "///\n" "/// \\param __m1\n" "/// A 64-bit integer vector of [2 x i32].\n" "/// \\param __m2\n" "/// A 64-bit integer vector of [2 x i32].\n" "/// \\returns A 64-bit integer vector of [2 x i32] containing the comparison\n" "/// results.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_cmpeq_pi32(__m64 __m1, __m64 __m2)\n" "{\n" " return (__m64)__builtin_ia32_pcmpeqd((__v2si)__m1, (__v2si)__m2);\n" "}\n" "\n" "/// Compares the 8-bit integer elements of two 64-bit integer 
vectors of\n" "/// [8 x i8] to determine if the element of the first vector is greater than\n" "/// the corresponding element of the second vector.\n" "///\n" "/// The comparison yields 0 for false, 0xFF for true.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PCMPGTB instruction.\n" "///\n" "/// \\param __m1\n" "/// A 64-bit integer vector of [8 x i8].\n" "/// \\param __m2\n" "/// A 64-bit integer vector of [8 x i8].\n" "/// \\returns A 64-bit integer vector of [8 x i8] containing the comparison\n" "/// results.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_cmpgt_pi8(__m64 __m1, __m64 __m2)\n" "{\n" " return (__m64)__builtin_ia32_pcmpgtb((__v8qi)__m1, (__v8qi)__m2);\n" "}\n" "\n" "/// Compares the 16-bit integer elements of two 64-bit integer vectors of\n" "/// [4 x i16] to determine if the element of the first vector is greater than\n" "/// the corresponding element of the second vector.\n" "///\n" "/// The comparison yields 0 for false, 0xFFFF for true.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PCMPGTW instruction.\n" "///\n" "/// \\param __m1\n" "/// A 64-bit integer vector of [4 x i16].\n" "/// \\param __m2\n" "/// A 64-bit integer vector of [4 x i16].\n" "/// \\returns A 64-bit integer vector of [4 x i16] containing the comparison\n" "/// results.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_cmpgt_pi16(__m64 __m1, __m64 __m2)\n" "{\n" " return (__m64)__builtin_ia32_pcmpgtw((__v4hi)__m1, (__v4hi)__m2);\n" "}\n" "\n" "/// Compares the 32-bit integer elements of two 64-bit integer vectors of\n" "/// [2 x i32] to determine if the element of the first vector is greater than\n" "/// the corresponding element of the second vector.\n" "///\n" "/// The comparison yields 0 for false, 0xFFFFFFFF for true.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PCMPGTD instruction.\n" "///\n" "/// \\param __m1\n" "/// A 64-bit integer vector of [2 x i32].\n" 
"/// \\param __m2\n" "/// A 64-bit integer vector of [2 x i32].\n" "/// \\returns A 64-bit integer vector of [2 x i32] containing the comparison\n" "/// results.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_cmpgt_pi32(__m64 __m1, __m64 __m2)\n" "{\n" " return (__m64)__builtin_ia32_pcmpgtd((__v2si)__m1, (__v2si)__m2);\n" "}\n" "\n" "/// Constructs a 64-bit integer vector initialized to zero.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PXOR instruction.\n" "///\n" "/// \\returns An initialized 64-bit integer vector with all elements set to zero.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_setzero_si64(void)\n" "{\n" " return __extension__ (__m64){ 0LL };\n" "}\n" "\n" "/// Constructs a 64-bit integer vector initialized with the specified\n" "/// 32-bit integer values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic is a utility function and does not correspond to a specific\n" "/// instruction.\n" "///\n" "/// \\param __i1\n" "/// A 32-bit integer value used to initialize the upper 32 bits of the\n" "/// result.\n" "/// \\param __i0\n" "/// A 32-bit integer value used to initialize the lower 32 bits of the\n" "/// result.\n" "/// \\returns An initialized 64-bit integer vector.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_set_pi32(int __i1, int __i0)\n" "{\n" " return (__m64)__builtin_ia32_vec_init_v2si(__i0, __i1);\n" "}\n" "\n" "/// Constructs a 64-bit integer vector initialized with the specified\n" "/// 16-bit integer values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic is a utility function and does not correspond to a specific\n" "/// instruction.\n" "///\n" "/// \\param __s3\n" "/// A 16-bit integer value used to initialize bits [63:48] of the result.\n" "/// \\param __s2\n" "/// A 16-bit integer value used to initialize bits [47:32] of the result.\n" "/// \\param __s1\n" "/// A 16-bit integer value used to initialize bits [31:16] of the result.\n" "/// \\param 
__s0\n" "/// A 16-bit integer value used to initialize bits [15:0] of the result.\n" "/// \\returns An initialized 64-bit integer vector.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_set_pi16(short __s3, short __s2, short __s1, short __s0)\n" "{\n" " return (__m64)__builtin_ia32_vec_init_v4hi(__s0, __s1, __s2, __s3);\n" "}\n" "\n" "/// Constructs a 64-bit integer vector initialized with the specified\n" "/// 8-bit integer values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic is a utility function and does not correspond to a specific\n" "/// instruction.\n" "///\n" "/// \\param __b7\n" "/// An 8-bit integer value used to initialize bits [63:56] of the result.\n" "/// \\param __b6\n" "/// An 8-bit integer value used to initialize bits [55:48] of the result.\n" "/// \\param __b5\n" "/// An 8-bit integer value used to initialize bits [47:40] of the result.\n" "/// \\param __b4\n" "/// An 8-bit integer value used to initialize bits [39:32] of the result.\n" "/// \\param __b3\n" "/// An 8-bit integer value used to initialize bits [31:24] of the result.\n" "/// \\param __b2\n" "/// An 8-bit integer value used to initialize bits [23:16] of the result.\n" "/// \\param __b1\n" "/// An 8-bit integer value used to initialize bits [15:8] of the result.\n" "/// \\param __b0\n" "/// An 8-bit integer value used to initialize bits [7:0] of the result.\n" "/// \\returns An initialized 64-bit integer vector.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_set_pi8(char __b7, char __b6, char __b5, char __b4, char __b3, char __b2,\n" " char __b1, char __b0)\n" "{\n" " return (__m64)__builtin_ia32_vec_init_v8qi(__b0, __b1, __b2, __b3,\n" " __b4, __b5, __b6, __b7);\n" "}\n" "\n" "/// Constructs a 64-bit integer vector of [2 x i32], with each of the\n" "/// 32-bit integer vector elements set to the specified 32-bit integer\n" "/// value.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic is a utility function and does not correspond to a 
specific\n" "/// instruction.\n" "///\n" "/// \\param __i\n" "/// A 32-bit integer value used to initialize each vector element of the\n" "/// result.\n" "/// \\returns An initialized 64-bit integer vector of [2 x i32].\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_set1_pi32(int __i)\n" "{\n" " return _mm_set_pi32(__i, __i);\n" "}\n" "\n" "/// Constructs a 64-bit integer vector of [4 x i16], with each of the\n" "/// 16-bit integer vector elements set to the specified 16-bit integer\n" "/// value.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic is a utility function and does not correspond to a specific\n" "/// instruction.\n" "///\n" "/// \\param __w\n" "/// A 16-bit integer value used to initialize each vector element of the\n" "/// result.\n" "/// \\returns An initialized 64-bit integer vector of [4 x i16].\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_set1_pi16(short __w)\n" "{\n" " return _mm_set_pi16(__w, __w, __w, __w);\n" "}\n" "\n" "/// Constructs a 64-bit integer vector of [8 x i8], with each of the\n" "/// 8-bit integer vector elements set to the specified 8-bit integer value.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic is a utility function and does not correspond to a specific\n" "/// instruction.\n" "///\n" "/// \\param __b\n" "/// An 8-bit integer value used to initialize each vector element of the\n" "/// result.\n" "/// \\returns An initialized 64-bit integer vector of [8 x i8].\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_set1_pi8(char __b)\n" "{\n" " return _mm_set_pi8(__b, __b, __b, __b, __b, __b, __b, __b);\n" "}\n" "\n" "/// Constructs a 64-bit integer vector, initialized in reverse order with\n" "/// the specified 32-bit integer values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic is a utility function and does not correspond to a specific\n" "/// instruction.\n" "///\n" "/// \\param __i0\n" "/// A 32-bit integer value used to initialize the lower 32 bits of the\n" "/// 
result.\n" "/// \\param __i1\n" "/// A 32-bit integer value used to initialize the upper 32 bits of the\n" "/// result.\n" "/// \\returns An initialized 64-bit integer vector.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_setr_pi32(int __i0, int __i1)\n" "{\n" " return _mm_set_pi32(__i1, __i0);\n" "}\n" "\n" "/// Constructs a 64-bit integer vector, initialized in reverse order with\n" "/// the specified 16-bit integer values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic is a utility function and does not correspond to a specific\n" "/// instruction.\n" "///\n" "/// \\param __w0\n" "/// A 16-bit integer value used to initialize bits [15:0] of the result.\n" "/// \\param __w1\n" "/// A 16-bit integer value used to initialize bits [31:16] of the result.\n" "/// \\param __w2\n" "/// A 16-bit integer value used to initialize bits [47:32] of the result.\n" "/// \\param __w3\n" "/// A 16-bit integer value used to initialize bits [63:48] of the result.\n" "/// \\returns An initialized 64-bit integer vector.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_setr_pi16(short __w0, short __w1, short __w2, short __w3)\n" "{\n" " return _mm_set_pi16(__w3, __w2, __w1, __w0);\n" "}\n" "\n" "/// Constructs a 64-bit integer vector, initialized in reverse order with\n" "/// the specified 8-bit integer values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic is a utility function and does not correspond to a specific\n" "/// instruction.\n" "///\n" "/// \\param __b0\n" "/// An 8-bit integer value used to initialize bits [7:0] of the result.\n" "/// \\param __b1\n" "/// An 8-bit integer value used to initialize bits [15:8] of the result.\n" "/// \\param __b2\n" "/// An 8-bit integer value used to initialize bits [23:16] of the result.\n" "/// \\param __b3\n" "/// An 8-bit integer value used to initialize bits [31:24] of the result.\n" "/// \\param __b4\n" "/// An 8-bit integer value used to initialize bits [39:32] of the result.\n" "/// \\param 
__b5\n" "/// An 8-bit integer value used to initialize bits [47:40] of the result.\n" "/// \\param __b6\n" "/// An 8-bit integer value used to initialize bits [55:48] of the result.\n" "/// \\param __b7\n" "/// An 8-bit integer value used to initialize bits [63:56] of the result.\n" "/// \\returns An initialized 64-bit integer vector.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS\n" "_mm_setr_pi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5,\n" " char __b6, char __b7)\n" "{\n" " return _mm_set_pi8(__b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);\n" "}\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "\n" "/* Aliases for compatibility. */\n" "#define _m_empty _mm_empty\n" "#define _m_from_int _mm_cvtsi32_si64\n" "#define _m_from_int64 _mm_cvtsi64_m64\n" "#define _m_to_int _mm_cvtsi64_si32\n" "#define _m_to_int64 _mm_cvtm64_si64\n" "#define _m_packsswb _mm_packs_pi16\n" "#define _m_packssdw _mm_packs_pi32\n" "#define _m_packuswb _mm_packs_pu16\n" "#define _m_punpckhbw _mm_unpackhi_pi8\n" "#define _m_punpckhwd _mm_unpackhi_pi16\n" "#define _m_punpckhdq _mm_unpackhi_pi32\n" "#define _m_punpcklbw _mm_unpacklo_pi8\n" "#define _m_punpcklwd _mm_unpacklo_pi16\n" "#define _m_punpckldq _mm_unpacklo_pi32\n" "#define _m_paddb _mm_add_pi8\n" "#define _m_paddw _mm_add_pi16\n" "#define _m_paddd _mm_add_pi32\n" "#define _m_paddsb _mm_adds_pi8\n" "#define _m_paddsw _mm_adds_pi16\n" "#define _m_paddusb _mm_adds_pu8\n" "#define _m_paddusw _mm_adds_pu16\n" "#define _m_psubb _mm_sub_pi8\n" "#define _m_psubw _mm_sub_pi16\n" "#define _m_psubd _mm_sub_pi32\n" "#define _m_psubsb _mm_subs_pi8\n" "#define _m_psubsw _mm_subs_pi16\n" "#define _m_psubusb _mm_subs_pu8\n" "#define _m_psubusw _mm_subs_pu16\n" "#define _m_pmaddwd _mm_madd_pi16\n" "#define _m_pmulhw _mm_mulhi_pi16\n" "#define _m_pmullw _mm_mullo_pi16\n" "#define _m_psllw _mm_sll_pi16\n" "#define _m_psllwi _mm_slli_pi16\n" "#define _m_pslld _mm_sll_pi32\n" "#define _m_pslldi _mm_slli_pi32\n" "#define _m_psllq _mm_sll_si64\n" 
"#define _m_psllqi _mm_slli_si64\n" "#define _m_psraw _mm_sra_pi16\n" "#define _m_psrawi _mm_srai_pi16\n" "#define _m_psrad _mm_sra_pi32\n" "#define _m_psradi _mm_srai_pi32\n" "#define _m_psrlw _mm_srl_pi16\n" "#define _m_psrlwi _mm_srli_pi16\n" "#define _m_psrld _mm_srl_pi32\n" "#define _m_psrldi _mm_srli_pi32\n" "#define _m_psrlq _mm_srl_si64\n" "#define _m_psrlqi _mm_srli_si64\n" "#define _m_pand _mm_and_si64\n" "#define _m_pandn _mm_andnot_si64\n" "#define _m_por _mm_or_si64\n" "#define _m_pxor _mm_xor_si64\n" "#define _m_pcmpeqb _mm_cmpeq_pi8\n" "#define _m_pcmpeqw _mm_cmpeq_pi16\n" "#define _m_pcmpeqd _mm_cmpeq_pi32\n" "#define _m_pcmpgtb _mm_cmpgt_pi8\n" "#define _m_pcmpgtw _mm_cmpgt_pi16\n" "#define _m_pcmpgtd _mm_cmpgt_pi32\n" "\n" "#endif /* __MMINTRIN_H */\n" "\n" "" } , { "/builtins/movdirintrin.h" , "/*===------------------------- movdirintrin.h ------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef _MOVDIRINTRIN_H\n" "#define _MOVDIRINTRIN_H\n" "\n" "/* Move doubleword as direct store */\n" "static __inline__ void\n" "__attribute__((__always_inline__, __nodebug__, __target__(\"movdiri\")))\n" "_directstoreu_u32 (void *__dst, unsigned int __value)\n" "{\n" " __builtin_ia32_directstore_u32((unsigned int *)__dst, (unsigned int)__value);\n" "}\n" "\n" "#ifdef __x86_64__\n" "\n" "/* Move quadword as direct store */\n" "static __inline__ void\n" "__attribute__((__always_inline__, __nodebug__, __target__(\"movdiri\")))\n" "_directstoreu_u64 (void *__dst, unsigned long __value)\n" "{\n" " __builtin_ia32_directstore_u64((unsigned 
long *)__dst, __value);\n" "}\n" "\n" "#endif /* __x86_64__ */\n" "\n" "/*\n" " * movdir64b - Move 64 bytes as direct store.\n" " * The destination must be 64 byte aligned, and the store is atomic.\n" " * The source address has no alignment requirement, and the load from\n" " * the source address is not atomic.\n" " */\n" "static __inline__ void\n" "__attribute__((__always_inline__, __nodebug__, __target__(\"movdir64b\")))\n" "_movdir64b (void *__dst __attribute__((align_value(64))), const void *__src)\n" "{\n" " __builtin_ia32_movdir64b(__dst, __src);\n" "}\n" "\n" "#endif /* _MOVDIRINTRIN_H */\n" "" } , { "/builtins/msa.h" , "/*===---- msa.h - MIPS MSA intrinsics --------------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef _MSA_H\n" "#define _MSA_H 1\n" "\n" "#if defined(__mips_msa)\n" "typedef signed char v16i8 __attribute__((vector_size(16), aligned(16)));\n" "typedef signed char v16i8_b __attribute__((vector_size(16), aligned(1)));\n" "typedef unsigned char v16u8 __attribute__((vector_size(16), aligned(16)));\n" "typedef unsigned char v16u8_b __attribute__((vector_size(16), aligned(1)));\n" "typedef short v8i16 __attribute__((vector_size(16), aligned(16)));\n" "typedef short v8i16_h __attribute__((vector_size(16), aligned(2)));\n" "typedef unsigned short v8u16 __attribute__((vector_size(16), aligned(16)));\n" "typedef unsigned short v8u16_h __attribute__((vector_size(16), aligned(2)));\n" "typedef int v4i32 __attribute__((vector_size(16), aligned(16)));\n" "typedef int v4i32_w __attribute__((vector_size(16), aligned(4)));\n" "typedef unsigned int v4u32 __attribute__((vector_size(16), aligned(16)));\n" "typedef unsigned int v4u32_w 
__attribute__((vector_size(16), aligned(4)));\n" "typedef long long v2i64 __attribute__((vector_size(16), aligned(16)));\n" "typedef long long v2i64_d __attribute__((vector_size(16), aligned(8)));\n" "typedef unsigned long long v2u64 __attribute__((vector_size(16), aligned(16)));\n" "typedef unsigned long long v2u64_d __attribute__((vector_size(16), aligned(8)));\n" "typedef float v4f32 __attribute__((vector_size(16), aligned(16)));\n" "typedef float v4f32_w __attribute__((vector_size(16), aligned(4)));\n" "typedef double v2f64 __attribute__ ((vector_size(16), aligned(16)));\n" "typedef double v2f64_d __attribute__ ((vector_size(16), aligned(8)));\n" "\n" "#define __msa_sll_b __builtin_msa_sll_b\n" "#define __msa_sll_h __builtin_msa_sll_h\n" "#define __msa_sll_w __builtin_msa_sll_w\n" "#define __msa_sll_d __builtin_msa_sll_d\n" "#define __msa_slli_b __builtin_msa_slli_b\n" "#define __msa_slli_h __builtin_msa_slli_h\n" "#define __msa_slli_w __builtin_msa_slli_w\n" "#define __msa_slli_d __builtin_msa_slli_d\n" "#define __msa_sra_b __builtin_msa_sra_b\n" "#define __msa_sra_h __builtin_msa_sra_h\n" "#define __msa_sra_w __builtin_msa_sra_w\n" "#define __msa_sra_d __builtin_msa_sra_d\n" "#define __msa_srai_b __builtin_msa_srai_b\n" "#define __msa_srai_h __builtin_msa_srai_h\n" "#define __msa_srai_w __builtin_msa_srai_w\n" "#define __msa_srai_d __builtin_msa_srai_d\n" "#define __msa_srar_b __builtin_msa_srar_b\n" "#define __msa_srar_h __builtin_msa_srar_h\n" "#define __msa_srar_w __builtin_msa_srar_w\n" "#define __msa_srar_d __builtin_msa_srar_d\n" "#define __msa_srari_b __builtin_msa_srari_b\n" "#define __msa_srari_h __builtin_msa_srari_h\n" "#define __msa_srari_w __builtin_msa_srari_w\n" "#define __msa_srari_d __builtin_msa_srari_d\n" "#define __msa_srl_b __builtin_msa_srl_b\n" "#define __msa_srl_h __builtin_msa_srl_h\n" "#define __msa_srl_w __builtin_msa_srl_w\n" "#define __msa_srl_d __builtin_msa_srl_d\n" "#define __msa_srli_b __builtin_msa_srli_b\n" "#define 
__msa_srli_h __builtin_msa_srli_h\n" "#define __msa_srli_w __builtin_msa_srli_w\n" "#define __msa_srli_d __builtin_msa_srli_d\n" "#define __msa_srlr_b __builtin_msa_srlr_b\n" "#define __msa_srlr_h __builtin_msa_srlr_h\n" "#define __msa_srlr_w __builtin_msa_srlr_w\n" "#define __msa_srlr_d __builtin_msa_srlr_d\n" "#define __msa_srlri_b __builtin_msa_srlri_b\n" "#define __msa_srlri_h __builtin_msa_srlri_h\n" "#define __msa_srlri_w __builtin_msa_srlri_w\n" "#define __msa_srlri_d __builtin_msa_srlri_d\n" "#define __msa_bclr_b __builtin_msa_bclr_b\n" "#define __msa_bclr_h __builtin_msa_bclr_h\n" "#define __msa_bclr_w __builtin_msa_bclr_w\n" "#define __msa_bclr_d __builtin_msa_bclr_d\n" "#define __msa_bclri_b __builtin_msa_bclri_b\n" "#define __msa_bclri_h __builtin_msa_bclri_h\n" "#define __msa_bclri_w __builtin_msa_bclri_w\n" "#define __msa_bclri_d __builtin_msa_bclri_d\n" "#define __msa_bset_b __builtin_msa_bset_b\n" "#define __msa_bset_h __builtin_msa_bset_h\n" "#define __msa_bset_w __builtin_msa_bset_w\n" "#define __msa_bset_d __builtin_msa_bset_d\n" "#define __msa_bseti_b __builtin_msa_bseti_b\n" "#define __msa_bseti_h __builtin_msa_bseti_h\n" "#define __msa_bseti_w __builtin_msa_bseti_w\n" "#define __msa_bseti_d __builtin_msa_bseti_d\n" "#define __msa_bneg_b __builtin_msa_bneg_b\n" "#define __msa_bneg_h __builtin_msa_bneg_h\n" "#define __msa_bneg_w __builtin_msa_bneg_w\n" "#define __msa_bneg_d __builtin_msa_bneg_d\n" "#define __msa_bnegi_b __builtin_msa_bnegi_b\n" "#define __msa_bnegi_h __builtin_msa_bnegi_h\n" "#define __msa_bnegi_w __builtin_msa_bnegi_w\n" "#define __msa_bnegi_d __builtin_msa_bnegi_d\n" "#define __msa_binsl_b __builtin_msa_binsl_b\n" "#define __msa_binsl_h __builtin_msa_binsl_h\n" "#define __msa_binsl_w __builtin_msa_binsl_w\n" "#define __msa_binsl_d __builtin_msa_binsl_d\n" "#define __msa_binsli_b __builtin_msa_binsli_b\n" "#define __msa_binsli_h __builtin_msa_binsli_h\n" "#define __msa_binsli_w __builtin_msa_binsli_w\n" "#define __msa_binsli_d 
__builtin_msa_binsli_d\n" "#define __msa_binsr_b __builtin_msa_binsr_b\n" "#define __msa_binsr_h __builtin_msa_binsr_h\n" "#define __msa_binsr_w __builtin_msa_binsr_w\n" "#define __msa_binsr_d __builtin_msa_binsr_d\n" "#define __msa_binsri_b __builtin_msa_binsri_b\n" "#define __msa_binsri_h __builtin_msa_binsri_h\n" "#define __msa_binsri_w __builtin_msa_binsri_w\n" "#define __msa_binsri_d __builtin_msa_binsri_d\n" "#define __msa_addv_b __builtin_msa_addv_b\n" "#define __msa_addv_h __builtin_msa_addv_h\n" "#define __msa_addv_w __builtin_msa_addv_w\n" "#define __msa_addv_d __builtin_msa_addv_d\n" "#define __msa_addvi_b __builtin_msa_addvi_b\n" "#define __msa_addvi_h __builtin_msa_addvi_h\n" "#define __msa_addvi_w __builtin_msa_addvi_w\n" "#define __msa_addvi_d __builtin_msa_addvi_d\n" "#define __msa_subv_b __builtin_msa_subv_b\n" "#define __msa_subv_h __builtin_msa_subv_h\n" "#define __msa_subv_w __builtin_msa_subv_w\n" "#define __msa_subv_d __builtin_msa_subv_d\n" "#define __msa_subvi_b __builtin_msa_subvi_b\n" "#define __msa_subvi_h __builtin_msa_subvi_h\n" "#define __msa_subvi_w __builtin_msa_subvi_w\n" "#define __msa_subvi_d __builtin_msa_subvi_d\n" "#define __msa_max_s_b __builtin_msa_max_s_b\n" "#define __msa_max_s_h __builtin_msa_max_s_h\n" "#define __msa_max_s_w __builtin_msa_max_s_w\n" "#define __msa_max_s_d __builtin_msa_max_s_d\n" "#define __msa_maxi_s_b __builtin_msa_maxi_s_b\n" "#define __msa_maxi_s_h __builtin_msa_maxi_s_h\n" "#define __msa_maxi_s_w __builtin_msa_maxi_s_w\n" "#define __msa_maxi_s_d __builtin_msa_maxi_s_d\n" "#define __msa_max_u_b __builtin_msa_max_u_b\n" "#define __msa_max_u_h __builtin_msa_max_u_h\n" "#define __msa_max_u_w __builtin_msa_max_u_w\n" "#define __msa_max_u_d __builtin_msa_max_u_d\n" "#define __msa_maxi_u_b __builtin_msa_maxi_u_b\n" "#define __msa_maxi_u_h __builtin_msa_maxi_u_h\n" "#define __msa_maxi_u_w __builtin_msa_maxi_u_w\n" "#define __msa_maxi_u_d __builtin_msa_maxi_u_d\n" "#define __msa_min_s_b 
__builtin_msa_min_s_b\n" "#define __msa_min_s_h __builtin_msa_min_s_h\n" "#define __msa_min_s_w __builtin_msa_min_s_w\n" "#define __msa_min_s_d __builtin_msa_min_s_d\n" "#define __msa_mini_s_b __builtin_msa_mini_s_b\n" "#define __msa_mini_s_h __builtin_msa_mini_s_h\n" "#define __msa_mini_s_w __builtin_msa_mini_s_w\n" "#define __msa_mini_s_d __builtin_msa_mini_s_d\n" "#define __msa_min_u_b __builtin_msa_min_u_b\n" "#define __msa_min_u_h __builtin_msa_min_u_h\n" "#define __msa_min_u_w __builtin_msa_min_u_w\n" "#define __msa_min_u_d __builtin_msa_min_u_d\n" "#define __msa_mini_u_b __builtin_msa_mini_u_b\n" "#define __msa_mini_u_h __builtin_msa_mini_u_h\n" "#define __msa_mini_u_w __builtin_msa_mini_u_w\n" "#define __msa_mini_u_d __builtin_msa_mini_u_d\n" "#define __msa_max_a_b __builtin_msa_max_a_b\n" "#define __msa_max_a_h __builtin_msa_max_a_h\n" "#define __msa_max_a_w __builtin_msa_max_a_w\n" "#define __msa_max_a_d __builtin_msa_max_a_d\n" "#define __msa_min_a_b __builtin_msa_min_a_b\n" "#define __msa_min_a_h __builtin_msa_min_a_h\n" "#define __msa_min_a_w __builtin_msa_min_a_w\n" "#define __msa_min_a_d __builtin_msa_min_a_d\n" "#define __msa_ceq_b __builtin_msa_ceq_b\n" "#define __msa_ceq_h __builtin_msa_ceq_h\n" "#define __msa_ceq_w __builtin_msa_ceq_w\n" "#define __msa_ceq_d __builtin_msa_ceq_d\n" "#define __msa_ceqi_b __builtin_msa_ceqi_b\n" "#define __msa_ceqi_h __builtin_msa_ceqi_h\n" "#define __msa_ceqi_w __builtin_msa_ceqi_w\n" "#define __msa_ceqi_d __builtin_msa_ceqi_d\n" "#define __msa_clt_s_b __builtin_msa_clt_s_b\n" "#define __msa_clt_s_h __builtin_msa_clt_s_h\n" "#define __msa_clt_s_w __builtin_msa_clt_s_w\n" "#define __msa_clt_s_d __builtin_msa_clt_s_d\n" "#define __msa_clti_s_b __builtin_msa_clti_s_b\n" "#define __msa_clti_s_h __builtin_msa_clti_s_h\n" "#define __msa_clti_s_w __builtin_msa_clti_s_w\n" "#define __msa_clti_s_d __builtin_msa_clti_s_d\n" "#define __msa_clt_u_b __builtin_msa_clt_u_b\n" "#define __msa_clt_u_h __builtin_msa_clt_u_h\n" 
"#define __msa_clt_u_w __builtin_msa_clt_u_w\n" "#define __msa_clt_u_d __builtin_msa_clt_u_d\n" "#define __msa_clti_u_b __builtin_msa_clti_u_b\n" "#define __msa_clti_u_h __builtin_msa_clti_u_h\n" "#define __msa_clti_u_w __builtin_msa_clti_u_w\n" "#define __msa_clti_u_d __builtin_msa_clti_u_d\n" "#define __msa_cle_s_b __builtin_msa_cle_s_b\n" "#define __msa_cle_s_h __builtin_msa_cle_s_h\n" "#define __msa_cle_s_w __builtin_msa_cle_s_w\n" "#define __msa_cle_s_d __builtin_msa_cle_s_d\n" "#define __msa_clei_s_b __builtin_msa_clei_s_b\n" "#define __msa_clei_s_h __builtin_msa_clei_s_h\n" "#define __msa_clei_s_w __builtin_msa_clei_s_w\n" "#define __msa_clei_s_d __builtin_msa_clei_s_d\n" "#define __msa_cle_u_b __builtin_msa_cle_u_b\n" "#define __msa_cle_u_h __builtin_msa_cle_u_h\n" "#define __msa_cle_u_w __builtin_msa_cle_u_w\n" "#define __msa_cle_u_d __builtin_msa_cle_u_d\n" "#define __msa_clei_u_b __builtin_msa_clei_u_b\n" "#define __msa_clei_u_h __builtin_msa_clei_u_h\n" "#define __msa_clei_u_w __builtin_msa_clei_u_w\n" "#define __msa_clei_u_d __builtin_msa_clei_u_d\n" "#define __msa_ld_b __builtin_msa_ld_b\n" "#define __msa_ld_h __builtin_msa_ld_h\n" "#define __msa_ld_w __builtin_msa_ld_w\n" "#define __msa_ld_d __builtin_msa_ld_d\n" "#define __msa_ldr_d __builtin_msa_ldr_d\n" "#define __msa_ldr_w __builtin_msa_ldrq_w\n" "#define __msa_st_b __builtin_msa_st_b\n" "#define __msa_st_h __builtin_msa_st_h\n" "#define __msa_st_w __builtin_msa_st_w\n" "#define __msa_st_d __builtin_msa_st_d\n" "#define __msa_str_d __builtin_msa_str_d\n" "#define __msa_str_w __builtin_msa_strq_w\n" "#define __msa_sat_s_b __builtin_msa_sat_s_b\n" "#define __msa_sat_s_h __builtin_msa_sat_s_h\n" "#define __msa_sat_s_w __builtin_msa_sat_s_w\n" "#define __msa_sat_s_d __builtin_msa_sat_s_d\n" "#define __msa_sat_u_b __builtin_msa_sat_u_b\n" "#define __msa_sat_u_h __builtin_msa_sat_u_h\n" "#define __msa_sat_u_w __builtin_msa_sat_u_w\n" "#define __msa_sat_u_d __builtin_msa_sat_u_d\n" "#define 
__msa_add_a_b __builtin_msa_add_a_b\n" "#define __msa_add_a_h __builtin_msa_add_a_h\n" "#define __msa_add_a_w __builtin_msa_add_a_w\n" "#define __msa_add_a_d __builtin_msa_add_a_d\n" "#define __msa_adds_a_b __builtin_msa_adds_a_b\n" "#define __msa_adds_a_h __builtin_msa_adds_a_h\n" "#define __msa_adds_a_w __builtin_msa_adds_a_w\n" "#define __msa_adds_a_d __builtin_msa_adds_a_d\n" "#define __msa_adds_s_b __builtin_msa_adds_s_b\n" "#define __msa_adds_s_h __builtin_msa_adds_s_h\n" "#define __msa_adds_s_w __builtin_msa_adds_s_w\n" "#define __msa_adds_s_d __builtin_msa_adds_s_d\n" "#define __msa_adds_u_b __builtin_msa_adds_u_b\n" "#define __msa_adds_u_h __builtin_msa_adds_u_h\n" "#define __msa_adds_u_w __builtin_msa_adds_u_w\n" "#define __msa_adds_u_d __builtin_msa_adds_u_d\n" "#define __msa_ave_s_b __builtin_msa_ave_s_b\n" "#define __msa_ave_s_h __builtin_msa_ave_s_h\n" "#define __msa_ave_s_w __builtin_msa_ave_s_w\n" "#define __msa_ave_s_d __builtin_msa_ave_s_d\n" "#define __msa_ave_u_b __builtin_msa_ave_u_b\n" "#define __msa_ave_u_h __builtin_msa_ave_u_h\n" "#define __msa_ave_u_w __builtin_msa_ave_u_w\n" "#define __msa_ave_u_d __builtin_msa_ave_u_d\n" "#define __msa_aver_s_b __builtin_msa_aver_s_b\n" "#define __msa_aver_s_h __builtin_msa_aver_s_h\n" "#define __msa_aver_s_w __builtin_msa_aver_s_w\n" "#define __msa_aver_s_d __builtin_msa_aver_s_d\n" "#define __msa_aver_u_b __builtin_msa_aver_u_b\n" "#define __msa_aver_u_h __builtin_msa_aver_u_h\n" "#define __msa_aver_u_w __builtin_msa_aver_u_w\n" "#define __msa_aver_u_d __builtin_msa_aver_u_d\n" "#define __msa_subs_s_b __builtin_msa_subs_s_b\n" "#define __msa_subs_s_h __builtin_msa_subs_s_h\n" "#define __msa_subs_s_w __builtin_msa_subs_s_w\n" "#define __msa_subs_s_d __builtin_msa_subs_s_d\n" "#define __msa_subs_u_b __builtin_msa_subs_u_b\n" "#define __msa_subs_u_h __builtin_msa_subs_u_h\n" "#define __msa_subs_u_w __builtin_msa_subs_u_w\n" "#define __msa_subs_u_d __builtin_msa_subs_u_d\n" "#define __msa_subsuu_s_b 
__builtin_msa_subsuu_s_b\n" "#define __msa_subsuu_s_h __builtin_msa_subsuu_s_h\n" "#define __msa_subsuu_s_w __builtin_msa_subsuu_s_w\n" "#define __msa_subsuu_s_d __builtin_msa_subsuu_s_d\n" "#define __msa_subsus_u_b __builtin_msa_subsus_u_b\n" "#define __msa_subsus_u_h __builtin_msa_subsus_u_h\n" "#define __msa_subsus_u_w __builtin_msa_subsus_u_w\n" "#define __msa_subsus_u_d __builtin_msa_subsus_u_d\n" "#define __msa_asub_s_b __builtin_msa_asub_s_b\n" "#define __msa_asub_s_h __builtin_msa_asub_s_h\n" "#define __msa_asub_s_w __builtin_msa_asub_s_w\n" "#define __msa_asub_s_d __builtin_msa_asub_s_d\n" "#define __msa_asub_u_b __builtin_msa_asub_u_b\n" "#define __msa_asub_u_h __builtin_msa_asub_u_h\n" "#define __msa_asub_u_w __builtin_msa_asub_u_w\n" "#define __msa_asub_u_d __builtin_msa_asub_u_d\n" "#define __msa_mulv_b __builtin_msa_mulv_b\n" "#define __msa_mulv_h __builtin_msa_mulv_h\n" "#define __msa_mulv_w __builtin_msa_mulv_w\n" "#define __msa_mulv_d __builtin_msa_mulv_d\n" "#define __msa_maddv_b __builtin_msa_maddv_b\n" "#define __msa_maddv_h __builtin_msa_maddv_h\n" "#define __msa_maddv_w __builtin_msa_maddv_w\n" "#define __msa_maddv_d __builtin_msa_maddv_d\n" "#define __msa_msubv_b __builtin_msa_msubv_b\n" "#define __msa_msubv_h __builtin_msa_msubv_h\n" "#define __msa_msubv_w __builtin_msa_msubv_w\n" "#define __msa_msubv_d __builtin_msa_msubv_d\n" "#define __msa_div_s_b __builtin_msa_div_s_b\n" "#define __msa_div_s_h __builtin_msa_div_s_h\n" "#define __msa_div_s_w __builtin_msa_div_s_w\n" "#define __msa_div_s_d __builtin_msa_div_s_d\n" "#define __msa_div_u_b __builtin_msa_div_u_b\n" "#define __msa_div_u_h __builtin_msa_div_u_h\n" "#define __msa_div_u_w __builtin_msa_div_u_w\n" "#define __msa_div_u_d __builtin_msa_div_u_d\n" "#define __msa_hadd_s_h __builtin_msa_hadd_s_h\n" "#define __msa_hadd_s_w __builtin_msa_hadd_s_w\n" "#define __msa_hadd_s_d __builtin_msa_hadd_s_d\n" "#define __msa_hadd_u_h __builtin_msa_hadd_u_h\n" "#define __msa_hadd_u_w 
__builtin_msa_hadd_u_w\n" "#define __msa_hadd_u_d __builtin_msa_hadd_u_d\n" "#define __msa_hsub_s_h __builtin_msa_hsub_s_h\n" "#define __msa_hsub_s_w __builtin_msa_hsub_s_w\n" "#define __msa_hsub_s_d __builtin_msa_hsub_s_d\n" "#define __msa_hsub_u_h __builtin_msa_hsub_u_h\n" "#define __msa_hsub_u_w __builtin_msa_hsub_u_w\n" "#define __msa_hsub_u_d __builtin_msa_hsub_u_d\n" "#define __msa_mod_s_b __builtin_msa_mod_s_b\n" "#define __msa_mod_s_h __builtin_msa_mod_s_h\n" "#define __msa_mod_s_w __builtin_msa_mod_s_w\n" "#define __msa_mod_s_d __builtin_msa_mod_s_d\n" "#define __msa_mod_u_b __builtin_msa_mod_u_b\n" "#define __msa_mod_u_h __builtin_msa_mod_u_h\n" "#define __msa_mod_u_w __builtin_msa_mod_u_w\n" "#define __msa_mod_u_d __builtin_msa_mod_u_d\n" "#define __msa_dotp_s_h __builtin_msa_dotp_s_h\n" "#define __msa_dotp_s_w __builtin_msa_dotp_s_w\n" "#define __msa_dotp_s_d __builtin_msa_dotp_s_d\n" "#define __msa_dotp_u_h __builtin_msa_dotp_u_h\n" "#define __msa_dotp_u_w __builtin_msa_dotp_u_w\n" "#define __msa_dotp_u_d __builtin_msa_dotp_u_d\n" "#define __msa_dpadd_s_h __builtin_msa_dpadd_s_h\n" "#define __msa_dpadd_s_w __builtin_msa_dpadd_s_w\n" "#define __msa_dpadd_s_d __builtin_msa_dpadd_s_d\n" "#define __msa_dpadd_u_h __builtin_msa_dpadd_u_h\n" "#define __msa_dpadd_u_w __builtin_msa_dpadd_u_w\n" "#define __msa_dpadd_u_d __builtin_msa_dpadd_u_d\n" "#define __msa_dpsub_s_h __builtin_msa_dpsub_s_h\n" "#define __msa_dpsub_s_w __builtin_msa_dpsub_s_w\n" "#define __msa_dpsub_s_d __builtin_msa_dpsub_s_d\n" "#define __msa_dpsub_u_h __builtin_msa_dpsub_u_h\n" "#define __msa_dpsub_u_w __builtin_msa_dpsub_u_w\n" "#define __msa_dpsub_u_d __builtin_msa_dpsub_u_d\n" "#define __msa_sld_b __builtin_msa_sld_b\n" "#define __msa_sld_h __builtin_msa_sld_h\n" "#define __msa_sld_w __builtin_msa_sld_w\n" "#define __msa_sld_d __builtin_msa_sld_d\n" "#define __msa_sldi_b __builtin_msa_sldi_b\n" "#define __msa_sldi_h __builtin_msa_sldi_h\n" "#define __msa_sldi_w __builtin_msa_sldi_w\n" 
"#define __msa_sldi_d __builtin_msa_sldi_d\n" "#define __msa_splat_b __builtin_msa_splat_b\n" "#define __msa_splat_h __builtin_msa_splat_h\n" "#define __msa_splat_w __builtin_msa_splat_w\n" "#define __msa_splat_d __builtin_msa_splat_d\n" "#define __msa_splati_b __builtin_msa_splati_b\n" "#define __msa_splati_h __builtin_msa_splati_h\n" "#define __msa_splati_w __builtin_msa_splati_w\n" "#define __msa_splati_d __builtin_msa_splati_d\n" "#define __msa_pckev_b __builtin_msa_pckev_b\n" "#define __msa_pckev_h __builtin_msa_pckev_h\n" "#define __msa_pckev_w __builtin_msa_pckev_w\n" "#define __msa_pckev_d __builtin_msa_pckev_d\n" "#define __msa_pckod_b __builtin_msa_pckod_b\n" "#define __msa_pckod_h __builtin_msa_pckod_h\n" "#define __msa_pckod_w __builtin_msa_pckod_w\n" "#define __msa_pckod_d __builtin_msa_pckod_d\n" "#define __msa_ilvl_b __builtin_msa_ilvl_b\n" "#define __msa_ilvl_h __builtin_msa_ilvl_h\n" "#define __msa_ilvl_w __builtin_msa_ilvl_w\n" "#define __msa_ilvl_d __builtin_msa_ilvl_d\n" "#define __msa_ilvr_b __builtin_msa_ilvr_b\n" "#define __msa_ilvr_h __builtin_msa_ilvr_h\n" "#define __msa_ilvr_w __builtin_msa_ilvr_w\n" "#define __msa_ilvr_d __builtin_msa_ilvr_d\n" "#define __msa_ilvev_b __builtin_msa_ilvev_b\n" "#define __msa_ilvev_h __builtin_msa_ilvev_h\n" "#define __msa_ilvev_w __builtin_msa_ilvev_w\n" "#define __msa_ilvev_d __builtin_msa_ilvev_d\n" "#define __msa_ilvod_b __builtin_msa_ilvod_b\n" "#define __msa_ilvod_h __builtin_msa_ilvod_h\n" "#define __msa_ilvod_w __builtin_msa_ilvod_w\n" "#define __msa_ilvod_d __builtin_msa_ilvod_d\n" "#define __msa_vshf_b __builtin_msa_vshf_b\n" "#define __msa_vshf_h __builtin_msa_vshf_h\n" "#define __msa_vshf_w __builtin_msa_vshf_w\n" "#define __msa_vshf_d __builtin_msa_vshf_d\n" "#define __msa_and_v __builtin_msa_and_v\n" "#define __msa_andi_b __builtin_msa_andi_b\n" "#define __msa_or_v __builtin_msa_or_v\n" "#define __msa_ori_b __builtin_msa_ori_b\n" "#define __msa_nor_v __builtin_msa_nor_v\n" "#define __msa_nori_b 
__builtin_msa_nori_b\n" "#define __msa_xor_v __builtin_msa_xor_v\n" "#define __msa_xori_b __builtin_msa_xori_b\n" "#define __msa_bmnz_v __builtin_msa_bmnz_v\n" "#define __msa_bmnzi_b __builtin_msa_bmnzi_b\n" "#define __msa_bmz_v __builtin_msa_bmz_v\n" "#define __msa_bmzi_b __builtin_msa_bmzi_b\n" "#define __msa_bsel_v __builtin_msa_bsel_v\n" "#define __msa_bseli_b __builtin_msa_bseli_b\n" "#define __msa_shf_b __builtin_msa_shf_b\n" "#define __msa_shf_h __builtin_msa_shf_h\n" "#define __msa_shf_w __builtin_msa_shf_w\n" "#define __msa_test_bnz_v __builtin_msa_bnz_v\n" "#define __msa_test_bz_v __builtin_msa_bz_v\n" "#define __msa_fill_b __builtin_msa_fill_b\n" "#define __msa_fill_h __builtin_msa_fill_h\n" "#define __msa_fill_w __builtin_msa_fill_w\n" "#define __msa_fill_d __builtin_msa_fill_d\n" "#define __msa_pcnt_b __builtin_msa_pcnt_b\n" "#define __msa_pcnt_h __builtin_msa_pcnt_h\n" "#define __msa_pcnt_w __builtin_msa_pcnt_w\n" "#define __msa_pcnt_d __builtin_msa_pcnt_d\n" "#define __msa_nloc_b __builtin_msa_nloc_b\n" "#define __msa_nloc_h __builtin_msa_nloc_h\n" "#define __msa_nloc_w __builtin_msa_nloc_w\n" "#define __msa_nloc_d __builtin_msa_nloc_d\n" "#define __msa_nlzc_b __builtin_msa_nlzc_b\n" "#define __msa_nlzc_h __builtin_msa_nlzc_h\n" "#define __msa_nlzc_w __builtin_msa_nlzc_w\n" "#define __msa_nlzc_d __builtin_msa_nlzc_d\n" "#define __msa_copy_s_b __builtin_msa_copy_s_b\n" "#define __msa_copy_s_h __builtin_msa_copy_s_h\n" "#define __msa_copy_s_w __builtin_msa_copy_s_w\n" "#define __msa_copy_s_d __builtin_msa_copy_s_d\n" "#define __msa_copy_u_b __builtin_msa_copy_u_b\n" "#define __msa_copy_u_h __builtin_msa_copy_u_h\n" "#define __msa_copy_u_w __builtin_msa_copy_u_w\n" "#define __msa_copy_u_d __builtin_msa_copy_u_d\n" "#define __msa_insert_b __builtin_msa_insert_b\n" "#define __msa_insert_h __builtin_msa_insert_h\n" "#define __msa_insert_w __builtin_msa_insert_w\n" "#define __msa_insert_d __builtin_msa_insert_d\n" "#define __msa_insve_b 
__builtin_msa_insve_b\n" "#define __msa_insve_h __builtin_msa_insve_h\n" "#define __msa_insve_w __builtin_msa_insve_w\n" "#define __msa_insve_d __builtin_msa_insve_d\n" "#define __msa_test_bnz_b __builtin_msa_bnz_b\n" "#define __msa_test_bnz_h __builtin_msa_bnz_h\n" "#define __msa_test_bnz_w __builtin_msa_bnz_w\n" "#define __msa_test_bnz_d __builtin_msa_bnz_d\n" "#define __msa_test_bz_b __builtin_msa_bz_b\n" "#define __msa_test_bz_h __builtin_msa_bz_h\n" "#define __msa_test_bz_w __builtin_msa_bz_w\n" "#define __msa_test_bz_d __builtin_msa_bz_d\n" "#define __msa_ldi_b __builtin_msa_ldi_b\n" "#define __msa_ldi_h __builtin_msa_ldi_h\n" "#define __msa_ldi_w __builtin_msa_ldi_w\n" "#define __msa_ldi_d __builtin_msa_ldi_d\n" "#define __msa_fcaf_w __builtin_msa_fcaf_w\n" "#define __msa_fcaf_d __builtin_msa_fcaf_d\n" "#define __msa_fcor_w __builtin_msa_fcor_w\n" "#define __msa_fcor_d __builtin_msa_fcor_d\n" "#define __msa_fcun_w __builtin_msa_fcun_w\n" "#define __msa_fcun_d __builtin_msa_fcun_d\n" "#define __msa_fcune_w __builtin_msa_fcune_w\n" "#define __msa_fcune_d __builtin_msa_fcune_d\n" "#define __msa_fcueq_w __builtin_msa_fcueq_w\n" "#define __msa_fcueq_d __builtin_msa_fcueq_d\n" "#define __msa_fceq_w __builtin_msa_fceq_w\n" "#define __msa_fceq_d __builtin_msa_fceq_d\n" "#define __msa_fcne_w __builtin_msa_fcne_w\n" "#define __msa_fcne_d __builtin_msa_fcne_d\n" "#define __msa_fclt_w __builtin_msa_fclt_w\n" "#define __msa_fclt_d __builtin_msa_fclt_d\n" "#define __msa_fcult_w __builtin_msa_fcult_w\n" "#define __msa_fcult_d __builtin_msa_fcult_d\n" "#define __msa_fcle_w __builtin_msa_fcle_w\n" "#define __msa_fcle_d __builtin_msa_fcle_d\n" "#define __msa_fcule_w __builtin_msa_fcule_w\n" "#define __msa_fcule_d __builtin_msa_fcule_d\n" "#define __msa_fsaf_w __builtin_msa_fsaf_w\n" "#define __msa_fsaf_d __builtin_msa_fsaf_d\n" "#define __msa_fsor_w __builtin_msa_fsor_w\n" "#define __msa_fsor_d __builtin_msa_fsor_d\n" "#define __msa_fsun_w __builtin_msa_fsun_w\n" "#define 
__msa_fsun_d __builtin_msa_fsun_d\n" "#define __msa_fsune_w __builtin_msa_fsune_w\n" "#define __msa_fsune_d __builtin_msa_fsune_d\n" "#define __msa_fsueq_w __builtin_msa_fsueq_w\n" "#define __msa_fsueq_d __builtin_msa_fsueq_d\n" "#define __msa_fseq_w __builtin_msa_fseq_w\n" "#define __msa_fseq_d __builtin_msa_fseq_d\n" "#define __msa_fsne_w __builtin_msa_fsne_w\n" "#define __msa_fsne_d __builtin_msa_fsne_d\n" "#define __msa_fslt_w __builtin_msa_fslt_w\n" "#define __msa_fslt_d __builtin_msa_fslt_d\n" "#define __msa_fsult_w __builtin_msa_fsult_w\n" "#define __msa_fsult_d __builtin_msa_fsult_d\n" "#define __msa_fsle_w __builtin_msa_fsle_w\n" "#define __msa_fsle_d __builtin_msa_fsle_d\n" "#define __msa_fsule_w __builtin_msa_fsule_w\n" "#define __msa_fsule_d __builtin_msa_fsule_d\n" "#define __msa_fadd_w __builtin_msa_fadd_w\n" "#define __msa_fadd_d __builtin_msa_fadd_d\n" "#define __msa_fsub_w __builtin_msa_fsub_w\n" "#define __msa_fsub_d __builtin_msa_fsub_d\n" "#define __msa_fmul_w __builtin_msa_fmul_w\n" "#define __msa_fmul_d __builtin_msa_fmul_d\n" "#define __msa_fdiv_w __builtin_msa_fdiv_w\n" "#define __msa_fdiv_d __builtin_msa_fdiv_d\n" "#define __msa_fmadd_w __builtin_msa_fmadd_w\n" "#define __msa_fmadd_d __builtin_msa_fmadd_d\n" "#define __msa_fmsub_w __builtin_msa_fmsub_w\n" "#define __msa_fmsub_d __builtin_msa_fmsub_d\n" "#define __msa_fexp2_w __builtin_msa_fexp2_w\n" "#define __msa_fexp2_d __builtin_msa_fexp2_d\n" "#define __msa_fexdo_h __builtin_msa_fexdo_h\n" "#define __msa_fexdo_w __builtin_msa_fexdo_w\n" "#define __msa_ftq_h __builtin_msa_ftq_h\n" "#define __msa_ftq_w __builtin_msa_ftq_w\n" "#define __msa_fmin_w __builtin_msa_fmin_w\n" "#define __msa_fmin_d __builtin_msa_fmin_d\n" "#define __msa_fmin_a_w __builtin_msa_fmin_a_w\n" "#define __msa_fmin_a_d __builtin_msa_fmin_a_d\n" "#define __msa_fmax_w __builtin_msa_fmax_w\n" "#define __msa_fmax_d __builtin_msa_fmax_d\n" "#define __msa_fmax_a_w __builtin_msa_fmax_a_w\n" "#define __msa_fmax_a_d 
__builtin_msa_fmax_a_d\n" "#define __msa_mul_q_h __builtin_msa_mul_q_h\n" "#define __msa_mul_q_w __builtin_msa_mul_q_w\n" "#define __msa_mulr_q_h __builtin_msa_mulr_q_h\n" "#define __msa_mulr_q_w __builtin_msa_mulr_q_w\n" "#define __msa_madd_q_h __builtin_msa_madd_q_h\n" "#define __msa_madd_q_w __builtin_msa_madd_q_w\n" "#define __msa_maddr_q_h __builtin_msa_maddr_q_h\n" "#define __msa_maddr_q_w __builtin_msa_maddr_q_w\n" "#define __msa_msub_q_h __builtin_msa_msub_q_h\n" "#define __msa_msub_q_w __builtin_msa_msub_q_w\n" "#define __msa_msubr_q_h __builtin_msa_msubr_q_h\n" "#define __msa_msubr_q_w __builtin_msa_msubr_q_w\n" "#define __msa_fclass_w __builtin_msa_fclass_w\n" "#define __msa_fclass_d __builtin_msa_fclass_d\n" "#define __msa_fsqrt_w __builtin_msa_fsqrt_w\n" "#define __msa_fsqrt_d __builtin_msa_fsqrt_d\n" "#define __msa_frcp_w __builtin_msa_frcp_w\n" "#define __msa_frcp_d __builtin_msa_frcp_d\n" "#define __msa_frint_w __builtin_msa_frint_w\n" "#define __msa_frint_d __builtin_msa_frint_d\n" "#define __msa_frsqrt_w __builtin_msa_frsqrt_w\n" "#define __msa_frsqrt_d __builtin_msa_frsqrt_d\n" "#define __msa_flog2_w __builtin_msa_flog2_w\n" "#define __msa_flog2_d __builtin_msa_flog2_d\n" "#define __msa_fexupl_w __builtin_msa_fexupl_w\n" "#define __msa_fexupl_d __builtin_msa_fexupl_d\n" "#define __msa_fexupr_w __builtin_msa_fexupr_w\n" "#define __msa_fexupr_d __builtin_msa_fexupr_d\n" "#define __msa_ffql_w __builtin_msa_ffql_w\n" "#define __msa_ffql_d __builtin_msa_ffql_d\n" "#define __msa_ffqr_w __builtin_msa_ffqr_w\n" "#define __msa_ffqr_d __builtin_msa_ffqr_d\n" "#define __msa_ftint_s_w __builtin_msa_ftint_s_w\n" "#define __msa_ftint_s_d __builtin_msa_ftint_s_d\n" "#define __msa_ftint_u_w __builtin_msa_ftint_u_w\n" "#define __msa_ftint_u_d __builtin_msa_ftint_u_d\n" "#define __msa_ftrunc_s_w __builtin_msa_ftrunc_s_w\n" "#define __msa_ftrunc_s_d __builtin_msa_ftrunc_s_d\n" "#define __msa_ftrunc_u_w __builtin_msa_ftrunc_u_w\n" "#define __msa_ftrunc_u_d 
__builtin_msa_ftrunc_u_d\n" "#define __msa_ffint_s_w __builtin_msa_ffint_s_w\n" "#define __msa_ffint_s_d __builtin_msa_ffint_s_d\n" "#define __msa_ffint_u_w __builtin_msa_ffint_u_w\n" "#define __msa_ffint_u_d __builtin_msa_ffint_u_d\n" "#define __msa_cfcmsa __builtin_msa_cfcmsa\n" "#define __msa_move_v __builtin_msa_move_v\n" "#define __msa_cast_to_vector_float __builtin_msa_cast_to_vector_float\n" "#define __msa_cast_to_vector_double __builtin_msa_cast_to_vector_double\n" "#define __msa_cast_to_scalar_float __builtin_msa_cast_to_scalar_float\n" "#define __msa_cast_to_scalar_double __builtin_msa_cast_to_scalar_double\n" "#endif /* defined(__mips_msa) */\n" "#endif /* _MSA_H */\n" "" } , { "/builtins/mwaitxintrin.h" , "/*===---- mwaitxintrin.h - MONITORX/MWAITX intrinsics ----------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __X86INTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __MWAITXINTRIN_H\n" "#define __MWAITXINTRIN_H\n" "\n" "/* Define the default attributes for the functions in this file. */\n" "#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__(\"mwaitx\")))\n" "\n" "/// Establishes a linear address memory range to be monitored and puts\n" "/// the processor in the monitor event pending state. Data stored in the\n" "/// monitored address range causes the processor to exit the pending state.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c MONITORX instruction.\n" "///\n" "/// \\param __p\n" "/// The memory range to be monitored. 
The size of the range is determined by\n" "/// CPUID function 0000_0005h.\n" "/// \\param __extensions\n" "/// Optional extensions for the monitoring state.\n" "/// \\param __hints\n" "/// Optional hints for the monitoring state.\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_mm_monitorx(void * __p, unsigned __extensions, unsigned __hints)\n" "{\n" " __builtin_ia32_monitorx(__p, __extensions, __hints);\n" "}\n" "\n" "/// Used with the \\c MONITORX instruction to wait while the processor is in\n" "/// the monitor event pending state. Data stored in the monitored address\n" "/// range, or an interrupt, causes the processor to exit the pending state.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c MWAITX instruction.\n" "///\n" "/// \\param __extensions\n" "/// Optional extensions for the monitoring state, which can vary by\n" "/// processor.\n" "/// \\param __hints\n" "/// Optional hints for the monitoring state, which can vary by processor.\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_mm_mwaitx(unsigned __extensions, unsigned __hints, unsigned __clock)\n" "{\n" " __builtin_ia32_mwaitx(__extensions, __hints, __clock);\n" "}\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "\n" "#endif /* __MWAITXINTRIN_H */\n" "" } , { "/builtins/nmmintrin.h" , "/*===---- nmmintrin.h - SSE4 intrinsics ------------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __NMMINTRIN_H\n" "#define __NMMINTRIN_H\n" "\n" "#if !defined(__i386__) && !defined(__x86_64__)\n" "#error \"This header is only meant to be used on x86 and x64 architecture\"\n" "#endif\n" "\n" "/* To match expectations of gcc we put the sse4.2 definitions into smmintrin.h,\n" " 
just include it now then. */\n" "#include \n" "#endif /* __NMMINTRIN_H */\n" "" } , { "/builtins/opencl-c-base.h" , "//===----- opencl-c-base.h - OpenCL C language base definitions -----------===//\n" "//\n" "// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" "// See https://llvm.org/LICENSE.txt for license information.\n" "// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" "//\n" "//===----------------------------------------------------------------------===//\n" "\n" "#ifndef _OPENCL_BASE_H_\n" "#define _OPENCL_BASE_H_\n" "\n" "// Define extension macros\n" "\n" "#if (defined(__OPENCL_CPP_VERSION__) || __OPENCL_C_VERSION__ >= 200)\n" "// For SPIR and SPIR-V all extensions are supported.\n" "#if defined(__SPIR__) || defined(__SPIRV__)\n" "#define cl_khr_subgroup_extended_types 1\n" "#define cl_khr_subgroup_non_uniform_vote 1\n" "#define cl_khr_subgroup_ballot 1\n" "#define cl_khr_subgroup_non_uniform_arithmetic 1\n" "#define cl_khr_subgroup_shuffle 1\n" "#define cl_khr_subgroup_shuffle_relative 1\n" "#define cl_khr_subgroup_clustered_reduce 1\n" "#define cl_khr_subgroup_rotate 1\n" "#define cl_khr_extended_bit_ops 1\n" "#define cl_khr_integer_dot_product 1\n" "#define __opencl_c_integer_dot_product_input_4x8bit 1\n" "#define __opencl_c_integer_dot_product_input_4x8bit_packed 1\n" "#define cl_ext_float_atomics 1\n" "#ifdef cl_khr_fp16\n" "#define __opencl_c_ext_fp16_global_atomic_load_store 1\n" "#define __opencl_c_ext_fp16_local_atomic_load_store 1\n" "#define __opencl_c_ext_fp16_global_atomic_add 1\n" "#define __opencl_c_ext_fp16_local_atomic_add 1\n" "#define __opencl_c_ext_fp16_global_atomic_min_max 1\n" "#define __opencl_c_ext_fp16_local_atomic_min_max 1\n" "#endif\n" "#ifdef cl_khr_fp64\n" "#define __opencl_c_ext_fp64_global_atomic_add 1\n" "#define __opencl_c_ext_fp64_local_atomic_add 1\n" "#define __opencl_c_ext_fp64_global_atomic_min_max 1\n" "#define __opencl_c_ext_fp64_local_atomic_min_max 1\n" "#endif\n" 
"#define __opencl_c_ext_fp32_global_atomic_add 1\n" "#define __opencl_c_ext_fp32_local_atomic_add 1\n" "#define __opencl_c_ext_fp32_global_atomic_min_max 1\n" "#define __opencl_c_ext_fp32_local_atomic_min_max 1\n" "\n" "#endif // defined(__SPIR__) || defined(__SPIRV__)\n" "#endif // (defined(__OPENCL_CPP_VERSION__) || __OPENCL_C_VERSION__ >= 200)\n" "\n" "// Define feature macros for OpenCL C 2.0\n" "#if (__OPENCL_CPP_VERSION__ == 100 || __OPENCL_C_VERSION__ == 200)\n" "#define __opencl_c_pipes 1\n" "#define __opencl_c_generic_address_space 1\n" "#define __opencl_c_work_group_collective_functions 1\n" "#define __opencl_c_atomic_order_acq_rel 1\n" "#define __opencl_c_atomic_order_seq_cst 1\n" "#define __opencl_c_atomic_scope_device 1\n" "#define __opencl_c_atomic_scope_all_devices 1\n" "#define __opencl_c_device_enqueue 1\n" "#define __opencl_c_read_write_images 1\n" "#define __opencl_c_program_scope_global_variables 1\n" "#define __opencl_c_images 1\n" "#endif\n" "\n" "// Define header-only feature macros for OpenCL C 3.0.\n" "#if (__OPENCL_CPP_VERSION__ == 202100 || __OPENCL_C_VERSION__ == 300)\n" "// For the SPIR and SPIR-V target all features are supported.\n" "#if defined(__SPIR__) || defined(__SPIRV__)\n" "#define __opencl_c_work_group_collective_functions 1\n" "#define __opencl_c_atomic_order_seq_cst 1\n" "#define __opencl_c_atomic_scope_device 1\n" "#define __opencl_c_atomic_scope_all_devices 1\n" "#define __opencl_c_read_write_images 1\n" "#endif // defined(__SPIR__)\n" "\n" "// Undefine any feature macros that have been explicitly disabled using\n" "// an __undef_ macro.\n" "#ifdef __undef___opencl_c_work_group_collective_functions\n" "#undef __opencl_c_work_group_collective_functions\n" "#endif\n" "#ifdef __undef___opencl_c_atomic_order_seq_cst\n" "#undef __opencl_c_atomic_order_seq_cst\n" "#endif\n" "#ifdef __undef___opencl_c_atomic_scope_device\n" "#undef __opencl_c_atomic_scope_device\n" "#endif\n" "#ifdef __undef___opencl_c_atomic_scope_all_devices\n" 
"#undef __opencl_c_atomic_scope_all_devices\n" "#endif\n" "#ifdef __undef___opencl_c_read_write_images\n" "#undef __opencl_c_read_write_images\n" "#endif\n" "\n" "#endif // (__OPENCL_CPP_VERSION__ == 202100 || __OPENCL_C_VERSION__ == 300)\n" "\n" "#if !defined(__opencl_c_generic_address_space)\n" "// Internal feature macro to provide named (global, local, private) address\n" "// space overloads for builtin functions that take a pointer argument.\n" "#define __opencl_c_named_address_space_builtins 1\n" "#endif // !defined(__opencl_c_generic_address_space)\n" "\n" "#if defined(cl_intel_subgroups) || defined(cl_khr_subgroups) || defined(__opencl_c_subgroups)\n" "// Internal feature macro to provide subgroup builtins.\n" "#define __opencl_subgroup_builtins 1\n" "#endif\n" "\n" "// built-in scalar data types:\n" "\n" "/**\n" " * An unsigned 8-bit integer.\n" " */\n" "typedef unsigned char uchar;\n" "\n" "/**\n" " * An unsigned 16-bit integer.\n" " */\n" "typedef unsigned short ushort;\n" "\n" "/**\n" " * An unsigned 32-bit integer.\n" " */\n" "typedef unsigned int uint;\n" "\n" "/**\n" " * An unsigned 64-bit integer.\n" " */\n" "typedef unsigned long ulong;\n" "\n" "/**\n" " * The unsigned integer type of the result of the sizeof operator. 
This\n" " * is a 32-bit unsigned integer if CL_DEVICE_ADDRESS_BITS\n" " * defined in table 4.3 is 32-bits and is a 64-bit unsigned integer if\n" " * CL_DEVICE_ADDRESS_BITS is 64-bits.\n" " */\n" "typedef __SIZE_TYPE__ size_t;\n" "\n" "/**\n" " * A signed integer type that is the result of subtracting two pointers.\n" " * This is a 32-bit signed integer if CL_DEVICE_ADDRESS_BITS\n" " * defined in table 4.3 is 32-bits and is a 64-bit signed integer if\n" " * CL_DEVICE_ADDRESS_BITS is 64-bits.\n" " */\n" "typedef __PTRDIFF_TYPE__ ptrdiff_t;\n" "\n" "/**\n" " * A signed integer type with the property that any valid pointer to\n" " * void can be converted to this type, then converted back to pointer\n" " * to void, and the result will compare equal to the original pointer.\n" " */\n" "typedef __INTPTR_TYPE__ intptr_t;\n" "\n" "/**\n" " * An unsigned integer type with the property that any valid pointer to\n" " * void can be converted to this type, then converted back to pointer\n" " * to void, and the result will compare equal to the original pointer.\n" " */\n" "typedef __UINTPTR_TYPE__ uintptr_t;\n" "\n" "// built-in vector data types:\n" "typedef char char2 __attribute__((ext_vector_type(2)));\n" "typedef char char3 __attribute__((ext_vector_type(3)));\n" "typedef char char4 __attribute__((ext_vector_type(4)));\n" "typedef char char8 __attribute__((ext_vector_type(8)));\n" "typedef char char16 __attribute__((ext_vector_type(16)));\n" "typedef uchar uchar2 __attribute__((ext_vector_type(2)));\n" "typedef uchar uchar3 __attribute__((ext_vector_type(3)));\n" "typedef uchar uchar4 __attribute__((ext_vector_type(4)));\n" "typedef uchar uchar8 __attribute__((ext_vector_type(8)));\n" "typedef uchar uchar16 __attribute__((ext_vector_type(16)));\n" "typedef short short2 __attribute__((ext_vector_type(2)));\n" "typedef short short3 __attribute__((ext_vector_type(3)));\n" "typedef short short4 __attribute__((ext_vector_type(4)));\n" "typedef short short8 
__attribute__((ext_vector_type(8)));\n" "typedef short short16 __attribute__((ext_vector_type(16)));\n" "typedef ushort ushort2 __attribute__((ext_vector_type(2)));\n" "typedef ushort ushort3 __attribute__((ext_vector_type(3)));\n" "typedef ushort ushort4 __attribute__((ext_vector_type(4)));\n" "typedef ushort ushort8 __attribute__((ext_vector_type(8)));\n" "typedef ushort ushort16 __attribute__((ext_vector_type(16)));\n" "typedef int int2 __attribute__((ext_vector_type(2)));\n" "typedef int int3 __attribute__((ext_vector_type(3)));\n" "typedef int int4 __attribute__((ext_vector_type(4)));\n" "typedef int int8 __attribute__((ext_vector_type(8)));\n" "typedef int int16 __attribute__((ext_vector_type(16)));\n" "typedef uint uint2 __attribute__((ext_vector_type(2)));\n" "typedef uint uint3 __attribute__((ext_vector_type(3)));\n" "typedef uint uint4 __attribute__((ext_vector_type(4)));\n" "typedef uint uint8 __attribute__((ext_vector_type(8)));\n" "typedef uint uint16 __attribute__((ext_vector_type(16)));\n" "typedef long long2 __attribute__((ext_vector_type(2)));\n" "typedef long long3 __attribute__((ext_vector_type(3)));\n" "typedef long long4 __attribute__((ext_vector_type(4)));\n" "typedef long long8 __attribute__((ext_vector_type(8)));\n" "typedef long long16 __attribute__((ext_vector_type(16)));\n" "typedef ulong ulong2 __attribute__((ext_vector_type(2)));\n" "typedef ulong ulong3 __attribute__((ext_vector_type(3)));\n" "typedef ulong ulong4 __attribute__((ext_vector_type(4)));\n" "typedef ulong ulong8 __attribute__((ext_vector_type(8)));\n" "typedef ulong ulong16 __attribute__((ext_vector_type(16)));\n" "typedef float float2 __attribute__((ext_vector_type(2)));\n" "typedef float float3 __attribute__((ext_vector_type(3)));\n" "typedef float float4 __attribute__((ext_vector_type(4)));\n" "typedef float float8 __attribute__((ext_vector_type(8)));\n" "typedef float float16 __attribute__((ext_vector_type(16)));\n" "#ifdef cl_khr_fp16\n" "#pragma OPENCL EXTENSION 
cl_khr_fp16 : enable\n" "typedef half half2 __attribute__((ext_vector_type(2)));\n" "typedef half half3 __attribute__((ext_vector_type(3)));\n" "typedef half half4 __attribute__((ext_vector_type(4)));\n" "typedef half half8 __attribute__((ext_vector_type(8)));\n" "typedef half half16 __attribute__((ext_vector_type(16)));\n" "#endif\n" "#ifdef cl_khr_fp64\n" "#if __OPENCL_C_VERSION__ < CL_VERSION_1_2\n" "#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n" "#endif\n" "typedef double double2 __attribute__((ext_vector_type(2)));\n" "typedef double double3 __attribute__((ext_vector_type(3)));\n" "typedef double double4 __attribute__((ext_vector_type(4)));\n" "typedef double double8 __attribute__((ext_vector_type(8)));\n" "typedef double double16 __attribute__((ext_vector_type(16)));\n" "#endif\n" "\n" "// An internal alias for half, for use by OpenCLBuiltins.td.\n" "#define __half half\n" "\n" "#if defined(__OPENCL_CPP_VERSION__)\n" "#define NULL nullptr\n" "#elif defined(__OPENCL_C_VERSION__)\n" "#define NULL ((void*)0)\n" "#endif\n" "\n" "/**\n" " * Value of maximum non-infinite single-precision floating-point\n" " * number.\n" " */\n" "#define MAXFLOAT 0x1.fffffep127f\n" "\n" "/**\n" " * A positive float constant expression. HUGE_VALF evaluates\n" " * to +infinity. Used as an error value returned by the built-in\n" " * math functions.\n" " */\n" "#define HUGE_VALF (__builtin_huge_valf())\n" "\n" "/**\n" " * A positive double constant expression. HUGE_VAL evaluates\n" " * to +infinity. 
Used as an error value returned by the built-in\n" " * math functions.\n" " */\n" "#define HUGE_VAL (__builtin_huge_val())\n" "\n" "/**\n" " * A constant expression of type float representing positive or\n" " * unsigned infinity.\n" " */\n" "#define INFINITY (__builtin_inff())\n" "\n" "/**\n" " * A constant expression of type float representing a quiet NaN.\n" " */\n" "#define NAN as_float(INT_MAX)\n" "\n" "#define FP_ILOGB0 INT_MIN\n" "#define FP_ILOGBNAN INT_MAX\n" "\n" "#define FLT_DIG 6\n" "#define FLT_MANT_DIG 24\n" "#define FLT_MAX_10_EXP +38\n" "#define FLT_MAX_EXP +128\n" "#define FLT_MIN_10_EXP -37\n" "#define FLT_MIN_EXP -125\n" "#define FLT_RADIX 2\n" "#define FLT_MAX 0x1.fffffep127f\n" "#define FLT_MIN 0x1.0p-126f\n" "#define FLT_EPSILON 0x1.0p-23f\n" "\n" "#define M_E_F 2.71828182845904523536028747135266250f\n" "#define M_LOG2E_F 1.44269504088896340735992468100189214f\n" "#define M_LOG10E_F 0.434294481903251827651128918916605082f\n" "#define M_LN2_F 0.693147180559945309417232121458176568f\n" "#define M_LN10_F 2.30258509299404568401799145468436421f\n" "#define M_PI_F 3.14159265358979323846264338327950288f\n" "#define M_PI_2_F 1.57079632679489661923132169163975144f\n" "#define M_PI_4_F 0.785398163397448309615660845819875721f\n" "#define M_1_PI_F 0.318309886183790671537767526745028724f\n" "#define M_2_PI_F 0.636619772367581343075535053490057448f\n" "#define M_2_SQRTPI_F 1.12837916709551257389615890312154517f\n" "#define M_SQRT2_F 1.41421356237309504880168872420969808f\n" "#define M_SQRT1_2_F 0.707106781186547524400844362104849039f\n" "\n" "#define DBL_DIG 15\n" "#define DBL_MANT_DIG 53\n" "#define DBL_MAX_10_EXP +308\n" "#define DBL_MAX_EXP +1024\n" "#define DBL_MIN_10_EXP -307\n" "#define DBL_MIN_EXP -1021\n" "#define DBL_RADIX 2\n" "#define DBL_MAX 0x1.fffffffffffffp1023\n" "#define DBL_MIN 0x1.0p-1022\n" "#define DBL_EPSILON 0x1.0p-52\n" "\n" "#define M_E 0x1.5bf0a8b145769p+1\n" "#define M_LOG2E 0x1.71547652b82fep+0\n" "#define M_LOG10E 
0x1.bcb7b1526e50ep-2\n" "#define M_LN2 0x1.62e42fefa39efp-1\n" "#define M_LN10 0x1.26bb1bbb55516p+1\n" "#define M_PI 0x1.921fb54442d18p+1\n" "#define M_PI_2 0x1.921fb54442d18p+0\n" "#define M_PI_4 0x1.921fb54442d18p-1\n" "#define M_1_PI 0x1.45f306dc9c883p-2\n" "#define M_2_PI 0x1.45f306dc9c883p-1\n" "#define M_2_SQRTPI 0x1.20dd750429b6dp+0\n" "#define M_SQRT2 0x1.6a09e667f3bcdp+0\n" "#define M_SQRT1_2 0x1.6a09e667f3bcdp-1\n" "\n" "#ifdef cl_khr_fp16\n" "\n" "#define HALF_DIG 3\n" "#define HALF_MANT_DIG 11\n" "#define HALF_MAX_10_EXP +4\n" "#define HALF_MAX_EXP +16\n" "#define HALF_MIN_10_EXP -4\n" "#define HALF_MIN_EXP -13\n" "#define HALF_RADIX 2\n" "#define HALF_MAX ((0x1.ffcp15h))\n" "#define HALF_MIN ((0x1.0p-14h))\n" "#define HALF_EPSILON ((0x1.0p-10h))\n" "\n" "#define M_E_H 2.71828182845904523536028747135266250h\n" "#define M_LOG2E_H 1.44269504088896340735992468100189214h\n" "#define M_LOG10E_H 0.434294481903251827651128918916605082h\n" "#define M_LN2_H 0.693147180559945309417232121458176568h\n" "#define M_LN10_H 2.30258509299404568401799145468436421h\n" "#define M_PI_H 3.14159265358979323846264338327950288h\n" "#define M_PI_2_H 1.57079632679489661923132169163975144h\n" "#define M_PI_4_H 0.785398163397448309615660845819875721h\n" "#define M_1_PI_H 0.318309886183790671537767526745028724h\n" "#define M_2_PI_H 0.636619772367581343075535053490057448h\n" "#define M_2_SQRTPI_H 1.12837916709551257389615890312154517h\n" "#define M_SQRT2_H 1.41421356237309504880168872420969808h\n" "#define M_SQRT1_2_H 0.707106781186547524400844362104849039h\n" "\n" "#endif //cl_khr_fp16\n" "\n" "#define CHAR_BIT 8\n" "#define SCHAR_MAX 127\n" "#define SCHAR_MIN (-128)\n" "#define UCHAR_MAX 255\n" "#define CHAR_MAX SCHAR_MAX\n" "#define CHAR_MIN SCHAR_MIN\n" "#define USHRT_MAX 65535\n" "#define SHRT_MAX 32767\n" "#define SHRT_MIN (-32768)\n" "#define UINT_MAX 0xffffffff\n" "#define INT_MAX 2147483647\n" "#define INT_MIN (-2147483647-1)\n" "#define ULONG_MAX 0xffffffffffffffffUL\n" 
"#define LONG_MAX 0x7fffffffffffffffL\n" "#define LONG_MIN (-0x7fffffffffffffffL-1)\n" "\n" "// OpenCL v1.1 s6.11.8, v1.2 s6.12.8, v2.0 s6.13.8 - Synchronization Functions\n" "\n" "// Flag type and values for barrier, mem_fence, read_mem_fence, write_mem_fence\n" "typedef uint cl_mem_fence_flags;\n" "\n" "/**\n" " * Queue a memory fence to ensure correct\n" " * ordering of memory operations to local memory\n" " */\n" "#define CLK_LOCAL_MEM_FENCE 0x01\n" "\n" "/**\n" " * Queue a memory fence to ensure correct\n" " * ordering of memory operations to global memory\n" " */\n" "#define CLK_GLOBAL_MEM_FENCE 0x02\n" "\n" "#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)\n" "\n" "typedef enum memory_scope {\n" " memory_scope_work_item = __OPENCL_MEMORY_SCOPE_WORK_ITEM,\n" " memory_scope_work_group = __OPENCL_MEMORY_SCOPE_WORK_GROUP,\n" " memory_scope_device = __OPENCL_MEMORY_SCOPE_DEVICE,\n" "#if defined(__opencl_c_atomic_scope_all_devices)\n" " memory_scope_all_svm_devices = __OPENCL_MEMORY_SCOPE_ALL_SVM_DEVICES,\n" "#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)\n" " memory_scope_all_devices = memory_scope_all_svm_devices,\n" "#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)\n" "#endif // defined(__opencl_c_atomic_scope_all_devices)\n" "/**\n" " * Subgroups have different requirements on forward progress, so just test\n" " * all the relevant macros.\n" " * CL 3.0 sub-groups \"they are not guaranteed to make independent forward progress\"\n" " * KHR subgroups \"Subgroups within a workgroup are independent, make forward progress with respect to each other\"\n" " */\n" "#if defined(cl_intel_subgroups) || defined(cl_khr_subgroups) || defined(__opencl_c_subgroups)\n" " memory_scope_sub_group = __OPENCL_MEMORY_SCOPE_SUB_GROUP\n" "#endif\n" "} memory_scope;\n" "\n" "/**\n" " * Queue a memory fence to ensure correct ordering of memory\n" " * operations between work-items of a 
work-group to\n" " * image memory.\n" " */\n" "#define CLK_IMAGE_MEM_FENCE 0x04\n" "\n" "#ifndef ATOMIC_VAR_INIT\n" "#define ATOMIC_VAR_INIT(x) (x)\n" "#endif //ATOMIC_VAR_INIT\n" "#define ATOMIC_FLAG_INIT 0\n" "\n" "// enum values aligned with what clang uses in EmitAtomicExpr()\n" "typedef enum memory_order\n" "{\n" " memory_order_relaxed = __ATOMIC_RELAXED,\n" " memory_order_acquire = __ATOMIC_ACQUIRE,\n" " memory_order_release = __ATOMIC_RELEASE,\n" " memory_order_acq_rel = __ATOMIC_ACQ_REL,\n" "#if defined(__opencl_c_atomic_order_seq_cst)\n" " memory_order_seq_cst = __ATOMIC_SEQ_CST\n" "#endif\n" "} memory_order;\n" "\n" "#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)\n" "\n" "// OpenCL v1.1 s6.11.3, v1.2 s6.12.14, v2.0 s6.13.14 - Image Read and Write Functions\n" "\n" "// These values need to match the runtime equivalent\n" "//\n" "// Addressing Mode.\n" "//\n" "#define CLK_ADDRESS_NONE 0\n" "#define CLK_ADDRESS_CLAMP_TO_EDGE 2\n" "#define CLK_ADDRESS_CLAMP 4\n" "#define CLK_ADDRESS_REPEAT 6\n" "#define CLK_ADDRESS_MIRRORED_REPEAT 8\n" "\n" "//\n" "// Coordination Normalization\n" "//\n" "#define CLK_NORMALIZED_COORDS_FALSE 0\n" "#define CLK_NORMALIZED_COORDS_TRUE 1\n" "\n" "//\n" "// Filtering Mode.\n" "//\n" "#define CLK_FILTER_NEAREST 0x10\n" "#define CLK_FILTER_LINEAR 0x20\n" "\n" "#ifdef cl_khr_gl_msaa_sharing\n" "#pragma OPENCL EXTENSION cl_khr_gl_msaa_sharing : enable\n" "#endif //cl_khr_gl_msaa_sharing\n" "\n" "//\n" "// Channel Datatype.\n" "//\n" "#define CLK_SNORM_INT8 0x10D0\n" "#define CLK_SNORM_INT16 0x10D1\n" "#define CLK_UNORM_INT8 0x10D2\n" "#define CLK_UNORM_INT16 0x10D3\n" "#define CLK_UNORM_SHORT_565 0x10D4\n" "#define CLK_UNORM_SHORT_555 0x10D5\n" "#define CLK_UNORM_INT_101010 0x10D6\n" "#define CLK_SIGNED_INT8 0x10D7\n" "#define CLK_SIGNED_INT16 0x10D8\n" "#define CLK_SIGNED_INT32 0x10D9\n" "#define CLK_UNSIGNED_INT8 0x10DA\n" "#define CLK_UNSIGNED_INT16 0x10DB\n" "#define CLK_UNSIGNED_INT32 
0x10DC\n" "#define CLK_HALF_FLOAT 0x10DD\n" "#define CLK_FLOAT 0x10DE\n" "#define CLK_UNORM_INT24 0x10DF\n" "#if __OPENCL_C_VERSION__ >= CL_VERSION_3_0\n" "#define CLK_UNORM_INT_101010_2 0x10E0\n" "#endif // __OPENCL_C_VERSION__ >= CL_VERSION_3_0\n" "\n" "// Channel order, numbering must be aligned with cl_channel_order in cl.h\n" "//\n" "#define CLK_R 0x10B0\n" "#define CLK_A 0x10B1\n" "#define CLK_RG 0x10B2\n" "#define CLK_RA 0x10B3\n" "#define CLK_RGB 0x10B4\n" "#define CLK_RGBA 0x10B5\n" "#define CLK_BGRA 0x10B6\n" "#define CLK_ARGB 0x10B7\n" "#define CLK_INTENSITY 0x10B8\n" "#define CLK_LUMINANCE 0x10B9\n" "#define CLK_Rx 0x10BA\n" "#define CLK_RGx 0x10BB\n" "#define CLK_RGBx 0x10BC\n" "#define CLK_DEPTH 0x10BD\n" "#define CLK_DEPTH_STENCIL 0x10BE\n" "#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0\n" "#define CLK_sRGB 0x10BF\n" "#define CLK_sRGBx 0x10C0\n" "#define CLK_sRGBA 0x10C1\n" "#define CLK_sBGRA 0x10C2\n" "#define CLK_ABGR 0x10C3\n" "#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0\n" "\n" "// OpenCL v2.0 s6.13.16 - Pipe Functions\n" "#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)\n" "#define CLK_NULL_RESERVE_ID (__builtin_astype(((void*)(__SIZE_MAX__)), reserve_id_t))\n" "\n" "// OpenCL v2.0 s6.13.17 - Enqueue Kernels\n" "#define CL_COMPLETE 0x0\n" "#define CL_RUNNING 0x1\n" "#define CL_SUBMITTED 0x2\n" "#define CL_QUEUED 0x3\n" "\n" "#define CLK_SUCCESS 0\n" "#define CLK_ENQUEUE_FAILURE -101\n" "#define CLK_INVALID_QUEUE -102\n" "#define CLK_INVALID_NDRANGE -160\n" "#define CLK_INVALID_EVENT_WAIT_LIST -57\n" "#define CLK_DEVICE_QUEUE_FULL -161\n" "#define CLK_INVALID_ARG_SIZE -51\n" "#define CLK_EVENT_ALLOCATION_FAILURE -100\n" "#define CLK_OUT_OF_RESOURCES -5\n" "\n" "#define CLK_NULL_QUEUE 0\n" "#define CLK_NULL_EVENT (__builtin_astype(((__SIZE_MAX__)), clk_event_t))\n" "\n" "// execution model related definitions\n" "#define CLK_ENQUEUE_FLAGS_NO_WAIT 0x0\n" "#define CLK_ENQUEUE_FLAGS_WAIT_KERNEL 0x1\n" "#define 
CLK_ENQUEUE_FLAGS_WAIT_WORK_GROUP 0x2\n" "\n" "typedef int kernel_enqueue_flags_t;\n" "typedef int clk_profiling_info;\n" "\n" "// Profiling info name (see capture_event_profiling_info)\n" "#define CLK_PROFILING_COMMAND_EXEC_TIME 0x1\n" "\n" "#define MAX_WORK_DIM 3\n" "\n" "#ifdef __opencl_c_device_enqueue\n" "typedef struct {\n" " unsigned int workDimension;\n" " size_t globalWorkOffset[MAX_WORK_DIM];\n" " size_t globalWorkSize[MAX_WORK_DIM];\n" " size_t localWorkSize[MAX_WORK_DIM];\n" "} ndrange_t;\n" "#endif // __opencl_c_device_enqueue\n" "\n" "#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)\n" "\n" "/**\n" " * OpenCL v1.1/1.2/2.0 s6.2.4.2 - as_type operators\n" " * Reinterprets a data type as another data type of the same size\n" " */\n" "#define as_char(x) __builtin_astype((x), char)\n" "#define as_char2(x) __builtin_astype((x), char2)\n" "#define as_char3(x) __builtin_astype((x), char3)\n" "#define as_char4(x) __builtin_astype((x), char4)\n" "#define as_char8(x) __builtin_astype((x), char8)\n" "#define as_char16(x) __builtin_astype((x), char16)\n" "\n" "#define as_uchar(x) __builtin_astype((x), uchar)\n" "#define as_uchar2(x) __builtin_astype((x), uchar2)\n" "#define as_uchar3(x) __builtin_astype((x), uchar3)\n" "#define as_uchar4(x) __builtin_astype((x), uchar4)\n" "#define as_uchar8(x) __builtin_astype((x), uchar8)\n" "#define as_uchar16(x) __builtin_astype((x), uchar16)\n" "\n" "#define as_short(x) __builtin_astype((x), short)\n" "#define as_short2(x) __builtin_astype((x), short2)\n" "#define as_short3(x) __builtin_astype((x), short3)\n" "#define as_short4(x) __builtin_astype((x), short4)\n" "#define as_short8(x) __builtin_astype((x), short8)\n" "#define as_short16(x) __builtin_astype((x), short16)\n" "\n" "#define as_ushort(x) __builtin_astype((x), ushort)\n" "#define as_ushort2(x) __builtin_astype((x), ushort2)\n" "#define as_ushort3(x) __builtin_astype((x), ushort3)\n" "#define as_ushort4(x) __builtin_astype((x), 
ushort4)\n" "#define as_ushort8(x) __builtin_astype((x), ushort8)\n" "#define as_ushort16(x) __builtin_astype((x), ushort16)\n" "\n" "#define as_int(x) __builtin_astype((x), int)\n" "#define as_int2(x) __builtin_astype((x), int2)\n" "#define as_int3(x) __builtin_astype((x), int3)\n" "#define as_int4(x) __builtin_astype((x), int4)\n" "#define as_int8(x) __builtin_astype((x), int8)\n" "#define as_int16(x) __builtin_astype((x), int16)\n" "\n" "#define as_uint(x) __builtin_astype((x), uint)\n" "#define as_uint2(x) __builtin_astype((x), uint2)\n" "#define as_uint3(x) __builtin_astype((x), uint3)\n" "#define as_uint4(x) __builtin_astype((x), uint4)\n" "#define as_uint8(x) __builtin_astype((x), uint8)\n" "#define as_uint16(x) __builtin_astype((x), uint16)\n" "\n" "#define as_long(x) __builtin_astype((x), long)\n" "#define as_long2(x) __builtin_astype((x), long2)\n" "#define as_long3(x) __builtin_astype((x), long3)\n" "#define as_long4(x) __builtin_astype((x), long4)\n" "#define as_long8(x) __builtin_astype((x), long8)\n" "#define as_long16(x) __builtin_astype((x), long16)\n" "\n" "#define as_ulong(x) __builtin_astype((x), ulong)\n" "#define as_ulong2(x) __builtin_astype((x), ulong2)\n" "#define as_ulong3(x) __builtin_astype((x), ulong3)\n" "#define as_ulong4(x) __builtin_astype((x), ulong4)\n" "#define as_ulong8(x) __builtin_astype((x), ulong8)\n" "#define as_ulong16(x) __builtin_astype((x), ulong16)\n" "\n" "#define as_float(x) __builtin_astype((x), float)\n" "#define as_float2(x) __builtin_astype((x), float2)\n" "#define as_float3(x) __builtin_astype((x), float3)\n" "#define as_float4(x) __builtin_astype((x), float4)\n" "#define as_float8(x) __builtin_astype((x), float8)\n" "#define as_float16(x) __builtin_astype((x), float16)\n" "\n" "#ifdef cl_khr_fp64\n" "#define as_double(x) __builtin_astype((x), double)\n" "#define as_double2(x) __builtin_astype((x), double2)\n" "#define as_double3(x) __builtin_astype((x), double3)\n" "#define as_double4(x) __builtin_astype((x), 
double4)\n" "#define as_double8(x) __builtin_astype((x), double8)\n" "#define as_double16(x) __builtin_astype((x), double16)\n" "#endif // cl_khr_fp64\n" "\n" "#ifdef cl_khr_fp16\n" "#define as_half(x) __builtin_astype((x), half)\n" "#define as_half2(x) __builtin_astype((x), half2)\n" "#define as_half3(x) __builtin_astype((x), half3)\n" "#define as_half4(x) __builtin_astype((x), half4)\n" "#define as_half8(x) __builtin_astype((x), half8)\n" "#define as_half16(x) __builtin_astype((x), half16)\n" "#endif // cl_khr_fp16\n" "\n" "#define as_size_t(x) __builtin_astype((x), size_t)\n" "#define as_ptrdiff_t(x) __builtin_astype((x), ptrdiff_t)\n" "#define as_intptr_t(x) __builtin_astype((x), intptr_t)\n" "#define as_uintptr_t(x) __builtin_astype((x), uintptr_t)\n" "\n" "// C++ for OpenCL - __remove_address_space\n" "#if defined(__OPENCL_CPP_VERSION__)\n" "template struct __remove_address_space { using type = _Tp; };\n" "#if defined(__opencl_c_generic_address_space)\n" "template struct __remove_address_space<__generic _Tp> {\n" " using type = _Tp;\n" "};\n" "#endif\n" "template struct __remove_address_space<__global _Tp> {\n" " using type = _Tp;\n" "};\n" "template struct __remove_address_space<__private _Tp> {\n" " using type = _Tp;\n" "};\n" "template struct __remove_address_space<__local _Tp> {\n" " using type = _Tp;\n" "};\n" "template struct __remove_address_space<__constant _Tp> {\n" " using type = _Tp;\n" "};\n" "#endif\n" "\n" "// OpenCL v1.1 s6.9, v1.2/2.0 s6.10 - Function qualifiers\n" "\n" "#define __kernel_exec(X, typen) __kernel \\\n" " __attribute__((work_group_size_hint(X, 1, 1))) \\\n" " __attribute__((vec_type_hint(typen)))\n" "\n" "#define kernel_exec(X, typen) __kernel \\\n" " __attribute__((work_group_size_hint(X, 1, 1))) \\\n" " __attribute__((vec_type_hint(typen)))\n" "\n" "#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)\n" "// OpenCL v1.2 s6.12.13, v2.0 s6.13.13 - printf\n" "\n" "int printf(__constant const char* st, 
...) __attribute__((format(printf, 1, 2)));\n" "#endif\n" "\n" "#ifdef cl_intel_device_side_avc_motion_estimation\n" "\n" "#define CLK_AVC_ME_MAJOR_16x16_INTEL 0x0\n" "#define CLK_AVC_ME_MAJOR_16x8_INTEL 0x1\n" "#define CLK_AVC_ME_MAJOR_8x16_INTEL 0x2\n" "#define CLK_AVC_ME_MAJOR_8x8_INTEL 0x3\n" "\n" "#define CLK_AVC_ME_MINOR_8x8_INTEL 0x0\n" "#define CLK_AVC_ME_MINOR_8x4_INTEL 0x1\n" "#define CLK_AVC_ME_MINOR_4x8_INTEL 0x2\n" "#define CLK_AVC_ME_MINOR_4x4_INTEL 0x3\n" "\n" "#define CLK_AVC_ME_MAJOR_FORWARD_INTEL 0x0\n" "#define CLK_AVC_ME_MAJOR_BACKWARD_INTEL 0x1\n" "#define CLK_AVC_ME_MAJOR_BIDIRECTIONAL_INTEL 0x2\n" "\n" "#define CLK_AVC_ME_PARTITION_MASK_ALL_INTEL 0x0\n" "#define CLK_AVC_ME_PARTITION_MASK_16x16_INTEL 0x7E\n" "#define CLK_AVC_ME_PARTITION_MASK_16x8_INTEL 0x7D\n" "#define CLK_AVC_ME_PARTITION_MASK_8x16_INTEL 0x7B\n" "#define CLK_AVC_ME_PARTITION_MASK_8x8_INTEL 0x77\n" "#define CLK_AVC_ME_PARTITION_MASK_8x4_INTEL 0x6F\n" "#define CLK_AVC_ME_PARTITION_MASK_4x8_INTEL 0x5F\n" "#define CLK_AVC_ME_PARTITION_MASK_4x4_INTEL 0x3F\n" "\n" "#define CLK_AVC_ME_SLICE_TYPE_PRED_INTEL 0x0\n" "#define CLK_AVC_ME_SLICE_TYPE_BPRED_INTEL 0x1\n" "#define CLK_AVC_ME_SLICE_TYPE_INTRA_INTEL 0x2\n" "\n" "#define CLK_AVC_ME_SEARCH_WINDOW_EXHAUSTIVE_INTEL 0x0\n" "#define CLK_AVC_ME_SEARCH_WINDOW_SMALL_INTEL 0x1\n" "#define CLK_AVC_ME_SEARCH_WINDOW_TINY_INTEL 0x2\n" "#define CLK_AVC_ME_SEARCH_WINDOW_EXTRA_TINY_INTEL 0x3\n" "#define CLK_AVC_ME_SEARCH_WINDOW_DIAMOND_INTEL 0x4\n" "#define CLK_AVC_ME_SEARCH_WINDOW_LARGE_DIAMOND_INTEL 0x5\n" "#define CLK_AVC_ME_SEARCH_WINDOW_RESERVED0_INTEL 0x6\n" "#define CLK_AVC_ME_SEARCH_WINDOW_RESERVED1_INTEL 0x7\n" "#define CLK_AVC_ME_SEARCH_WINDOW_CUSTOM_INTEL 0x8\n" "\n" "#define CLK_AVC_ME_SAD_ADJUST_MODE_NONE_INTEL 0x0\n" "#define CLK_AVC_ME_SAD_ADJUST_MODE_HAAR_INTEL 0x2\n" "\n" "#define CLK_AVC_ME_SUBPIXEL_MODE_INTEGER_INTEL 0x0\n" "#define CLK_AVC_ME_SUBPIXEL_MODE_HPEL_INTEL 0x1\n" "#define CLK_AVC_ME_SUBPIXEL_MODE_QPEL_INTEL 
0x3\n" "\n" "#define CLK_AVC_ME_COST_PRECISION_QPEL_INTEL 0x0\n" "#define CLK_AVC_ME_COST_PRECISION_HPEL_INTEL 0x1\n" "#define CLK_AVC_ME_COST_PRECISION_PEL_INTEL 0x2\n" "#define CLK_AVC_ME_COST_PRECISION_DPEL_INTEL 0x3\n" "\n" "#define CLK_AVC_ME_BIDIR_WEIGHT_QUARTER_INTEL 0x10\n" "#define CLK_AVC_ME_BIDIR_WEIGHT_THIRD_INTEL 0x15\n" "#define CLK_AVC_ME_BIDIR_WEIGHT_HALF_INTEL 0x20\n" "#define CLK_AVC_ME_BIDIR_WEIGHT_TWO_THIRD_INTEL 0x2B\n" "#define CLK_AVC_ME_BIDIR_WEIGHT_THREE_QUARTER_INTEL 0x30\n" "\n" "#define CLK_AVC_ME_BORDER_REACHED_LEFT_INTEL 0x0\n" "#define CLK_AVC_ME_BORDER_REACHED_RIGHT_INTEL 0x2\n" "#define CLK_AVC_ME_BORDER_REACHED_TOP_INTEL 0x4\n" "#define CLK_AVC_ME_BORDER_REACHED_BOTTOM_INTEL 0x8\n" "\n" "#define CLK_AVC_ME_INTRA_16x16_INTEL 0x0\n" "#define CLK_AVC_ME_INTRA_8x8_INTEL 0x1\n" "#define CLK_AVC_ME_INTRA_4x4_INTEL 0x2\n" "\n" "#define CLK_AVC_ME_SKIP_BLOCK_PARTITION_16x16_INTEL 0x0\n" "#define CLK_AVC_ME_SKIP_BLOCK_PARTITION_8x8_INTEL 0x4000\n" "\n" "#define CLK_AVC_ME_SKIP_BLOCK_16x16_FORWARD_ENABLE_INTEL (0x1 << 24)\n" "#define CLK_AVC_ME_SKIP_BLOCK_16x16_BACKWARD_ENABLE_INTEL (0x2 << 24)\n" "#define CLK_AVC_ME_SKIP_BLOCK_16x16_DUAL_ENABLE_INTEL (0x3 << 24)\n" "#define CLK_AVC_ME_SKIP_BLOCK_8x8_FORWARD_ENABLE_INTEL (0x55 << 24)\n" "#define CLK_AVC_ME_SKIP_BLOCK_8x8_BACKWARD_ENABLE_INTEL (0xAA << 24)\n" "#define CLK_AVC_ME_SKIP_BLOCK_8x8_DUAL_ENABLE_INTEL (0xFF << 24)\n" "#define CLK_AVC_ME_SKIP_BLOCK_8x8_0_FORWARD_ENABLE_INTEL (0x1 << 24)\n" "#define CLK_AVC_ME_SKIP_BLOCK_8x8_0_BACKWARD_ENABLE_INTEL (0x2 << 24)\n" "#define CLK_AVC_ME_SKIP_BLOCK_8x8_1_FORWARD_ENABLE_INTEL (0x1 << 26)\n" "#define CLK_AVC_ME_SKIP_BLOCK_8x8_1_BACKWARD_ENABLE_INTEL (0x2 << 26)\n" "#define CLK_AVC_ME_SKIP_BLOCK_8x8_2_FORWARD_ENABLE_INTEL (0x1 << 28)\n" "#define CLK_AVC_ME_SKIP_BLOCK_8x8_2_BACKWARD_ENABLE_INTEL (0x2 << 28)\n" "#define CLK_AVC_ME_SKIP_BLOCK_8x8_3_FORWARD_ENABLE_INTEL (0x1 << 30)\n" "#define CLK_AVC_ME_SKIP_BLOCK_8x8_3_BACKWARD_ENABLE_INTEL 
(0x2 << 30)\n" "\n" "#define CLK_AVC_ME_BLOCK_BASED_SKIP_4x4_INTEL 0x00\n" "#define CLK_AVC_ME_BLOCK_BASED_SKIP_8x8_INTEL 0x80\n" "\n" "#define CLK_AVC_ME_INTRA_LUMA_PARTITION_MASK_ALL_INTEL 0x0\n" "#define CLK_AVC_ME_INTRA_LUMA_PARTITION_MASK_16x16_INTEL 0x6\n" "#define CLK_AVC_ME_INTRA_LUMA_PARTITION_MASK_8x8_INTEL 0x5\n" "#define CLK_AVC_ME_INTRA_LUMA_PARTITION_MASK_4x4_INTEL 0x3\n" "\n" "#define CLK_AVC_ME_INTRA_NEIGHBOR_LEFT_MASK_ENABLE_INTEL 0x60\n" "#define CLK_AVC_ME_INTRA_NEIGHBOR_UPPER_MASK_ENABLE_INTEL 0x10\n" "#define CLK_AVC_ME_INTRA_NEIGHBOR_UPPER_RIGHT_MASK_ENABLE_INTEL 0x8\n" "#define CLK_AVC_ME_INTRA_NEIGHBOR_UPPER_LEFT_MASK_ENABLE_INTEL 0x4\n" "\n" "#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_VERTICAL_INTEL 0x0\n" "#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_INTEL 0x1\n" "#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_DC_INTEL 0x2\n" "#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_DIAGONAL_DOWN_LEFT_INTEL 0x3\n" "#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_DIAGONAL_DOWN_RIGHT_INTEL 0x4\n" "#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_PLANE_INTEL 0x4\n" "#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_VERTICAL_RIGHT_INTEL 0x5\n" "#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_DOWN_INTEL 0x6\n" "#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_VERTICAL_LEFT_INTEL 0x7\n" "#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_UP_INTEL 0x8\n" "#define CLK_AVC_ME_CHROMA_PREDICTOR_MODE_DC_INTEL 0x0\n" "#define CLK_AVC_ME_CHROMA_PREDICTOR_MODE_HORIZONTAL_INTEL 0x1\n" "#define CLK_AVC_ME_CHROMA_PREDICTOR_MODE_VERTICAL_INTEL 0x2\n" "#define CLK_AVC_ME_CHROMA_PREDICTOR_MODE_PLANE_INTEL 0x3\n" "\n" "#define CLK_AVC_ME_FRAME_FORWARD_INTEL 0x1\n" "#define CLK_AVC_ME_FRAME_BACKWARD_INTEL 0x2\n" "#define CLK_AVC_ME_FRAME_DUAL_INTEL 0x3\n" "\n" "#define CLK_AVC_ME_INTERLACED_SCAN_TOP_FIELD_INTEL 0x0\n" "#define CLK_AVC_ME_INTERLACED_SCAN_BOTTOM_FIELD_INTEL 0x1\n" "\n" "#define CLK_AVC_ME_INITIALIZE_INTEL 0x0\n" "\n" "#define CLK_AVC_IME_PAYLOAD_INITIALIZE_INTEL 0x0\n" "#define 
CLK_AVC_REF_PAYLOAD_INITIALIZE_INTEL 0x0\n" "#define CLK_AVC_SIC_PAYLOAD_INITIALIZE_INTEL 0x0\n" "\n" "#define CLK_AVC_IME_RESULT_INITIALIZE_INTEL 0x0\n" "#define CLK_AVC_REF_RESULT_INITIALIZE_INTEL 0x0\n" "#define CLK_AVC_SIC_RESULT_INITIALIZE_INTEL 0x0\n" "\n" "#define CLK_AVC_IME_RESULT_SINGLE_REFERENCE_STREAMOUT_INITIALIZE_INTEL 0x0\n" "#define CLK_AVC_IME_RESULT_SINGLE_REFERENCE_STREAMIN_INITIALIZE_INTEL 0x0\n" "#define CLK_AVC_IME_RESULT_DUAL_REFERENCE_STREAMOUT_INITIALIZE_INTEL 0x0\n" "#define CLK_AVC_IME_RESULT_DUAL_REFERENCE_STREAMIN_INITIALIZE_INTEL 0x0\n" "\n" "#endif // cl_intel_device_side_avc_motion_estimation\n" "\n" "// Disable any extensions we may have enabled previously.\n" "#pragma OPENCL EXTENSION all : disable\n" "\n" "#endif //_OPENCL_BASE_H_\n" "" } , { "/builtins/opencl-c.h" , "//===--- opencl-c.h - OpenCL C language builtin function header -----------===//\n" "//\n" "// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" "// See https://llvm.org/LICENSE.txt for license information.\n" "// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" "//\n" "//===----------------------------------------------------------------------===//\n" "\n" "#ifndef _OPENCL_H_\n" "#define _OPENCL_H_\n" "\n" "#include \"opencl-c-base.h\"\n" "\n" "#if defined(__opencl_c_images)\n" "#ifndef cl_khr_depth_images\n" "#define cl_khr_depth_images\n" "#endif //cl_khr_depth_images\n" "#endif //defined(__opencl_c_images)\n" "\n" "#if __OPENCL_C_VERSION__ < CL_VERSION_2_0\n" "#ifdef cl_khr_3d_image_writes\n" "#pragma OPENCL EXTENSION cl_khr_3d_image_writes : enable\n" "#endif //cl_khr_3d_image_writes\n" "#endif //__OPENCL_C_VERSION__ < CL_VERSION_2_0\n" "\n" "#if (defined(__OPENCL_CPP_VERSION__) || \\\n" " (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)) && \\\n" " (defined(__SPIR__) || defined(__SPIRV__))\n" "#pragma OPENCL EXTENSION cl_intel_planar_yuv : begin\n" "#pragma OPENCL EXTENSION cl_intel_planar_yuv : end\n" "#endif // 
(defined(__OPENCL_CPP_VERSION__) ||\n" " // (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)) &&\n" " // (defined(__SPIR__) || defined(__SPIRV__))\n" "\n" "#define __ovld __attribute__((overloadable))\n" "#define __conv __attribute__((convergent))\n" "\n" "// Optimizations\n" "#define __purefn __attribute__((pure))\n" "#define __cnfn __attribute__((const))\n" "\n" "\n" "// OpenCL v1.1/1.2/2.0 s6.2.3 - Explicit conversions\n" "\n" "char __ovld __cnfn convert_char_rte(char);\n" "char __ovld __cnfn convert_char_sat_rte(char);\n" "char __ovld __cnfn convert_char_rtz(char);\n" "char __ovld __cnfn convert_char_sat_rtz(char);\n" "char __ovld __cnfn convert_char_rtp(char);\n" "char __ovld __cnfn convert_char_sat_rtp(char);\n" "char __ovld __cnfn convert_char_rtn(char);\n" "char __ovld __cnfn convert_char_sat_rtn(char);\n" "char __ovld __cnfn convert_char(char);\n" "char __ovld __cnfn convert_char_sat(char);\n" "char __ovld __cnfn convert_char_rte(uchar);\n" "char __ovld __cnfn convert_char_sat_rte(uchar);\n" "char __ovld __cnfn convert_char_rtz(uchar);\n" "char __ovld __cnfn convert_char_sat_rtz(uchar);\n" "char __ovld __cnfn convert_char_rtp(uchar);\n" "char __ovld __cnfn convert_char_sat_rtp(uchar);\n" "char __ovld __cnfn convert_char_rtn(uchar);\n" "char __ovld __cnfn convert_char_sat_rtn(uchar);\n" "char __ovld __cnfn convert_char(uchar);\n" "char __ovld __cnfn convert_char_sat(uchar);\n" "char __ovld __cnfn convert_char_rte(short);\n" "char __ovld __cnfn convert_char_sat_rte(short);\n" "char __ovld __cnfn convert_char_rtz(short);\n" "char __ovld __cnfn convert_char_sat_rtz(short);\n" "char __ovld __cnfn convert_char_rtp(short);\n" "char __ovld __cnfn convert_char_sat_rtp(short);\n" "char __ovld __cnfn convert_char_rtn(short);\n" "char __ovld __cnfn convert_char_sat_rtn(short);\n" "char __ovld __cnfn convert_char(short);\n" "char __ovld __cnfn convert_char_sat(short);\n" "char __ovld __cnfn convert_char_rte(ushort);\n" "char __ovld __cnfn convert_char_sat_rte(ushort);\n" "char 
__ovld __cnfn convert_char_rtz(ushort);\n" "char __ovld __cnfn convert_char_sat_rtz(ushort);\n" "char __ovld __cnfn convert_char_rtp(ushort);\n" "char __ovld __cnfn convert_char_sat_rtp(ushort);\n" "char __ovld __cnfn convert_char_rtn(ushort);\n" "char __ovld __cnfn convert_char_sat_rtn(ushort);\n" "char __ovld __cnfn convert_char(ushort);\n" "char __ovld __cnfn convert_char_sat(ushort);\n" "char __ovld __cnfn convert_char_rte(int);\n" "char __ovld __cnfn convert_char_sat_rte(int);\n" "char __ovld __cnfn convert_char_rtz(int);\n" "char __ovld __cnfn convert_char_sat_rtz(int);\n" "char __ovld __cnfn convert_char_rtp(int);\n" "char __ovld __cnfn convert_char_sat_rtp(int);\n" "char __ovld __cnfn convert_char_rtn(int);\n" "char __ovld __cnfn convert_char_sat_rtn(int);\n" "char __ovld __cnfn convert_char(int);\n" "char __ovld __cnfn convert_char_sat(int);\n" "char __ovld __cnfn convert_char_rte(uint);\n" "char __ovld __cnfn convert_char_sat_rte(uint);\n" "char __ovld __cnfn convert_char_rtz(uint);\n" "char __ovld __cnfn convert_char_sat_rtz(uint);\n" "char __ovld __cnfn convert_char_rtp(uint);\n" "char __ovld __cnfn convert_char_sat_rtp(uint);\n" "char __ovld __cnfn convert_char_rtn(uint);\n" "char __ovld __cnfn convert_char_sat_rtn(uint);\n" "char __ovld __cnfn convert_char(uint);\n" "char __ovld __cnfn convert_char_sat(uint);\n" "char __ovld __cnfn convert_char_rte(long);\n" "char __ovld __cnfn convert_char_sat_rte(long);\n" "char __ovld __cnfn convert_char_rtz(long);\n" "char __ovld __cnfn convert_char_sat_rtz(long);\n" "char __ovld __cnfn convert_char_rtp(long);\n" "char __ovld __cnfn convert_char_sat_rtp(long);\n" "char __ovld __cnfn convert_char_rtn(long);\n" "char __ovld __cnfn convert_char_sat_rtn(long);\n" "char __ovld __cnfn convert_char(long);\n" "char __ovld __cnfn convert_char_sat(long);\n" "char __ovld __cnfn convert_char_rte(ulong);\n" "char __ovld __cnfn convert_char_sat_rte(ulong);\n" "char __ovld __cnfn convert_char_rtz(ulong);\n" "char __ovld __cnfn 
convert_char_sat_rtz(ulong);\n" "char __ovld __cnfn convert_char_rtp(ulong);\n" "char __ovld __cnfn convert_char_sat_rtp(ulong);\n" "char __ovld __cnfn convert_char_rtn(ulong);\n" "char __ovld __cnfn convert_char_sat_rtn(ulong);\n" "char __ovld __cnfn convert_char(ulong);\n" "char __ovld __cnfn convert_char_sat(ulong);\n" "char __ovld __cnfn convert_char_rte(float);\n" "char __ovld __cnfn convert_char_sat_rte(float);\n" "char __ovld __cnfn convert_char_rtz(float);\n" "char __ovld __cnfn convert_char_sat_rtz(float);\n" "char __ovld __cnfn convert_char_rtp(float);\n" "char __ovld __cnfn convert_char_sat_rtp(float);\n" "char __ovld __cnfn convert_char_rtn(float);\n" "char __ovld __cnfn convert_char_sat_rtn(float);\n" "char __ovld __cnfn convert_char(float);\n" "char __ovld __cnfn convert_char_sat(float);\n" "uchar __ovld __cnfn convert_uchar_rte(char);\n" "uchar __ovld __cnfn convert_uchar_sat_rte(char);\n" "uchar __ovld __cnfn convert_uchar_rtz(char);\n" "uchar __ovld __cnfn convert_uchar_sat_rtz(char);\n" "uchar __ovld __cnfn convert_uchar_rtp(char);\n" "uchar __ovld __cnfn convert_uchar_sat_rtp(char);\n" "uchar __ovld __cnfn convert_uchar_rtn(char);\n" "uchar __ovld __cnfn convert_uchar_sat_rtn(char);\n" "uchar __ovld __cnfn convert_uchar(char);\n" "uchar __ovld __cnfn convert_uchar_sat(char);\n" "uchar __ovld __cnfn convert_uchar_rte(uchar);\n" "uchar __ovld __cnfn convert_uchar_sat_rte(uchar);\n" "uchar __ovld __cnfn convert_uchar_rtz(uchar);\n" "uchar __ovld __cnfn convert_uchar_sat_rtz(uchar);\n" "uchar __ovld __cnfn convert_uchar_rtp(uchar);\n" "uchar __ovld __cnfn convert_uchar_sat_rtp(uchar);\n" "uchar __ovld __cnfn convert_uchar_rtn(uchar);\n" "uchar __ovld __cnfn convert_uchar_sat_rtn(uchar);\n" "uchar __ovld __cnfn convert_uchar(uchar);\n" "uchar __ovld __cnfn convert_uchar_sat(uchar);\n" "uchar __ovld __cnfn convert_uchar_rte(short);\n" "uchar __ovld __cnfn convert_uchar_sat_rte(short);\n" "uchar __ovld __cnfn convert_uchar_rtz(short);\n" "uchar __ovld 
__cnfn convert_uchar_sat_rtz(short);\n" "uchar __ovld __cnfn convert_uchar_rtp(short);\n" "uchar __ovld __cnfn convert_uchar_sat_rtp(short);\n" "uchar __ovld __cnfn convert_uchar_rtn(short);\n" "uchar __ovld __cnfn convert_uchar_sat_rtn(short);\n" "uchar __ovld __cnfn convert_uchar(short);\n" "uchar __ovld __cnfn convert_uchar_sat(short);\n" "uchar __ovld __cnfn convert_uchar_rte(ushort);\n" "uchar __ovld __cnfn convert_uchar_sat_rte(ushort);\n" "uchar __ovld __cnfn convert_uchar_rtz(ushort);\n" "uchar __ovld __cnfn convert_uchar_sat_rtz(ushort);\n" "uchar __ovld __cnfn convert_uchar_rtp(ushort);\n" "uchar __ovld __cnfn convert_uchar_sat_rtp(ushort);\n" "uchar __ovld __cnfn convert_uchar_rtn(ushort);\n" "uchar __ovld __cnfn convert_uchar_sat_rtn(ushort);\n" "uchar __ovld __cnfn convert_uchar(ushort);\n" "uchar __ovld __cnfn convert_uchar_sat(ushort);\n" "uchar __ovld __cnfn convert_uchar_rte(int);\n" "uchar __ovld __cnfn convert_uchar_sat_rte(int);\n" "uchar __ovld __cnfn convert_uchar_rtz(int);\n" "uchar __ovld __cnfn convert_uchar_sat_rtz(int);\n" "uchar __ovld __cnfn convert_uchar_rtp(int);\n" "uchar __ovld __cnfn convert_uchar_sat_rtp(int);\n" "uchar __ovld __cnfn convert_uchar_rtn(int);\n" "uchar __ovld __cnfn convert_uchar_sat_rtn(int);\n" "uchar __ovld __cnfn convert_uchar(int);\n" "uchar __ovld __cnfn convert_uchar_sat(int);\n" "uchar __ovld __cnfn convert_uchar_rte(uint);\n" "uchar __ovld __cnfn convert_uchar_sat_rte(uint);\n" "uchar __ovld __cnfn convert_uchar_rtz(uint);\n" "uchar __ovld __cnfn convert_uchar_sat_rtz(uint);\n" "uchar __ovld __cnfn convert_uchar_rtp(uint);\n" "uchar __ovld __cnfn convert_uchar_sat_rtp(uint);\n" "uchar __ovld __cnfn convert_uchar_rtn(uint);\n" "uchar __ovld __cnfn convert_uchar_sat_rtn(uint);\n" "uchar __ovld __cnfn convert_uchar(uint);\n" "uchar __ovld __cnfn convert_uchar_sat(uint);\n" "uchar __ovld __cnfn convert_uchar_rte(long);\n" "uchar __ovld __cnfn convert_uchar_sat_rte(long);\n" "uchar __ovld __cnfn 
convert_uchar_rtz(long);\n" "uchar __ovld __cnfn convert_uchar_sat_rtz(long);\n" "uchar __ovld __cnfn convert_uchar_rtp(long);\n" "uchar __ovld __cnfn convert_uchar_sat_rtp(long);\n" "uchar __ovld __cnfn convert_uchar_rtn(long);\n" "uchar __ovld __cnfn convert_uchar_sat_rtn(long);\n" "uchar __ovld __cnfn convert_uchar(long);\n" "uchar __ovld __cnfn convert_uchar_sat(long);\n" "uchar __ovld __cnfn convert_uchar_rte(ulong);\n" "uchar __ovld __cnfn convert_uchar_sat_rte(ulong);\n" "uchar __ovld __cnfn convert_uchar_rtz(ulong);\n" "uchar __ovld __cnfn convert_uchar_sat_rtz(ulong);\n" "uchar __ovld __cnfn convert_uchar_rtp(ulong);\n" "uchar __ovld __cnfn convert_uchar_sat_rtp(ulong);\n" "uchar __ovld __cnfn convert_uchar_rtn(ulong);\n" "uchar __ovld __cnfn convert_uchar_sat_rtn(ulong);\n" "uchar __ovld __cnfn convert_uchar(ulong);\n" "uchar __ovld __cnfn convert_uchar_sat(ulong);\n" "uchar __ovld __cnfn convert_uchar_rte(float);\n" "uchar __ovld __cnfn convert_uchar_sat_rte(float);\n" "uchar __ovld __cnfn convert_uchar_rtz(float);\n" "uchar __ovld __cnfn convert_uchar_sat_rtz(float);\n" "uchar __ovld __cnfn convert_uchar_rtp(float);\n" "uchar __ovld __cnfn convert_uchar_sat_rtp(float);\n" "uchar __ovld __cnfn convert_uchar_rtn(float);\n" "uchar __ovld __cnfn convert_uchar_sat_rtn(float);\n" "uchar __ovld __cnfn convert_uchar(float);\n" "uchar __ovld __cnfn convert_uchar_sat(float);\n" "\n" "short __ovld __cnfn convert_short_rte(char);\n" "short __ovld __cnfn convert_short_sat_rte(char);\n" "short __ovld __cnfn convert_short_rtz(char);\n" "short __ovld __cnfn convert_short_sat_rtz(char);\n" "short __ovld __cnfn convert_short_rtp(char);\n" "short __ovld __cnfn convert_short_sat_rtp(char);\n" "short __ovld __cnfn convert_short_rtn(char);\n" "short __ovld __cnfn convert_short_sat_rtn(char);\n" "short __ovld __cnfn convert_short(char);\n" "short __ovld __cnfn convert_short_sat(char);\n" "short __ovld __cnfn convert_short_rte(uchar);\n" "short __ovld __cnfn 
convert_short_sat_rte(uchar);\n" "short __ovld __cnfn convert_short_rtz(uchar);\n" "short __ovld __cnfn convert_short_sat_rtz(uchar);\n" "short __ovld __cnfn convert_short_rtp(uchar);\n" "short __ovld __cnfn convert_short_sat_rtp(uchar);\n" "short __ovld __cnfn convert_short_rtn(uchar);\n" "short __ovld __cnfn convert_short_sat_rtn(uchar);\n" "short __ovld __cnfn convert_short(uchar);\n" "short __ovld __cnfn convert_short_sat(uchar);\n" "short __ovld __cnfn convert_short_rte(short);\n" "short __ovld __cnfn convert_short_sat_rte(short);\n" "short __ovld __cnfn convert_short_rtz(short);\n" "short __ovld __cnfn convert_short_sat_rtz(short);\n" "short __ovld __cnfn convert_short_rtp(short);\n" "short __ovld __cnfn convert_short_sat_rtp(short);\n" "short __ovld __cnfn convert_short_rtn(short);\n" "short __ovld __cnfn convert_short_sat_rtn(short);\n" "short __ovld __cnfn convert_short(short);\n" "short __ovld __cnfn convert_short_sat(short);\n" "short __ovld __cnfn convert_short_rte(ushort);\n" "short __ovld __cnfn convert_short_sat_rte(ushort);\n" "short __ovld __cnfn convert_short_rtz(ushort);\n" "short __ovld __cnfn convert_short_sat_rtz(ushort);\n" "short __ovld __cnfn convert_short_rtp(ushort);\n" "short __ovld __cnfn convert_short_sat_rtp(ushort);\n" "short __ovld __cnfn convert_short_rtn(ushort);\n" "short __ovld __cnfn convert_short_sat_rtn(ushort);\n" "short __ovld __cnfn convert_short(ushort);\n" "short __ovld __cnfn convert_short_sat(ushort);\n" "short __ovld __cnfn convert_short_rte(int);\n" "short __ovld __cnfn convert_short_sat_rte(int);\n" "short __ovld __cnfn convert_short_rtz(int);\n" "short __ovld __cnfn convert_short_sat_rtz(int);\n" "short __ovld __cnfn convert_short_rtp(int);\n" "short __ovld __cnfn convert_short_sat_rtp(int);\n" "short __ovld __cnfn convert_short_rtn(int);\n" "short __ovld __cnfn convert_short_sat_rtn(int);\n" "short __ovld __cnfn convert_short(int);\n" "short __ovld __cnfn convert_short_sat(int);\n" "short __ovld __cnfn 
convert_short_rte(uint);\n" "short __ovld __cnfn convert_short_sat_rte(uint);\n" "short __ovld __cnfn convert_short_rtz(uint);\n" "short __ovld __cnfn convert_short_sat_rtz(uint);\n" "short __ovld __cnfn convert_short_rtp(uint);\n" "short __ovld __cnfn convert_short_sat_rtp(uint);\n" "short __ovld __cnfn convert_short_rtn(uint);\n" "short __ovld __cnfn convert_short_sat_rtn(uint);\n" "short __ovld __cnfn convert_short(uint);\n" "short __ovld __cnfn convert_short_sat(uint);\n" "short __ovld __cnfn convert_short_rte(long);\n" "short __ovld __cnfn convert_short_sat_rte(long);\n" "short __ovld __cnfn convert_short_rtz(long);\n" "short __ovld __cnfn convert_short_sat_rtz(long);\n" "short __ovld __cnfn convert_short_rtp(long);\n" "short __ovld __cnfn convert_short_sat_rtp(long);\n" "short __ovld __cnfn convert_short_rtn(long);\n" "short __ovld __cnfn convert_short_sat_rtn(long);\n" "short __ovld __cnfn convert_short(long);\n" "short __ovld __cnfn convert_short_sat(long);\n" "short __ovld __cnfn convert_short_rte(ulong);\n" "short __ovld __cnfn convert_short_sat_rte(ulong);\n" "short __ovld __cnfn convert_short_rtz(ulong);\n" "short __ovld __cnfn convert_short_sat_rtz(ulong);\n" "short __ovld __cnfn convert_short_rtp(ulong);\n" "short __ovld __cnfn convert_short_sat_rtp(ulong);\n" "short __ovld __cnfn convert_short_rtn(ulong);\n" "short __ovld __cnfn convert_short_sat_rtn(ulong);\n" "short __ovld __cnfn convert_short(ulong);\n" "short __ovld __cnfn convert_short_sat(ulong);\n" "short __ovld __cnfn convert_short_rte(float);\n" "short __ovld __cnfn convert_short_sat_rte(float);\n" "short __ovld __cnfn convert_short_rtz(float);\n" "short __ovld __cnfn convert_short_sat_rtz(float);\n" "short __ovld __cnfn convert_short_rtp(float);\n" "short __ovld __cnfn convert_short_sat_rtp(float);\n" "short __ovld __cnfn convert_short_rtn(float);\n" "short __ovld __cnfn convert_short_sat_rtn(float);\n" "short __ovld __cnfn convert_short(float);\n" "short __ovld __cnfn 
convert_short_sat(float);\n" "ushort __ovld __cnfn convert_ushort_rte(char);\n" "ushort __ovld __cnfn convert_ushort_sat_rte(char);\n" "ushort __ovld __cnfn convert_ushort_rtz(char);\n" "ushort __ovld __cnfn convert_ushort_sat_rtz(char);\n" "ushort __ovld __cnfn convert_ushort_rtp(char);\n" "ushort __ovld __cnfn convert_ushort_sat_rtp(char);\n" "ushort __ovld __cnfn convert_ushort_rtn(char);\n" "ushort __ovld __cnfn convert_ushort_sat_rtn(char);\n" "ushort __ovld __cnfn convert_ushort(char);\n" "ushort __ovld __cnfn convert_ushort_sat(char);\n" "ushort __ovld __cnfn convert_ushort_rte(uchar);\n" "ushort __ovld __cnfn convert_ushort_sat_rte(uchar);\n" "ushort __ovld __cnfn convert_ushort_rtz(uchar);\n" "ushort __ovld __cnfn convert_ushort_sat_rtz(uchar);\n" "ushort __ovld __cnfn convert_ushort_rtp(uchar);\n" "ushort __ovld __cnfn convert_ushort_sat_rtp(uchar);\n" "ushort __ovld __cnfn convert_ushort_rtn(uchar);\n" "ushort __ovld __cnfn convert_ushort_sat_rtn(uchar);\n" "ushort __ovld __cnfn convert_ushort(uchar);\n" "ushort __ovld __cnfn convert_ushort_sat(uchar);\n" "ushort __ovld __cnfn convert_ushort_rte(short);\n" "ushort __ovld __cnfn convert_ushort_sat_rte(short);\n" "ushort __ovld __cnfn convert_ushort_rtz(short);\n" "ushort __ovld __cnfn convert_ushort_sat_rtz(short);\n" "ushort __ovld __cnfn convert_ushort_rtp(short);\n" "ushort __ovld __cnfn convert_ushort_sat_rtp(short);\n" "ushort __ovld __cnfn convert_ushort_rtn(short);\n" "ushort __ovld __cnfn convert_ushort_sat_rtn(short);\n" "ushort __ovld __cnfn convert_ushort(short);\n" "ushort __ovld __cnfn convert_ushort_sat(short);\n" "ushort __ovld __cnfn convert_ushort_rte(ushort);\n" "ushort __ovld __cnfn convert_ushort_sat_rte(ushort);\n" "ushort __ovld __cnfn convert_ushort_rtz(ushort);\n" "ushort __ovld __cnfn convert_ushort_sat_rtz(ushort);\n" "ushort __ovld __cnfn convert_ushort_rtp(ushort);\n" "ushort __ovld __cnfn convert_ushort_sat_rtp(ushort);\n" "ushort __ovld __cnfn convert_ushort_rtn(ushort);\n" 
"ushort __ovld __cnfn convert_ushort_sat_rtn(ushort);\n" "ushort __ovld __cnfn convert_ushort(ushort);\n" "ushort __ovld __cnfn convert_ushort_sat(ushort);\n" "ushort __ovld __cnfn convert_ushort_rte(int);\n" "ushort __ovld __cnfn convert_ushort_sat_rte(int);\n" "ushort __ovld __cnfn convert_ushort_rtz(int);\n" "ushort __ovld __cnfn convert_ushort_sat_rtz(int);\n" "ushort __ovld __cnfn convert_ushort_rtp(int);\n" "ushort __ovld __cnfn convert_ushort_sat_rtp(int);\n" "ushort __ovld __cnfn convert_ushort_rtn(int);\n" "ushort __ovld __cnfn convert_ushort_sat_rtn(int);\n" "ushort __ovld __cnfn convert_ushort(int);\n" "ushort __ovld __cnfn convert_ushort_sat(int);\n" "ushort __ovld __cnfn convert_ushort_rte(uint);\n" "ushort __ovld __cnfn convert_ushort_sat_rte(uint);\n" "ushort __ovld __cnfn convert_ushort_rtz(uint);\n" "ushort __ovld __cnfn convert_ushort_sat_rtz(uint);\n" "ushort __ovld __cnfn convert_ushort_rtp(uint);\n" "ushort __ovld __cnfn convert_ushort_sat_rtp(uint);\n" "ushort __ovld __cnfn convert_ushort_rtn(uint);\n" "ushort __ovld __cnfn convert_ushort_sat_rtn(uint);\n" "ushort __ovld __cnfn convert_ushort(uint);\n" "ushort __ovld __cnfn convert_ushort_sat(uint);\n" "ushort __ovld __cnfn convert_ushort_rte(long);\n" "ushort __ovld __cnfn convert_ushort_sat_rte(long);\n" "ushort __ovld __cnfn convert_ushort_rtz(long);\n" "ushort __ovld __cnfn convert_ushort_sat_rtz(long);\n" "ushort __ovld __cnfn convert_ushort_rtp(long);\n" "ushort __ovld __cnfn convert_ushort_sat_rtp(long);\n" "ushort __ovld __cnfn convert_ushort_rtn(long);\n" "ushort __ovld __cnfn convert_ushort_sat_rtn(long);\n" "ushort __ovld __cnfn convert_ushort(long);\n" "ushort __ovld __cnfn convert_ushort_sat(long);\n" "ushort __ovld __cnfn convert_ushort_rte(ulong);\n" "ushort __ovld __cnfn convert_ushort_sat_rte(ulong);\n" "ushort __ovld __cnfn convert_ushort_rtz(ulong);\n" "ushort __ovld __cnfn convert_ushort_sat_rtz(ulong);\n" "ushort __ovld __cnfn convert_ushort_rtp(ulong);\n" "ushort __ovld 
__cnfn convert_ushort_sat_rtp(ulong);\n" "ushort __ovld __cnfn convert_ushort_rtn(ulong);\n" "ushort __ovld __cnfn convert_ushort_sat_rtn(ulong);\n" "ushort __ovld __cnfn convert_ushort(ulong);\n" "ushort __ovld __cnfn convert_ushort_sat(ulong);\n" "ushort __ovld __cnfn convert_ushort_rte(float);\n" "ushort __ovld __cnfn convert_ushort_sat_rte(float);\n" "ushort __ovld __cnfn convert_ushort_rtz(float);\n" "ushort __ovld __cnfn convert_ushort_sat_rtz(float);\n" "ushort __ovld __cnfn convert_ushort_rtp(float);\n" "ushort __ovld __cnfn convert_ushort_sat_rtp(float);\n" "ushort __ovld __cnfn convert_ushort_rtn(float);\n" "ushort __ovld __cnfn convert_ushort_sat_rtn(float);\n" "ushort __ovld __cnfn convert_ushort(float);\n" "ushort __ovld __cnfn convert_ushort_sat(float);\n" "int __ovld __cnfn convert_int_rte(char);\n" "int __ovld __cnfn convert_int_sat_rte(char);\n" "int __ovld __cnfn convert_int_rtz(char);\n" "int __ovld __cnfn convert_int_sat_rtz(char);\n" "int __ovld __cnfn convert_int_rtp(char);\n" "int __ovld __cnfn convert_int_sat_rtp(char);\n" "int __ovld __cnfn convert_int_rtn(char);\n" "int __ovld __cnfn convert_int_sat_rtn(char);\n" "int __ovld __cnfn convert_int(char);\n" "int __ovld __cnfn convert_int_sat(char);\n" "int __ovld __cnfn convert_int_rte(uchar);\n" "int __ovld __cnfn convert_int_sat_rte(uchar);\n" "int __ovld __cnfn convert_int_rtz(uchar);\n" "int __ovld __cnfn convert_int_sat_rtz(uchar);\n" "int __ovld __cnfn convert_int_rtp(uchar);\n" "int __ovld __cnfn convert_int_sat_rtp(uchar);\n" "int __ovld __cnfn convert_int_rtn(uchar);\n" "int __ovld __cnfn convert_int_sat_rtn(uchar);\n" "int __ovld __cnfn convert_int(uchar);\n" "int __ovld __cnfn convert_int_sat(uchar);\n" "int __ovld __cnfn convert_int_rte(short);\n" "int __ovld __cnfn convert_int_sat_rte(short);\n" "int __ovld __cnfn convert_int_rtz(short);\n" "int __ovld __cnfn convert_int_sat_rtz(short);\n" "int __ovld __cnfn convert_int_rtp(short);\n" "int __ovld __cnfn 
convert_int_sat_rtp(short);\n" "int __ovld __cnfn convert_int_rtn(short);\n" "int __ovld __cnfn convert_int_sat_rtn(short);\n" "int __ovld __cnfn convert_int(short);\n" "int __ovld __cnfn convert_int_sat(short);\n" "int __ovld __cnfn convert_int_rte(ushort);\n" "int __ovld __cnfn convert_int_sat_rte(ushort);\n" "int __ovld __cnfn convert_int_rtz(ushort);\n" "int __ovld __cnfn convert_int_sat_rtz(ushort);\n" "int __ovld __cnfn convert_int_rtp(ushort);\n" "int __ovld __cnfn convert_int_sat_rtp(ushort);\n" "int __ovld __cnfn convert_int_rtn(ushort);\n" "int __ovld __cnfn convert_int_sat_rtn(ushort);\n" "int __ovld __cnfn convert_int(ushort);\n" "int __ovld __cnfn convert_int_sat(ushort);\n" "int __ovld __cnfn convert_int_rte(int);\n" "int __ovld __cnfn convert_int_sat_rte(int);\n" "int __ovld __cnfn convert_int_rtz(int);\n" "int __ovld __cnfn convert_int_sat_rtz(int);\n" "int __ovld __cnfn convert_int_rtp(int);\n" "int __ovld __cnfn convert_int_sat_rtp(int);\n" "int __ovld __cnfn convert_int_rtn(int);\n" "int __ovld __cnfn convert_int_sat_rtn(int);\n" "int __ovld __cnfn convert_int(int);\n" "int __ovld __cnfn convert_int_sat(int);\n" "int __ovld __cnfn convert_int_rte(uint);\n" "int __ovld __cnfn convert_int_sat_rte(uint);\n" "int __ovld __cnfn convert_int_rtz(uint);\n" "int __ovld __cnfn convert_int_sat_rtz(uint);\n" "int __ovld __cnfn convert_int_rtp(uint);\n" "int __ovld __cnfn convert_int_sat_rtp(uint);\n" "int __ovld __cnfn convert_int_rtn(uint);\n" "int __ovld __cnfn convert_int_sat_rtn(uint);\n" "int __ovld __cnfn convert_int(uint);\n" "int __ovld __cnfn convert_int_sat(uint);\n" "int __ovld __cnfn convert_int_rte(long);\n" "int __ovld __cnfn convert_int_sat_rte(long);\n" "int __ovld __cnfn convert_int_rtz(long);\n" "int __ovld __cnfn convert_int_sat_rtz(long);\n" "int __ovld __cnfn convert_int_rtp(long);\n" "int __ovld __cnfn convert_int_sat_rtp(long);\n" "int __ovld __cnfn convert_int_rtn(long);\n" "int __ovld __cnfn convert_int_sat_rtn(long);\n" "int __ovld 
__cnfn convert_int(long);\n" "int __ovld __cnfn convert_int_sat(long);\n" "int __ovld __cnfn convert_int_rte(ulong);\n" "int __ovld __cnfn convert_int_sat_rte(ulong);\n" "int __ovld __cnfn convert_int_rtz(ulong);\n" "int __ovld __cnfn convert_int_sat_rtz(ulong);\n" "int __ovld __cnfn convert_int_rtp(ulong);\n" "int __ovld __cnfn convert_int_sat_rtp(ulong);\n" "int __ovld __cnfn convert_int_rtn(ulong);\n" "int __ovld __cnfn convert_int_sat_rtn(ulong);\n" "int __ovld __cnfn convert_int(ulong);\n" "int __ovld __cnfn convert_int_sat(ulong);\n" "int __ovld __cnfn convert_int_rte(float);\n" "int __ovld __cnfn convert_int_sat_rte(float);\n" "int __ovld __cnfn convert_int_rtz(float);\n" "int __ovld __cnfn convert_int_sat_rtz(float);\n" "int __ovld __cnfn convert_int_rtp(float);\n" "int __ovld __cnfn convert_int_sat_rtp(float);\n" "int __ovld __cnfn convert_int_rtn(float);\n" "int __ovld __cnfn convert_int_sat_rtn(float);\n" "int __ovld __cnfn convert_int(float);\n" "int __ovld __cnfn convert_int_sat(float);\n" "uint __ovld __cnfn convert_uint_rte(char);\n" "uint __ovld __cnfn convert_uint_sat_rte(char);\n" "uint __ovld __cnfn convert_uint_rtz(char);\n" "uint __ovld __cnfn convert_uint_sat_rtz(char);\n" "uint __ovld __cnfn convert_uint_rtp(char);\n" "uint __ovld __cnfn convert_uint_sat_rtp(char);\n" "uint __ovld __cnfn convert_uint_rtn(char);\n" "uint __ovld __cnfn convert_uint_sat_rtn(char);\n" "uint __ovld __cnfn convert_uint(char);\n" "uint __ovld __cnfn convert_uint_sat(char);\n" "uint __ovld __cnfn convert_uint_rte(uchar);\n" "uint __ovld __cnfn convert_uint_sat_rte(uchar);\n" "uint __ovld __cnfn convert_uint_rtz(uchar);\n" "uint __ovld __cnfn convert_uint_sat_rtz(uchar);\n" "uint __ovld __cnfn convert_uint_rtp(uchar);\n" "uint __ovld __cnfn convert_uint_sat_rtp(uchar);\n" "uint __ovld __cnfn convert_uint_rtn(uchar);\n" "uint __ovld __cnfn convert_uint_sat_rtn(uchar);\n" "uint __ovld __cnfn convert_uint(uchar);\n" "uint __ovld __cnfn convert_uint_sat(uchar);\n" "uint 
__ovld __cnfn convert_uint_rte(short);\n" "uint __ovld __cnfn convert_uint_sat_rte(short);\n" "uint __ovld __cnfn convert_uint_rtz(short);\n" "uint __ovld __cnfn convert_uint_sat_rtz(short);\n" "uint __ovld __cnfn convert_uint_rtp(short);\n" "uint __ovld __cnfn convert_uint_sat_rtp(short);\n" "uint __ovld __cnfn convert_uint_rtn(short);\n" "uint __ovld __cnfn convert_uint_sat_rtn(short);\n" "uint __ovld __cnfn convert_uint(short);\n" "uint __ovld __cnfn convert_uint_sat(short);\n" "uint __ovld __cnfn convert_uint_rte(ushort);\n" "uint __ovld __cnfn convert_uint_sat_rte(ushort);\n" "uint __ovld __cnfn convert_uint_rtz(ushort);\n" "uint __ovld __cnfn convert_uint_sat_rtz(ushort);\n" "uint __ovld __cnfn convert_uint_rtp(ushort);\n" "uint __ovld __cnfn convert_uint_sat_rtp(ushort);\n" "uint __ovld __cnfn convert_uint_rtn(ushort);\n" "uint __ovld __cnfn convert_uint_sat_rtn(ushort);\n" "uint __ovld __cnfn convert_uint(ushort);\n" "uint __ovld __cnfn convert_uint_sat(ushort);\n" "uint __ovld __cnfn convert_uint_rte(int);\n" "uint __ovld __cnfn convert_uint_sat_rte(int);\n" "uint __ovld __cnfn convert_uint_rtz(int);\n" "uint __ovld __cnfn convert_uint_sat_rtz(int);\n" "uint __ovld __cnfn convert_uint_rtp(int);\n" "uint __ovld __cnfn convert_uint_sat_rtp(int);\n" "uint __ovld __cnfn convert_uint_rtn(int);\n" "uint __ovld __cnfn convert_uint_sat_rtn(int);\n" "uint __ovld __cnfn convert_uint(int);\n" "uint __ovld __cnfn convert_uint_sat(int);\n" "uint __ovld __cnfn convert_uint_rte(uint);\n" "uint __ovld __cnfn convert_uint_sat_rte(uint);\n" "uint __ovld __cnfn convert_uint_rtz(uint);\n" "uint __ovld __cnfn convert_uint_sat_rtz(uint);\n" "uint __ovld __cnfn convert_uint_rtp(uint);\n" "uint __ovld __cnfn convert_uint_sat_rtp(uint);\n" "uint __ovld __cnfn convert_uint_rtn(uint);\n" "uint __ovld __cnfn convert_uint_sat_rtn(uint);\n" "uint __ovld __cnfn convert_uint(uint);\n" "uint __ovld __cnfn convert_uint_sat(uint);\n" "uint __ovld __cnfn convert_uint_rte(long);\n" "uint 
__ovld __cnfn convert_uint_sat_rte(long);\n" "uint __ovld __cnfn convert_uint_rtz(long);\n" "uint __ovld __cnfn convert_uint_sat_rtz(long);\n" "uint __ovld __cnfn convert_uint_rtp(long);\n" "uint __ovld __cnfn convert_uint_sat_rtp(long);\n" "uint __ovld __cnfn convert_uint_rtn(long);\n" "uint __ovld __cnfn convert_uint_sat_rtn(long);\n" "uint __ovld __cnfn convert_uint(long);\n" "uint __ovld __cnfn convert_uint_sat(long);\n" "uint __ovld __cnfn convert_uint_rte(ulong);\n" "uint __ovld __cnfn convert_uint_sat_rte(ulong);\n" "uint __ovld __cnfn convert_uint_rtz(ulong);\n" "uint __ovld __cnfn convert_uint_sat_rtz(ulong);\n" "uint __ovld __cnfn convert_uint_rtp(ulong);\n" "uint __ovld __cnfn convert_uint_sat_rtp(ulong);\n" "uint __ovld __cnfn convert_uint_rtn(ulong);\n" "uint __ovld __cnfn convert_uint_sat_rtn(ulong);\n" "uint __ovld __cnfn convert_uint(ulong);\n" "uint __ovld __cnfn convert_uint_sat(ulong);\n" "uint __ovld __cnfn convert_uint_rte(float);\n" "uint __ovld __cnfn convert_uint_sat_rte(float);\n" "uint __ovld __cnfn convert_uint_rtz(float);\n" "uint __ovld __cnfn convert_uint_sat_rtz(float);\n" "uint __ovld __cnfn convert_uint_rtp(float);\n" "uint __ovld __cnfn convert_uint_sat_rtp(float);\n" "uint __ovld __cnfn convert_uint_rtn(float);\n" "uint __ovld __cnfn convert_uint_sat_rtn(float);\n" "uint __ovld __cnfn convert_uint(float);\n" "uint __ovld __cnfn convert_uint_sat(float);\n" "long __ovld __cnfn convert_long_rte(char);\n" "long __ovld __cnfn convert_long_sat_rte(char);\n" "long __ovld __cnfn convert_long_rtz(char);\n" "long __ovld __cnfn convert_long_sat_rtz(char);\n" "long __ovld __cnfn convert_long_rtp(char);\n" "long __ovld __cnfn convert_long_sat_rtp(char);\n" "long __ovld __cnfn convert_long_rtn(char);\n" "long __ovld __cnfn convert_long_sat_rtn(char);\n" "long __ovld __cnfn convert_long(char);\n" "long __ovld __cnfn convert_long_sat(char);\n" "long __ovld __cnfn convert_long_rte(uchar);\n" "long __ovld __cnfn convert_long_sat_rte(uchar);\n" 
"long __ovld __cnfn convert_long_rtz(uchar);\n" "long __ovld __cnfn convert_long_sat_rtz(uchar);\n" "long __ovld __cnfn convert_long_rtp(uchar);\n" "long __ovld __cnfn convert_long_sat_rtp(uchar);\n" "long __ovld __cnfn convert_long_rtn(uchar);\n" "long __ovld __cnfn convert_long_sat_rtn(uchar);\n" "long __ovld __cnfn convert_long(uchar);\n" "long __ovld __cnfn convert_long_sat(uchar);\n" "long __ovld __cnfn convert_long_rte(short);\n" "long __ovld __cnfn convert_long_sat_rte(short);\n" "long __ovld __cnfn convert_long_rtz(short);\n" "long __ovld __cnfn convert_long_sat_rtz(short);\n" "long __ovld __cnfn convert_long_rtp(short);\n" "long __ovld __cnfn convert_long_sat_rtp(short);\n" "long __ovld __cnfn convert_long_rtn(short);\n" "long __ovld __cnfn convert_long_sat_rtn(short);\n" "long __ovld __cnfn convert_long(short);\n" "long __ovld __cnfn convert_long_sat(short);\n" "long __ovld __cnfn convert_long_rte(ushort);\n" "long __ovld __cnfn convert_long_sat_rte(ushort);\n" "long __ovld __cnfn convert_long_rtz(ushort);\n" "long __ovld __cnfn convert_long_sat_rtz(ushort);\n" "long __ovld __cnfn convert_long_rtp(ushort);\n" "long __ovld __cnfn convert_long_sat_rtp(ushort);\n" "long __ovld __cnfn convert_long_rtn(ushort);\n" "long __ovld __cnfn convert_long_sat_rtn(ushort);\n" "long __ovld __cnfn convert_long(ushort);\n" "long __ovld __cnfn convert_long_sat(ushort);\n" "long __ovld __cnfn convert_long_rte(int);\n" "long __ovld __cnfn convert_long_sat_rte(int);\n" "long __ovld __cnfn convert_long_rtz(int);\n" "long __ovld __cnfn convert_long_sat_rtz(int);\n" "long __ovld __cnfn convert_long_rtp(int);\n" "long __ovld __cnfn convert_long_sat_rtp(int);\n" "long __ovld __cnfn convert_long_rtn(int);\n" "long __ovld __cnfn convert_long_sat_rtn(int);\n" "long __ovld __cnfn convert_long(int);\n" "long __ovld __cnfn convert_long_sat(int);\n" "long __ovld __cnfn convert_long_rte(uint);\n" "long __ovld __cnfn convert_long_sat_rte(uint);\n" "long __ovld __cnfn 
convert_long_rtz(uint);\n" "long __ovld __cnfn convert_long_sat_rtz(uint);\n" "long __ovld __cnfn convert_long_rtp(uint);\n" "long __ovld __cnfn convert_long_sat_rtp(uint);\n" "long __ovld __cnfn convert_long_rtn(uint);\n" "long __ovld __cnfn convert_long_sat_rtn(uint);\n" "long __ovld __cnfn convert_long(uint);\n" "long __ovld __cnfn convert_long_sat(uint);\n" "long __ovld __cnfn convert_long_rte(long);\n" "long __ovld __cnfn convert_long_sat_rte(long);\n" "long __ovld __cnfn convert_long_rtz(long);\n" "long __ovld __cnfn convert_long_sat_rtz(long);\n" "long __ovld __cnfn convert_long_rtp(long);\n" "long __ovld __cnfn convert_long_sat_rtp(long);\n" "long __ovld __cnfn convert_long_rtn(long);\n" "long __ovld __cnfn convert_long_sat_rtn(long);\n" "long __ovld __cnfn convert_long(long);\n" "long __ovld __cnfn convert_long_sat(long);\n" "long __ovld __cnfn convert_long_rte(ulong);\n" "long __ovld __cnfn convert_long_sat_rte(ulong);\n" "long __ovld __cnfn convert_long_rtz(ulong);\n" "long __ovld __cnfn convert_long_sat_rtz(ulong);\n" "long __ovld __cnfn convert_long_rtp(ulong);\n" "long __ovld __cnfn convert_long_sat_rtp(ulong);\n" "long __ovld __cnfn convert_long_rtn(ulong);\n" "long __ovld __cnfn convert_long_sat_rtn(ulong);\n" "long __ovld __cnfn convert_long(ulong);\n" "long __ovld __cnfn convert_long_sat(ulong);\n" "long __ovld __cnfn convert_long_rte(float);\n" "long __ovld __cnfn convert_long_sat_rte(float);\n" "long __ovld __cnfn convert_long_rtz(float);\n" "long __ovld __cnfn convert_long_sat_rtz(float);\n" "long __ovld __cnfn convert_long_rtp(float);\n" "long __ovld __cnfn convert_long_sat_rtp(float);\n" "long __ovld __cnfn convert_long_rtn(float);\n" "long __ovld __cnfn convert_long_sat_rtn(float);\n" "long __ovld __cnfn convert_long(float);\n" "long __ovld __cnfn convert_long_sat(float);\n" "ulong __ovld __cnfn convert_ulong_rte(char);\n" "ulong __ovld __cnfn convert_ulong_sat_rte(char);\n" "ulong __ovld __cnfn convert_ulong_rtz(char);\n" "ulong __ovld 
__cnfn convert_ulong_sat_rtz(char);\n" "ulong __ovld __cnfn convert_ulong_rtp(char);\n" "ulong __ovld __cnfn convert_ulong_sat_rtp(char);\n" "ulong __ovld __cnfn convert_ulong_rtn(char);\n" "ulong __ovld __cnfn convert_ulong_sat_rtn(char);\n" "ulong __ovld __cnfn convert_ulong(char);\n" "ulong __ovld __cnfn convert_ulong_sat(char);\n" "ulong __ovld __cnfn convert_ulong_rte(uchar);\n" "ulong __ovld __cnfn convert_ulong_sat_rte(uchar);\n" "ulong __ovld __cnfn convert_ulong_rtz(uchar);\n" "ulong __ovld __cnfn convert_ulong_sat_rtz(uchar);\n" "ulong __ovld __cnfn convert_ulong_rtp(uchar);\n" "ulong __ovld __cnfn convert_ulong_sat_rtp(uchar);\n" "ulong __ovld __cnfn convert_ulong_rtn(uchar);\n" "ulong __ovld __cnfn convert_ulong_sat_rtn(uchar);\n" "ulong __ovld __cnfn convert_ulong(uchar);\n" "ulong __ovld __cnfn convert_ulong_sat(uchar);\n" "ulong __ovld __cnfn convert_ulong_rte(short);\n" "ulong __ovld __cnfn convert_ulong_sat_rte(short);\n" "ulong __ovld __cnfn convert_ulong_rtz(short);\n" "ulong __ovld __cnfn convert_ulong_sat_rtz(short);\n" "ulong __ovld __cnfn convert_ulong_rtp(short);\n" "ulong __ovld __cnfn convert_ulong_sat_rtp(short);\n" "ulong __ovld __cnfn convert_ulong_rtn(short);\n" "ulong __ovld __cnfn convert_ulong_sat_rtn(short);\n" "ulong __ovld __cnfn convert_ulong(short);\n" "ulong __ovld __cnfn convert_ulong_sat(short);\n" "ulong __ovld __cnfn convert_ulong_rte(ushort);\n" "ulong __ovld __cnfn convert_ulong_sat_rte(ushort);\n" "ulong __ovld __cnfn convert_ulong_rtz(ushort);\n" "ulong __ovld __cnfn convert_ulong_sat_rtz(ushort);\n" "ulong __ovld __cnfn convert_ulong_rtp(ushort);\n" "ulong __ovld __cnfn convert_ulong_sat_rtp(ushort);\n" "ulong __ovld __cnfn convert_ulong_rtn(ushort);\n" "ulong __ovld __cnfn convert_ulong_sat_rtn(ushort);\n" "ulong __ovld __cnfn convert_ulong(ushort);\n" "ulong __ovld __cnfn convert_ulong_sat(ushort);\n" "ulong __ovld __cnfn convert_ulong_rte(int);\n" "ulong __ovld __cnfn convert_ulong_sat_rte(int);\n" "ulong __ovld 
__cnfn convert_ulong_rtz(int);\n" "ulong __ovld __cnfn convert_ulong_sat_rtz(int);\n" "ulong __ovld __cnfn convert_ulong_rtp(int);\n" "ulong __ovld __cnfn convert_ulong_sat_rtp(int);\n" "ulong __ovld __cnfn convert_ulong_rtn(int);\n" "ulong __ovld __cnfn convert_ulong_sat_rtn(int);\n" "ulong __ovld __cnfn convert_ulong(int);\n" "ulong __ovld __cnfn convert_ulong_sat(int);\n" "ulong __ovld __cnfn convert_ulong_rte(uint);\n" "ulong __ovld __cnfn convert_ulong_sat_rte(uint);\n" "ulong __ovld __cnfn convert_ulong_rtz(uint);\n" "ulong __ovld __cnfn convert_ulong_sat_rtz(uint);\n" "ulong __ovld __cnfn convert_ulong_rtp(uint);\n" "ulong __ovld __cnfn convert_ulong_sat_rtp(uint);\n" "ulong __ovld __cnfn convert_ulong_rtn(uint);\n" "ulong __ovld __cnfn convert_ulong_sat_rtn(uint);\n" "ulong __ovld __cnfn convert_ulong(uint);\n" "ulong __ovld __cnfn convert_ulong_sat(uint);\n" "ulong __ovld __cnfn convert_ulong_rte(long);\n" "ulong __ovld __cnfn convert_ulong_sat_rte(long);\n" "ulong __ovld __cnfn convert_ulong_rtz(long);\n" "ulong __ovld __cnfn convert_ulong_sat_rtz(long);\n" "ulong __ovld __cnfn convert_ulong_rtp(long);\n" "ulong __ovld __cnfn convert_ulong_sat_rtp(long);\n" "ulong __ovld __cnfn convert_ulong_rtn(long);\n" "ulong __ovld __cnfn convert_ulong_sat_rtn(long);\n" "ulong __ovld __cnfn convert_ulong(long);\n" "ulong __ovld __cnfn convert_ulong_sat(long);\n" "ulong __ovld __cnfn convert_ulong_rte(ulong);\n" "ulong __ovld __cnfn convert_ulong_sat_rte(ulong);\n" "ulong __ovld __cnfn convert_ulong_rtz(ulong);\n" "ulong __ovld __cnfn convert_ulong_sat_rtz(ulong);\n" "ulong __ovld __cnfn convert_ulong_rtp(ulong);\n" "ulong __ovld __cnfn convert_ulong_sat_rtp(ulong);\n" "ulong __ovld __cnfn convert_ulong_rtn(ulong);\n" "ulong __ovld __cnfn convert_ulong_sat_rtn(ulong);\n" "ulong __ovld __cnfn convert_ulong(ulong);\n" "ulong __ovld __cnfn convert_ulong_sat(ulong);\n" "ulong __ovld __cnfn convert_ulong_rte(float);\n" "ulong __ovld __cnfn convert_ulong_sat_rte(float);\n" 
"ulong __ovld __cnfn convert_ulong_rtz(float);\n" "ulong __ovld __cnfn convert_ulong_sat_rtz(float);\n" "ulong __ovld __cnfn convert_ulong_rtp(float);\n" "ulong __ovld __cnfn convert_ulong_sat_rtp(float);\n" "ulong __ovld __cnfn convert_ulong_rtn(float);\n" "ulong __ovld __cnfn convert_ulong_sat_rtn(float);\n" "ulong __ovld __cnfn convert_ulong(float);\n" "ulong __ovld __cnfn convert_ulong_sat(float);\n" "float __ovld __cnfn convert_float_rte(char);\n" "float __ovld __cnfn convert_float_rtz(char);\n" "float __ovld __cnfn convert_float_rtp(char);\n" "float __ovld __cnfn convert_float_rtn(char);\n" "float __ovld __cnfn convert_float(char);\n" "float __ovld __cnfn convert_float_rte(uchar);\n" "float __ovld __cnfn convert_float_rtz(uchar);\n" "float __ovld __cnfn convert_float_rtp(uchar);\n" "float __ovld __cnfn convert_float_rtn(uchar);\n" "float __ovld __cnfn convert_float(uchar);\n" "float __ovld __cnfn convert_float_rte(short);\n" "float __ovld __cnfn convert_float_rtz(short);\n" "float __ovld __cnfn convert_float_rtp(short);\n" "float __ovld __cnfn convert_float_rtn(short);\n" "float __ovld __cnfn convert_float(short);\n" "float __ovld __cnfn convert_float_rte(ushort);\n" "float __ovld __cnfn convert_float_rtz(ushort);\n" "float __ovld __cnfn convert_float_rtp(ushort);\n" "float __ovld __cnfn convert_float_rtn(ushort);\n" "float __ovld __cnfn convert_float(ushort);\n" "float __ovld __cnfn convert_float_rte(int);\n" "float __ovld __cnfn convert_float_rtz(int);\n" "float __ovld __cnfn convert_float_rtp(int);\n" "float __ovld __cnfn convert_float_rtn(int);\n" "float __ovld __cnfn convert_float(int);\n" "float __ovld __cnfn convert_float_rte(uint);\n" "float __ovld __cnfn convert_float_rtz(uint);\n" "float __ovld __cnfn convert_float_rtp(uint);\n" "float __ovld __cnfn convert_float_rtn(uint);\n" "float __ovld __cnfn convert_float(uint);\n" "float __ovld __cnfn convert_float_rte(long);\n" "float __ovld __cnfn convert_float_rtz(long);\n" "float __ovld __cnfn 
convert_float_rtp(long);\n" "float __ovld __cnfn convert_float_rtn(long);\n" "float __ovld __cnfn convert_float(long);\n" "float __ovld __cnfn convert_float_rte(ulong);\n" "float __ovld __cnfn convert_float_rtz(ulong);\n" "float __ovld __cnfn convert_float_rtp(ulong);\n" "float __ovld __cnfn convert_float_rtn(ulong);\n" "float __ovld __cnfn convert_float(ulong);\n" "float __ovld __cnfn convert_float_rte(float);\n" "float __ovld __cnfn convert_float_rtz(float);\n" "float __ovld __cnfn convert_float_rtp(float);\n" "float __ovld __cnfn convert_float_rtn(float);\n" "float __ovld __cnfn convert_float(float);\n" "char2 __ovld __cnfn convert_char2_rte(char2);\n" "char2 __ovld __cnfn convert_char2_sat_rte(char2);\n" "char2 __ovld __cnfn convert_char2_rtz(char2);\n" "char2 __ovld __cnfn convert_char2_sat_rtz(char2);\n" "char2 __ovld __cnfn convert_char2_rtp(char2);\n" "char2 __ovld __cnfn convert_char2_sat_rtp(char2);\n" "char2 __ovld __cnfn convert_char2_rtn(char2);\n" "char2 __ovld __cnfn convert_char2_sat_rtn(char2);\n" "char2 __ovld __cnfn convert_char2(char2);\n" "char2 __ovld __cnfn convert_char2_sat(char2);\n" "char2 __ovld __cnfn convert_char2_rte(uchar2);\n" "char2 __ovld __cnfn convert_char2_sat_rte(uchar2);\n" "char2 __ovld __cnfn convert_char2_rtz(uchar2);\n" "char2 __ovld __cnfn convert_char2_sat_rtz(uchar2);\n" "char2 __ovld __cnfn convert_char2_rtp(uchar2);\n" "char2 __ovld __cnfn convert_char2_sat_rtp(uchar2);\n" "char2 __ovld __cnfn convert_char2_rtn(uchar2);\n" "char2 __ovld __cnfn convert_char2_sat_rtn(uchar2);\n" "char2 __ovld __cnfn convert_char2(uchar2);\n" "char2 __ovld __cnfn convert_char2_sat(uchar2);\n" "char2 __ovld __cnfn convert_char2_rte(short2);\n" "char2 __ovld __cnfn convert_char2_sat_rte(short2);\n" "char2 __ovld __cnfn convert_char2_rtz(short2);\n" "char2 __ovld __cnfn convert_char2_sat_rtz(short2);\n" "char2 __ovld __cnfn convert_char2_rtp(short2);\n" "char2 __ovld __cnfn convert_char2_sat_rtp(short2);\n" "char2 __ovld __cnfn 
convert_char2_rtn(short2);\n" "char2 __ovld __cnfn convert_char2_sat_rtn(short2);\n" "char2 __ovld __cnfn convert_char2(short2);\n" "char2 __ovld __cnfn convert_char2_sat(short2);\n" "char2 __ovld __cnfn convert_char2_rte(ushort2);\n" "char2 __ovld __cnfn convert_char2_sat_rte(ushort2);\n" "char2 __ovld __cnfn convert_char2_rtz(ushort2);\n" "char2 __ovld __cnfn convert_char2_sat_rtz(ushort2);\n" "char2 __ovld __cnfn convert_char2_rtp(ushort2);\n" "char2 __ovld __cnfn convert_char2_sat_rtp(ushort2);\n" "char2 __ovld __cnfn convert_char2_rtn(ushort2);\n" "char2 __ovld __cnfn convert_char2_sat_rtn(ushort2);\n" "char2 __ovld __cnfn convert_char2(ushort2);\n" "char2 __ovld __cnfn convert_char2_sat(ushort2);\n" "char2 __ovld __cnfn convert_char2_rte(int2);\n" "char2 __ovld __cnfn convert_char2_sat_rte(int2);\n" "char2 __ovld __cnfn convert_char2_rtz(int2);\n" "char2 __ovld __cnfn convert_char2_sat_rtz(int2);\n" "char2 __ovld __cnfn convert_char2_rtp(int2);\n" "char2 __ovld __cnfn convert_char2_sat_rtp(int2);\n" "char2 __ovld __cnfn convert_char2_rtn(int2);\n" "char2 __ovld __cnfn convert_char2_sat_rtn(int2);\n" "char2 __ovld __cnfn convert_char2(int2);\n" "char2 __ovld __cnfn convert_char2_sat(int2);\n" "char2 __ovld __cnfn convert_char2_rte(uint2);\n" "char2 __ovld __cnfn convert_char2_sat_rte(uint2);\n" "char2 __ovld __cnfn convert_char2_rtz(uint2);\n" "char2 __ovld __cnfn convert_char2_sat_rtz(uint2);\n" "char2 __ovld __cnfn convert_char2_rtp(uint2);\n" "char2 __ovld __cnfn convert_char2_sat_rtp(uint2);\n" "char2 __ovld __cnfn convert_char2_rtn(uint2);\n" "char2 __ovld __cnfn convert_char2_sat_rtn(uint2);\n" "char2 __ovld __cnfn convert_char2(uint2);\n" "char2 __ovld __cnfn convert_char2_sat(uint2);\n" "char2 __ovld __cnfn convert_char2_rte(long2);\n" "char2 __ovld __cnfn convert_char2_sat_rte(long2);\n" "char2 __ovld __cnfn convert_char2_rtz(long2);\n" "char2 __ovld __cnfn convert_char2_sat_rtz(long2);\n" "char2 __ovld __cnfn convert_char2_rtp(long2);\n" "char2 
__ovld __cnfn convert_char2_sat_rtp(long2);\n" "char2 __ovld __cnfn convert_char2_rtn(long2);\n" "char2 __ovld __cnfn convert_char2_sat_rtn(long2);\n" "char2 __ovld __cnfn convert_char2(long2);\n" "char2 __ovld __cnfn convert_char2_sat(long2);\n" "char2 __ovld __cnfn convert_char2_rte(ulong2);\n" "char2 __ovld __cnfn convert_char2_sat_rte(ulong2);\n" "char2 __ovld __cnfn convert_char2_rtz(ulong2);\n" "char2 __ovld __cnfn convert_char2_sat_rtz(ulong2);\n" "char2 __ovld __cnfn convert_char2_rtp(ulong2);\n" "char2 __ovld __cnfn convert_char2_sat_rtp(ulong2);\n" "char2 __ovld __cnfn convert_char2_rtn(ulong2);\n" "char2 __ovld __cnfn convert_char2_sat_rtn(ulong2);\n" "char2 __ovld __cnfn convert_char2(ulong2);\n" "char2 __ovld __cnfn convert_char2_sat(ulong2);\n" "char2 __ovld __cnfn convert_char2_rte(float2);\n" "char2 __ovld __cnfn convert_char2_sat_rte(float2);\n" "char2 __ovld __cnfn convert_char2_rtz(float2);\n" "char2 __ovld __cnfn convert_char2_sat_rtz(float2);\n" "char2 __ovld __cnfn convert_char2_rtp(float2);\n" "char2 __ovld __cnfn convert_char2_sat_rtp(float2);\n" "char2 __ovld __cnfn convert_char2_rtn(float2);\n" "char2 __ovld __cnfn convert_char2_sat_rtn(float2);\n" "char2 __ovld __cnfn convert_char2(float2);\n" "char2 __ovld __cnfn convert_char2_sat(float2);\n" "uchar2 __ovld __cnfn convert_uchar2_rte(char2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rte(char2);\n" "uchar2 __ovld __cnfn convert_uchar2_rtz(char2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rtz(char2);\n" "uchar2 __ovld __cnfn convert_uchar2_rtp(char2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rtp(char2);\n" "uchar2 __ovld __cnfn convert_uchar2_rtn(char2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rtn(char2);\n" "uchar2 __ovld __cnfn convert_uchar2(char2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat(char2);\n" "uchar2 __ovld __cnfn convert_uchar2_rte(uchar2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rte(uchar2);\n" "uchar2 __ovld __cnfn convert_uchar2_rtz(uchar2);\n" "uchar2 
__ovld __cnfn convert_uchar2_sat_rtz(uchar2);\n" "uchar2 __ovld __cnfn convert_uchar2_rtp(uchar2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rtp(uchar2);\n" "uchar2 __ovld __cnfn convert_uchar2_rtn(uchar2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rtn(uchar2);\n" "uchar2 __ovld __cnfn convert_uchar2(uchar2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat(uchar2);\n" "uchar2 __ovld __cnfn convert_uchar2_rte(short2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rte(short2);\n" "uchar2 __ovld __cnfn convert_uchar2_rtz(short2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rtz(short2);\n" "uchar2 __ovld __cnfn convert_uchar2_rtp(short2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rtp(short2);\n" "uchar2 __ovld __cnfn convert_uchar2_rtn(short2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rtn(short2);\n" "uchar2 __ovld __cnfn convert_uchar2(short2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat(short2);\n" "uchar2 __ovld __cnfn convert_uchar2_rte(ushort2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rte(ushort2);\n" "uchar2 __ovld __cnfn convert_uchar2_rtz(ushort2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rtz(ushort2);\n" "uchar2 __ovld __cnfn convert_uchar2_rtp(ushort2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rtp(ushort2);\n" "uchar2 __ovld __cnfn convert_uchar2_rtn(ushort2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rtn(ushort2);\n" "uchar2 __ovld __cnfn convert_uchar2(ushort2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat(ushort2);\n" "uchar2 __ovld __cnfn convert_uchar2_rte(int2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rte(int2);\n" "uchar2 __ovld __cnfn convert_uchar2_rtz(int2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rtz(int2);\n" "uchar2 __ovld __cnfn convert_uchar2_rtp(int2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rtp(int2);\n" "uchar2 __ovld __cnfn convert_uchar2_rtn(int2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rtn(int2);\n" "uchar2 __ovld __cnfn convert_uchar2(int2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat(int2);\n" "uchar2 
__ovld __cnfn convert_uchar2_rte(uint2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rte(uint2);\n" "uchar2 __ovld __cnfn convert_uchar2_rtz(uint2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rtz(uint2);\n" "uchar2 __ovld __cnfn convert_uchar2_rtp(uint2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rtp(uint2);\n" "uchar2 __ovld __cnfn convert_uchar2_rtn(uint2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rtn(uint2);\n" "uchar2 __ovld __cnfn convert_uchar2(uint2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat(uint2);\n" "uchar2 __ovld __cnfn convert_uchar2_rte(long2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rte(long2);\n" "uchar2 __ovld __cnfn convert_uchar2_rtz(long2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rtz(long2);\n" "uchar2 __ovld __cnfn convert_uchar2_rtp(long2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rtp(long2);\n" "uchar2 __ovld __cnfn convert_uchar2_rtn(long2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rtn(long2);\n" "uchar2 __ovld __cnfn convert_uchar2(long2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat(long2);\n" "uchar2 __ovld __cnfn convert_uchar2_rte(ulong2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rte(ulong2);\n" "uchar2 __ovld __cnfn convert_uchar2_rtz(ulong2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rtz(ulong2);\n" "uchar2 __ovld __cnfn convert_uchar2_rtp(ulong2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rtp(ulong2);\n" "uchar2 __ovld __cnfn convert_uchar2_rtn(ulong2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rtn(ulong2);\n" "uchar2 __ovld __cnfn convert_uchar2(ulong2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat(ulong2);\n" "uchar2 __ovld __cnfn convert_uchar2_rte(float2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rte(float2);\n" "uchar2 __ovld __cnfn convert_uchar2_rtz(float2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rtz(float2);\n" "uchar2 __ovld __cnfn convert_uchar2_rtp(float2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rtp(float2);\n" "uchar2 __ovld __cnfn convert_uchar2_rtn(float2);\n" "uchar2 __ovld 
__cnfn convert_uchar2_sat_rtn(float2);\n" "uchar2 __ovld __cnfn convert_uchar2(float2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat(float2);\n" "short2 __ovld __cnfn convert_short2_rte(char2);\n" "short2 __ovld __cnfn convert_short2_sat_rte(char2);\n" "short2 __ovld __cnfn convert_short2_rtz(char2);\n" "short2 __ovld __cnfn convert_short2_sat_rtz(char2);\n" "short2 __ovld __cnfn convert_short2_rtp(char2);\n" "short2 __ovld __cnfn convert_short2_sat_rtp(char2);\n" "short2 __ovld __cnfn convert_short2_rtn(char2);\n" "short2 __ovld __cnfn convert_short2_sat_rtn(char2);\n" "short2 __ovld __cnfn convert_short2(char2);\n" "short2 __ovld __cnfn convert_short2_sat(char2);\n" "short2 __ovld __cnfn convert_short2_rte(uchar2);\n" "short2 __ovld __cnfn convert_short2_sat_rte(uchar2);\n" "short2 __ovld __cnfn convert_short2_rtz(uchar2);\n" "short2 __ovld __cnfn convert_short2_sat_rtz(uchar2);\n" "short2 __ovld __cnfn convert_short2_rtp(uchar2);\n" "short2 __ovld __cnfn convert_short2_sat_rtp(uchar2);\n" "short2 __ovld __cnfn convert_short2_rtn(uchar2);\n" "short2 __ovld __cnfn convert_short2_sat_rtn(uchar2);\n" "short2 __ovld __cnfn convert_short2(uchar2);\n" "short2 __ovld __cnfn convert_short2_sat(uchar2);\n" "short2 __ovld __cnfn convert_short2_rte(short2);\n" "short2 __ovld __cnfn convert_short2_sat_rte(short2);\n" "short2 __ovld __cnfn convert_short2_rtz(short2);\n" "short2 __ovld __cnfn convert_short2_sat_rtz(short2);\n" "short2 __ovld __cnfn convert_short2_rtp(short2);\n" "short2 __ovld __cnfn convert_short2_sat_rtp(short2);\n" "short2 __ovld __cnfn convert_short2_rtn(short2);\n" "short2 __ovld __cnfn convert_short2_sat_rtn(short2);\n" "short2 __ovld __cnfn convert_short2(short2);\n" "short2 __ovld __cnfn convert_short2_sat(short2);\n" "short2 __ovld __cnfn convert_short2_rte(ushort2);\n" "short2 __ovld __cnfn convert_short2_sat_rte(ushort2);\n" "short2 __ovld __cnfn convert_short2_rtz(ushort2);\n" "short2 __ovld __cnfn convert_short2_sat_rtz(ushort2);\n" "short2 __ovld 
__cnfn convert_short2_rtp(ushort2);\n" "short2 __ovld __cnfn convert_short2_sat_rtp(ushort2);\n" "short2 __ovld __cnfn convert_short2_rtn(ushort2);\n" "short2 __ovld __cnfn convert_short2_sat_rtn(ushort2);\n" "short2 __ovld __cnfn convert_short2(ushort2);\n" "short2 __ovld __cnfn convert_short2_sat(ushort2);\n" "short2 __ovld __cnfn convert_short2_rte(int2);\n" "short2 __ovld __cnfn convert_short2_sat_rte(int2);\n" "short2 __ovld __cnfn convert_short2_rtz(int2);\n" "short2 __ovld __cnfn convert_short2_sat_rtz(int2);\n" "short2 __ovld __cnfn convert_short2_rtp(int2);\n" "short2 __ovld __cnfn convert_short2_sat_rtp(int2);\n" "short2 __ovld __cnfn convert_short2_rtn(int2);\n" "short2 __ovld __cnfn convert_short2_sat_rtn(int2);\n" "short2 __ovld __cnfn convert_short2(int2);\n" "short2 __ovld __cnfn convert_short2_sat(int2);\n" "short2 __ovld __cnfn convert_short2_rte(uint2);\n" "short2 __ovld __cnfn convert_short2_sat_rte(uint2);\n" "short2 __ovld __cnfn convert_short2_rtz(uint2);\n" "short2 __ovld __cnfn convert_short2_sat_rtz(uint2);\n" "short2 __ovld __cnfn convert_short2_rtp(uint2);\n" "short2 __ovld __cnfn convert_short2_sat_rtp(uint2);\n" "short2 __ovld __cnfn convert_short2_rtn(uint2);\n" "short2 __ovld __cnfn convert_short2_sat_rtn(uint2);\n" "short2 __ovld __cnfn convert_short2(uint2);\n" "short2 __ovld __cnfn convert_short2_sat(uint2);\n" "short2 __ovld __cnfn convert_short2_rte(long2);\n" "short2 __ovld __cnfn convert_short2_sat_rte(long2);\n" "short2 __ovld __cnfn convert_short2_rtz(long2);\n" "short2 __ovld __cnfn convert_short2_sat_rtz(long2);\n" "short2 __ovld __cnfn convert_short2_rtp(long2);\n" "short2 __ovld __cnfn convert_short2_sat_rtp(long2);\n" "short2 __ovld __cnfn convert_short2_rtn(long2);\n" "short2 __ovld __cnfn convert_short2_sat_rtn(long2);\n" "short2 __ovld __cnfn convert_short2(long2);\n" "short2 __ovld __cnfn convert_short2_sat(long2);\n" "short2 __ovld __cnfn convert_short2_rte(ulong2);\n" "short2 __ovld __cnfn 
convert_short2_sat_rte(ulong2);\n" "short2 __ovld __cnfn convert_short2_rtz(ulong2);\n" "short2 __ovld __cnfn convert_short2_sat_rtz(ulong2);\n" "short2 __ovld __cnfn convert_short2_rtp(ulong2);\n" "short2 __ovld __cnfn convert_short2_sat_rtp(ulong2);\n" "short2 __ovld __cnfn convert_short2_rtn(ulong2);\n" "short2 __ovld __cnfn convert_short2_sat_rtn(ulong2);\n" "short2 __ovld __cnfn convert_short2(ulong2);\n" "short2 __ovld __cnfn convert_short2_sat(ulong2);\n" "short2 __ovld __cnfn convert_short2_rte(float2);\n" "short2 __ovld __cnfn convert_short2_sat_rte(float2);\n" "short2 __ovld __cnfn convert_short2_rtz(float2);\n" "short2 __ovld __cnfn convert_short2_sat_rtz(float2);\n" "short2 __ovld __cnfn convert_short2_rtp(float2);\n" "short2 __ovld __cnfn convert_short2_sat_rtp(float2);\n" "short2 __ovld __cnfn convert_short2_rtn(float2);\n" "short2 __ovld __cnfn convert_short2_sat_rtn(float2);\n" "short2 __ovld __cnfn convert_short2(float2);\n" "short2 __ovld __cnfn convert_short2_sat(float2);\n" "ushort2 __ovld __cnfn convert_ushort2_rte(char2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat_rte(char2);\n" "ushort2 __ovld __cnfn convert_ushort2_rtz(char2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat_rtz(char2);\n" "ushort2 __ovld __cnfn convert_ushort2_rtp(char2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat_rtp(char2);\n" "ushort2 __ovld __cnfn convert_ushort2_rtn(char2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat_rtn(char2);\n" "ushort2 __ovld __cnfn convert_ushort2(char2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat(char2);\n" "ushort2 __ovld __cnfn convert_ushort2_rte(uchar2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat_rte(uchar2);\n" "ushort2 __ovld __cnfn convert_ushort2_rtz(uchar2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat_rtz(uchar2);\n" "ushort2 __ovld __cnfn convert_ushort2_rtp(uchar2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat_rtp(uchar2);\n" "ushort2 __ovld __cnfn convert_ushort2_rtn(uchar2);\n" "ushort2 __ovld __cnfn 
convert_ushort2_sat_rtn(uchar2);\n" "ushort2 __ovld __cnfn convert_ushort2(uchar2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat(uchar2);\n" "ushort2 __ovld __cnfn convert_ushort2_rte(short2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat_rte(short2);\n" "ushort2 __ovld __cnfn convert_ushort2_rtz(short2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat_rtz(short2);\n" "ushort2 __ovld __cnfn convert_ushort2_rtp(short2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat_rtp(short2);\n" "ushort2 __ovld __cnfn convert_ushort2_rtn(short2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat_rtn(short2);\n" "ushort2 __ovld __cnfn convert_ushort2(short2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat(short2);\n" "ushort2 __ovld __cnfn convert_ushort2_rte(ushort2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat_rte(ushort2);\n" "ushort2 __ovld __cnfn convert_ushort2_rtz(ushort2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat_rtz(ushort2);\n" "ushort2 __ovld __cnfn convert_ushort2_rtp(ushort2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat_rtp(ushort2);\n" "ushort2 __ovld __cnfn convert_ushort2_rtn(ushort2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat_rtn(ushort2);\n" "ushort2 __ovld __cnfn convert_ushort2(ushort2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat(ushort2);\n" "ushort2 __ovld __cnfn convert_ushort2_rte(int2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat_rte(int2);\n" "ushort2 __ovld __cnfn convert_ushort2_rtz(int2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat_rtz(int2);\n" "ushort2 __ovld __cnfn convert_ushort2_rtp(int2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat_rtp(int2);\n" "ushort2 __ovld __cnfn convert_ushort2_rtn(int2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat_rtn(int2);\n" "ushort2 __ovld __cnfn convert_ushort2(int2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat(int2);\n" "ushort2 __ovld __cnfn convert_ushort2_rte(uint2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat_rte(uint2);\n" "ushort2 __ovld __cnfn convert_ushort2_rtz(uint2);\n" "ushort2 __ovld 
__cnfn convert_ushort2_sat_rtz(uint2);\n" "ushort2 __ovld __cnfn convert_ushort2_rtp(uint2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat_rtp(uint2);\n" "ushort2 __ovld __cnfn convert_ushort2_rtn(uint2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat_rtn(uint2);\n" "ushort2 __ovld __cnfn convert_ushort2(uint2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat(uint2);\n" "ushort2 __ovld __cnfn convert_ushort2_rte(long2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat_rte(long2);\n" "ushort2 __ovld __cnfn convert_ushort2_rtz(long2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat_rtz(long2);\n" "ushort2 __ovld __cnfn convert_ushort2_rtp(long2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat_rtp(long2);\n" "ushort2 __ovld __cnfn convert_ushort2_rtn(long2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat_rtn(long2);\n" "ushort2 __ovld __cnfn convert_ushort2(long2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat(long2);\n" "ushort2 __ovld __cnfn convert_ushort2_rte(ulong2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat_rte(ulong2);\n" "ushort2 __ovld __cnfn convert_ushort2_rtz(ulong2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat_rtz(ulong2);\n" "ushort2 __ovld __cnfn convert_ushort2_rtp(ulong2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat_rtp(ulong2);\n" "ushort2 __ovld __cnfn convert_ushort2_rtn(ulong2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat_rtn(ulong2);\n" "ushort2 __ovld __cnfn convert_ushort2(ulong2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat(ulong2);\n" "ushort2 __ovld __cnfn convert_ushort2_rte(float2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat_rte(float2);\n" "ushort2 __ovld __cnfn convert_ushort2_rtz(float2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat_rtz(float2);\n" "ushort2 __ovld __cnfn convert_ushort2_rtp(float2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat_rtp(float2);\n" "ushort2 __ovld __cnfn convert_ushort2_rtn(float2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat_rtn(float2);\n" "ushort2 __ovld __cnfn convert_ushort2(float2);\n" "ushort2 
__ovld __cnfn convert_ushort2_sat(float2);\n" "int2 __ovld __cnfn convert_int2_rte(char2);\n" "int2 __ovld __cnfn convert_int2_sat_rte(char2);\n" "int2 __ovld __cnfn convert_int2_rtz(char2);\n" "int2 __ovld __cnfn convert_int2_sat_rtz(char2);\n" "int2 __ovld __cnfn convert_int2_rtp(char2);\n" "int2 __ovld __cnfn convert_int2_sat_rtp(char2);\n" "int2 __ovld __cnfn convert_int2_rtn(char2);\n" "int2 __ovld __cnfn convert_int2_sat_rtn(char2);\n" "int2 __ovld __cnfn convert_int2(char2);\n" "int2 __ovld __cnfn convert_int2_sat(char2);\n" "int2 __ovld __cnfn convert_int2_rte(uchar2);\n" "int2 __ovld __cnfn convert_int2_sat_rte(uchar2);\n" "int2 __ovld __cnfn convert_int2_rtz(uchar2);\n" "int2 __ovld __cnfn convert_int2_sat_rtz(uchar2);\n" "int2 __ovld __cnfn convert_int2_rtp(uchar2);\n" "int2 __ovld __cnfn convert_int2_sat_rtp(uchar2);\n" "int2 __ovld __cnfn convert_int2_rtn(uchar2);\n" "int2 __ovld __cnfn convert_int2_sat_rtn(uchar2);\n" "int2 __ovld __cnfn convert_int2(uchar2);\n" "int2 __ovld __cnfn convert_int2_sat(uchar2);\n" "int2 __ovld __cnfn convert_int2_rte(short2);\n" "int2 __ovld __cnfn convert_int2_sat_rte(short2);\n" "int2 __ovld __cnfn convert_int2_rtz(short2);\n" "int2 __ovld __cnfn convert_int2_sat_rtz(short2);\n" "int2 __ovld __cnfn convert_int2_rtp(short2);\n" "int2 __ovld __cnfn convert_int2_sat_rtp(short2);\n" "int2 __ovld __cnfn convert_int2_rtn(short2);\n" "int2 __ovld __cnfn convert_int2_sat_rtn(short2);\n" "int2 __ovld __cnfn convert_int2(short2);\n" "int2 __ovld __cnfn convert_int2_sat(short2);\n" "int2 __ovld __cnfn convert_int2_rte(ushort2);\n" "int2 __ovld __cnfn convert_int2_sat_rte(ushort2);\n" "int2 __ovld __cnfn convert_int2_rtz(ushort2);\n" "int2 __ovld __cnfn convert_int2_sat_rtz(ushort2);\n" "int2 __ovld __cnfn convert_int2_rtp(ushort2);\n" "int2 __ovld __cnfn convert_int2_sat_rtp(ushort2);\n" "int2 __ovld __cnfn convert_int2_rtn(ushort2);\n" "int2 __ovld __cnfn convert_int2_sat_rtn(ushort2);\n" "int2 __ovld __cnfn 
convert_int2(ushort2);\n" "int2 __ovld __cnfn convert_int2_sat(ushort2);\n" "int2 __ovld __cnfn convert_int2_rte(int2);\n" "int2 __ovld __cnfn convert_int2_sat_rte(int2);\n" "int2 __ovld __cnfn convert_int2_rtz(int2);\n" "int2 __ovld __cnfn convert_int2_sat_rtz(int2);\n" "int2 __ovld __cnfn convert_int2_rtp(int2);\n" "int2 __ovld __cnfn convert_int2_sat_rtp(int2);\n" "int2 __ovld __cnfn convert_int2_rtn(int2);\n" "int2 __ovld __cnfn convert_int2_sat_rtn(int2);\n" "int2 __ovld __cnfn convert_int2(int2);\n" "int2 __ovld __cnfn convert_int2_sat(int2);\n" "int2 __ovld __cnfn convert_int2_rte(uint2);\n" "int2 __ovld __cnfn convert_int2_sat_rte(uint2);\n" "int2 __ovld __cnfn convert_int2_rtz(uint2);\n" "int2 __ovld __cnfn convert_int2_sat_rtz(uint2);\n" "int2 __ovld __cnfn convert_int2_rtp(uint2);\n" "int2 __ovld __cnfn convert_int2_sat_rtp(uint2);\n" "int2 __ovld __cnfn convert_int2_rtn(uint2);\n" "int2 __ovld __cnfn convert_int2_sat_rtn(uint2);\n" "int2 __ovld __cnfn convert_int2(uint2);\n" "int2 __ovld __cnfn convert_int2_sat(uint2);\n" "int2 __ovld __cnfn convert_int2_rte(long2);\n" "int2 __ovld __cnfn convert_int2_sat_rte(long2);\n" "int2 __ovld __cnfn convert_int2_rtz(long2);\n" "int2 __ovld __cnfn convert_int2_sat_rtz(long2);\n" "int2 __ovld __cnfn convert_int2_rtp(long2);\n" "int2 __ovld __cnfn convert_int2_sat_rtp(long2);\n" "int2 __ovld __cnfn convert_int2_rtn(long2);\n" "int2 __ovld __cnfn convert_int2_sat_rtn(long2);\n" "int2 __ovld __cnfn convert_int2(long2);\n" "int2 __ovld __cnfn convert_int2_sat(long2);\n" "int2 __ovld __cnfn convert_int2_rte(ulong2);\n" "int2 __ovld __cnfn convert_int2_sat_rte(ulong2);\n" "int2 __ovld __cnfn convert_int2_rtz(ulong2);\n" "int2 __ovld __cnfn convert_int2_sat_rtz(ulong2);\n" "int2 __ovld __cnfn convert_int2_rtp(ulong2);\n" "int2 __ovld __cnfn convert_int2_sat_rtp(ulong2);\n" "int2 __ovld __cnfn convert_int2_rtn(ulong2);\n" "int2 __ovld __cnfn convert_int2_sat_rtn(ulong2);\n" "int2 __ovld __cnfn convert_int2(ulong2);\n" 
"int2 __ovld __cnfn convert_int2_sat(ulong2);\n" "int2 __ovld __cnfn convert_int2_rte(float2);\n" "int2 __ovld __cnfn convert_int2_sat_rte(float2);\n" "int2 __ovld __cnfn convert_int2_rtz(float2);\n" "int2 __ovld __cnfn convert_int2_sat_rtz(float2);\n" "int2 __ovld __cnfn convert_int2_rtp(float2);\n" "int2 __ovld __cnfn convert_int2_sat_rtp(float2);\n" "int2 __ovld __cnfn convert_int2_rtn(float2);\n" "int2 __ovld __cnfn convert_int2_sat_rtn(float2);\n" "int2 __ovld __cnfn convert_int2(float2);\n" "int2 __ovld __cnfn convert_int2_sat(float2);\n" "uint2 __ovld __cnfn convert_uint2_rte(char2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rte(char2);\n" "uint2 __ovld __cnfn convert_uint2_rtz(char2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rtz(char2);\n" "uint2 __ovld __cnfn convert_uint2_rtp(char2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rtp(char2);\n" "uint2 __ovld __cnfn convert_uint2_rtn(char2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rtn(char2);\n" "uint2 __ovld __cnfn convert_uint2(char2);\n" "uint2 __ovld __cnfn convert_uint2_sat(char2);\n" "uint2 __ovld __cnfn convert_uint2_rte(uchar2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rte(uchar2);\n" "uint2 __ovld __cnfn convert_uint2_rtz(uchar2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rtz(uchar2);\n" "uint2 __ovld __cnfn convert_uint2_rtp(uchar2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rtp(uchar2);\n" "uint2 __ovld __cnfn convert_uint2_rtn(uchar2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rtn(uchar2);\n" "uint2 __ovld __cnfn convert_uint2(uchar2);\n" "uint2 __ovld __cnfn convert_uint2_sat(uchar2);\n" "uint2 __ovld __cnfn convert_uint2_rte(short2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rte(short2);\n" "uint2 __ovld __cnfn convert_uint2_rtz(short2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rtz(short2);\n" "uint2 __ovld __cnfn convert_uint2_rtp(short2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rtp(short2);\n" "uint2 __ovld __cnfn convert_uint2_rtn(short2);\n" "uint2 __ovld __cnfn 
convert_uint2_sat_rtn(short2);\n" "uint2 __ovld __cnfn convert_uint2(short2);\n" "uint2 __ovld __cnfn convert_uint2_sat(short2);\n" "uint2 __ovld __cnfn convert_uint2_rte(ushort2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rte(ushort2);\n" "uint2 __ovld __cnfn convert_uint2_rtz(ushort2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rtz(ushort2);\n" "uint2 __ovld __cnfn convert_uint2_rtp(ushort2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rtp(ushort2);\n" "uint2 __ovld __cnfn convert_uint2_rtn(ushort2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rtn(ushort2);\n" "uint2 __ovld __cnfn convert_uint2(ushort2);\n" "uint2 __ovld __cnfn convert_uint2_sat(ushort2);\n" "uint2 __ovld __cnfn convert_uint2_rte(int2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rte(int2);\n" "uint2 __ovld __cnfn convert_uint2_rtz(int2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rtz(int2);\n" "uint2 __ovld __cnfn convert_uint2_rtp(int2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rtp(int2);\n" "uint2 __ovld __cnfn convert_uint2_rtn(int2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rtn(int2);\n" "uint2 __ovld __cnfn convert_uint2(int2);\n" "uint2 __ovld __cnfn convert_uint2_sat(int2);\n" "uint2 __ovld __cnfn convert_uint2_rte(uint2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rte(uint2);\n" "uint2 __ovld __cnfn convert_uint2_rtz(uint2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rtz(uint2);\n" "uint2 __ovld __cnfn convert_uint2_rtp(uint2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rtp(uint2);\n" "uint2 __ovld __cnfn convert_uint2_rtn(uint2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rtn(uint2);\n" "uint2 __ovld __cnfn convert_uint2(uint2);\n" "uint2 __ovld __cnfn convert_uint2_sat(uint2);\n" "uint2 __ovld __cnfn convert_uint2_rte(long2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rte(long2);\n" "uint2 __ovld __cnfn convert_uint2_rtz(long2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rtz(long2);\n" "uint2 __ovld __cnfn convert_uint2_rtp(long2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rtp(long2);\n" "uint2 
__ovld __cnfn convert_uint2_rtn(long2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rtn(long2);\n" "uint2 __ovld __cnfn convert_uint2(long2);\n" "uint2 __ovld __cnfn convert_uint2_sat(long2);\n" "uint2 __ovld __cnfn convert_uint2_rte(ulong2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rte(ulong2);\n" "uint2 __ovld __cnfn convert_uint2_rtz(ulong2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rtz(ulong2);\n" "uint2 __ovld __cnfn convert_uint2_rtp(ulong2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rtp(ulong2);\n" "uint2 __ovld __cnfn convert_uint2_rtn(ulong2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rtn(ulong2);\n" "uint2 __ovld __cnfn convert_uint2(ulong2);\n" "uint2 __ovld __cnfn convert_uint2_sat(ulong2);\n" "uint2 __ovld __cnfn convert_uint2_rte(float2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rte(float2);\n" "uint2 __ovld __cnfn convert_uint2_rtz(float2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rtz(float2);\n" "uint2 __ovld __cnfn convert_uint2_rtp(float2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rtp(float2);\n" "uint2 __ovld __cnfn convert_uint2_rtn(float2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rtn(float2);\n" "uint2 __ovld __cnfn convert_uint2(float2);\n" "uint2 __ovld __cnfn convert_uint2_sat(float2);\n" "long2 __ovld __cnfn convert_long2_rte(char2);\n" "long2 __ovld __cnfn convert_long2_sat_rte(char2);\n" "long2 __ovld __cnfn convert_long2_rtz(char2);\n" "long2 __ovld __cnfn convert_long2_sat_rtz(char2);\n" "long2 __ovld __cnfn convert_long2_rtp(char2);\n" "long2 __ovld __cnfn convert_long2_sat_rtp(char2);\n" "long2 __ovld __cnfn convert_long2_rtn(char2);\n" "long2 __ovld __cnfn convert_long2_sat_rtn(char2);\n" "long2 __ovld __cnfn convert_long2(char2);\n" "long2 __ovld __cnfn convert_long2_sat(char2);\n" "long2 __ovld __cnfn convert_long2_rte(uchar2);\n" "long2 __ovld __cnfn convert_long2_sat_rte(uchar2);\n" "long2 __ovld __cnfn convert_long2_rtz(uchar2);\n" "long2 __ovld __cnfn convert_long2_sat_rtz(uchar2);\n" "long2 __ovld __cnfn 
convert_long2_rtp(uchar2);\n" "long2 __ovld __cnfn convert_long2_sat_rtp(uchar2);\n" "long2 __ovld __cnfn convert_long2_rtn(uchar2);\n" "long2 __ovld __cnfn convert_long2_sat_rtn(uchar2);\n" "long2 __ovld __cnfn convert_long2(uchar2);\n" "long2 __ovld __cnfn convert_long2_sat(uchar2);\n" "long2 __ovld __cnfn convert_long2_rte(short2);\n" "long2 __ovld __cnfn convert_long2_sat_rte(short2);\n" "long2 __ovld __cnfn convert_long2_rtz(short2);\n" "long2 __ovld __cnfn convert_long2_sat_rtz(short2);\n" "long2 __ovld __cnfn convert_long2_rtp(short2);\n" "long2 __ovld __cnfn convert_long2_sat_rtp(short2);\n" "long2 __ovld __cnfn convert_long2_rtn(short2);\n" "long2 __ovld __cnfn convert_long2_sat_rtn(short2);\n" "long2 __ovld __cnfn convert_long2(short2);\n" "long2 __ovld __cnfn convert_long2_sat(short2);\n" "long2 __ovld __cnfn convert_long2_rte(ushort2);\n" "long2 __ovld __cnfn convert_long2_sat_rte(ushort2);\n" "long2 __ovld __cnfn convert_long2_rtz(ushort2);\n" "long2 __ovld __cnfn convert_long2_sat_rtz(ushort2);\n" "long2 __ovld __cnfn convert_long2_rtp(ushort2);\n" "long2 __ovld __cnfn convert_long2_sat_rtp(ushort2);\n" "long2 __ovld __cnfn convert_long2_rtn(ushort2);\n" "long2 __ovld __cnfn convert_long2_sat_rtn(ushort2);\n" "long2 __ovld __cnfn convert_long2(ushort2);\n" "long2 __ovld __cnfn convert_long2_sat(ushort2);\n" "long2 __ovld __cnfn convert_long2_rte(int2);\n" "long2 __ovld __cnfn convert_long2_sat_rte(int2);\n" "long2 __ovld __cnfn convert_long2_rtz(int2);\n" "long2 __ovld __cnfn convert_long2_sat_rtz(int2);\n" "long2 __ovld __cnfn convert_long2_rtp(int2);\n" "long2 __ovld __cnfn convert_long2_sat_rtp(int2);\n" "long2 __ovld __cnfn convert_long2_rtn(int2);\n" "long2 __ovld __cnfn convert_long2_sat_rtn(int2);\n" "long2 __ovld __cnfn convert_long2(int2);\n" "long2 __ovld __cnfn convert_long2_sat(int2);\n" "long2 __ovld __cnfn convert_long2_rte(uint2);\n" "long2 __ovld __cnfn convert_long2_sat_rte(uint2);\n" "long2 __ovld __cnfn convert_long2_rtz(uint2);\n" 
"long2 __ovld __cnfn convert_long2_sat_rtz(uint2);\n" "long2 __ovld __cnfn convert_long2_rtp(uint2);\n" "long2 __ovld __cnfn convert_long2_sat_rtp(uint2);\n" "long2 __ovld __cnfn convert_long2_rtn(uint2);\n" "long2 __ovld __cnfn convert_long2_sat_rtn(uint2);\n" "long2 __ovld __cnfn convert_long2(uint2);\n" "long2 __ovld __cnfn convert_long2_sat(uint2);\n" "long2 __ovld __cnfn convert_long2_rte(long2);\n" "long2 __ovld __cnfn convert_long2_sat_rte(long2);\n" "long2 __ovld __cnfn convert_long2_rtz(long2);\n" "long2 __ovld __cnfn convert_long2_sat_rtz(long2);\n" "long2 __ovld __cnfn convert_long2_rtp(long2);\n" "long2 __ovld __cnfn convert_long2_sat_rtp(long2);\n" "long2 __ovld __cnfn convert_long2_rtn(long2);\n" "long2 __ovld __cnfn convert_long2_sat_rtn(long2);\n" "long2 __ovld __cnfn convert_long2(long2);\n" "long2 __ovld __cnfn convert_long2_sat(long2);\n" "long2 __ovld __cnfn convert_long2_rte(ulong2);\n" "long2 __ovld __cnfn convert_long2_sat_rte(ulong2);\n" "long2 __ovld __cnfn convert_long2_rtz(ulong2);\n" "long2 __ovld __cnfn convert_long2_sat_rtz(ulong2);\n" "long2 __ovld __cnfn convert_long2_rtp(ulong2);\n" "long2 __ovld __cnfn convert_long2_sat_rtp(ulong2);\n" "long2 __ovld __cnfn convert_long2_rtn(ulong2);\n" "long2 __ovld __cnfn convert_long2_sat_rtn(ulong2);\n" "long2 __ovld __cnfn convert_long2(ulong2);\n" "long2 __ovld __cnfn convert_long2_sat(ulong2);\n" "long2 __ovld __cnfn convert_long2_rte(float2);\n" "long2 __ovld __cnfn convert_long2_sat_rte(float2);\n" "long2 __ovld __cnfn convert_long2_rtz(float2);\n" "long2 __ovld __cnfn convert_long2_sat_rtz(float2);\n" "long2 __ovld __cnfn convert_long2_rtp(float2);\n" "long2 __ovld __cnfn convert_long2_sat_rtp(float2);\n" "long2 __ovld __cnfn convert_long2_rtn(float2);\n" "long2 __ovld __cnfn convert_long2_sat_rtn(float2);\n" "long2 __ovld __cnfn convert_long2(float2);\n" "long2 __ovld __cnfn convert_long2_sat(float2);\n" "ulong2 __ovld __cnfn convert_ulong2_rte(char2);\n" "ulong2 __ovld __cnfn 
convert_ulong2_sat_rte(char2);\n" "ulong2 __ovld __cnfn convert_ulong2_rtz(char2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rtz(char2);\n" "ulong2 __ovld __cnfn convert_ulong2_rtp(char2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rtp(char2);\n" "ulong2 __ovld __cnfn convert_ulong2_rtn(char2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rtn(char2);\n" "ulong2 __ovld __cnfn convert_ulong2(char2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat(char2);\n" "ulong2 __ovld __cnfn convert_ulong2_rte(uchar2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rte(uchar2);\n" "ulong2 __ovld __cnfn convert_ulong2_rtz(uchar2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rtz(uchar2);\n" "ulong2 __ovld __cnfn convert_ulong2_rtp(uchar2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rtp(uchar2);\n" "ulong2 __ovld __cnfn convert_ulong2_rtn(uchar2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rtn(uchar2);\n" "ulong2 __ovld __cnfn convert_ulong2(uchar2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat(uchar2);\n" "ulong2 __ovld __cnfn convert_ulong2_rte(short2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rte(short2);\n" "ulong2 __ovld __cnfn convert_ulong2_rtz(short2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rtz(short2);\n" "ulong2 __ovld __cnfn convert_ulong2_rtp(short2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rtp(short2);\n" "ulong2 __ovld __cnfn convert_ulong2_rtn(short2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rtn(short2);\n" "ulong2 __ovld __cnfn convert_ulong2(short2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat(short2);\n" "ulong2 __ovld __cnfn convert_ulong2_rte(ushort2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rte(ushort2);\n" "ulong2 __ovld __cnfn convert_ulong2_rtz(ushort2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rtz(ushort2);\n" "ulong2 __ovld __cnfn convert_ulong2_rtp(ushort2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rtp(ushort2);\n" "ulong2 __ovld __cnfn convert_ulong2_rtn(ushort2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rtn(ushort2);\n" "ulong2 
__ovld __cnfn convert_ulong2(ushort2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat(ushort2);\n" "ulong2 __ovld __cnfn convert_ulong2_rte(int2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rte(int2);\n" "ulong2 __ovld __cnfn convert_ulong2_rtz(int2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rtz(int2);\n" "ulong2 __ovld __cnfn convert_ulong2_rtp(int2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rtp(int2);\n" "ulong2 __ovld __cnfn convert_ulong2_rtn(int2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rtn(int2);\n" "ulong2 __ovld __cnfn convert_ulong2(int2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat(int2);\n" "ulong2 __ovld __cnfn convert_ulong2_rte(uint2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rte(uint2);\n" "ulong2 __ovld __cnfn convert_ulong2_rtz(uint2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rtz(uint2);\n" "ulong2 __ovld __cnfn convert_ulong2_rtp(uint2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rtp(uint2);\n" "ulong2 __ovld __cnfn convert_ulong2_rtn(uint2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rtn(uint2);\n" "ulong2 __ovld __cnfn convert_ulong2(uint2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat(uint2);\n" "ulong2 __ovld __cnfn convert_ulong2_rte(long2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rte(long2);\n" "ulong2 __ovld __cnfn convert_ulong2_rtz(long2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rtz(long2);\n" "ulong2 __ovld __cnfn convert_ulong2_rtp(long2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rtp(long2);\n" "ulong2 __ovld __cnfn convert_ulong2_rtn(long2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rtn(long2);\n" "ulong2 __ovld __cnfn convert_ulong2(long2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat(long2);\n" "ulong2 __ovld __cnfn convert_ulong2_rte(ulong2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rte(ulong2);\n" "ulong2 __ovld __cnfn convert_ulong2_rtz(ulong2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rtz(ulong2);\n" "ulong2 __ovld __cnfn convert_ulong2_rtp(ulong2);\n" "ulong2 __ovld __cnfn 
convert_ulong2_sat_rtp(ulong2);\n" "ulong2 __ovld __cnfn convert_ulong2_rtn(ulong2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rtn(ulong2);\n" "ulong2 __ovld __cnfn convert_ulong2(ulong2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat(ulong2);\n" "ulong2 __ovld __cnfn convert_ulong2_rte(float2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rte(float2);\n" "ulong2 __ovld __cnfn convert_ulong2_rtz(float2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rtz(float2);\n" "ulong2 __ovld __cnfn convert_ulong2_rtp(float2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rtp(float2);\n" "ulong2 __ovld __cnfn convert_ulong2_rtn(float2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rtn(float2);\n" "ulong2 __ovld __cnfn convert_ulong2(float2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat(float2);\n" "float2 __ovld __cnfn convert_float2_rte(char2);\n" "float2 __ovld __cnfn convert_float2_rtz(char2);\n" "float2 __ovld __cnfn convert_float2_rtp(char2);\n" "float2 __ovld __cnfn convert_float2_rtn(char2);\n" "float2 __ovld __cnfn convert_float2(char2);\n" "float2 __ovld __cnfn convert_float2_rte(uchar2);\n" "float2 __ovld __cnfn convert_float2_rtz(uchar2);\n" "float2 __ovld __cnfn convert_float2_rtp(uchar2);\n" "float2 __ovld __cnfn convert_float2_rtn(uchar2);\n" "float2 __ovld __cnfn convert_float2(uchar2);\n" "float2 __ovld __cnfn convert_float2_rte(short2);\n" "float2 __ovld __cnfn convert_float2_rtz(short2);\n" "float2 __ovld __cnfn convert_float2_rtp(short2);\n" "float2 __ovld __cnfn convert_float2_rtn(short2);\n" "float2 __ovld __cnfn convert_float2(short2);\n" "float2 __ovld __cnfn convert_float2_rte(ushort2);\n" "float2 __ovld __cnfn convert_float2_rtz(ushort2);\n" "float2 __ovld __cnfn convert_float2_rtp(ushort2);\n" "float2 __ovld __cnfn convert_float2_rtn(ushort2);\n" "float2 __ovld __cnfn convert_float2(ushort2);\n" "float2 __ovld __cnfn convert_float2_rte(int2);\n" "float2 __ovld __cnfn convert_float2_rtz(int2);\n" "float2 __ovld __cnfn convert_float2_rtp(int2);\n" "float2 
__ovld __cnfn convert_float2_rtn(int2);\n" "float2 __ovld __cnfn convert_float2(int2);\n" "float2 __ovld __cnfn convert_float2_rte(uint2);\n" "float2 __ovld __cnfn convert_float2_rtz(uint2);\n" "float2 __ovld __cnfn convert_float2_rtp(uint2);\n" "float2 __ovld __cnfn convert_float2_rtn(uint2);\n" "float2 __ovld __cnfn convert_float2(uint2);\n" "float2 __ovld __cnfn convert_float2_rte(long2);\n" "float2 __ovld __cnfn convert_float2_rtz(long2);\n" "float2 __ovld __cnfn convert_float2_rtp(long2);\n" "float2 __ovld __cnfn convert_float2_rtn(long2);\n" "float2 __ovld __cnfn convert_float2(long2);\n" "float2 __ovld __cnfn convert_float2_rte(ulong2);\n" "float2 __ovld __cnfn convert_float2_rtz(ulong2);\n" "float2 __ovld __cnfn convert_float2_rtp(ulong2);\n" "float2 __ovld __cnfn convert_float2_rtn(ulong2);\n" "float2 __ovld __cnfn convert_float2(ulong2);\n" "float2 __ovld __cnfn convert_float2_rte(float2);\n" "float2 __ovld __cnfn convert_float2_rtz(float2);\n" "float2 __ovld __cnfn convert_float2_rtp(float2);\n" "float2 __ovld __cnfn convert_float2_rtn(float2);\n" "float2 __ovld __cnfn convert_float2(float2);\n" "char3 __ovld __cnfn convert_char3_rte(char3);\n" "char3 __ovld __cnfn convert_char3_sat_rte(char3);\n" "char3 __ovld __cnfn convert_char3_rtz(char3);\n" "char3 __ovld __cnfn convert_char3_sat_rtz(char3);\n" "char3 __ovld __cnfn convert_char3_rtp(char3);\n" "char3 __ovld __cnfn convert_char3_sat_rtp(char3);\n" "char3 __ovld __cnfn convert_char3_rtn(char3);\n" "char3 __ovld __cnfn convert_char3_sat_rtn(char3);\n" "char3 __ovld __cnfn convert_char3(char3);\n" "char3 __ovld __cnfn convert_char3_sat(char3);\n" "char3 __ovld __cnfn convert_char3_rte(uchar3);\n" "char3 __ovld __cnfn convert_char3_sat_rte(uchar3);\n" "char3 __ovld __cnfn convert_char3_rtz(uchar3);\n" "char3 __ovld __cnfn convert_char3_sat_rtz(uchar3);\n" "char3 __ovld __cnfn convert_char3_rtp(uchar3);\n" "char3 __ovld __cnfn convert_char3_sat_rtp(uchar3);\n" "char3 __ovld __cnfn 
convert_char3_rtn(uchar3);\n" "char3 __ovld __cnfn convert_char3_sat_rtn(uchar3);\n" "char3 __ovld __cnfn convert_char3(uchar3);\n" "char3 __ovld __cnfn convert_char3_sat(uchar3);\n" "char3 __ovld __cnfn convert_char3_rte(short3);\n" "char3 __ovld __cnfn convert_char3_sat_rte(short3);\n" "char3 __ovld __cnfn convert_char3_rtz(short3);\n" "char3 __ovld __cnfn convert_char3_sat_rtz(short3);\n" "char3 __ovld __cnfn convert_char3_rtp(short3);\n" "char3 __ovld __cnfn convert_char3_sat_rtp(short3);\n" "char3 __ovld __cnfn convert_char3_rtn(short3);\n" "char3 __ovld __cnfn convert_char3_sat_rtn(short3);\n" "char3 __ovld __cnfn convert_char3(short3);\n" "char3 __ovld __cnfn convert_char3_sat(short3);\n" "char3 __ovld __cnfn convert_char3_rte(ushort3);\n" "char3 __ovld __cnfn convert_char3_sat_rte(ushort3);\n" "char3 __ovld __cnfn convert_char3_rtz(ushort3);\n" "char3 __ovld __cnfn convert_char3_sat_rtz(ushort3);\n" "char3 __ovld __cnfn convert_char3_rtp(ushort3);\n" "char3 __ovld __cnfn convert_char3_sat_rtp(ushort3);\n" "char3 __ovld __cnfn convert_char3_rtn(ushort3);\n" "char3 __ovld __cnfn convert_char3_sat_rtn(ushort3);\n" "char3 __ovld __cnfn convert_char3(ushort3);\n" "char3 __ovld __cnfn convert_char3_sat(ushort3);\n" "char3 __ovld __cnfn convert_char3_rte(int3);\n" "char3 __ovld __cnfn convert_char3_sat_rte(int3);\n" "char3 __ovld __cnfn convert_char3_rtz(int3);\n" "char3 __ovld __cnfn convert_char3_sat_rtz(int3);\n" "char3 __ovld __cnfn convert_char3_rtp(int3);\n" "char3 __ovld __cnfn convert_char3_sat_rtp(int3);\n" "char3 __ovld __cnfn convert_char3_rtn(int3);\n" "char3 __ovld __cnfn convert_char3_sat_rtn(int3);\n" "char3 __ovld __cnfn convert_char3(int3);\n" "char3 __ovld __cnfn convert_char3_sat(int3);\n" "char3 __ovld __cnfn convert_char3_rte(uint3);\n" "char3 __ovld __cnfn convert_char3_sat_rte(uint3);\n" "char3 __ovld __cnfn convert_char3_rtz(uint3);\n" "char3 __ovld __cnfn convert_char3_sat_rtz(uint3);\n" "char3 __ovld __cnfn convert_char3_rtp(uint3);\n" 
"char3 __ovld __cnfn convert_char3_sat_rtp(uint3);\n" "char3 __ovld __cnfn convert_char3_rtn(uint3);\n" "char3 __ovld __cnfn convert_char3_sat_rtn(uint3);\n" "char3 __ovld __cnfn convert_char3(uint3);\n" "char3 __ovld __cnfn convert_char3_sat(uint3);\n" "char3 __ovld __cnfn convert_char3_rte(long3);\n" "char3 __ovld __cnfn convert_char3_sat_rte(long3);\n" "char3 __ovld __cnfn convert_char3_rtz(long3);\n" "char3 __ovld __cnfn convert_char3_sat_rtz(long3);\n" "char3 __ovld __cnfn convert_char3_rtp(long3);\n" "char3 __ovld __cnfn convert_char3_sat_rtp(long3);\n" "char3 __ovld __cnfn convert_char3_rtn(long3);\n" "char3 __ovld __cnfn convert_char3_sat_rtn(long3);\n" "char3 __ovld __cnfn convert_char3(long3);\n" "char3 __ovld __cnfn convert_char3_sat(long3);\n" "char3 __ovld __cnfn convert_char3_rte(ulong3);\n" "char3 __ovld __cnfn convert_char3_sat_rte(ulong3);\n" "char3 __ovld __cnfn convert_char3_rtz(ulong3);\n" "char3 __ovld __cnfn convert_char3_sat_rtz(ulong3);\n" "char3 __ovld __cnfn convert_char3_rtp(ulong3);\n" "char3 __ovld __cnfn convert_char3_sat_rtp(ulong3);\n" "char3 __ovld __cnfn convert_char3_rtn(ulong3);\n" "char3 __ovld __cnfn convert_char3_sat_rtn(ulong3);\n" "char3 __ovld __cnfn convert_char3(ulong3);\n" "char3 __ovld __cnfn convert_char3_sat(ulong3);\n" "char3 __ovld __cnfn convert_char3_rte(float3);\n" "char3 __ovld __cnfn convert_char3_sat_rte(float3);\n" "char3 __ovld __cnfn convert_char3_rtz(float3);\n" "char3 __ovld __cnfn convert_char3_sat_rtz(float3);\n" "char3 __ovld __cnfn convert_char3_rtp(float3);\n" "char3 __ovld __cnfn convert_char3_sat_rtp(float3);\n" "char3 __ovld __cnfn convert_char3_rtn(float3);\n" "char3 __ovld __cnfn convert_char3_sat_rtn(float3);\n" "char3 __ovld __cnfn convert_char3(float3);\n" "char3 __ovld __cnfn convert_char3_sat(float3);\n" "uchar3 __ovld __cnfn convert_uchar3_rte(char3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rte(char3);\n" "uchar3 __ovld __cnfn convert_uchar3_rtz(char3);\n" "uchar3 __ovld __cnfn 
convert_uchar3_sat_rtz(char3);\n" "uchar3 __ovld __cnfn convert_uchar3_rtp(char3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rtp(char3);\n" "uchar3 __ovld __cnfn convert_uchar3_rtn(char3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rtn(char3);\n" "uchar3 __ovld __cnfn convert_uchar3(char3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat(char3);\n" "uchar3 __ovld __cnfn convert_uchar3_rte(uchar3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rte(uchar3);\n" "uchar3 __ovld __cnfn convert_uchar3_rtz(uchar3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rtz(uchar3);\n" "uchar3 __ovld __cnfn convert_uchar3_rtp(uchar3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rtp(uchar3);\n" "uchar3 __ovld __cnfn convert_uchar3_rtn(uchar3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rtn(uchar3);\n" "uchar3 __ovld __cnfn convert_uchar3(uchar3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat(uchar3);\n" "uchar3 __ovld __cnfn convert_uchar3_rte(short3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rte(short3);\n" "uchar3 __ovld __cnfn convert_uchar3_rtz(short3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rtz(short3);\n" "uchar3 __ovld __cnfn convert_uchar3_rtp(short3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rtp(short3);\n" "uchar3 __ovld __cnfn convert_uchar3_rtn(short3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rtn(short3);\n" "uchar3 __ovld __cnfn convert_uchar3(short3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat(short3);\n" "uchar3 __ovld __cnfn convert_uchar3_rte(ushort3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rte(ushort3);\n" "uchar3 __ovld __cnfn convert_uchar3_rtz(ushort3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rtz(ushort3);\n" "uchar3 __ovld __cnfn convert_uchar3_rtp(ushort3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rtp(ushort3);\n" "uchar3 __ovld __cnfn convert_uchar3_rtn(ushort3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rtn(ushort3);\n" "uchar3 __ovld __cnfn convert_uchar3(ushort3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat(ushort3);\n" "uchar3 
__ovld __cnfn convert_uchar3_rte(int3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rte(int3);\n" "uchar3 __ovld __cnfn convert_uchar3_rtz(int3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rtz(int3);\n" "uchar3 __ovld __cnfn convert_uchar3_rtp(int3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rtp(int3);\n" "uchar3 __ovld __cnfn convert_uchar3_rtn(int3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rtn(int3);\n" "uchar3 __ovld __cnfn convert_uchar3(int3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat(int3);\n" "uchar3 __ovld __cnfn convert_uchar3_rte(uint3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rte(uint3);\n" "uchar3 __ovld __cnfn convert_uchar3_rtz(uint3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rtz(uint3);\n" "uchar3 __ovld __cnfn convert_uchar3_rtp(uint3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rtp(uint3);\n" "uchar3 __ovld __cnfn convert_uchar3_rtn(uint3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rtn(uint3);\n" "uchar3 __ovld __cnfn convert_uchar3(uint3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat(uint3);\n" "uchar3 __ovld __cnfn convert_uchar3_rte(long3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rte(long3);\n" "uchar3 __ovld __cnfn convert_uchar3_rtz(long3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rtz(long3);\n" "uchar3 __ovld __cnfn convert_uchar3_rtp(long3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rtp(long3);\n" "uchar3 __ovld __cnfn convert_uchar3_rtn(long3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rtn(long3);\n" "uchar3 __ovld __cnfn convert_uchar3(long3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat(long3);\n" "uchar3 __ovld __cnfn convert_uchar3_rte(ulong3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rte(ulong3);\n" "uchar3 __ovld __cnfn convert_uchar3_rtz(ulong3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rtz(ulong3);\n" "uchar3 __ovld __cnfn convert_uchar3_rtp(ulong3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rtp(ulong3);\n" "uchar3 __ovld __cnfn convert_uchar3_rtn(ulong3);\n" "uchar3 __ovld __cnfn 
convert_uchar3_sat_rtn(ulong3);\n" "uchar3 __ovld __cnfn convert_uchar3(ulong3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat(ulong3);\n" "uchar3 __ovld __cnfn convert_uchar3_rte(float3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rte(float3);\n" "uchar3 __ovld __cnfn convert_uchar3_rtz(float3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rtz(float3);\n" "uchar3 __ovld __cnfn convert_uchar3_rtp(float3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rtp(float3);\n" "uchar3 __ovld __cnfn convert_uchar3_rtn(float3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rtn(float3);\n" "uchar3 __ovld __cnfn convert_uchar3(float3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat(float3);\n" "short3 __ovld __cnfn convert_short3_rte(char3);\n" "short3 __ovld __cnfn convert_short3_sat_rte(char3);\n" "short3 __ovld __cnfn convert_short3_rtz(char3);\n" "short3 __ovld __cnfn convert_short3_sat_rtz(char3);\n" "short3 __ovld __cnfn convert_short3_rtp(char3);\n" "short3 __ovld __cnfn convert_short3_sat_rtp(char3);\n" "short3 __ovld __cnfn convert_short3_rtn(char3);\n" "short3 __ovld __cnfn convert_short3_sat_rtn(char3);\n" "short3 __ovld __cnfn convert_short3(char3);\n" "short3 __ovld __cnfn convert_short3_sat(char3);\n" "short3 __ovld __cnfn convert_short3_rte(uchar3);\n" "short3 __ovld __cnfn convert_short3_sat_rte(uchar3);\n" "short3 __ovld __cnfn convert_short3_rtz(uchar3);\n" "short3 __ovld __cnfn convert_short3_sat_rtz(uchar3);\n" "short3 __ovld __cnfn convert_short3_rtp(uchar3);\n" "short3 __ovld __cnfn convert_short3_sat_rtp(uchar3);\n" "short3 __ovld __cnfn convert_short3_rtn(uchar3);\n" "short3 __ovld __cnfn convert_short3_sat_rtn(uchar3);\n" "short3 __ovld __cnfn convert_short3(uchar3);\n" "short3 __ovld __cnfn convert_short3_sat(uchar3);\n" "short3 __ovld __cnfn convert_short3_rte(short3);\n" "short3 __ovld __cnfn convert_short3_sat_rte(short3);\n" "short3 __ovld __cnfn convert_short3_rtz(short3);\n" "short3 __ovld __cnfn convert_short3_sat_rtz(short3);\n" "short3 __ovld __cnfn 
convert_short3_rtp(short3);\n" "short3 __ovld __cnfn convert_short3_sat_rtp(short3);\n" "short3 __ovld __cnfn convert_short3_rtn(short3);\n" "short3 __ovld __cnfn convert_short3_sat_rtn(short3);\n" "short3 __ovld __cnfn convert_short3(short3);\n" "short3 __ovld __cnfn convert_short3_sat(short3);\n" "short3 __ovld __cnfn convert_short3_rte(ushort3);\n" "short3 __ovld __cnfn convert_short3_sat_rte(ushort3);\n" "short3 __ovld __cnfn convert_short3_rtz(ushort3);\n" "short3 __ovld __cnfn convert_short3_sat_rtz(ushort3);\n" "short3 __ovld __cnfn convert_short3_rtp(ushort3);\n" "short3 __ovld __cnfn convert_short3_sat_rtp(ushort3);\n" "short3 __ovld __cnfn convert_short3_rtn(ushort3);\n" "short3 __ovld __cnfn convert_short3_sat_rtn(ushort3);\n" "short3 __ovld __cnfn convert_short3(ushort3);\n" "short3 __ovld __cnfn convert_short3_sat(ushort3);\n" "short3 __ovld __cnfn convert_short3_rte(int3);\n" "short3 __ovld __cnfn convert_short3_sat_rte(int3);\n" "short3 __ovld __cnfn convert_short3_rtz(int3);\n" "short3 __ovld __cnfn convert_short3_sat_rtz(int3);\n" "short3 __ovld __cnfn convert_short3_rtp(int3);\n" "short3 __ovld __cnfn convert_short3_sat_rtp(int3);\n" "short3 __ovld __cnfn convert_short3_rtn(int3);\n" "short3 __ovld __cnfn convert_short3_sat_rtn(int3);\n" "short3 __ovld __cnfn convert_short3(int3);\n" "short3 __ovld __cnfn convert_short3_sat(int3);\n" "short3 __ovld __cnfn convert_short3_rte(uint3);\n" "short3 __ovld __cnfn convert_short3_sat_rte(uint3);\n" "short3 __ovld __cnfn convert_short3_rtz(uint3);\n" "short3 __ovld __cnfn convert_short3_sat_rtz(uint3);\n" "short3 __ovld __cnfn convert_short3_rtp(uint3);\n" "short3 __ovld __cnfn convert_short3_sat_rtp(uint3);\n" "short3 __ovld __cnfn convert_short3_rtn(uint3);\n" "short3 __ovld __cnfn convert_short3_sat_rtn(uint3);\n" "short3 __ovld __cnfn convert_short3(uint3);\n" "short3 __ovld __cnfn convert_short3_sat(uint3);\n" "short3 __ovld __cnfn convert_short3_rte(long3);\n" "short3 __ovld __cnfn 
convert_short3_sat_rte(long3);\n" "short3 __ovld __cnfn convert_short3_rtz(long3);\n" "short3 __ovld __cnfn convert_short3_sat_rtz(long3);\n" "short3 __ovld __cnfn convert_short3_rtp(long3);\n" "short3 __ovld __cnfn convert_short3_sat_rtp(long3);\n" "short3 __ovld __cnfn convert_short3_rtn(long3);\n" "short3 __ovld __cnfn convert_short3_sat_rtn(long3);\n" "short3 __ovld __cnfn convert_short3(long3);\n" "short3 __ovld __cnfn convert_short3_sat(long3);\n" "short3 __ovld __cnfn convert_short3_rte(ulong3);\n" "short3 __ovld __cnfn convert_short3_sat_rte(ulong3);\n" "short3 __ovld __cnfn convert_short3_rtz(ulong3);\n" "short3 __ovld __cnfn convert_short3_sat_rtz(ulong3);\n" "short3 __ovld __cnfn convert_short3_rtp(ulong3);\n" "short3 __ovld __cnfn convert_short3_sat_rtp(ulong3);\n" "short3 __ovld __cnfn convert_short3_rtn(ulong3);\n" "short3 __ovld __cnfn convert_short3_sat_rtn(ulong3);\n" "short3 __ovld __cnfn convert_short3(ulong3);\n" "short3 __ovld __cnfn convert_short3_sat(ulong3);\n" "short3 __ovld __cnfn convert_short3_rte(float3);\n" "short3 __ovld __cnfn convert_short3_sat_rte(float3);\n" "short3 __ovld __cnfn convert_short3_rtz(float3);\n" "short3 __ovld __cnfn convert_short3_sat_rtz(float3);\n" "short3 __ovld __cnfn convert_short3_rtp(float3);\n" "short3 __ovld __cnfn convert_short3_sat_rtp(float3);\n" "short3 __ovld __cnfn convert_short3_rtn(float3);\n" "short3 __ovld __cnfn convert_short3_sat_rtn(float3);\n" "short3 __ovld __cnfn convert_short3(float3);\n" "short3 __ovld __cnfn convert_short3_sat(float3);\n" "ushort3 __ovld __cnfn convert_ushort3_rte(char3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rte(char3);\n" "ushort3 __ovld __cnfn convert_ushort3_rtz(char3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rtz(char3);\n" "ushort3 __ovld __cnfn convert_ushort3_rtp(char3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rtp(char3);\n" "ushort3 __ovld __cnfn convert_ushort3_rtn(char3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rtn(char3);\n" "ushort3 
__ovld __cnfn convert_ushort3(char3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat(char3);\n" "ushort3 __ovld __cnfn convert_ushort3_rte(uchar3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rte(uchar3);\n" "ushort3 __ovld __cnfn convert_ushort3_rtz(uchar3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rtz(uchar3);\n" "ushort3 __ovld __cnfn convert_ushort3_rtp(uchar3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rtp(uchar3);\n" "ushort3 __ovld __cnfn convert_ushort3_rtn(uchar3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rtn(uchar3);\n" "ushort3 __ovld __cnfn convert_ushort3(uchar3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat(uchar3);\n" "ushort3 __ovld __cnfn convert_ushort3_rte(short3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rte(short3);\n" "ushort3 __ovld __cnfn convert_ushort3_rtz(short3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rtz(short3);\n" "ushort3 __ovld __cnfn convert_ushort3_rtp(short3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rtp(short3);\n" "ushort3 __ovld __cnfn convert_ushort3_rtn(short3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rtn(short3);\n" "ushort3 __ovld __cnfn convert_ushort3(short3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat(short3);\n" "ushort3 __ovld __cnfn convert_ushort3_rte(ushort3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rte(ushort3);\n" "ushort3 __ovld __cnfn convert_ushort3_rtz(ushort3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rtz(ushort3);\n" "ushort3 __ovld __cnfn convert_ushort3_rtp(ushort3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rtp(ushort3);\n" "ushort3 __ovld __cnfn convert_ushort3_rtn(ushort3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rtn(ushort3);\n" "ushort3 __ovld __cnfn convert_ushort3(ushort3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat(ushort3);\n" "ushort3 __ovld __cnfn convert_ushort3_rte(int3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rte(int3);\n" "ushort3 __ovld __cnfn convert_ushort3_rtz(int3);\n" "ushort3 __ovld __cnfn 
convert_ushort3_sat_rtz(int3);\n" "ushort3 __ovld __cnfn convert_ushort3_rtp(int3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rtp(int3);\n" "ushort3 __ovld __cnfn convert_ushort3_rtn(int3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rtn(int3);\n" "ushort3 __ovld __cnfn convert_ushort3(int3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat(int3);\n" "ushort3 __ovld __cnfn convert_ushort3_rte(uint3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rte(uint3);\n" "ushort3 __ovld __cnfn convert_ushort3_rtz(uint3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rtz(uint3);\n" "ushort3 __ovld __cnfn convert_ushort3_rtp(uint3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rtp(uint3);\n" "ushort3 __ovld __cnfn convert_ushort3_rtn(uint3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rtn(uint3);\n" "ushort3 __ovld __cnfn convert_ushort3(uint3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat(uint3);\n" "ushort3 __ovld __cnfn convert_ushort3_rte(long3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rte(long3);\n" "ushort3 __ovld __cnfn convert_ushort3_rtz(long3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rtz(long3);\n" "ushort3 __ovld __cnfn convert_ushort3_rtp(long3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rtp(long3);\n" "ushort3 __ovld __cnfn convert_ushort3_rtn(long3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rtn(long3);\n" "ushort3 __ovld __cnfn convert_ushort3(long3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat(long3);\n" "ushort3 __ovld __cnfn convert_ushort3_rte(ulong3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rte(ulong3);\n" "ushort3 __ovld __cnfn convert_ushort3_rtz(ulong3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rtz(ulong3);\n" "ushort3 __ovld __cnfn convert_ushort3_rtp(ulong3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rtp(ulong3);\n" "ushort3 __ovld __cnfn convert_ushort3_rtn(ulong3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rtn(ulong3);\n" "ushort3 __ovld __cnfn convert_ushort3(ulong3);\n" "ushort3 __ovld __cnfn 
convert_ushort3_sat(ulong3);\n" "ushort3 __ovld __cnfn convert_ushort3_rte(float3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rte(float3);\n" "ushort3 __ovld __cnfn convert_ushort3_rtz(float3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rtz(float3);\n" "ushort3 __ovld __cnfn convert_ushort3_rtp(float3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rtp(float3);\n" "ushort3 __ovld __cnfn convert_ushort3_rtn(float3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rtn(float3);\n" "ushort3 __ovld __cnfn convert_ushort3(float3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat(float3);\n" "int3 __ovld __cnfn convert_int3_rte(char3);\n" "int3 __ovld __cnfn convert_int3_sat_rte(char3);\n" "int3 __ovld __cnfn convert_int3_rtz(char3);\n" "int3 __ovld __cnfn convert_int3_sat_rtz(char3);\n" "int3 __ovld __cnfn convert_int3_rtp(char3);\n" "int3 __ovld __cnfn convert_int3_sat_rtp(char3);\n" "int3 __ovld __cnfn convert_int3_rtn(char3);\n" "int3 __ovld __cnfn convert_int3_sat_rtn(char3);\n" "int3 __ovld __cnfn convert_int3(char3);\n" "int3 __ovld __cnfn convert_int3_sat(char3);\n" "int3 __ovld __cnfn convert_int3_rte(uchar3);\n" "int3 __ovld __cnfn convert_int3_sat_rte(uchar3);\n" "int3 __ovld __cnfn convert_int3_rtz(uchar3);\n" "int3 __ovld __cnfn convert_int3_sat_rtz(uchar3);\n" "int3 __ovld __cnfn convert_int3_rtp(uchar3);\n" "int3 __ovld __cnfn convert_int3_sat_rtp(uchar3);\n" "int3 __ovld __cnfn convert_int3_rtn(uchar3);\n" "int3 __ovld __cnfn convert_int3_sat_rtn(uchar3);\n" "int3 __ovld __cnfn convert_int3(uchar3);\n" "int3 __ovld __cnfn convert_int3_sat(uchar3);\n" "int3 __ovld __cnfn convert_int3_rte(short3);\n" "int3 __ovld __cnfn convert_int3_sat_rte(short3);\n" "int3 __ovld __cnfn convert_int3_rtz(short3);\n" "int3 __ovld __cnfn convert_int3_sat_rtz(short3);\n" "int3 __ovld __cnfn convert_int3_rtp(short3);\n" "int3 __ovld __cnfn convert_int3_sat_rtp(short3);\n" "int3 __ovld __cnfn convert_int3_rtn(short3);\n" "int3 __ovld __cnfn convert_int3_sat_rtn(short3);\n" 
"int3 __ovld __cnfn convert_int3(short3);\n" "int3 __ovld __cnfn convert_int3_sat(short3);\n" "int3 __ovld __cnfn convert_int3_rte(ushort3);\n" "int3 __ovld __cnfn convert_int3_sat_rte(ushort3);\n" "int3 __ovld __cnfn convert_int3_rtz(ushort3);\n" "int3 __ovld __cnfn convert_int3_sat_rtz(ushort3);\n" "int3 __ovld __cnfn convert_int3_rtp(ushort3);\n" "int3 __ovld __cnfn convert_int3_sat_rtp(ushort3);\n" "int3 __ovld __cnfn convert_int3_rtn(ushort3);\n" "int3 __ovld __cnfn convert_int3_sat_rtn(ushort3);\n" "int3 __ovld __cnfn convert_int3(ushort3);\n" "int3 __ovld __cnfn convert_int3_sat(ushort3);\n" "int3 __ovld __cnfn convert_int3_rte(int3);\n" "int3 __ovld __cnfn convert_int3_sat_rte(int3);\n" "int3 __ovld __cnfn convert_int3_rtz(int3);\n" "int3 __ovld __cnfn convert_int3_sat_rtz(int3);\n" "int3 __ovld __cnfn convert_int3_rtp(int3);\n" "int3 __ovld __cnfn convert_int3_sat_rtp(int3);\n" "int3 __ovld __cnfn convert_int3_rtn(int3);\n" "int3 __ovld __cnfn convert_int3_sat_rtn(int3);\n" "int3 __ovld __cnfn convert_int3(int3);\n" "int3 __ovld __cnfn convert_int3_sat(int3);\n" "int3 __ovld __cnfn convert_int3_rte(uint3);\n" "int3 __ovld __cnfn convert_int3_sat_rte(uint3);\n" "int3 __ovld __cnfn convert_int3_rtz(uint3);\n" "int3 __ovld __cnfn convert_int3_sat_rtz(uint3);\n" "int3 __ovld __cnfn convert_int3_rtp(uint3);\n" "int3 __ovld __cnfn convert_int3_sat_rtp(uint3);\n" "int3 __ovld __cnfn convert_int3_rtn(uint3);\n" "int3 __ovld __cnfn convert_int3_sat_rtn(uint3);\n" "int3 __ovld __cnfn convert_int3(uint3);\n" "int3 __ovld __cnfn convert_int3_sat(uint3);\n" "int3 __ovld __cnfn convert_int3_rte(long3);\n" "int3 __ovld __cnfn convert_int3_sat_rte(long3);\n" "int3 __ovld __cnfn convert_int3_rtz(long3);\n" "int3 __ovld __cnfn convert_int3_sat_rtz(long3);\n" "int3 __ovld __cnfn convert_int3_rtp(long3);\n" "int3 __ovld __cnfn convert_int3_sat_rtp(long3);\n" "int3 __ovld __cnfn convert_int3_rtn(long3);\n" "int3 __ovld __cnfn convert_int3_sat_rtn(long3);\n" "int3 __ovld __cnfn 
convert_int3(long3);\n" "int3 __ovld __cnfn convert_int3_sat(long3);\n" "int3 __ovld __cnfn convert_int3_rte(ulong3);\n" "int3 __ovld __cnfn convert_int3_sat_rte(ulong3);\n" "int3 __ovld __cnfn convert_int3_rtz(ulong3);\n" "int3 __ovld __cnfn convert_int3_sat_rtz(ulong3);\n" "int3 __ovld __cnfn convert_int3_rtp(ulong3);\n" "int3 __ovld __cnfn convert_int3_sat_rtp(ulong3);\n" "int3 __ovld __cnfn convert_int3_rtn(ulong3);\n" "int3 __ovld __cnfn convert_int3_sat_rtn(ulong3);\n" "int3 __ovld __cnfn convert_int3(ulong3);\n" "int3 __ovld __cnfn convert_int3_sat(ulong3);\n" "int3 __ovld __cnfn convert_int3_rte(float3);\n" "int3 __ovld __cnfn convert_int3_sat_rte(float3);\n" "int3 __ovld __cnfn convert_int3_rtz(float3);\n" "int3 __ovld __cnfn convert_int3_sat_rtz(float3);\n" "int3 __ovld __cnfn convert_int3_rtp(float3);\n" "int3 __ovld __cnfn convert_int3_sat_rtp(float3);\n" "int3 __ovld __cnfn convert_int3_rtn(float3);\n" "int3 __ovld __cnfn convert_int3_sat_rtn(float3);\n" "int3 __ovld __cnfn convert_int3(float3);\n" "int3 __ovld __cnfn convert_int3_sat(float3);\n" "uint3 __ovld __cnfn convert_uint3_rte(char3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rte(char3);\n" "uint3 __ovld __cnfn convert_uint3_rtz(char3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rtz(char3);\n" "uint3 __ovld __cnfn convert_uint3_rtp(char3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rtp(char3);\n" "uint3 __ovld __cnfn convert_uint3_rtn(char3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rtn(char3);\n" "uint3 __ovld __cnfn convert_uint3(char3);\n" "uint3 __ovld __cnfn convert_uint3_sat(char3);\n" "uint3 __ovld __cnfn convert_uint3_rte(uchar3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rte(uchar3);\n" "uint3 __ovld __cnfn convert_uint3_rtz(uchar3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rtz(uchar3);\n" "uint3 __ovld __cnfn convert_uint3_rtp(uchar3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rtp(uchar3);\n" "uint3 __ovld __cnfn convert_uint3_rtn(uchar3);\n" "uint3 __ovld __cnfn 
convert_uint3_sat_rtn(uchar3);\n" "uint3 __ovld __cnfn convert_uint3(uchar3);\n" "uint3 __ovld __cnfn convert_uint3_sat(uchar3);\n" "uint3 __ovld __cnfn convert_uint3_rte(short3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rte(short3);\n" "uint3 __ovld __cnfn convert_uint3_rtz(short3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rtz(short3);\n" "uint3 __ovld __cnfn convert_uint3_rtp(short3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rtp(short3);\n" "uint3 __ovld __cnfn convert_uint3_rtn(short3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rtn(short3);\n" "uint3 __ovld __cnfn convert_uint3(short3);\n" "uint3 __ovld __cnfn convert_uint3_sat(short3);\n" "uint3 __ovld __cnfn convert_uint3_rte(ushort3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rte(ushort3);\n" "uint3 __ovld __cnfn convert_uint3_rtz(ushort3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rtz(ushort3);\n" "uint3 __ovld __cnfn convert_uint3_rtp(ushort3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rtp(ushort3);\n" "uint3 __ovld __cnfn convert_uint3_rtn(ushort3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rtn(ushort3);\n" "uint3 __ovld __cnfn convert_uint3(ushort3);\n" "uint3 __ovld __cnfn convert_uint3_sat(ushort3);\n" "uint3 __ovld __cnfn convert_uint3_rte(int3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rte(int3);\n" "uint3 __ovld __cnfn convert_uint3_rtz(int3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rtz(int3);\n" "uint3 __ovld __cnfn convert_uint3_rtp(int3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rtp(int3);\n" "uint3 __ovld __cnfn convert_uint3_rtn(int3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rtn(int3);\n" "uint3 __ovld __cnfn convert_uint3(int3);\n" "uint3 __ovld __cnfn convert_uint3_sat(int3);\n" "uint3 __ovld __cnfn convert_uint3_rte(uint3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rte(uint3);\n" "uint3 __ovld __cnfn convert_uint3_rtz(uint3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rtz(uint3);\n" "uint3 __ovld __cnfn convert_uint3_rtp(uint3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rtp(uint3);\n" 
"uint3 __ovld __cnfn convert_uint3_rtn(uint3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rtn(uint3);\n" "uint3 __ovld __cnfn convert_uint3(uint3);\n" "uint3 __ovld __cnfn convert_uint3_sat(uint3);\n" "uint3 __ovld __cnfn convert_uint3_rte(long3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rte(long3);\n" "uint3 __ovld __cnfn convert_uint3_rtz(long3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rtz(long3);\n" "uint3 __ovld __cnfn convert_uint3_rtp(long3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rtp(long3);\n" "uint3 __ovld __cnfn convert_uint3_rtn(long3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rtn(long3);\n" "uint3 __ovld __cnfn convert_uint3(long3);\n" "uint3 __ovld __cnfn convert_uint3_sat(long3);\n" "uint3 __ovld __cnfn convert_uint3_rte(ulong3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rte(ulong3);\n" "uint3 __ovld __cnfn convert_uint3_rtz(ulong3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rtz(ulong3);\n" "uint3 __ovld __cnfn convert_uint3_rtp(ulong3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rtp(ulong3);\n" "uint3 __ovld __cnfn convert_uint3_rtn(ulong3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rtn(ulong3);\n" "uint3 __ovld __cnfn convert_uint3(ulong3);\n" "uint3 __ovld __cnfn convert_uint3_sat(ulong3);\n" "uint3 __ovld __cnfn convert_uint3_rte(float3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rte(float3);\n" "uint3 __ovld __cnfn convert_uint3_rtz(float3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rtz(float3);\n" "uint3 __ovld __cnfn convert_uint3_rtp(float3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rtp(float3);\n" "uint3 __ovld __cnfn convert_uint3_rtn(float3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rtn(float3);\n" "uint3 __ovld __cnfn convert_uint3(float3);\n" "uint3 __ovld __cnfn convert_uint3_sat(float3);\n" "long3 __ovld __cnfn convert_long3_rte(char3);\n" "long3 __ovld __cnfn convert_long3_sat_rte(char3);\n" "long3 __ovld __cnfn convert_long3_rtz(char3);\n" "long3 __ovld __cnfn convert_long3_sat_rtz(char3);\n" "long3 __ovld __cnfn 
convert_long3_rtp(char3);\n" "long3 __ovld __cnfn convert_long3_sat_rtp(char3);\n" "long3 __ovld __cnfn convert_long3_rtn(char3);\n" "long3 __ovld __cnfn convert_long3_sat_rtn(char3);\n" "long3 __ovld __cnfn convert_long3(char3);\n" "long3 __ovld __cnfn convert_long3_sat(char3);\n" "long3 __ovld __cnfn convert_long3_rte(uchar3);\n" "long3 __ovld __cnfn convert_long3_sat_rte(uchar3);\n" "long3 __ovld __cnfn convert_long3_rtz(uchar3);\n" "long3 __ovld __cnfn convert_long3_sat_rtz(uchar3);\n" "long3 __ovld __cnfn convert_long3_rtp(uchar3);\n" "long3 __ovld __cnfn convert_long3_sat_rtp(uchar3);\n" "long3 __ovld __cnfn convert_long3_rtn(uchar3);\n" "long3 __ovld __cnfn convert_long3_sat_rtn(uchar3);\n" "long3 __ovld __cnfn convert_long3(uchar3);\n" "long3 __ovld __cnfn convert_long3_sat(uchar3);\n" "long3 __ovld __cnfn convert_long3_rte(short3);\n" "long3 __ovld __cnfn convert_long3_sat_rte(short3);\n" "long3 __ovld __cnfn convert_long3_rtz(short3);\n" "long3 __ovld __cnfn convert_long3_sat_rtz(short3);\n" "long3 __ovld __cnfn convert_long3_rtp(short3);\n" "long3 __ovld __cnfn convert_long3_sat_rtp(short3);\n" "long3 __ovld __cnfn convert_long3_rtn(short3);\n" "long3 __ovld __cnfn convert_long3_sat_rtn(short3);\n" "long3 __ovld __cnfn convert_long3(short3);\n" "long3 __ovld __cnfn convert_long3_sat(short3);\n" "long3 __ovld __cnfn convert_long3_rte(ushort3);\n" "long3 __ovld __cnfn convert_long3_sat_rte(ushort3);\n" "long3 __ovld __cnfn convert_long3_rtz(ushort3);\n" "long3 __ovld __cnfn convert_long3_sat_rtz(ushort3);\n" "long3 __ovld __cnfn convert_long3_rtp(ushort3);\n" "long3 __ovld __cnfn convert_long3_sat_rtp(ushort3);\n" "long3 __ovld __cnfn convert_long3_rtn(ushort3);\n" "long3 __ovld __cnfn convert_long3_sat_rtn(ushort3);\n" "long3 __ovld __cnfn convert_long3(ushort3);\n" "long3 __ovld __cnfn convert_long3_sat(ushort3);\n" "long3 __ovld __cnfn convert_long3_rte(int3);\n" "long3 __ovld __cnfn convert_long3_sat_rte(int3);\n" "long3 __ovld __cnfn 
convert_long3_rtz(int3);\n" "long3 __ovld __cnfn convert_long3_sat_rtz(int3);\n" "long3 __ovld __cnfn convert_long3_rtp(int3);\n" "long3 __ovld __cnfn convert_long3_sat_rtp(int3);\n" "long3 __ovld __cnfn convert_long3_rtn(int3);\n" "long3 __ovld __cnfn convert_long3_sat_rtn(int3);\n" "long3 __ovld __cnfn convert_long3(int3);\n" "long3 __ovld __cnfn convert_long3_sat(int3);\n" "long3 __ovld __cnfn convert_long3_rte(uint3);\n" "long3 __ovld __cnfn convert_long3_sat_rte(uint3);\n" "long3 __ovld __cnfn convert_long3_rtz(uint3);\n" "long3 __ovld __cnfn convert_long3_sat_rtz(uint3);\n" "long3 __ovld __cnfn convert_long3_rtp(uint3);\n" "long3 __ovld __cnfn convert_long3_sat_rtp(uint3);\n" "long3 __ovld __cnfn convert_long3_rtn(uint3);\n" "long3 __ovld __cnfn convert_long3_sat_rtn(uint3);\n" "long3 __ovld __cnfn convert_long3(uint3);\n" "long3 __ovld __cnfn convert_long3_sat(uint3);\n" "long3 __ovld __cnfn convert_long3_rte(long3);\n" "long3 __ovld __cnfn convert_long3_sat_rte(long3);\n" "long3 __ovld __cnfn convert_long3_rtz(long3);\n" "long3 __ovld __cnfn convert_long3_sat_rtz(long3);\n" "long3 __ovld __cnfn convert_long3_rtp(long3);\n" "long3 __ovld __cnfn convert_long3_sat_rtp(long3);\n" "long3 __ovld __cnfn convert_long3_rtn(long3);\n" "long3 __ovld __cnfn convert_long3_sat_rtn(long3);\n" "long3 __ovld __cnfn convert_long3(long3);\n" "long3 __ovld __cnfn convert_long3_sat(long3);\n" "long3 __ovld __cnfn convert_long3_rte(ulong3);\n" "long3 __ovld __cnfn convert_long3_sat_rte(ulong3);\n" "long3 __ovld __cnfn convert_long3_rtz(ulong3);\n" "long3 __ovld __cnfn convert_long3_sat_rtz(ulong3);\n" "long3 __ovld __cnfn convert_long3_rtp(ulong3);\n" "long3 __ovld __cnfn convert_long3_sat_rtp(ulong3);\n" "long3 __ovld __cnfn convert_long3_rtn(ulong3);\n" "long3 __ovld __cnfn convert_long3_sat_rtn(ulong3);\n" "long3 __ovld __cnfn convert_long3(ulong3);\n" "long3 __ovld __cnfn convert_long3_sat(ulong3);\n" "long3 __ovld __cnfn convert_long3_rte(float3);\n" "long3 __ovld __cnfn 
convert_long3_sat_rte(float3);\n" "long3 __ovld __cnfn convert_long3_rtz(float3);\n" "long3 __ovld __cnfn convert_long3_sat_rtz(float3);\n" "long3 __ovld __cnfn convert_long3_rtp(float3);\n" "long3 __ovld __cnfn convert_long3_sat_rtp(float3);\n" "long3 __ovld __cnfn convert_long3_rtn(float3);\n" "long3 __ovld __cnfn convert_long3_sat_rtn(float3);\n" "long3 __ovld __cnfn convert_long3(float3);\n" "long3 __ovld __cnfn convert_long3_sat(float3);\n" "ulong3 __ovld __cnfn convert_ulong3_rte(char3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rte(char3);\n" "ulong3 __ovld __cnfn convert_ulong3_rtz(char3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rtz(char3);\n" "ulong3 __ovld __cnfn convert_ulong3_rtp(char3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rtp(char3);\n" "ulong3 __ovld __cnfn convert_ulong3_rtn(char3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rtn(char3);\n" "ulong3 __ovld __cnfn convert_ulong3(char3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat(char3);\n" "ulong3 __ovld __cnfn convert_ulong3_rte(uchar3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rte(uchar3);\n" "ulong3 __ovld __cnfn convert_ulong3_rtz(uchar3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rtz(uchar3);\n" "ulong3 __ovld __cnfn convert_ulong3_rtp(uchar3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rtp(uchar3);\n" "ulong3 __ovld __cnfn convert_ulong3_rtn(uchar3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rtn(uchar3);\n" "ulong3 __ovld __cnfn convert_ulong3(uchar3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat(uchar3);\n" "ulong3 __ovld __cnfn convert_ulong3_rte(short3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rte(short3);\n" "ulong3 __ovld __cnfn convert_ulong3_rtz(short3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rtz(short3);\n" "ulong3 __ovld __cnfn convert_ulong3_rtp(short3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rtp(short3);\n" "ulong3 __ovld __cnfn convert_ulong3_rtn(short3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rtn(short3);\n" "ulong3 __ovld __cnfn 
convert_ulong3(short3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat(short3);\n" "ulong3 __ovld __cnfn convert_ulong3_rte(ushort3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rte(ushort3);\n" "ulong3 __ovld __cnfn convert_ulong3_rtz(ushort3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rtz(ushort3);\n" "ulong3 __ovld __cnfn convert_ulong3_rtp(ushort3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rtp(ushort3);\n" "ulong3 __ovld __cnfn convert_ulong3_rtn(ushort3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rtn(ushort3);\n" "ulong3 __ovld __cnfn convert_ulong3(ushort3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat(ushort3);\n" "ulong3 __ovld __cnfn convert_ulong3_rte(int3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rte(int3);\n" "ulong3 __ovld __cnfn convert_ulong3_rtz(int3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rtz(int3);\n" "ulong3 __ovld __cnfn convert_ulong3_rtp(int3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rtp(int3);\n" "ulong3 __ovld __cnfn convert_ulong3_rtn(int3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rtn(int3);\n" "ulong3 __ovld __cnfn convert_ulong3(int3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat(int3);\n" "ulong3 __ovld __cnfn convert_ulong3_rte(uint3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rte(uint3);\n" "ulong3 __ovld __cnfn convert_ulong3_rtz(uint3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rtz(uint3);\n" "ulong3 __ovld __cnfn convert_ulong3_rtp(uint3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rtp(uint3);\n" "ulong3 __ovld __cnfn convert_ulong3_rtn(uint3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rtn(uint3);\n" "ulong3 __ovld __cnfn convert_ulong3(uint3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat(uint3);\n" "ulong3 __ovld __cnfn convert_ulong3_rte(long3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rte(long3);\n" "ulong3 __ovld __cnfn convert_ulong3_rtz(long3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rtz(long3);\n" "ulong3 __ovld __cnfn convert_ulong3_rtp(long3);\n" "ulong3 __ovld __cnfn 
convert_ulong3_sat_rtp(long3);\n" "ulong3 __ovld __cnfn convert_ulong3_rtn(long3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rtn(long3);\n" "ulong3 __ovld __cnfn convert_ulong3(long3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat(long3);\n" "ulong3 __ovld __cnfn convert_ulong3_rte(ulong3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rte(ulong3);\n" "ulong3 __ovld __cnfn convert_ulong3_rtz(ulong3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rtz(ulong3);\n" "ulong3 __ovld __cnfn convert_ulong3_rtp(ulong3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rtp(ulong3);\n" "ulong3 __ovld __cnfn convert_ulong3_rtn(ulong3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rtn(ulong3);\n" "ulong3 __ovld __cnfn convert_ulong3(ulong3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat(ulong3);\n" "ulong3 __ovld __cnfn convert_ulong3_rte(float3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rte(float3);\n" "ulong3 __ovld __cnfn convert_ulong3_rtz(float3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rtz(float3);\n" "ulong3 __ovld __cnfn convert_ulong3_rtp(float3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rtp(float3);\n" "ulong3 __ovld __cnfn convert_ulong3_rtn(float3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rtn(float3);\n" "ulong3 __ovld __cnfn convert_ulong3(float3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat(float3);\n" "float3 __ovld __cnfn convert_float3_rte(char3);\n" "float3 __ovld __cnfn convert_float3_rtz(char3);\n" "float3 __ovld __cnfn convert_float3_rtp(char3);\n" "float3 __ovld __cnfn convert_float3_rtn(char3);\n" "float3 __ovld __cnfn convert_float3(char3);\n" "float3 __ovld __cnfn convert_float3_rte(uchar3);\n" "float3 __ovld __cnfn convert_float3_rtz(uchar3);\n" "float3 __ovld __cnfn convert_float3_rtp(uchar3);\n" "float3 __ovld __cnfn convert_float3_rtn(uchar3);\n" "float3 __ovld __cnfn convert_float3(uchar3);\n" "float3 __ovld __cnfn convert_float3_rte(short3);\n" "float3 __ovld __cnfn convert_float3_rtz(short3);\n" "float3 __ovld __cnfn 
convert_float3_rtp(short3);\n" "float3 __ovld __cnfn convert_float3_rtn(short3);\n" "float3 __ovld __cnfn convert_float3(short3);\n" "float3 __ovld __cnfn convert_float3_rte(ushort3);\n" "float3 __ovld __cnfn convert_float3_rtz(ushort3);\n" "float3 __ovld __cnfn convert_float3_rtp(ushort3);\n" "float3 __ovld __cnfn convert_float3_rtn(ushort3);\n" "float3 __ovld __cnfn convert_float3(ushort3);\n" "float3 __ovld __cnfn convert_float3_rte(int3);\n" "float3 __ovld __cnfn convert_float3_rtz(int3);\n" "float3 __ovld __cnfn convert_float3_rtp(int3);\n" "float3 __ovld __cnfn convert_float3_rtn(int3);\n" "float3 __ovld __cnfn convert_float3(int3);\n" "float3 __ovld __cnfn convert_float3_rte(uint3);\n" "float3 __ovld __cnfn convert_float3_rtz(uint3);\n" "float3 __ovld __cnfn convert_float3_rtp(uint3);\n" "float3 __ovld __cnfn convert_float3_rtn(uint3);\n" "float3 __ovld __cnfn convert_float3(uint3);\n" "float3 __ovld __cnfn convert_float3_rte(long3);\n" "float3 __ovld __cnfn convert_float3_rtz(long3);\n" "float3 __ovld __cnfn convert_float3_rtp(long3);\n" "float3 __ovld __cnfn convert_float3_rtn(long3);\n" "float3 __ovld __cnfn convert_float3(long3);\n" "float3 __ovld __cnfn convert_float3_rte(ulong3);\n" "float3 __ovld __cnfn convert_float3_rtz(ulong3);\n" "float3 __ovld __cnfn convert_float3_rtp(ulong3);\n" "float3 __ovld __cnfn convert_float3_rtn(ulong3);\n" "float3 __ovld __cnfn convert_float3(ulong3);\n" "float3 __ovld __cnfn convert_float3_rte(float3);\n" "float3 __ovld __cnfn convert_float3_rtz(float3);\n" "float3 __ovld __cnfn convert_float3_rtp(float3);\n" "float3 __ovld __cnfn convert_float3_rtn(float3);\n" "float3 __ovld __cnfn convert_float3(float3);\n" "char4 __ovld __cnfn convert_char4_rte(char4);\n" "char4 __ovld __cnfn convert_char4_sat_rte(char4);\n" "char4 __ovld __cnfn convert_char4_rtz(char4);\n" "char4 __ovld __cnfn convert_char4_sat_rtz(char4);\n" "char4 __ovld __cnfn convert_char4_rtp(char4);\n" "char4 __ovld __cnfn convert_char4_sat_rtp(char4);\n" 
"char4 __ovld __cnfn convert_char4_rtn(char4);\n" "char4 __ovld __cnfn convert_char4_sat_rtn(char4);\n" "char4 __ovld __cnfn convert_char4(char4);\n" "char4 __ovld __cnfn convert_char4_sat(char4);\n" "char4 __ovld __cnfn convert_char4_rte(uchar4);\n" "char4 __ovld __cnfn convert_char4_sat_rte(uchar4);\n" "char4 __ovld __cnfn convert_char4_rtz(uchar4);\n" "char4 __ovld __cnfn convert_char4_sat_rtz(uchar4);\n" "char4 __ovld __cnfn convert_char4_rtp(uchar4);\n" "char4 __ovld __cnfn convert_char4_sat_rtp(uchar4);\n" "char4 __ovld __cnfn convert_char4_rtn(uchar4);\n" "char4 __ovld __cnfn convert_char4_sat_rtn(uchar4);\n" "char4 __ovld __cnfn convert_char4(uchar4);\n" "char4 __ovld __cnfn convert_char4_sat(uchar4);\n" "char4 __ovld __cnfn convert_char4_rte(short4);\n" "char4 __ovld __cnfn convert_char4_sat_rte(short4);\n" "char4 __ovld __cnfn convert_char4_rtz(short4);\n" "char4 __ovld __cnfn convert_char4_sat_rtz(short4);\n" "char4 __ovld __cnfn convert_char4_rtp(short4);\n" "char4 __ovld __cnfn convert_char4_sat_rtp(short4);\n" "char4 __ovld __cnfn convert_char4_rtn(short4);\n" "char4 __ovld __cnfn convert_char4_sat_rtn(short4);\n" "char4 __ovld __cnfn convert_char4(short4);\n" "char4 __ovld __cnfn convert_char4_sat(short4);\n" "char4 __ovld __cnfn convert_char4_rte(ushort4);\n" "char4 __ovld __cnfn convert_char4_sat_rte(ushort4);\n" "char4 __ovld __cnfn convert_char4_rtz(ushort4);\n" "char4 __ovld __cnfn convert_char4_sat_rtz(ushort4);\n" "char4 __ovld __cnfn convert_char4_rtp(ushort4);\n" "char4 __ovld __cnfn convert_char4_sat_rtp(ushort4);\n" "char4 __ovld __cnfn convert_char4_rtn(ushort4);\n" "char4 __ovld __cnfn convert_char4_sat_rtn(ushort4);\n" "char4 __ovld __cnfn convert_char4(ushort4);\n" "char4 __ovld __cnfn convert_char4_sat(ushort4);\n" "char4 __ovld __cnfn convert_char4_rte(int4);\n" "char4 __ovld __cnfn convert_char4_sat_rte(int4);\n" "char4 __ovld __cnfn convert_char4_rtz(int4);\n" "char4 __ovld __cnfn convert_char4_sat_rtz(int4);\n" "char4 __ovld 
__cnfn convert_char4_rtp(int4);\n" "char4 __ovld __cnfn convert_char4_sat_rtp(int4);\n" "char4 __ovld __cnfn convert_char4_rtn(int4);\n" "char4 __ovld __cnfn convert_char4_sat_rtn(int4);\n" "char4 __ovld __cnfn convert_char4(int4);\n" "char4 __ovld __cnfn convert_char4_sat(int4);\n" "char4 __ovld __cnfn convert_char4_rte(uint4);\n" "char4 __ovld __cnfn convert_char4_sat_rte(uint4);\n" "char4 __ovld __cnfn convert_char4_rtz(uint4);\n" "char4 __ovld __cnfn convert_char4_sat_rtz(uint4);\n" "char4 __ovld __cnfn convert_char4_rtp(uint4);\n" "char4 __ovld __cnfn convert_char4_sat_rtp(uint4);\n" "char4 __ovld __cnfn convert_char4_rtn(uint4);\n" "char4 __ovld __cnfn convert_char4_sat_rtn(uint4);\n" "char4 __ovld __cnfn convert_char4(uint4);\n" "char4 __ovld __cnfn convert_char4_sat(uint4);\n" "char4 __ovld __cnfn convert_char4_rte(long4);\n" "char4 __ovld __cnfn convert_char4_sat_rte(long4);\n" "char4 __ovld __cnfn convert_char4_rtz(long4);\n" "char4 __ovld __cnfn convert_char4_sat_rtz(long4);\n" "char4 __ovld __cnfn convert_char4_rtp(long4);\n" "char4 __ovld __cnfn convert_char4_sat_rtp(long4);\n" "char4 __ovld __cnfn convert_char4_rtn(long4);\n" "char4 __ovld __cnfn convert_char4_sat_rtn(long4);\n" "char4 __ovld __cnfn convert_char4(long4);\n" "char4 __ovld __cnfn convert_char4_sat(long4);\n" "char4 __ovld __cnfn convert_char4_rte(ulong4);\n" "char4 __ovld __cnfn convert_char4_sat_rte(ulong4);\n" "char4 __ovld __cnfn convert_char4_rtz(ulong4);\n" "char4 __ovld __cnfn convert_char4_sat_rtz(ulong4);\n" "char4 __ovld __cnfn convert_char4_rtp(ulong4);\n" "char4 __ovld __cnfn convert_char4_sat_rtp(ulong4);\n" "char4 __ovld __cnfn convert_char4_rtn(ulong4);\n" "char4 __ovld __cnfn convert_char4_sat_rtn(ulong4);\n" "char4 __ovld __cnfn convert_char4(ulong4);\n" "char4 __ovld __cnfn convert_char4_sat(ulong4);\n" "char4 __ovld __cnfn convert_char4_rte(float4);\n" "char4 __ovld __cnfn convert_char4_sat_rte(float4);\n" "char4 __ovld __cnfn convert_char4_rtz(float4);\n" "char4 
__ovld __cnfn convert_char4_sat_rtz(float4);\n" "char4 __ovld __cnfn convert_char4_rtp(float4);\n" "char4 __ovld __cnfn convert_char4_sat_rtp(float4);\n" "char4 __ovld __cnfn convert_char4_rtn(float4);\n" "char4 __ovld __cnfn convert_char4_sat_rtn(float4);\n" "char4 __ovld __cnfn convert_char4(float4);\n" "char4 __ovld __cnfn convert_char4_sat(float4);\n" "uchar4 __ovld __cnfn convert_uchar4_rte(char4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rte(char4);\n" "uchar4 __ovld __cnfn convert_uchar4_rtz(char4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rtz(char4);\n" "uchar4 __ovld __cnfn convert_uchar4_rtp(char4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rtp(char4);\n" "uchar4 __ovld __cnfn convert_uchar4_rtn(char4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rtn(char4);\n" "uchar4 __ovld __cnfn convert_uchar4(char4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat(char4);\n" "uchar4 __ovld __cnfn convert_uchar4_rte(uchar4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rte(uchar4);\n" "uchar4 __ovld __cnfn convert_uchar4_rtz(uchar4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rtz(uchar4);\n" "uchar4 __ovld __cnfn convert_uchar4_rtp(uchar4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rtp(uchar4);\n" "uchar4 __ovld __cnfn convert_uchar4_rtn(uchar4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rtn(uchar4);\n" "uchar4 __ovld __cnfn convert_uchar4(uchar4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat(uchar4);\n" "uchar4 __ovld __cnfn convert_uchar4_rte(short4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rte(short4);\n" "uchar4 __ovld __cnfn convert_uchar4_rtz(short4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rtz(short4);\n" "uchar4 __ovld __cnfn convert_uchar4_rtp(short4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rtp(short4);\n" "uchar4 __ovld __cnfn convert_uchar4_rtn(short4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rtn(short4);\n" "uchar4 __ovld __cnfn convert_uchar4(short4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat(short4);\n" "uchar4 __ovld __cnfn 
convert_uchar4_rte(ushort4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rte(ushort4);\n" "uchar4 __ovld __cnfn convert_uchar4_rtz(ushort4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rtz(ushort4);\n" "uchar4 __ovld __cnfn convert_uchar4_rtp(ushort4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rtp(ushort4);\n" "uchar4 __ovld __cnfn convert_uchar4_rtn(ushort4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rtn(ushort4);\n" "uchar4 __ovld __cnfn convert_uchar4(ushort4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat(ushort4);\n" "uchar4 __ovld __cnfn convert_uchar4_rte(int4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rte(int4);\n" "uchar4 __ovld __cnfn convert_uchar4_rtz(int4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rtz(int4);\n" "uchar4 __ovld __cnfn convert_uchar4_rtp(int4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rtp(int4);\n" "uchar4 __ovld __cnfn convert_uchar4_rtn(int4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rtn(int4);\n" "uchar4 __ovld __cnfn convert_uchar4(int4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat(int4);\n" "uchar4 __ovld __cnfn convert_uchar4_rte(uint4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rte(uint4);\n" "uchar4 __ovld __cnfn convert_uchar4_rtz(uint4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rtz(uint4);\n" "uchar4 __ovld __cnfn convert_uchar4_rtp(uint4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rtp(uint4);\n" "uchar4 __ovld __cnfn convert_uchar4_rtn(uint4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rtn(uint4);\n" "uchar4 __ovld __cnfn convert_uchar4(uint4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat(uint4);\n" "uchar4 __ovld __cnfn convert_uchar4_rte(long4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rte(long4);\n" "uchar4 __ovld __cnfn convert_uchar4_rtz(long4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rtz(long4);\n" "uchar4 __ovld __cnfn convert_uchar4_rtp(long4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rtp(long4);\n" "uchar4 __ovld __cnfn convert_uchar4_rtn(long4);\n" "uchar4 __ovld __cnfn 
convert_uchar4_sat_rtn(long4);\n" "uchar4 __ovld __cnfn convert_uchar4(long4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat(long4);\n" "uchar4 __ovld __cnfn convert_uchar4_rte(ulong4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rte(ulong4);\n" "uchar4 __ovld __cnfn convert_uchar4_rtz(ulong4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rtz(ulong4);\n" "uchar4 __ovld __cnfn convert_uchar4_rtp(ulong4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rtp(ulong4);\n" "uchar4 __ovld __cnfn convert_uchar4_rtn(ulong4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rtn(ulong4);\n" "uchar4 __ovld __cnfn convert_uchar4(ulong4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat(ulong4);\n" "uchar4 __ovld __cnfn convert_uchar4_rte(float4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rte(float4);\n" "uchar4 __ovld __cnfn convert_uchar4_rtz(float4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rtz(float4);\n" "uchar4 __ovld __cnfn convert_uchar4_rtp(float4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rtp(float4);\n" "uchar4 __ovld __cnfn convert_uchar4_rtn(float4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rtn(float4);\n" "uchar4 __ovld __cnfn convert_uchar4(float4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat(float4);\n" "short4 __ovld __cnfn convert_short4_rte(char4);\n" "short4 __ovld __cnfn convert_short4_sat_rte(char4);\n" "short4 __ovld __cnfn convert_short4_rtz(char4);\n" "short4 __ovld __cnfn convert_short4_sat_rtz(char4);\n" "short4 __ovld __cnfn convert_short4_rtp(char4);\n" "short4 __ovld __cnfn convert_short4_sat_rtp(char4);\n" "short4 __ovld __cnfn convert_short4_rtn(char4);\n" "short4 __ovld __cnfn convert_short4_sat_rtn(char4);\n" "short4 __ovld __cnfn convert_short4(char4);\n" "short4 __ovld __cnfn convert_short4_sat(char4);\n" "short4 __ovld __cnfn convert_short4_rte(uchar4);\n" "short4 __ovld __cnfn convert_short4_sat_rte(uchar4);\n" "short4 __ovld __cnfn convert_short4_rtz(uchar4);\n" "short4 __ovld __cnfn convert_short4_sat_rtz(uchar4);\n" "short4 __ovld __cnfn 
convert_short4_rtp(uchar4);\n" "short4 __ovld __cnfn convert_short4_sat_rtp(uchar4);\n" "short4 __ovld __cnfn convert_short4_rtn(uchar4);\n" "short4 __ovld __cnfn convert_short4_sat_rtn(uchar4);\n" "short4 __ovld __cnfn convert_short4(uchar4);\n" "short4 __ovld __cnfn convert_short4_sat(uchar4);\n" "short4 __ovld __cnfn convert_short4_rte(short4);\n" "short4 __ovld __cnfn convert_short4_sat_rte(short4);\n" "short4 __ovld __cnfn convert_short4_rtz(short4);\n" "short4 __ovld __cnfn convert_short4_sat_rtz(short4);\n" "short4 __ovld __cnfn convert_short4_rtp(short4);\n" "short4 __ovld __cnfn convert_short4_sat_rtp(short4);\n" "short4 __ovld __cnfn convert_short4_rtn(short4);\n" "short4 __ovld __cnfn convert_short4_sat_rtn(short4);\n" "short4 __ovld __cnfn convert_short4(short4);\n" "short4 __ovld __cnfn convert_short4_sat(short4);\n" "short4 __ovld __cnfn convert_short4_rte(ushort4);\n" "short4 __ovld __cnfn convert_short4_sat_rte(ushort4);\n" "short4 __ovld __cnfn convert_short4_rtz(ushort4);\n" "short4 __ovld __cnfn convert_short4_sat_rtz(ushort4);\n" "short4 __ovld __cnfn convert_short4_rtp(ushort4);\n" "short4 __ovld __cnfn convert_short4_sat_rtp(ushort4);\n" "short4 __ovld __cnfn convert_short4_rtn(ushort4);\n" "short4 __ovld __cnfn convert_short4_sat_rtn(ushort4);\n" "short4 __ovld __cnfn convert_short4(ushort4);\n" "short4 __ovld __cnfn convert_short4_sat(ushort4);\n" "short4 __ovld __cnfn convert_short4_rte(int4);\n" "short4 __ovld __cnfn convert_short4_sat_rte(int4);\n" "short4 __ovld __cnfn convert_short4_rtz(int4);\n" "short4 __ovld __cnfn convert_short4_sat_rtz(int4);\n" "short4 __ovld __cnfn convert_short4_rtp(int4);\n" "short4 __ovld __cnfn convert_short4_sat_rtp(int4);\n" "short4 __ovld __cnfn convert_short4_rtn(int4);\n" "short4 __ovld __cnfn convert_short4_sat_rtn(int4);\n" "short4 __ovld __cnfn convert_short4(int4);\n" "short4 __ovld __cnfn convert_short4_sat(int4);\n" "short4 __ovld __cnfn convert_short4_rte(uint4);\n" "short4 __ovld __cnfn 
convert_short4_sat_rte(uint4);\n" "short4 __ovld __cnfn convert_short4_rtz(uint4);\n" "short4 __ovld __cnfn convert_short4_sat_rtz(uint4);\n" "short4 __ovld __cnfn convert_short4_rtp(uint4);\n" "short4 __ovld __cnfn convert_short4_sat_rtp(uint4);\n" "short4 __ovld __cnfn convert_short4_rtn(uint4);\n" "short4 __ovld __cnfn convert_short4_sat_rtn(uint4);\n" "short4 __ovld __cnfn convert_short4(uint4);\n" "short4 __ovld __cnfn convert_short4_sat(uint4);\n" "short4 __ovld __cnfn convert_short4_rte(long4);\n" "short4 __ovld __cnfn convert_short4_sat_rte(long4);\n" "short4 __ovld __cnfn convert_short4_rtz(long4);\n" "short4 __ovld __cnfn convert_short4_sat_rtz(long4);\n" "short4 __ovld __cnfn convert_short4_rtp(long4);\n" "short4 __ovld __cnfn convert_short4_sat_rtp(long4);\n" "short4 __ovld __cnfn convert_short4_rtn(long4);\n" "short4 __ovld __cnfn convert_short4_sat_rtn(long4);\n" "short4 __ovld __cnfn convert_short4(long4);\n" "short4 __ovld __cnfn convert_short4_sat(long4);\n" "short4 __ovld __cnfn convert_short4_rte(ulong4);\n" "short4 __ovld __cnfn convert_short4_sat_rte(ulong4);\n" "short4 __ovld __cnfn convert_short4_rtz(ulong4);\n" "short4 __ovld __cnfn convert_short4_sat_rtz(ulong4);\n" "short4 __ovld __cnfn convert_short4_rtp(ulong4);\n" "short4 __ovld __cnfn convert_short4_sat_rtp(ulong4);\n" "short4 __ovld __cnfn convert_short4_rtn(ulong4);\n" "short4 __ovld __cnfn convert_short4_sat_rtn(ulong4);\n" "short4 __ovld __cnfn convert_short4(ulong4);\n" "short4 __ovld __cnfn convert_short4_sat(ulong4);\n" "short4 __ovld __cnfn convert_short4_rte(float4);\n" "short4 __ovld __cnfn convert_short4_sat_rte(float4);\n" "short4 __ovld __cnfn convert_short4_rtz(float4);\n" "short4 __ovld __cnfn convert_short4_sat_rtz(float4);\n" "short4 __ovld __cnfn convert_short4_rtp(float4);\n" "short4 __ovld __cnfn convert_short4_sat_rtp(float4);\n" "short4 __ovld __cnfn convert_short4_rtn(float4);\n" "short4 __ovld __cnfn convert_short4_sat_rtn(float4);\n" "short4 __ovld __cnfn 
convert_short4(float4);\n" "short4 __ovld __cnfn convert_short4_sat(float4);\n" "ushort4 __ovld __cnfn convert_ushort4_rte(char4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rte(char4);\n" "ushort4 __ovld __cnfn convert_ushort4_rtz(char4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rtz(char4);\n" "ushort4 __ovld __cnfn convert_ushort4_rtp(char4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rtp(char4);\n" "ushort4 __ovld __cnfn convert_ushort4_rtn(char4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rtn(char4);\n" "ushort4 __ovld __cnfn convert_ushort4(char4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat(char4);\n" "ushort4 __ovld __cnfn convert_ushort4_rte(uchar4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rte(uchar4);\n" "ushort4 __ovld __cnfn convert_ushort4_rtz(uchar4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rtz(uchar4);\n" "ushort4 __ovld __cnfn convert_ushort4_rtp(uchar4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rtp(uchar4);\n" "ushort4 __ovld __cnfn convert_ushort4_rtn(uchar4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rtn(uchar4);\n" "ushort4 __ovld __cnfn convert_ushort4(uchar4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat(uchar4);\n" "ushort4 __ovld __cnfn convert_ushort4_rte(short4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rte(short4);\n" "ushort4 __ovld __cnfn convert_ushort4_rtz(short4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rtz(short4);\n" "ushort4 __ovld __cnfn convert_ushort4_rtp(short4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rtp(short4);\n" "ushort4 __ovld __cnfn convert_ushort4_rtn(short4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rtn(short4);\n" "ushort4 __ovld __cnfn convert_ushort4(short4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat(short4);\n" "ushort4 __ovld __cnfn convert_ushort4_rte(ushort4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rte(ushort4);\n" "ushort4 __ovld __cnfn convert_ushort4_rtz(ushort4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rtz(ushort4);\n" "ushort4 
__ovld __cnfn convert_ushort4_rtp(ushort4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rtp(ushort4);\n" "ushort4 __ovld __cnfn convert_ushort4_rtn(ushort4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rtn(ushort4);\n" "ushort4 __ovld __cnfn convert_ushort4(ushort4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat(ushort4);\n" "ushort4 __ovld __cnfn convert_ushort4_rte(int4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rte(int4);\n" "ushort4 __ovld __cnfn convert_ushort4_rtz(int4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rtz(int4);\n" "ushort4 __ovld __cnfn convert_ushort4_rtp(int4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rtp(int4);\n" "ushort4 __ovld __cnfn convert_ushort4_rtn(int4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rtn(int4);\n" "ushort4 __ovld __cnfn convert_ushort4(int4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat(int4);\n" "ushort4 __ovld __cnfn convert_ushort4_rte(uint4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rte(uint4);\n" "ushort4 __ovld __cnfn convert_ushort4_rtz(uint4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rtz(uint4);\n" "ushort4 __ovld __cnfn convert_ushort4_rtp(uint4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rtp(uint4);\n" "ushort4 __ovld __cnfn convert_ushort4_rtn(uint4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rtn(uint4);\n" "ushort4 __ovld __cnfn convert_ushort4(uint4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat(uint4);\n" "ushort4 __ovld __cnfn convert_ushort4_rte(long4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rte(long4);\n" "ushort4 __ovld __cnfn convert_ushort4_rtz(long4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rtz(long4);\n" "ushort4 __ovld __cnfn convert_ushort4_rtp(long4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rtp(long4);\n" "ushort4 __ovld __cnfn convert_ushort4_rtn(long4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rtn(long4);\n" "ushort4 __ovld __cnfn convert_ushort4(long4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat(long4);\n" "ushort4 __ovld __cnfn 
convert_ushort4_rte(ulong4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rte(ulong4);\n" "ushort4 __ovld __cnfn convert_ushort4_rtz(ulong4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rtz(ulong4);\n" "ushort4 __ovld __cnfn convert_ushort4_rtp(ulong4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rtp(ulong4);\n" "ushort4 __ovld __cnfn convert_ushort4_rtn(ulong4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rtn(ulong4);\n" "ushort4 __ovld __cnfn convert_ushort4(ulong4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat(ulong4);\n" "ushort4 __ovld __cnfn convert_ushort4_rte(float4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rte(float4);\n" "ushort4 __ovld __cnfn convert_ushort4_rtz(float4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rtz(float4);\n" "ushort4 __ovld __cnfn convert_ushort4_rtp(float4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rtp(float4);\n" "ushort4 __ovld __cnfn convert_ushort4_rtn(float4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rtn(float4);\n" "ushort4 __ovld __cnfn convert_ushort4(float4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat(float4);\n" "int4 __ovld __cnfn convert_int4_rte(char4);\n" "int4 __ovld __cnfn convert_int4_sat_rte(char4);\n" "int4 __ovld __cnfn convert_int4_rtz(char4);\n" "int4 __ovld __cnfn convert_int4_sat_rtz(char4);\n" "int4 __ovld __cnfn convert_int4_rtp(char4);\n" "int4 __ovld __cnfn convert_int4_sat_rtp(char4);\n" "int4 __ovld __cnfn convert_int4_rtn(char4);\n" "int4 __ovld __cnfn convert_int4_sat_rtn(char4);\n" "int4 __ovld __cnfn convert_int4(char4);\n" "int4 __ovld __cnfn convert_int4_sat(char4);\n" "int4 __ovld __cnfn convert_int4_rte(uchar4);\n" "int4 __ovld __cnfn convert_int4_sat_rte(uchar4);\n" "int4 __ovld __cnfn convert_int4_rtz(uchar4);\n" "int4 __ovld __cnfn convert_int4_sat_rtz(uchar4);\n" "int4 __ovld __cnfn convert_int4_rtp(uchar4);\n" "int4 __ovld __cnfn convert_int4_sat_rtp(uchar4);\n" "int4 __ovld __cnfn convert_int4_rtn(uchar4);\n" "int4 __ovld __cnfn 
convert_int4_sat_rtn(uchar4);\n" "int4 __ovld __cnfn convert_int4(uchar4);\n" "int4 __ovld __cnfn convert_int4_sat(uchar4);\n" "int4 __ovld __cnfn convert_int4_rte(short4);\n" "int4 __ovld __cnfn convert_int4_sat_rte(short4);\n" "int4 __ovld __cnfn convert_int4_rtz(short4);\n" "int4 __ovld __cnfn convert_int4_sat_rtz(short4);\n" "int4 __ovld __cnfn convert_int4_rtp(short4);\n" "int4 __ovld __cnfn convert_int4_sat_rtp(short4);\n" "int4 __ovld __cnfn convert_int4_rtn(short4);\n" "int4 __ovld __cnfn convert_int4_sat_rtn(short4);\n" "int4 __ovld __cnfn convert_int4(short4);\n" "int4 __ovld __cnfn convert_int4_sat(short4);\n" "int4 __ovld __cnfn convert_int4_rte(ushort4);\n" "int4 __ovld __cnfn convert_int4_sat_rte(ushort4);\n" "int4 __ovld __cnfn convert_int4_rtz(ushort4);\n" "int4 __ovld __cnfn convert_int4_sat_rtz(ushort4);\n" "int4 __ovld __cnfn convert_int4_rtp(ushort4);\n" "int4 __ovld __cnfn convert_int4_sat_rtp(ushort4);\n" "int4 __ovld __cnfn convert_int4_rtn(ushort4);\n" "int4 __ovld __cnfn convert_int4_sat_rtn(ushort4);\n" "int4 __ovld __cnfn convert_int4(ushort4);\n" "int4 __ovld __cnfn convert_int4_sat(ushort4);\n" "int4 __ovld __cnfn convert_int4_rte(int4);\n" "int4 __ovld __cnfn convert_int4_sat_rte(int4);\n" "int4 __ovld __cnfn convert_int4_rtz(int4);\n" "int4 __ovld __cnfn convert_int4_sat_rtz(int4);\n" "int4 __ovld __cnfn convert_int4_rtp(int4);\n" "int4 __ovld __cnfn convert_int4_sat_rtp(int4);\n" "int4 __ovld __cnfn convert_int4_rtn(int4);\n" "int4 __ovld __cnfn convert_int4_sat_rtn(int4);\n" "int4 __ovld __cnfn convert_int4(int4);\n" "int4 __ovld __cnfn convert_int4_sat(int4);\n" "int4 __ovld __cnfn convert_int4_rte(uint4);\n" "int4 __ovld __cnfn convert_int4_sat_rte(uint4);\n" "int4 __ovld __cnfn convert_int4_rtz(uint4);\n" "int4 __ovld __cnfn convert_int4_sat_rtz(uint4);\n" "int4 __ovld __cnfn convert_int4_rtp(uint4);\n" "int4 __ovld __cnfn convert_int4_sat_rtp(uint4);\n" "int4 __ovld __cnfn convert_int4_rtn(uint4);\n" "int4 __ovld __cnfn 
convert_int4_sat_rtn(uint4);\n" "int4 __ovld __cnfn convert_int4(uint4);\n" "int4 __ovld __cnfn convert_int4_sat(uint4);\n" "int4 __ovld __cnfn convert_int4_rte(long4);\n" "int4 __ovld __cnfn convert_int4_sat_rte(long4);\n" "int4 __ovld __cnfn convert_int4_rtz(long4);\n" "int4 __ovld __cnfn convert_int4_sat_rtz(long4);\n" "int4 __ovld __cnfn convert_int4_rtp(long4);\n" "int4 __ovld __cnfn convert_int4_sat_rtp(long4);\n" "int4 __ovld __cnfn convert_int4_rtn(long4);\n" "int4 __ovld __cnfn convert_int4_sat_rtn(long4);\n" "int4 __ovld __cnfn convert_int4(long4);\n" "int4 __ovld __cnfn convert_int4_sat(long4);\n" "int4 __ovld __cnfn convert_int4_rte(ulong4);\n" "int4 __ovld __cnfn convert_int4_sat_rte(ulong4);\n" "int4 __ovld __cnfn convert_int4_rtz(ulong4);\n" "int4 __ovld __cnfn convert_int4_sat_rtz(ulong4);\n" "int4 __ovld __cnfn convert_int4_rtp(ulong4);\n" "int4 __ovld __cnfn convert_int4_sat_rtp(ulong4);\n" "int4 __ovld __cnfn convert_int4_rtn(ulong4);\n" "int4 __ovld __cnfn convert_int4_sat_rtn(ulong4);\n" "int4 __ovld __cnfn convert_int4(ulong4);\n" "int4 __ovld __cnfn convert_int4_sat(ulong4);\n" "int4 __ovld __cnfn convert_int4_rte(float4);\n" "int4 __ovld __cnfn convert_int4_sat_rte(float4);\n" "int4 __ovld __cnfn convert_int4_rtz(float4);\n" "int4 __ovld __cnfn convert_int4_sat_rtz(float4);\n" "int4 __ovld __cnfn convert_int4_rtp(float4);\n" "int4 __ovld __cnfn convert_int4_sat_rtp(float4);\n" "int4 __ovld __cnfn convert_int4_rtn(float4);\n" "int4 __ovld __cnfn convert_int4_sat_rtn(float4);\n" "int4 __ovld __cnfn convert_int4(float4);\n" "int4 __ovld __cnfn convert_int4_sat(float4);\n" "uint4 __ovld __cnfn convert_uint4_rte(char4);\n" "uint4 __ovld __cnfn convert_uint4_sat_rte(char4);\n" "uint4 __ovld __cnfn convert_uint4_rtz(char4);\n" "uint4 __ovld __cnfn convert_uint4_sat_rtz(char4);\n" "uint4 __ovld __cnfn convert_uint4_rtp(char4);\n" "uint4 __ovld __cnfn convert_uint4_sat_rtp(char4);\n" "uint4 __ovld __cnfn convert_uint4_rtn(char4);\n" "uint4 __ovld 
__cnfn convert_uint4_sat_rtn(char4);\n" "uint4 __ovld __cnfn convert_uint4(char4);\n" "uint4 __ovld __cnfn convert_uint4_sat(char4);\n" "uint4 __ovld __cnfn convert_uint4_rte(uchar4);\n" "uint4 __ovld __cnfn convert_uint4_sat_rte(uchar4);\n" "uint4 __ovld __cnfn convert_uint4_rtz(uchar4);\n" "uint4 __ovld __cnfn convert_uint4_sat_rtz(uchar4);\n" "uint4 __ovld __cnfn convert_uint4_rtp(uchar4);\n" "uint4 __ovld __cnfn convert_uint4_sat_rtp(uchar4);\n" "uint4 __ovld __cnfn convert_uint4_rtn(uchar4);\n" "uint4 __ovld __cnfn convert_uint4_sat_rtn(uchar4);\n" "uint4 __ovld __cnfn convert_uint4(uchar4);\n" "uint4 __ovld __cnfn convert_uint4_sat(uchar4);\n" "uint4 __ovld __cnfn convert_uint4_rte(short4);\n" "uint4 __ovld __cnfn convert_uint4_sat_rte(short4);\n" "uint4 __ovld __cnfn convert_uint4_rtz(short4);\n" "uint4 __ovld __cnfn convert_uint4_sat_rtz(short4);\n" "uint4 __ovld __cnfn convert_uint4_rtp(short4);\n" "uint4 __ovld __cnfn convert_uint4_sat_rtp(short4);\n" "uint4 __ovld __cnfn convert_uint4_rtn(short4);\n" "uint4 __ovld __cnfn convert_uint4_sat_rtn(short4);\n" "uint4 __ovld __cnfn convert_uint4(short4);\n" "uint4 __ovld __cnfn convert_uint4_sat(short4);\n" "uint4 __ovld __cnfn convert_uint4_rte(ushort4);\n" "uint4 __ovld __cnfn convert_uint4_sat_rte(ushort4);\n" "uint4 __ovld __cnfn convert_uint4_rtz(ushort4);\n" "uint4 __ovld __cnfn convert_uint4_sat_rtz(ushort4);\n" "uint4 __ovld __cnfn convert_uint4_rtp(ushort4);\n" "uint4 __ovld __cnfn convert_uint4_sat_rtp(ushort4);\n" "uint4 __ovld __cnfn convert_uint4_rtn(ushort4);\n" "uint4 __ovld __cnfn convert_uint4_sat_rtn(ushort4);\n" "uint4 __ovld __cnfn convert_uint4(ushort4);\n" "uint4 __ovld __cnfn convert_uint4_sat(ushort4);\n" "uint4 __ovld __cnfn convert_uint4_rte(int4);\n" "uint4 __ovld __cnfn convert_uint4_sat_rte(int4);\n" "uint4 __ovld __cnfn convert_uint4_rtz(int4);\n" "uint4 __ovld __cnfn convert_uint4_sat_rtz(int4);\n" "uint4 __ovld __cnfn convert_uint4_rtp(int4);\n" "uint4 __ovld __cnfn 
convert_uint4_sat_rtp(int4);\n" "uint4 __ovld __cnfn convert_uint4_rtn(int4);\n" "uint4 __ovld __cnfn convert_uint4_sat_rtn(int4);\n" "uint4 __ovld __cnfn convert_uint4(int4);\n" "uint4 __ovld __cnfn convert_uint4_sat(int4);\n" "uint4 __ovld __cnfn convert_uint4_rte(uint4);\n" "uint4 __ovld __cnfn convert_uint4_sat_rte(uint4);\n" "uint4 __ovld __cnfn convert_uint4_rtz(uint4);\n" "uint4 __ovld __cnfn convert_uint4_sat_rtz(uint4);\n" "uint4 __ovld __cnfn convert_uint4_rtp(uint4);\n" "uint4 __ovld __cnfn convert_uint4_sat_rtp(uint4);\n" "uint4 __ovld __cnfn convert_uint4_rtn(uint4);\n" "uint4 __ovld __cnfn convert_uint4_sat_rtn(uint4);\n" "uint4 __ovld __cnfn convert_uint4(uint4);\n" "uint4 __ovld __cnfn convert_uint4_sat(uint4);\n" "uint4 __ovld __cnfn convert_uint4_rte(long4);\n" "uint4 __ovld __cnfn convert_uint4_sat_rte(long4);\n" "uint4 __ovld __cnfn convert_uint4_rtz(long4);\n" "uint4 __ovld __cnfn convert_uint4_sat_rtz(long4);\n" "uint4 __ovld __cnfn convert_uint4_rtp(long4);\n" "uint4 __ovld __cnfn convert_uint4_sat_rtp(long4);\n" "uint4 __ovld __cnfn convert_uint4_rtn(long4);\n" "uint4 __ovld __cnfn convert_uint4_sat_rtn(long4);\n" "uint4 __ovld __cnfn convert_uint4(long4);\n" "uint4 __ovld __cnfn convert_uint4_sat(long4);\n" "uint4 __ovld __cnfn convert_uint4_rte(ulong4);\n" "uint4 __ovld __cnfn convert_uint4_sat_rte(ulong4);\n" "uint4 __ovld __cnfn convert_uint4_rtz(ulong4);\n" "uint4 __ovld __cnfn convert_uint4_sat_rtz(ulong4);\n" "uint4 __ovld __cnfn convert_uint4_rtp(ulong4);\n" "uint4 __ovld __cnfn convert_uint4_sat_rtp(ulong4);\n" "uint4 __ovld __cnfn convert_uint4_rtn(ulong4);\n" "uint4 __ovld __cnfn convert_uint4_sat_rtn(ulong4);\n" "uint4 __ovld __cnfn convert_uint4(ulong4);\n" "uint4 __ovld __cnfn convert_uint4_sat(ulong4);\n" "uint4 __ovld __cnfn convert_uint4_rte(float4);\n" "uint4 __ovld __cnfn convert_uint4_sat_rte(float4);\n" "uint4 __ovld __cnfn convert_uint4_rtz(float4);\n" "uint4 __ovld __cnfn convert_uint4_sat_rtz(float4);\n" "uint4 __ovld 
__cnfn convert_uint4_rtp(float4);\n" "uint4 __ovld __cnfn convert_uint4_sat_rtp(float4);\n" "uint4 __ovld __cnfn convert_uint4_rtn(float4);\n" "uint4 __ovld __cnfn convert_uint4_sat_rtn(float4);\n" "uint4 __ovld __cnfn convert_uint4(float4);\n" "uint4 __ovld __cnfn convert_uint4_sat(float4);\n" "long4 __ovld __cnfn convert_long4_rte(char4);\n" "long4 __ovld __cnfn convert_long4_sat_rte(char4);\n" "long4 __ovld __cnfn convert_long4_rtz(char4);\n" "long4 __ovld __cnfn convert_long4_sat_rtz(char4);\n" "long4 __ovld __cnfn convert_long4_rtp(char4);\n" "long4 __ovld __cnfn convert_long4_sat_rtp(char4);\n" "long4 __ovld __cnfn convert_long4_rtn(char4);\n" "long4 __ovld __cnfn convert_long4_sat_rtn(char4);\n" "long4 __ovld __cnfn convert_long4(char4);\n" "long4 __ovld __cnfn convert_long4_sat(char4);\n" "long4 __ovld __cnfn convert_long4_rte(uchar4);\n" "long4 __ovld __cnfn convert_long4_sat_rte(uchar4);\n" "long4 __ovld __cnfn convert_long4_rtz(uchar4);\n" "long4 __ovld __cnfn convert_long4_sat_rtz(uchar4);\n" "long4 __ovld __cnfn convert_long4_rtp(uchar4);\n" "long4 __ovld __cnfn convert_long4_sat_rtp(uchar4);\n" "long4 __ovld __cnfn convert_long4_rtn(uchar4);\n" "long4 __ovld __cnfn convert_long4_sat_rtn(uchar4);\n" "long4 __ovld __cnfn convert_long4(uchar4);\n" "long4 __ovld __cnfn convert_long4_sat(uchar4);\n" "long4 __ovld __cnfn convert_long4_rte(short4);\n" "long4 __ovld __cnfn convert_long4_sat_rte(short4);\n" "long4 __ovld __cnfn convert_long4_rtz(short4);\n" "long4 __ovld __cnfn convert_long4_sat_rtz(short4);\n" "long4 __ovld __cnfn convert_long4_rtp(short4);\n" "long4 __ovld __cnfn convert_long4_sat_rtp(short4);\n" "long4 __ovld __cnfn convert_long4_rtn(short4);\n" "long4 __ovld __cnfn convert_long4_sat_rtn(short4);\n" "long4 __ovld __cnfn convert_long4(short4);\n" "long4 __ovld __cnfn convert_long4_sat(short4);\n" "long4 __ovld __cnfn convert_long4_rte(ushort4);\n" "long4 __ovld __cnfn convert_long4_sat_rte(ushort4);\n" "long4 __ovld __cnfn 
convert_long4_rtz(ushort4);\n" "long4 __ovld __cnfn convert_long4_sat_rtz(ushort4);\n" "long4 __ovld __cnfn convert_long4_rtp(ushort4);\n" "long4 __ovld __cnfn convert_long4_sat_rtp(ushort4);\n" "long4 __ovld __cnfn convert_long4_rtn(ushort4);\n" "long4 __ovld __cnfn convert_long4_sat_rtn(ushort4);\n" "long4 __ovld __cnfn convert_long4(ushort4);\n" "long4 __ovld __cnfn convert_long4_sat(ushort4);\n" "long4 __ovld __cnfn convert_long4_rte(int4);\n" "long4 __ovld __cnfn convert_long4_sat_rte(int4);\n" "long4 __ovld __cnfn convert_long4_rtz(int4);\n" "long4 __ovld __cnfn convert_long4_sat_rtz(int4);\n" "long4 __ovld __cnfn convert_long4_rtp(int4);\n" "long4 __ovld __cnfn convert_long4_sat_rtp(int4);\n" "long4 __ovld __cnfn convert_long4_rtn(int4);\n" "long4 __ovld __cnfn convert_long4_sat_rtn(int4);\n" "long4 __ovld __cnfn convert_long4(int4);\n" "long4 __ovld __cnfn convert_long4_sat(int4);\n" "long4 __ovld __cnfn convert_long4_rte(uint4);\n" "long4 __ovld __cnfn convert_long4_sat_rte(uint4);\n" "long4 __ovld __cnfn convert_long4_rtz(uint4);\n" "long4 __ovld __cnfn convert_long4_sat_rtz(uint4);\n" "long4 __ovld __cnfn convert_long4_rtp(uint4);\n" "long4 __ovld __cnfn convert_long4_sat_rtp(uint4);\n" "long4 __ovld __cnfn convert_long4_rtn(uint4);\n" "long4 __ovld __cnfn convert_long4_sat_rtn(uint4);\n" "long4 __ovld __cnfn convert_long4(uint4);\n" "long4 __ovld __cnfn convert_long4_sat(uint4);\n" "long4 __ovld __cnfn convert_long4_rte(long4);\n" "long4 __ovld __cnfn convert_long4_sat_rte(long4);\n" "long4 __ovld __cnfn convert_long4_rtz(long4);\n" "long4 __ovld __cnfn convert_long4_sat_rtz(long4);\n" "long4 __ovld __cnfn convert_long4_rtp(long4);\n" "long4 __ovld __cnfn convert_long4_sat_rtp(long4);\n" "long4 __ovld __cnfn convert_long4_rtn(long4);\n" "long4 __ovld __cnfn convert_long4_sat_rtn(long4);\n" "long4 __ovld __cnfn convert_long4(long4);\n" "long4 __ovld __cnfn convert_long4_sat(long4);\n" "long4 __ovld __cnfn convert_long4_rte(ulong4);\n" "long4 __ovld 
__cnfn convert_long4_sat_rte(ulong4);\n" "long4 __ovld __cnfn convert_long4_rtz(ulong4);\n" "long4 __ovld __cnfn convert_long4_sat_rtz(ulong4);\n" "long4 __ovld __cnfn convert_long4_rtp(ulong4);\n" "long4 __ovld __cnfn convert_long4_sat_rtp(ulong4);\n" "long4 __ovld __cnfn convert_long4_rtn(ulong4);\n" "long4 __ovld __cnfn convert_long4_sat_rtn(ulong4);\n" "long4 __ovld __cnfn convert_long4(ulong4);\n" "long4 __ovld __cnfn convert_long4_sat(ulong4);\n" "long4 __ovld __cnfn convert_long4_rte(float4);\n" "long4 __ovld __cnfn convert_long4_sat_rte(float4);\n" "long4 __ovld __cnfn convert_long4_rtz(float4);\n" "long4 __ovld __cnfn convert_long4_sat_rtz(float4);\n" "long4 __ovld __cnfn convert_long4_rtp(float4);\n" "long4 __ovld __cnfn convert_long4_sat_rtp(float4);\n" "long4 __ovld __cnfn convert_long4_rtn(float4);\n" "long4 __ovld __cnfn convert_long4_sat_rtn(float4);\n" "long4 __ovld __cnfn convert_long4(float4);\n" "long4 __ovld __cnfn convert_long4_sat(float4);\n" "ulong4 __ovld __cnfn convert_ulong4_rte(char4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rte(char4);\n" "ulong4 __ovld __cnfn convert_ulong4_rtz(char4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rtz(char4);\n" "ulong4 __ovld __cnfn convert_ulong4_rtp(char4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rtp(char4);\n" "ulong4 __ovld __cnfn convert_ulong4_rtn(char4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rtn(char4);\n" "ulong4 __ovld __cnfn convert_ulong4(char4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat(char4);\n" "ulong4 __ovld __cnfn convert_ulong4_rte(uchar4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rte(uchar4);\n" "ulong4 __ovld __cnfn convert_ulong4_rtz(uchar4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rtz(uchar4);\n" "ulong4 __ovld __cnfn convert_ulong4_rtp(uchar4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rtp(uchar4);\n" "ulong4 __ovld __cnfn convert_ulong4_rtn(uchar4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rtn(uchar4);\n" "ulong4 __ovld __cnfn convert_ulong4(uchar4);\n" 
"ulong4 __ovld __cnfn convert_ulong4_sat(uchar4);\n" "ulong4 __ovld __cnfn convert_ulong4_rte(short4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rte(short4);\n" "ulong4 __ovld __cnfn convert_ulong4_rtz(short4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rtz(short4);\n" "ulong4 __ovld __cnfn convert_ulong4_rtp(short4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rtp(short4);\n" "ulong4 __ovld __cnfn convert_ulong4_rtn(short4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rtn(short4);\n" "ulong4 __ovld __cnfn convert_ulong4(short4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat(short4);\n" "ulong4 __ovld __cnfn convert_ulong4_rte(ushort4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rte(ushort4);\n" "ulong4 __ovld __cnfn convert_ulong4_rtz(ushort4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rtz(ushort4);\n" "ulong4 __ovld __cnfn convert_ulong4_rtp(ushort4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rtp(ushort4);\n" "ulong4 __ovld __cnfn convert_ulong4_rtn(ushort4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rtn(ushort4);\n" "ulong4 __ovld __cnfn convert_ulong4(ushort4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat(ushort4);\n" "ulong4 __ovld __cnfn convert_ulong4_rte(int4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rte(int4);\n" "ulong4 __ovld __cnfn convert_ulong4_rtz(int4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rtz(int4);\n" "ulong4 __ovld __cnfn convert_ulong4_rtp(int4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rtp(int4);\n" "ulong4 __ovld __cnfn convert_ulong4_rtn(int4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rtn(int4);\n" "ulong4 __ovld __cnfn convert_ulong4(int4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat(int4);\n" "ulong4 __ovld __cnfn convert_ulong4_rte(uint4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rte(uint4);\n" "ulong4 __ovld __cnfn convert_ulong4_rtz(uint4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rtz(uint4);\n" "ulong4 __ovld __cnfn convert_ulong4_rtp(uint4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rtp(uint4);\n" 
"ulong4 __ovld __cnfn convert_ulong4_rtn(uint4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rtn(uint4);\n" "ulong4 __ovld __cnfn convert_ulong4(uint4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat(uint4);\n" "ulong4 __ovld __cnfn convert_ulong4_rte(long4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rte(long4);\n" "ulong4 __ovld __cnfn convert_ulong4_rtz(long4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rtz(long4);\n" "ulong4 __ovld __cnfn convert_ulong4_rtp(long4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rtp(long4);\n" "ulong4 __ovld __cnfn convert_ulong4_rtn(long4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rtn(long4);\n" "ulong4 __ovld __cnfn convert_ulong4(long4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat(long4);\n" "ulong4 __ovld __cnfn convert_ulong4_rte(ulong4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rte(ulong4);\n" "ulong4 __ovld __cnfn convert_ulong4_rtz(ulong4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rtz(ulong4);\n" "ulong4 __ovld __cnfn convert_ulong4_rtp(ulong4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rtp(ulong4);\n" "ulong4 __ovld __cnfn convert_ulong4_rtn(ulong4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rtn(ulong4);\n" "ulong4 __ovld __cnfn convert_ulong4(ulong4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat(ulong4);\n" "ulong4 __ovld __cnfn convert_ulong4_rte(float4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rte(float4);\n" "ulong4 __ovld __cnfn convert_ulong4_rtz(float4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rtz(float4);\n" "ulong4 __ovld __cnfn convert_ulong4_rtp(float4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rtp(float4);\n" "ulong4 __ovld __cnfn convert_ulong4_rtn(float4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rtn(float4);\n" "ulong4 __ovld __cnfn convert_ulong4(float4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat(float4);\n" "float4 __ovld __cnfn convert_float4_rte(char4);\n" "float4 __ovld __cnfn convert_float4_rtz(char4);\n" "float4 __ovld __cnfn convert_float4_rtp(char4);\n" "float4 __ovld 
__cnfn convert_float4_rtn(char4);\n" "float4 __ovld __cnfn convert_float4(char4);\n" "float4 __ovld __cnfn convert_float4_rte(uchar4);\n" "float4 __ovld __cnfn convert_float4_rtz(uchar4);\n" "float4 __ovld __cnfn convert_float4_rtp(uchar4);\n" "float4 __ovld __cnfn convert_float4_rtn(uchar4);\n" "float4 __ovld __cnfn convert_float4(uchar4);\n" "float4 __ovld __cnfn convert_float4_rte(short4);\n" "float4 __ovld __cnfn convert_float4_rtz(short4);\n" "float4 __ovld __cnfn convert_float4_rtp(short4);\n" "float4 __ovld __cnfn convert_float4_rtn(short4);\n" "float4 __ovld __cnfn convert_float4(short4);\n" "float4 __ovld __cnfn convert_float4_rte(ushort4);\n" "float4 __ovld __cnfn convert_float4_rtz(ushort4);\n" "float4 __ovld __cnfn convert_float4_rtp(ushort4);\n" "float4 __ovld __cnfn convert_float4_rtn(ushort4);\n" "float4 __ovld __cnfn convert_float4(ushort4);\n" "float4 __ovld __cnfn convert_float4_rte(int4);\n" "float4 __ovld __cnfn convert_float4_rtz(int4);\n" "float4 __ovld __cnfn convert_float4_rtp(int4);\n" "float4 __ovld __cnfn convert_float4_rtn(int4);\n" "float4 __ovld __cnfn convert_float4(int4);\n" "float4 __ovld __cnfn convert_float4_rte(uint4);\n" "float4 __ovld __cnfn convert_float4_rtz(uint4);\n" "float4 __ovld __cnfn convert_float4_rtp(uint4);\n" "float4 __ovld __cnfn convert_float4_rtn(uint4);\n" "float4 __ovld __cnfn convert_float4(uint4);\n" "float4 __ovld __cnfn convert_float4_rte(long4);\n" "float4 __ovld __cnfn convert_float4_rtz(long4);\n" "float4 __ovld __cnfn convert_float4_rtp(long4);\n" "float4 __ovld __cnfn convert_float4_rtn(long4);\n" "float4 __ovld __cnfn convert_float4(long4);\n" "float4 __ovld __cnfn convert_float4_rte(ulong4);\n" "float4 __ovld __cnfn convert_float4_rtz(ulong4);\n" "float4 __ovld __cnfn convert_float4_rtp(ulong4);\n" "float4 __ovld __cnfn convert_float4_rtn(ulong4);\n" "float4 __ovld __cnfn convert_float4(ulong4);\n" "float4 __ovld __cnfn convert_float4_rte(float4);\n" "float4 __ovld __cnfn 
convert_float4_rtz(float4);\n" "float4 __ovld __cnfn convert_float4_rtp(float4);\n" "float4 __ovld __cnfn convert_float4_rtn(float4);\n" "float4 __ovld __cnfn convert_float4(float4);\n" "char8 __ovld __cnfn convert_char8_rte(char8);\n" "char8 __ovld __cnfn convert_char8_sat_rte(char8);\n" "char8 __ovld __cnfn convert_char8_rtz(char8);\n" "char8 __ovld __cnfn convert_char8_sat_rtz(char8);\n" "char8 __ovld __cnfn convert_char8_rtp(char8);\n" "char8 __ovld __cnfn convert_char8_sat_rtp(char8);\n" "char8 __ovld __cnfn convert_char8_rtn(char8);\n" "char8 __ovld __cnfn convert_char8_sat_rtn(char8);\n" "char8 __ovld __cnfn convert_char8(char8);\n" "char8 __ovld __cnfn convert_char8_sat(char8);\n" "char8 __ovld __cnfn convert_char8_rte(uchar8);\n" "char8 __ovld __cnfn convert_char8_sat_rte(uchar8);\n" "char8 __ovld __cnfn convert_char8_rtz(uchar8);\n" "char8 __ovld __cnfn convert_char8_sat_rtz(uchar8);\n" "char8 __ovld __cnfn convert_char8_rtp(uchar8);\n" "char8 __ovld __cnfn convert_char8_sat_rtp(uchar8);\n" "char8 __ovld __cnfn convert_char8_rtn(uchar8);\n" "char8 __ovld __cnfn convert_char8_sat_rtn(uchar8);\n" "char8 __ovld __cnfn convert_char8(uchar8);\n" "char8 __ovld __cnfn convert_char8_sat(uchar8);\n" "char8 __ovld __cnfn convert_char8_rte(short8);\n" "char8 __ovld __cnfn convert_char8_sat_rte(short8);\n" "char8 __ovld __cnfn convert_char8_rtz(short8);\n" "char8 __ovld __cnfn convert_char8_sat_rtz(short8);\n" "char8 __ovld __cnfn convert_char8_rtp(short8);\n" "char8 __ovld __cnfn convert_char8_sat_rtp(short8);\n" "char8 __ovld __cnfn convert_char8_rtn(short8);\n" "char8 __ovld __cnfn convert_char8_sat_rtn(short8);\n" "char8 __ovld __cnfn convert_char8(short8);\n" "char8 __ovld __cnfn convert_char8_sat(short8);\n" "char8 __ovld __cnfn convert_char8_rte(ushort8);\n" "char8 __ovld __cnfn convert_char8_sat_rte(ushort8);\n" "char8 __ovld __cnfn convert_char8_rtz(ushort8);\n" "char8 __ovld __cnfn convert_char8_sat_rtz(ushort8);\n" "char8 __ovld __cnfn 
convert_char8_rtp(ushort8);\n" "char8 __ovld __cnfn convert_char8_sat_rtp(ushort8);\n" "char8 __ovld __cnfn convert_char8_rtn(ushort8);\n" "char8 __ovld __cnfn convert_char8_sat_rtn(ushort8);\n" "char8 __ovld __cnfn convert_char8(ushort8);\n" "char8 __ovld __cnfn convert_char8_sat(ushort8);\n" "char8 __ovld __cnfn convert_char8_rte(int8);\n" "char8 __ovld __cnfn convert_char8_sat_rte(int8);\n" "char8 __ovld __cnfn convert_char8_rtz(int8);\n" "char8 __ovld __cnfn convert_char8_sat_rtz(int8);\n" "char8 __ovld __cnfn convert_char8_rtp(int8);\n" "char8 __ovld __cnfn convert_char8_sat_rtp(int8);\n" "char8 __ovld __cnfn convert_char8_rtn(int8);\n" "char8 __ovld __cnfn convert_char8_sat_rtn(int8);\n" "char8 __ovld __cnfn convert_char8(int8);\n" "char8 __ovld __cnfn convert_char8_sat(int8);\n" "char8 __ovld __cnfn convert_char8_rte(uint8);\n" "char8 __ovld __cnfn convert_char8_sat_rte(uint8);\n" "char8 __ovld __cnfn convert_char8_rtz(uint8);\n" "char8 __ovld __cnfn convert_char8_sat_rtz(uint8);\n" "char8 __ovld __cnfn convert_char8_rtp(uint8);\n" "char8 __ovld __cnfn convert_char8_sat_rtp(uint8);\n" "char8 __ovld __cnfn convert_char8_rtn(uint8);\n" "char8 __ovld __cnfn convert_char8_sat_rtn(uint8);\n" "char8 __ovld __cnfn convert_char8(uint8);\n" "char8 __ovld __cnfn convert_char8_sat(uint8);\n" "char8 __ovld __cnfn convert_char8_rte(long8);\n" "char8 __ovld __cnfn convert_char8_sat_rte(long8);\n" "char8 __ovld __cnfn convert_char8_rtz(long8);\n" "char8 __ovld __cnfn convert_char8_sat_rtz(long8);\n" "char8 __ovld __cnfn convert_char8_rtp(long8);\n" "char8 __ovld __cnfn convert_char8_sat_rtp(long8);\n" "char8 __ovld __cnfn convert_char8_rtn(long8);\n" "char8 __ovld __cnfn convert_char8_sat_rtn(long8);\n" "char8 __ovld __cnfn convert_char8(long8);\n" "char8 __ovld __cnfn convert_char8_sat(long8);\n" "char8 __ovld __cnfn convert_char8_rte(ulong8);\n" "char8 __ovld __cnfn convert_char8_sat_rte(ulong8);\n" "char8 __ovld __cnfn convert_char8_rtz(ulong8);\n" "char8 __ovld __cnfn 
convert_char8_sat_rtz(ulong8);\n" "char8 __ovld __cnfn convert_char8_rtp(ulong8);\n" "char8 __ovld __cnfn convert_char8_sat_rtp(ulong8);\n" "char8 __ovld __cnfn convert_char8_rtn(ulong8);\n" "char8 __ovld __cnfn convert_char8_sat_rtn(ulong8);\n" "char8 __ovld __cnfn convert_char8(ulong8);\n" "char8 __ovld __cnfn convert_char8_sat(ulong8);\n" "char8 __ovld __cnfn convert_char8_rte(float8);\n" "char8 __ovld __cnfn convert_char8_sat_rte(float8);\n" "char8 __ovld __cnfn convert_char8_rtz(float8);\n" "char8 __ovld __cnfn convert_char8_sat_rtz(float8);\n" "char8 __ovld __cnfn convert_char8_rtp(float8);\n" "char8 __ovld __cnfn convert_char8_sat_rtp(float8);\n" "char8 __ovld __cnfn convert_char8_rtn(float8);\n" "char8 __ovld __cnfn convert_char8_sat_rtn(float8);\n" "char8 __ovld __cnfn convert_char8(float8);\n" "char8 __ovld __cnfn convert_char8_sat(float8);\n" "uchar8 __ovld __cnfn convert_uchar8_rte(char8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rte(char8);\n" "uchar8 __ovld __cnfn convert_uchar8_rtz(char8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rtz(char8);\n" "uchar8 __ovld __cnfn convert_uchar8_rtp(char8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rtp(char8);\n" "uchar8 __ovld __cnfn convert_uchar8_rtn(char8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rtn(char8);\n" "uchar8 __ovld __cnfn convert_uchar8(char8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat(char8);\n" "uchar8 __ovld __cnfn convert_uchar8_rte(uchar8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rte(uchar8);\n" "uchar8 __ovld __cnfn convert_uchar8_rtz(uchar8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rtz(uchar8);\n" "uchar8 __ovld __cnfn convert_uchar8_rtp(uchar8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rtp(uchar8);\n" "uchar8 __ovld __cnfn convert_uchar8_rtn(uchar8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rtn(uchar8);\n" "uchar8 __ovld __cnfn convert_uchar8(uchar8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat(uchar8);\n" "uchar8 __ovld __cnfn convert_uchar8_rte(short8);\n" 
"uchar8 __ovld __cnfn convert_uchar8_sat_rte(short8);\n" "uchar8 __ovld __cnfn convert_uchar8_rtz(short8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rtz(short8);\n" "uchar8 __ovld __cnfn convert_uchar8_rtp(short8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rtp(short8);\n" "uchar8 __ovld __cnfn convert_uchar8_rtn(short8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rtn(short8);\n" "uchar8 __ovld __cnfn convert_uchar8(short8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat(short8);\n" "uchar8 __ovld __cnfn convert_uchar8_rte(ushort8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rte(ushort8);\n" "uchar8 __ovld __cnfn convert_uchar8_rtz(ushort8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rtz(ushort8);\n" "uchar8 __ovld __cnfn convert_uchar8_rtp(ushort8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rtp(ushort8);\n" "uchar8 __ovld __cnfn convert_uchar8_rtn(ushort8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rtn(ushort8);\n" "uchar8 __ovld __cnfn convert_uchar8(ushort8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat(ushort8);\n" "uchar8 __ovld __cnfn convert_uchar8_rte(int8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rte(int8);\n" "uchar8 __ovld __cnfn convert_uchar8_rtz(int8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rtz(int8);\n" "uchar8 __ovld __cnfn convert_uchar8_rtp(int8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rtp(int8);\n" "uchar8 __ovld __cnfn convert_uchar8_rtn(int8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rtn(int8);\n" "uchar8 __ovld __cnfn convert_uchar8(int8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat(int8);\n" "uchar8 __ovld __cnfn convert_uchar8_rte(uint8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rte(uint8);\n" "uchar8 __ovld __cnfn convert_uchar8_rtz(uint8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rtz(uint8);\n" "uchar8 __ovld __cnfn convert_uchar8_rtp(uint8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rtp(uint8);\n" "uchar8 __ovld __cnfn convert_uchar8_rtn(uint8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rtn(uint8);\n" 
"uchar8 __ovld __cnfn convert_uchar8(uint8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat(uint8);\n" "uchar8 __ovld __cnfn convert_uchar8_rte(long8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rte(long8);\n" "uchar8 __ovld __cnfn convert_uchar8_rtz(long8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rtz(long8);\n" "uchar8 __ovld __cnfn convert_uchar8_rtp(long8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rtp(long8);\n" "uchar8 __ovld __cnfn convert_uchar8_rtn(long8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rtn(long8);\n" "uchar8 __ovld __cnfn convert_uchar8(long8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat(long8);\n" "uchar8 __ovld __cnfn convert_uchar8_rte(ulong8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rte(ulong8);\n" "uchar8 __ovld __cnfn convert_uchar8_rtz(ulong8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rtz(ulong8);\n" "uchar8 __ovld __cnfn convert_uchar8_rtp(ulong8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rtp(ulong8);\n" "uchar8 __ovld __cnfn convert_uchar8_rtn(ulong8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rtn(ulong8);\n" "uchar8 __ovld __cnfn convert_uchar8(ulong8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat(ulong8);\n" "uchar8 __ovld __cnfn convert_uchar8_rte(float8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rte(float8);\n" "uchar8 __ovld __cnfn convert_uchar8_rtz(float8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rtz(float8);\n" "uchar8 __ovld __cnfn convert_uchar8_rtp(float8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rtp(float8);\n" "uchar8 __ovld __cnfn convert_uchar8_rtn(float8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rtn(float8);\n" "uchar8 __ovld __cnfn convert_uchar8(float8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat(float8);\n" "short8 __ovld __cnfn convert_short8_rte(char8);\n" "short8 __ovld __cnfn convert_short8_sat_rte(char8);\n" "short8 __ovld __cnfn convert_short8_rtz(char8);\n" "short8 __ovld __cnfn convert_short8_sat_rtz(char8);\n" "short8 __ovld __cnfn convert_short8_rtp(char8);\n" "short8 __ovld 
__cnfn convert_short8_sat_rtp(char8);\n" "short8 __ovld __cnfn convert_short8_rtn(char8);\n" "short8 __ovld __cnfn convert_short8_sat_rtn(char8);\n" "short8 __ovld __cnfn convert_short8(char8);\n" "short8 __ovld __cnfn convert_short8_sat(char8);\n" "short8 __ovld __cnfn convert_short8_rte(uchar8);\n" "short8 __ovld __cnfn convert_short8_sat_rte(uchar8);\n" "short8 __ovld __cnfn convert_short8_rtz(uchar8);\n" "short8 __ovld __cnfn convert_short8_sat_rtz(uchar8);\n" "short8 __ovld __cnfn convert_short8_rtp(uchar8);\n" "short8 __ovld __cnfn convert_short8_sat_rtp(uchar8);\n" "short8 __ovld __cnfn convert_short8_rtn(uchar8);\n" "short8 __ovld __cnfn convert_short8_sat_rtn(uchar8);\n" "short8 __ovld __cnfn convert_short8(uchar8);\n" "short8 __ovld __cnfn convert_short8_sat(uchar8);\n" "short8 __ovld __cnfn convert_short8_rte(short8);\n" "short8 __ovld __cnfn convert_short8_sat_rte(short8);\n" "short8 __ovld __cnfn convert_short8_rtz(short8);\n" "short8 __ovld __cnfn convert_short8_sat_rtz(short8);\n" "short8 __ovld __cnfn convert_short8_rtp(short8);\n" "short8 __ovld __cnfn convert_short8_sat_rtp(short8);\n" "short8 __ovld __cnfn convert_short8_rtn(short8);\n" "short8 __ovld __cnfn convert_short8_sat_rtn(short8);\n" "short8 __ovld __cnfn convert_short8(short8);\n" "short8 __ovld __cnfn convert_short8_sat(short8);\n" "short8 __ovld __cnfn convert_short8_rte(ushort8);\n" "short8 __ovld __cnfn convert_short8_sat_rte(ushort8);\n" "short8 __ovld __cnfn convert_short8_rtz(ushort8);\n" "short8 __ovld __cnfn convert_short8_sat_rtz(ushort8);\n" "short8 __ovld __cnfn convert_short8_rtp(ushort8);\n" "short8 __ovld __cnfn convert_short8_sat_rtp(ushort8);\n" "short8 __ovld __cnfn convert_short8_rtn(ushort8);\n" "short8 __ovld __cnfn convert_short8_sat_rtn(ushort8);\n" "short8 __ovld __cnfn convert_short8(ushort8);\n" "short8 __ovld __cnfn convert_short8_sat(ushort8);\n" "short8 __ovld __cnfn convert_short8_rte(int8);\n" "short8 __ovld __cnfn convert_short8_sat_rte(int8);\n" "short8 
__ovld __cnfn convert_short8_rtz(int8);\n" "short8 __ovld __cnfn convert_short8_sat_rtz(int8);\n" "short8 __ovld __cnfn convert_short8_rtp(int8);\n" "short8 __ovld __cnfn convert_short8_sat_rtp(int8);\n" "short8 __ovld __cnfn convert_short8_rtn(int8);\n" "short8 __ovld __cnfn convert_short8_sat_rtn(int8);\n" "short8 __ovld __cnfn convert_short8(int8);\n" "short8 __ovld __cnfn convert_short8_sat(int8);\n" "short8 __ovld __cnfn convert_short8_rte(uint8);\n" "short8 __ovld __cnfn convert_short8_sat_rte(uint8);\n" "short8 __ovld __cnfn convert_short8_rtz(uint8);\n" "short8 __ovld __cnfn convert_short8_sat_rtz(uint8);\n" "short8 __ovld __cnfn convert_short8_rtp(uint8);\n" "short8 __ovld __cnfn convert_short8_sat_rtp(uint8);\n" "short8 __ovld __cnfn convert_short8_rtn(uint8);\n" "short8 __ovld __cnfn convert_short8_sat_rtn(uint8);\n" "short8 __ovld __cnfn convert_short8(uint8);\n" "short8 __ovld __cnfn convert_short8_sat(uint8);\n" "short8 __ovld __cnfn convert_short8_rte(long8);\n" "short8 __ovld __cnfn convert_short8_sat_rte(long8);\n" "short8 __ovld __cnfn convert_short8_rtz(long8);\n" "short8 __ovld __cnfn convert_short8_sat_rtz(long8);\n" "short8 __ovld __cnfn convert_short8_rtp(long8);\n" "short8 __ovld __cnfn convert_short8_sat_rtp(long8);\n" "short8 __ovld __cnfn convert_short8_rtn(long8);\n" "short8 __ovld __cnfn convert_short8_sat_rtn(long8);\n" "short8 __ovld __cnfn convert_short8(long8);\n" "short8 __ovld __cnfn convert_short8_sat(long8);\n" "short8 __ovld __cnfn convert_short8_rte(ulong8);\n" "short8 __ovld __cnfn convert_short8_sat_rte(ulong8);\n" "short8 __ovld __cnfn convert_short8_rtz(ulong8);\n" "short8 __ovld __cnfn convert_short8_sat_rtz(ulong8);\n" "short8 __ovld __cnfn convert_short8_rtp(ulong8);\n" "short8 __ovld __cnfn convert_short8_sat_rtp(ulong8);\n" "short8 __ovld __cnfn convert_short8_rtn(ulong8);\n" "short8 __ovld __cnfn convert_short8_sat_rtn(ulong8);\n" "short8 __ovld __cnfn convert_short8(ulong8);\n" "short8 __ovld __cnfn 
convert_short8_sat(ulong8);\n" "short8 __ovld __cnfn convert_short8_rte(float8);\n" "short8 __ovld __cnfn convert_short8_sat_rte(float8);\n" "short8 __ovld __cnfn convert_short8_rtz(float8);\n" "short8 __ovld __cnfn convert_short8_sat_rtz(float8);\n" "short8 __ovld __cnfn convert_short8_rtp(float8);\n" "short8 __ovld __cnfn convert_short8_sat_rtp(float8);\n" "short8 __ovld __cnfn convert_short8_rtn(float8);\n" "short8 __ovld __cnfn convert_short8_sat_rtn(float8);\n" "short8 __ovld __cnfn convert_short8(float8);\n" "short8 __ovld __cnfn convert_short8_sat(float8);\n" "ushort8 __ovld __cnfn convert_ushort8_rte(char8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rte(char8);\n" "ushort8 __ovld __cnfn convert_ushort8_rtz(char8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rtz(char8);\n" "ushort8 __ovld __cnfn convert_ushort8_rtp(char8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rtp(char8);\n" "ushort8 __ovld __cnfn convert_ushort8_rtn(char8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rtn(char8);\n" "ushort8 __ovld __cnfn convert_ushort8(char8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat(char8);\n" "ushort8 __ovld __cnfn convert_ushort8_rte(uchar8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rte(uchar8);\n" "ushort8 __ovld __cnfn convert_ushort8_rtz(uchar8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rtz(uchar8);\n" "ushort8 __ovld __cnfn convert_ushort8_rtp(uchar8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rtp(uchar8);\n" "ushort8 __ovld __cnfn convert_ushort8_rtn(uchar8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rtn(uchar8);\n" "ushort8 __ovld __cnfn convert_ushort8(uchar8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat(uchar8);\n" "ushort8 __ovld __cnfn convert_ushort8_rte(short8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rte(short8);\n" "ushort8 __ovld __cnfn convert_ushort8_rtz(short8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rtz(short8);\n" "ushort8 __ovld __cnfn convert_ushort8_rtp(short8);\n" "ushort8 __ovld __cnfn 
convert_ushort8_sat_rtp(short8);\n" "ushort8 __ovld __cnfn convert_ushort8_rtn(short8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rtn(short8);\n" "ushort8 __ovld __cnfn convert_ushort8(short8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat(short8);\n" "ushort8 __ovld __cnfn convert_ushort8_rte(ushort8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rte(ushort8);\n" "ushort8 __ovld __cnfn convert_ushort8_rtz(ushort8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rtz(ushort8);\n" "ushort8 __ovld __cnfn convert_ushort8_rtp(ushort8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rtp(ushort8);\n" "ushort8 __ovld __cnfn convert_ushort8_rtn(ushort8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rtn(ushort8);\n" "ushort8 __ovld __cnfn convert_ushort8(ushort8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat(ushort8);\n" "ushort8 __ovld __cnfn convert_ushort8_rte(int8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rte(int8);\n" "ushort8 __ovld __cnfn convert_ushort8_rtz(int8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rtz(int8);\n" "ushort8 __ovld __cnfn convert_ushort8_rtp(int8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rtp(int8);\n" "ushort8 __ovld __cnfn convert_ushort8_rtn(int8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rtn(int8);\n" "ushort8 __ovld __cnfn convert_ushort8(int8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat(int8);\n" "ushort8 __ovld __cnfn convert_ushort8_rte(uint8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rte(uint8);\n" "ushort8 __ovld __cnfn convert_ushort8_rtz(uint8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rtz(uint8);\n" "ushort8 __ovld __cnfn convert_ushort8_rtp(uint8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rtp(uint8);\n" "ushort8 __ovld __cnfn convert_ushort8_rtn(uint8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rtn(uint8);\n" "ushort8 __ovld __cnfn convert_ushort8(uint8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat(uint8);\n" "ushort8 __ovld __cnfn convert_ushort8_rte(long8);\n" "ushort8 __ovld __cnfn 
convert_ushort8_sat_rte(long8);\n" "ushort8 __ovld __cnfn convert_ushort8_rtz(long8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rtz(long8);\n" "ushort8 __ovld __cnfn convert_ushort8_rtp(long8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rtp(long8);\n" "ushort8 __ovld __cnfn convert_ushort8_rtn(long8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rtn(long8);\n" "ushort8 __ovld __cnfn convert_ushort8(long8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat(long8);\n" "ushort8 __ovld __cnfn convert_ushort8_rte(ulong8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rte(ulong8);\n" "ushort8 __ovld __cnfn convert_ushort8_rtz(ulong8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rtz(ulong8);\n" "ushort8 __ovld __cnfn convert_ushort8_rtp(ulong8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rtp(ulong8);\n" "ushort8 __ovld __cnfn convert_ushort8_rtn(ulong8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rtn(ulong8);\n" "ushort8 __ovld __cnfn convert_ushort8(ulong8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat(ulong8);\n" "ushort8 __ovld __cnfn convert_ushort8_rte(float8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rte(float8);\n" "ushort8 __ovld __cnfn convert_ushort8_rtz(float8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rtz(float8);\n" "ushort8 __ovld __cnfn convert_ushort8_rtp(float8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rtp(float8);\n" "ushort8 __ovld __cnfn convert_ushort8_rtn(float8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rtn(float8);\n" "ushort8 __ovld __cnfn convert_ushort8(float8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat(float8);\n" "int8 __ovld __cnfn convert_int8_rte(char8);\n" "int8 __ovld __cnfn convert_int8_sat_rte(char8);\n" "int8 __ovld __cnfn convert_int8_rtz(char8);\n" "int8 __ovld __cnfn convert_int8_sat_rtz(char8);\n" "int8 __ovld __cnfn convert_int8_rtp(char8);\n" "int8 __ovld __cnfn convert_int8_sat_rtp(char8);\n" "int8 __ovld __cnfn convert_int8_rtn(char8);\n" "int8 __ovld __cnfn convert_int8_sat_rtn(char8);\n" 
"int8 __ovld __cnfn convert_int8(char8);\n" "int8 __ovld __cnfn convert_int8_sat(char8);\n" "int8 __ovld __cnfn convert_int8_rte(uchar8);\n" "int8 __ovld __cnfn convert_int8_sat_rte(uchar8);\n" "int8 __ovld __cnfn convert_int8_rtz(uchar8);\n" "int8 __ovld __cnfn convert_int8_sat_rtz(uchar8);\n" "int8 __ovld __cnfn convert_int8_rtp(uchar8);\n" "int8 __ovld __cnfn convert_int8_sat_rtp(uchar8);\n" "int8 __ovld __cnfn convert_int8_rtn(uchar8);\n" "int8 __ovld __cnfn convert_int8_sat_rtn(uchar8);\n" "int8 __ovld __cnfn convert_int8(uchar8);\n" "int8 __ovld __cnfn convert_int8_sat(uchar8);\n" "int8 __ovld __cnfn convert_int8_rte(short8);\n" "int8 __ovld __cnfn convert_int8_sat_rte(short8);\n" "int8 __ovld __cnfn convert_int8_rtz(short8);\n" "int8 __ovld __cnfn convert_int8_sat_rtz(short8);\n" "int8 __ovld __cnfn convert_int8_rtp(short8);\n" "int8 __ovld __cnfn convert_int8_sat_rtp(short8);\n" "int8 __ovld __cnfn convert_int8_rtn(short8);\n" "int8 __ovld __cnfn convert_int8_sat_rtn(short8);\n" "int8 __ovld __cnfn convert_int8(short8);\n" "int8 __ovld __cnfn convert_int8_sat(short8);\n" "int8 __ovld __cnfn convert_int8_rte(ushort8);\n" "int8 __ovld __cnfn convert_int8_sat_rte(ushort8);\n" "int8 __ovld __cnfn convert_int8_rtz(ushort8);\n" "int8 __ovld __cnfn convert_int8_sat_rtz(ushort8);\n" "int8 __ovld __cnfn convert_int8_rtp(ushort8);\n" "int8 __ovld __cnfn convert_int8_sat_rtp(ushort8);\n" "int8 __ovld __cnfn convert_int8_rtn(ushort8);\n" "int8 __ovld __cnfn convert_int8_sat_rtn(ushort8);\n" "int8 __ovld __cnfn convert_int8(ushort8);\n" "int8 __ovld __cnfn convert_int8_sat(ushort8);\n" "int8 __ovld __cnfn convert_int8_rte(int8);\n" "int8 __ovld __cnfn convert_int8_sat_rte(int8);\n" "int8 __ovld __cnfn convert_int8_rtz(int8);\n" "int8 __ovld __cnfn convert_int8_sat_rtz(int8);\n" "int8 __ovld __cnfn convert_int8_rtp(int8);\n" "int8 __ovld __cnfn convert_int8_sat_rtp(int8);\n" "int8 __ovld __cnfn convert_int8_rtn(int8);\n" "int8 __ovld __cnfn convert_int8_sat_rtn(int8);\n" 
"int8 __ovld __cnfn convert_int8(int8);\n" "int8 __ovld __cnfn convert_int8_sat(int8);\n" "int8 __ovld __cnfn convert_int8_rte(uint8);\n" "int8 __ovld __cnfn convert_int8_sat_rte(uint8);\n" "int8 __ovld __cnfn convert_int8_rtz(uint8);\n" "int8 __ovld __cnfn convert_int8_sat_rtz(uint8);\n" "int8 __ovld __cnfn convert_int8_rtp(uint8);\n" "int8 __ovld __cnfn convert_int8_sat_rtp(uint8);\n" "int8 __ovld __cnfn convert_int8_rtn(uint8);\n" "int8 __ovld __cnfn convert_int8_sat_rtn(uint8);\n" "int8 __ovld __cnfn convert_int8(uint8);\n" "int8 __ovld __cnfn convert_int8_sat(uint8);\n" "int8 __ovld __cnfn convert_int8_rte(long8);\n" "int8 __ovld __cnfn convert_int8_sat_rte(long8);\n" "int8 __ovld __cnfn convert_int8_rtz(long8);\n" "int8 __ovld __cnfn convert_int8_sat_rtz(long8);\n" "int8 __ovld __cnfn convert_int8_rtp(long8);\n" "int8 __ovld __cnfn convert_int8_sat_rtp(long8);\n" "int8 __ovld __cnfn convert_int8_rtn(long8);\n" "int8 __ovld __cnfn convert_int8_sat_rtn(long8);\n" "int8 __ovld __cnfn convert_int8(long8);\n" "int8 __ovld __cnfn convert_int8_sat(long8);\n" "int8 __ovld __cnfn convert_int8_rte(ulong8);\n" "int8 __ovld __cnfn convert_int8_sat_rte(ulong8);\n" "int8 __ovld __cnfn convert_int8_rtz(ulong8);\n" "int8 __ovld __cnfn convert_int8_sat_rtz(ulong8);\n" "int8 __ovld __cnfn convert_int8_rtp(ulong8);\n" "int8 __ovld __cnfn convert_int8_sat_rtp(ulong8);\n" "int8 __ovld __cnfn convert_int8_rtn(ulong8);\n" "int8 __ovld __cnfn convert_int8_sat_rtn(ulong8);\n" "int8 __ovld __cnfn convert_int8(ulong8);\n" "int8 __ovld __cnfn convert_int8_sat(ulong8);\n" "int8 __ovld __cnfn convert_int8_rte(float8);\n" "int8 __ovld __cnfn convert_int8_sat_rte(float8);\n" "int8 __ovld __cnfn convert_int8_rtz(float8);\n" "int8 __ovld __cnfn convert_int8_sat_rtz(float8);\n" "int8 __ovld __cnfn convert_int8_rtp(float8);\n" "int8 __ovld __cnfn convert_int8_sat_rtp(float8);\n" "int8 __ovld __cnfn convert_int8_rtn(float8);\n" "int8 __ovld __cnfn convert_int8_sat_rtn(float8);\n" "int8 __ovld 
__cnfn convert_int8(float8);\n" "int8 __ovld __cnfn convert_int8_sat(float8);\n" "uint8 __ovld __cnfn convert_uint8_rte(char8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rte(char8);\n" "uint8 __ovld __cnfn convert_uint8_rtz(char8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rtz(char8);\n" "uint8 __ovld __cnfn convert_uint8_rtp(char8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rtp(char8);\n" "uint8 __ovld __cnfn convert_uint8_rtn(char8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rtn(char8);\n" "uint8 __ovld __cnfn convert_uint8(char8);\n" "uint8 __ovld __cnfn convert_uint8_sat(char8);\n" "uint8 __ovld __cnfn convert_uint8_rte(uchar8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rte(uchar8);\n" "uint8 __ovld __cnfn convert_uint8_rtz(uchar8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rtz(uchar8);\n" "uint8 __ovld __cnfn convert_uint8_rtp(uchar8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rtp(uchar8);\n" "uint8 __ovld __cnfn convert_uint8_rtn(uchar8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rtn(uchar8);\n" "uint8 __ovld __cnfn convert_uint8(uchar8);\n" "uint8 __ovld __cnfn convert_uint8_sat(uchar8);\n" "uint8 __ovld __cnfn convert_uint8_rte(short8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rte(short8);\n" "uint8 __ovld __cnfn convert_uint8_rtz(short8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rtz(short8);\n" "uint8 __ovld __cnfn convert_uint8_rtp(short8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rtp(short8);\n" "uint8 __ovld __cnfn convert_uint8_rtn(short8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rtn(short8);\n" "uint8 __ovld __cnfn convert_uint8(short8);\n" "uint8 __ovld __cnfn convert_uint8_sat(short8);\n" "uint8 __ovld __cnfn convert_uint8_rte(ushort8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rte(ushort8);\n" "uint8 __ovld __cnfn convert_uint8_rtz(ushort8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rtz(ushort8);\n" "uint8 __ovld __cnfn convert_uint8_rtp(ushort8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rtp(ushort8);\n" "uint8 __ovld __cnfn 
convert_uint8_rtn(ushort8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rtn(ushort8);\n" "uint8 __ovld __cnfn convert_uint8(ushort8);\n" "uint8 __ovld __cnfn convert_uint8_sat(ushort8);\n" "uint8 __ovld __cnfn convert_uint8_rte(int8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rte(int8);\n" "uint8 __ovld __cnfn convert_uint8_rtz(int8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rtz(int8);\n" "uint8 __ovld __cnfn convert_uint8_rtp(int8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rtp(int8);\n" "uint8 __ovld __cnfn convert_uint8_rtn(int8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rtn(int8);\n" "uint8 __ovld __cnfn convert_uint8(int8);\n" "uint8 __ovld __cnfn convert_uint8_sat(int8);\n" "uint8 __ovld __cnfn convert_uint8_rte(uint8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rte(uint8);\n" "uint8 __ovld __cnfn convert_uint8_rtz(uint8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rtz(uint8);\n" "uint8 __ovld __cnfn convert_uint8_rtp(uint8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rtp(uint8);\n" "uint8 __ovld __cnfn convert_uint8_rtn(uint8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rtn(uint8);\n" "uint8 __ovld __cnfn convert_uint8(uint8);\n" "uint8 __ovld __cnfn convert_uint8_sat(uint8);\n" "uint8 __ovld __cnfn convert_uint8_rte(long8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rte(long8);\n" "uint8 __ovld __cnfn convert_uint8_rtz(long8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rtz(long8);\n" "uint8 __ovld __cnfn convert_uint8_rtp(long8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rtp(long8);\n" "uint8 __ovld __cnfn convert_uint8_rtn(long8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rtn(long8);\n" "uint8 __ovld __cnfn convert_uint8(long8);\n" "uint8 __ovld __cnfn convert_uint8_sat(long8);\n" "uint8 __ovld __cnfn convert_uint8_rte(ulong8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rte(ulong8);\n" "uint8 __ovld __cnfn convert_uint8_rtz(ulong8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rtz(ulong8);\n" "uint8 __ovld __cnfn convert_uint8_rtp(ulong8);\n" "uint8 __ovld __cnfn 
convert_uint8_sat_rtp(ulong8);\n" "uint8 __ovld __cnfn convert_uint8_rtn(ulong8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rtn(ulong8);\n" "uint8 __ovld __cnfn convert_uint8(ulong8);\n" "uint8 __ovld __cnfn convert_uint8_sat(ulong8);\n" "uint8 __ovld __cnfn convert_uint8_rte(float8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rte(float8);\n" "uint8 __ovld __cnfn convert_uint8_rtz(float8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rtz(float8);\n" "uint8 __ovld __cnfn convert_uint8_rtp(float8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rtp(float8);\n" "uint8 __ovld __cnfn convert_uint8_rtn(float8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rtn(float8);\n" "uint8 __ovld __cnfn convert_uint8(float8);\n" "uint8 __ovld __cnfn convert_uint8_sat(float8);\n" "long8 __ovld __cnfn convert_long8_rte(char8);\n" "long8 __ovld __cnfn convert_long8_sat_rte(char8);\n" "long8 __ovld __cnfn convert_long8_rtz(char8);\n" "long8 __ovld __cnfn convert_long8_sat_rtz(char8);\n" "long8 __ovld __cnfn convert_long8_rtp(char8);\n" "long8 __ovld __cnfn convert_long8_sat_rtp(char8);\n" "long8 __ovld __cnfn convert_long8_rtn(char8);\n" "long8 __ovld __cnfn convert_long8_sat_rtn(char8);\n" "long8 __ovld __cnfn convert_long8(char8);\n" "long8 __ovld __cnfn convert_long8_sat(char8);\n" "long8 __ovld __cnfn convert_long8_rte(uchar8);\n" "long8 __ovld __cnfn convert_long8_sat_rte(uchar8);\n" "long8 __ovld __cnfn convert_long8_rtz(uchar8);\n" "long8 __ovld __cnfn convert_long8_sat_rtz(uchar8);\n" "long8 __ovld __cnfn convert_long8_rtp(uchar8);\n" "long8 __ovld __cnfn convert_long8_sat_rtp(uchar8);\n" "long8 __ovld __cnfn convert_long8_rtn(uchar8);\n" "long8 __ovld __cnfn convert_long8_sat_rtn(uchar8);\n" "long8 __ovld __cnfn convert_long8(uchar8);\n" "long8 __ovld __cnfn convert_long8_sat(uchar8);\n" "long8 __ovld __cnfn convert_long8_rte(short8);\n" "long8 __ovld __cnfn convert_long8_sat_rte(short8);\n" "long8 __ovld __cnfn convert_long8_rtz(short8);\n" "long8 __ovld __cnfn 
convert_long8_sat_rtz(short8);\n" "long8 __ovld __cnfn convert_long8_rtp(short8);\n" "long8 __ovld __cnfn convert_long8_sat_rtp(short8);\n" "long8 __ovld __cnfn convert_long8_rtn(short8);\n" "long8 __ovld __cnfn convert_long8_sat_rtn(short8);\n" "long8 __ovld __cnfn convert_long8(short8);\n" "long8 __ovld __cnfn convert_long8_sat(short8);\n" "long8 __ovld __cnfn convert_long8_rte(ushort8);\n" "long8 __ovld __cnfn convert_long8_sat_rte(ushort8);\n" "long8 __ovld __cnfn convert_long8_rtz(ushort8);\n" "long8 __ovld __cnfn convert_long8_sat_rtz(ushort8);\n" "long8 __ovld __cnfn convert_long8_rtp(ushort8);\n" "long8 __ovld __cnfn convert_long8_sat_rtp(ushort8);\n" "long8 __ovld __cnfn convert_long8_rtn(ushort8);\n" "long8 __ovld __cnfn convert_long8_sat_rtn(ushort8);\n" "long8 __ovld __cnfn convert_long8(ushort8);\n" "long8 __ovld __cnfn convert_long8_sat(ushort8);\n" "long8 __ovld __cnfn convert_long8_rte(int8);\n" "long8 __ovld __cnfn convert_long8_sat_rte(int8);\n" "long8 __ovld __cnfn convert_long8_rtz(int8);\n" "long8 __ovld __cnfn convert_long8_sat_rtz(int8);\n" "long8 __ovld __cnfn convert_long8_rtp(int8);\n" "long8 __ovld __cnfn convert_long8_sat_rtp(int8);\n" "long8 __ovld __cnfn convert_long8_rtn(int8);\n" "long8 __ovld __cnfn convert_long8_sat_rtn(int8);\n" "long8 __ovld __cnfn convert_long8(int8);\n" "long8 __ovld __cnfn convert_long8_sat(int8);\n" "long8 __ovld __cnfn convert_long8_rte(uint8);\n" "long8 __ovld __cnfn convert_long8_sat_rte(uint8);\n" "long8 __ovld __cnfn convert_long8_rtz(uint8);\n" "long8 __ovld __cnfn convert_long8_sat_rtz(uint8);\n" "long8 __ovld __cnfn convert_long8_rtp(uint8);\n" "long8 __ovld __cnfn convert_long8_sat_rtp(uint8);\n" "long8 __ovld __cnfn convert_long8_rtn(uint8);\n" "long8 __ovld __cnfn convert_long8_sat_rtn(uint8);\n" "long8 __ovld __cnfn convert_long8(uint8);\n" "long8 __ovld __cnfn convert_long8_sat(uint8);\n" "long8 __ovld __cnfn convert_long8_rte(long8);\n" "long8 __ovld __cnfn convert_long8_sat_rte(long8);\n" 
"long8 __ovld __cnfn convert_long8_rtz(long8);\n" "long8 __ovld __cnfn convert_long8_sat_rtz(long8);\n" "long8 __ovld __cnfn convert_long8_rtp(long8);\n" "long8 __ovld __cnfn convert_long8_sat_rtp(long8);\n" "long8 __ovld __cnfn convert_long8_rtn(long8);\n" "long8 __ovld __cnfn convert_long8_sat_rtn(long8);\n" "long8 __ovld __cnfn convert_long8(long8);\n" "long8 __ovld __cnfn convert_long8_sat(long8);\n" "long8 __ovld __cnfn convert_long8_rte(ulong8);\n" "long8 __ovld __cnfn convert_long8_sat_rte(ulong8);\n" "long8 __ovld __cnfn convert_long8_rtz(ulong8);\n" "long8 __ovld __cnfn convert_long8_sat_rtz(ulong8);\n" "long8 __ovld __cnfn convert_long8_rtp(ulong8);\n" "long8 __ovld __cnfn convert_long8_sat_rtp(ulong8);\n" "long8 __ovld __cnfn convert_long8_rtn(ulong8);\n" "long8 __ovld __cnfn convert_long8_sat_rtn(ulong8);\n" "long8 __ovld __cnfn convert_long8(ulong8);\n" "long8 __ovld __cnfn convert_long8_sat(ulong8);\n" "long8 __ovld __cnfn convert_long8_rte(float8);\n" "long8 __ovld __cnfn convert_long8_sat_rte(float8);\n" "long8 __ovld __cnfn convert_long8_rtz(float8);\n" "long8 __ovld __cnfn convert_long8_sat_rtz(float8);\n" "long8 __ovld __cnfn convert_long8_rtp(float8);\n" "long8 __ovld __cnfn convert_long8_sat_rtp(float8);\n" "long8 __ovld __cnfn convert_long8_rtn(float8);\n" "long8 __ovld __cnfn convert_long8_sat_rtn(float8);\n" "long8 __ovld __cnfn convert_long8(float8);\n" "long8 __ovld __cnfn convert_long8_sat(float8);\n" "ulong8 __ovld __cnfn convert_ulong8_rte(char8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rte(char8);\n" "ulong8 __ovld __cnfn convert_ulong8_rtz(char8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rtz(char8);\n" "ulong8 __ovld __cnfn convert_ulong8_rtp(char8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rtp(char8);\n" "ulong8 __ovld __cnfn convert_ulong8_rtn(char8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rtn(char8);\n" "ulong8 __ovld __cnfn convert_ulong8(char8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat(char8);\n" "ulong8 __ovld 
__cnfn convert_ulong8_rte(uchar8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rte(uchar8);\n" "ulong8 __ovld __cnfn convert_ulong8_rtz(uchar8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rtz(uchar8);\n" "ulong8 __ovld __cnfn convert_ulong8_rtp(uchar8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rtp(uchar8);\n" "ulong8 __ovld __cnfn convert_ulong8_rtn(uchar8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rtn(uchar8);\n" "ulong8 __ovld __cnfn convert_ulong8(uchar8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat(uchar8);\n" "ulong8 __ovld __cnfn convert_ulong8_rte(short8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rte(short8);\n" "ulong8 __ovld __cnfn convert_ulong8_rtz(short8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rtz(short8);\n" "ulong8 __ovld __cnfn convert_ulong8_rtp(short8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rtp(short8);\n" "ulong8 __ovld __cnfn convert_ulong8_rtn(short8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rtn(short8);\n" "ulong8 __ovld __cnfn convert_ulong8(short8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat(short8);\n" "ulong8 __ovld __cnfn convert_ulong8_rte(ushort8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rte(ushort8);\n" "ulong8 __ovld __cnfn convert_ulong8_rtz(ushort8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rtz(ushort8);\n" "ulong8 __ovld __cnfn convert_ulong8_rtp(ushort8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rtp(ushort8);\n" "ulong8 __ovld __cnfn convert_ulong8_rtn(ushort8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rtn(ushort8);\n" "ulong8 __ovld __cnfn convert_ulong8(ushort8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat(ushort8);\n" "ulong8 __ovld __cnfn convert_ulong8_rte(int8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rte(int8);\n" "ulong8 __ovld __cnfn convert_ulong8_rtz(int8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rtz(int8);\n" "ulong8 __ovld __cnfn convert_ulong8_rtp(int8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rtp(int8);\n" "ulong8 __ovld __cnfn convert_ulong8_rtn(int8);\n" "ulong8 
__ovld __cnfn convert_ulong8_sat_rtn(int8);\n" "ulong8 __ovld __cnfn convert_ulong8(int8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat(int8);\n" "ulong8 __ovld __cnfn convert_ulong8_rte(uint8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rte(uint8);\n" "ulong8 __ovld __cnfn convert_ulong8_rtz(uint8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rtz(uint8);\n" "ulong8 __ovld __cnfn convert_ulong8_rtp(uint8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rtp(uint8);\n" "ulong8 __ovld __cnfn convert_ulong8_rtn(uint8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rtn(uint8);\n" "ulong8 __ovld __cnfn convert_ulong8(uint8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat(uint8);\n" "ulong8 __ovld __cnfn convert_ulong8_rte(long8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rte(long8);\n" "ulong8 __ovld __cnfn convert_ulong8_rtz(long8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rtz(long8);\n" "ulong8 __ovld __cnfn convert_ulong8_rtp(long8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rtp(long8);\n" "ulong8 __ovld __cnfn convert_ulong8_rtn(long8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rtn(long8);\n" "ulong8 __ovld __cnfn convert_ulong8(long8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat(long8);\n" "ulong8 __ovld __cnfn convert_ulong8_rte(ulong8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rte(ulong8);\n" "ulong8 __ovld __cnfn convert_ulong8_rtz(ulong8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rtz(ulong8);\n" "ulong8 __ovld __cnfn convert_ulong8_rtp(ulong8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rtp(ulong8);\n" "ulong8 __ovld __cnfn convert_ulong8_rtn(ulong8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rtn(ulong8);\n" "ulong8 __ovld __cnfn convert_ulong8(ulong8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat(ulong8);\n" "ulong8 __ovld __cnfn convert_ulong8_rte(float8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rte(float8);\n" "ulong8 __ovld __cnfn convert_ulong8_rtz(float8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rtz(float8);\n" "ulong8 __ovld __cnfn 
convert_ulong8_rtp(float8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rtp(float8);\n" "ulong8 __ovld __cnfn convert_ulong8_rtn(float8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rtn(float8);\n" "ulong8 __ovld __cnfn convert_ulong8(float8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat(float8);\n" "float8 __ovld __cnfn convert_float8_rte(char8);\n" "float8 __ovld __cnfn convert_float8_rtz(char8);\n" "float8 __ovld __cnfn convert_float8_rtp(char8);\n" "float8 __ovld __cnfn convert_float8_rtn(char8);\n" "float8 __ovld __cnfn convert_float8(char8);\n" "float8 __ovld __cnfn convert_float8_rte(uchar8);\n" "float8 __ovld __cnfn convert_float8_rtz(uchar8);\n" "float8 __ovld __cnfn convert_float8_rtp(uchar8);\n" "float8 __ovld __cnfn convert_float8_rtn(uchar8);\n" "float8 __ovld __cnfn convert_float8(uchar8);\n" "float8 __ovld __cnfn convert_float8_rte(short8);\n" "float8 __ovld __cnfn convert_float8_rtz(short8);\n" "float8 __ovld __cnfn convert_float8_rtp(short8);\n" "float8 __ovld __cnfn convert_float8_rtn(short8);\n" "float8 __ovld __cnfn convert_float8(short8);\n" "float8 __ovld __cnfn convert_float8_rte(ushort8);\n" "float8 __ovld __cnfn convert_float8_rtz(ushort8);\n" "float8 __ovld __cnfn convert_float8_rtp(ushort8);\n" "float8 __ovld __cnfn convert_float8_rtn(ushort8);\n" "float8 __ovld __cnfn convert_float8(ushort8);\n" "float8 __ovld __cnfn convert_float8_rte(int8);\n" "float8 __ovld __cnfn convert_float8_rtz(int8);\n" "float8 __ovld __cnfn convert_float8_rtp(int8);\n" "float8 __ovld __cnfn convert_float8_rtn(int8);\n" "float8 __ovld __cnfn convert_float8(int8);\n" "float8 __ovld __cnfn convert_float8_rte(uint8);\n" "float8 __ovld __cnfn convert_float8_rtz(uint8);\n" "float8 __ovld __cnfn convert_float8_rtp(uint8);\n" "float8 __ovld __cnfn convert_float8_rtn(uint8);\n" "float8 __ovld __cnfn convert_float8(uint8);\n" "float8 __ovld __cnfn convert_float8_rte(long8);\n" "float8 __ovld __cnfn convert_float8_rtz(long8);\n" "float8 __ovld __cnfn 
convert_float8_rtp(long8);\n" "float8 __ovld __cnfn convert_float8_rtn(long8);\n" "float8 __ovld __cnfn convert_float8(long8);\n" "float8 __ovld __cnfn convert_float8_rte(ulong8);\n" "float8 __ovld __cnfn convert_float8_rtz(ulong8);\n" "float8 __ovld __cnfn convert_float8_rtp(ulong8);\n" "float8 __ovld __cnfn convert_float8_rtn(ulong8);\n" "float8 __ovld __cnfn convert_float8(ulong8);\n" "float8 __ovld __cnfn convert_float8_rte(float8);\n" "float8 __ovld __cnfn convert_float8_rtz(float8);\n" "float8 __ovld __cnfn convert_float8_rtp(float8);\n" "float8 __ovld __cnfn convert_float8_rtn(float8);\n" "float8 __ovld __cnfn convert_float8(float8);\n" "char16 __ovld __cnfn convert_char16_rte(char16);\n" "char16 __ovld __cnfn convert_char16_sat_rte(char16);\n" "char16 __ovld __cnfn convert_char16_rtz(char16);\n" "char16 __ovld __cnfn convert_char16_sat_rtz(char16);\n" "char16 __ovld __cnfn convert_char16_rtp(char16);\n" "char16 __ovld __cnfn convert_char16_sat_rtp(char16);\n" "char16 __ovld __cnfn convert_char16_rtn(char16);\n" "char16 __ovld __cnfn convert_char16_sat_rtn(char16);\n" "char16 __ovld __cnfn convert_char16(char16);\n" "char16 __ovld __cnfn convert_char16_sat(char16);\n" "char16 __ovld __cnfn convert_char16_rte(uchar16);\n" "char16 __ovld __cnfn convert_char16_sat_rte(uchar16);\n" "char16 __ovld __cnfn convert_char16_rtz(uchar16);\n" "char16 __ovld __cnfn convert_char16_sat_rtz(uchar16);\n" "char16 __ovld __cnfn convert_char16_rtp(uchar16);\n" "char16 __ovld __cnfn convert_char16_sat_rtp(uchar16);\n" "char16 __ovld __cnfn convert_char16_rtn(uchar16);\n" "char16 __ovld __cnfn convert_char16_sat_rtn(uchar16);\n" "char16 __ovld __cnfn convert_char16(uchar16);\n" "char16 __ovld __cnfn convert_char16_sat(uchar16);\n" "char16 __ovld __cnfn convert_char16_rte(short16);\n" "char16 __ovld __cnfn convert_char16_sat_rte(short16);\n" "char16 __ovld __cnfn convert_char16_rtz(short16);\n" "char16 __ovld __cnfn convert_char16_sat_rtz(short16);\n" "char16 __ovld __cnfn 
convert_char16_rtp(short16);\n" "char16 __ovld __cnfn convert_char16_sat_rtp(short16);\n" "char16 __ovld __cnfn convert_char16_rtn(short16);\n" "char16 __ovld __cnfn convert_char16_sat_rtn(short16);\n" "char16 __ovld __cnfn convert_char16(short16);\n" "char16 __ovld __cnfn convert_char16_sat(short16);\n" "char16 __ovld __cnfn convert_char16_rte(ushort16);\n" "char16 __ovld __cnfn convert_char16_sat_rte(ushort16);\n" "char16 __ovld __cnfn convert_char16_rtz(ushort16);\n" "char16 __ovld __cnfn convert_char16_sat_rtz(ushort16);\n" "char16 __ovld __cnfn convert_char16_rtp(ushort16);\n" "char16 __ovld __cnfn convert_char16_sat_rtp(ushort16);\n" "char16 __ovld __cnfn convert_char16_rtn(ushort16);\n" "char16 __ovld __cnfn convert_char16_sat_rtn(ushort16);\n" "char16 __ovld __cnfn convert_char16(ushort16);\n" "char16 __ovld __cnfn convert_char16_sat(ushort16);\n" "char16 __ovld __cnfn convert_char16_rte(int16);\n" "char16 __ovld __cnfn convert_char16_sat_rte(int16);\n" "char16 __ovld __cnfn convert_char16_rtz(int16);\n" "char16 __ovld __cnfn convert_char16_sat_rtz(int16);\n" "char16 __ovld __cnfn convert_char16_rtp(int16);\n" "char16 __ovld __cnfn convert_char16_sat_rtp(int16);\n" "char16 __ovld __cnfn convert_char16_rtn(int16);\n" "char16 __ovld __cnfn convert_char16_sat_rtn(int16);\n" "char16 __ovld __cnfn convert_char16(int16);\n" "char16 __ovld __cnfn convert_char16_sat(int16);\n" "char16 __ovld __cnfn convert_char16_rte(uint16);\n" "char16 __ovld __cnfn convert_char16_sat_rte(uint16);\n" "char16 __ovld __cnfn convert_char16_rtz(uint16);\n" "char16 __ovld __cnfn convert_char16_sat_rtz(uint16);\n" "char16 __ovld __cnfn convert_char16_rtp(uint16);\n" "char16 __ovld __cnfn convert_char16_sat_rtp(uint16);\n" "char16 __ovld __cnfn convert_char16_rtn(uint16);\n" "char16 __ovld __cnfn convert_char16_sat_rtn(uint16);\n" "char16 __ovld __cnfn convert_char16(uint16);\n" "char16 __ovld __cnfn convert_char16_sat(uint16);\n" "char16 __ovld __cnfn convert_char16_rte(long16);\n" 
"char16 __ovld __cnfn convert_char16_sat_rte(long16);\n" "char16 __ovld __cnfn convert_char16_rtz(long16);\n" "char16 __ovld __cnfn convert_char16_sat_rtz(long16);\n" "char16 __ovld __cnfn convert_char16_rtp(long16);\n" "char16 __ovld __cnfn convert_char16_sat_rtp(long16);\n" "char16 __ovld __cnfn convert_char16_rtn(long16);\n" "char16 __ovld __cnfn convert_char16_sat_rtn(long16);\n" "char16 __ovld __cnfn convert_char16(long16);\n" "char16 __ovld __cnfn convert_char16_sat(long16);\n" "char16 __ovld __cnfn convert_char16_rte(ulong16);\n" "char16 __ovld __cnfn convert_char16_sat_rte(ulong16);\n" "char16 __ovld __cnfn convert_char16_rtz(ulong16);\n" "char16 __ovld __cnfn convert_char16_sat_rtz(ulong16);\n" "char16 __ovld __cnfn convert_char16_rtp(ulong16);\n" "char16 __ovld __cnfn convert_char16_sat_rtp(ulong16);\n" "char16 __ovld __cnfn convert_char16_rtn(ulong16);\n" "char16 __ovld __cnfn convert_char16_sat_rtn(ulong16);\n" "char16 __ovld __cnfn convert_char16(ulong16);\n" "char16 __ovld __cnfn convert_char16_sat(ulong16);\n" "char16 __ovld __cnfn convert_char16_rte(float16);\n" "char16 __ovld __cnfn convert_char16_sat_rte(float16);\n" "char16 __ovld __cnfn convert_char16_rtz(float16);\n" "char16 __ovld __cnfn convert_char16_sat_rtz(float16);\n" "char16 __ovld __cnfn convert_char16_rtp(float16);\n" "char16 __ovld __cnfn convert_char16_sat_rtp(float16);\n" "char16 __ovld __cnfn convert_char16_rtn(float16);\n" "char16 __ovld __cnfn convert_char16_sat_rtn(float16);\n" "char16 __ovld __cnfn convert_char16(float16);\n" "char16 __ovld __cnfn convert_char16_sat(float16);\n" "uchar16 __ovld __cnfn convert_uchar16_rte(char16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat_rte(char16);\n" "uchar16 __ovld __cnfn convert_uchar16_rtz(char16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat_rtz(char16);\n" "uchar16 __ovld __cnfn convert_uchar16_rtp(char16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat_rtp(char16);\n" "uchar16 __ovld __cnfn convert_uchar16_rtn(char16);\n" "uchar16 
__ovld __cnfn convert_uchar16_sat_rtn(char16);\n" "uchar16 __ovld __cnfn convert_uchar16(char16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat(char16);\n" "uchar16 __ovld __cnfn convert_uchar16_rte(uchar16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat_rte(uchar16);\n" "uchar16 __ovld __cnfn convert_uchar16_rtz(uchar16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat_rtz(uchar16);\n" "uchar16 __ovld __cnfn convert_uchar16_rtp(uchar16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat_rtp(uchar16);\n" "uchar16 __ovld __cnfn convert_uchar16_rtn(uchar16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat_rtn(uchar16);\n" "uchar16 __ovld __cnfn convert_uchar16(uchar16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat(uchar16);\n" "uchar16 __ovld __cnfn convert_uchar16_rte(short16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat_rte(short16);\n" "uchar16 __ovld __cnfn convert_uchar16_rtz(short16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat_rtz(short16);\n" "uchar16 __ovld __cnfn convert_uchar16_rtp(short16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat_rtp(short16);\n" "uchar16 __ovld __cnfn convert_uchar16_rtn(short16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat_rtn(short16);\n" "uchar16 __ovld __cnfn convert_uchar16(short16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat(short16);\n" "uchar16 __ovld __cnfn convert_uchar16_rte(ushort16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat_rte(ushort16);\n" "uchar16 __ovld __cnfn convert_uchar16_rtz(ushort16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat_rtz(ushort16);\n" "uchar16 __ovld __cnfn convert_uchar16_rtp(ushort16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat_rtp(ushort16);\n" "uchar16 __ovld __cnfn convert_uchar16_rtn(ushort16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat_rtn(ushort16);\n" "uchar16 __ovld __cnfn convert_uchar16(ushort16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat(ushort16);\n" "uchar16 __ovld __cnfn convert_uchar16_rte(int16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat_rte(int16);\n" 
"uchar16 __ovld __cnfn convert_uchar16_rtz(int16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat_rtz(int16);\n" "uchar16 __ovld __cnfn convert_uchar16_rtp(int16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat_rtp(int16);\n" "uchar16 __ovld __cnfn convert_uchar16_rtn(int16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat_rtn(int16);\n" "uchar16 __ovld __cnfn convert_uchar16(int16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat(int16);\n" "uchar16 __ovld __cnfn convert_uchar16_rte(uint16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat_rte(uint16);\n" "uchar16 __ovld __cnfn convert_uchar16_rtz(uint16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat_rtz(uint16);\n" "uchar16 __ovld __cnfn convert_uchar16_rtp(uint16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat_rtp(uint16);\n" "uchar16 __ovld __cnfn convert_uchar16_rtn(uint16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat_rtn(uint16);\n" "uchar16 __ovld __cnfn convert_uchar16(uint16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat(uint16);\n" "uchar16 __ovld __cnfn convert_uchar16_rte(long16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat_rte(long16);\n" "uchar16 __ovld __cnfn convert_uchar16_rtz(long16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat_rtz(long16);\n" "uchar16 __ovld __cnfn convert_uchar16_rtp(long16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat_rtp(long16);\n" "uchar16 __ovld __cnfn convert_uchar16_rtn(long16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat_rtn(long16);\n" "uchar16 __ovld __cnfn convert_uchar16(long16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat(long16);\n" "uchar16 __ovld __cnfn convert_uchar16_rte(ulong16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat_rte(ulong16);\n" "uchar16 __ovld __cnfn convert_uchar16_rtz(ulong16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat_rtz(ulong16);\n" "uchar16 __ovld __cnfn convert_uchar16_rtp(ulong16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat_rtp(ulong16);\n" "uchar16 __ovld __cnfn convert_uchar16_rtn(ulong16);\n" "uchar16 __ovld __cnfn 
convert_uchar16_sat_rtn(ulong16);\n" "uchar16 __ovld __cnfn convert_uchar16(ulong16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat(ulong16);\n" "uchar16 __ovld __cnfn convert_uchar16_rte(float16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat_rte(float16);\n" "uchar16 __ovld __cnfn convert_uchar16_rtz(float16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat_rtz(float16);\n" "uchar16 __ovld __cnfn convert_uchar16_rtp(float16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat_rtp(float16);\n" "uchar16 __ovld __cnfn convert_uchar16_rtn(float16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat_rtn(float16);\n" "uchar16 __ovld __cnfn convert_uchar16(float16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat(float16);\n" "short16 __ovld __cnfn convert_short16_rte(char16);\n" "short16 __ovld __cnfn convert_short16_sat_rte(char16);\n" "short16 __ovld __cnfn convert_short16_rtz(char16);\n" "short16 __ovld __cnfn convert_short16_sat_rtz(char16);\n" "short16 __ovld __cnfn convert_short16_rtp(char16);\n" "short16 __ovld __cnfn convert_short16_sat_rtp(char16);\n" "short16 __ovld __cnfn convert_short16_rtn(char16);\n" "short16 __ovld __cnfn convert_short16_sat_rtn(char16);\n" "short16 __ovld __cnfn convert_short16(char16);\n" "short16 __ovld __cnfn convert_short16_sat(char16);\n" "short16 __ovld __cnfn convert_short16_rte(uchar16);\n" "short16 __ovld __cnfn convert_short16_sat_rte(uchar16);\n" "short16 __ovld __cnfn convert_short16_rtz(uchar16);\n" "short16 __ovld __cnfn convert_short16_sat_rtz(uchar16);\n" "short16 __ovld __cnfn convert_short16_rtp(uchar16);\n" "short16 __ovld __cnfn convert_short16_sat_rtp(uchar16);\n" "short16 __ovld __cnfn convert_short16_rtn(uchar16);\n" "short16 __ovld __cnfn convert_short16_sat_rtn(uchar16);\n" "short16 __ovld __cnfn convert_short16(uchar16);\n" "short16 __ovld __cnfn convert_short16_sat(uchar16);\n" "short16 __ovld __cnfn convert_short16_rte(short16);\n" "short16 __ovld __cnfn convert_short16_sat_rte(short16);\n" "short16 __ovld __cnfn 
convert_short16_rtz(short16);\n" "short16 __ovld __cnfn convert_short16_sat_rtz(short16);\n" "short16 __ovld __cnfn convert_short16_rtp(short16);\n" "short16 __ovld __cnfn convert_short16_sat_rtp(short16);\n" "short16 __ovld __cnfn convert_short16_rtn(short16);\n" "short16 __ovld __cnfn convert_short16_sat_rtn(short16);\n" "short16 __ovld __cnfn convert_short16(short16);\n" "short16 __ovld __cnfn convert_short16_sat(short16);\n" "short16 __ovld __cnfn convert_short16_rte(ushort16);\n" "short16 __ovld __cnfn convert_short16_sat_rte(ushort16);\n" "short16 __ovld __cnfn convert_short16_rtz(ushort16);\n" "short16 __ovld __cnfn convert_short16_sat_rtz(ushort16);\n" "short16 __ovld __cnfn convert_short16_rtp(ushort16);\n" "short16 __ovld __cnfn convert_short16_sat_rtp(ushort16);\n" "short16 __ovld __cnfn convert_short16_rtn(ushort16);\n" "short16 __ovld __cnfn convert_short16_sat_rtn(ushort16);\n" "short16 __ovld __cnfn convert_short16(ushort16);\n" "short16 __ovld __cnfn convert_short16_sat(ushort16);\n" "short16 __ovld __cnfn convert_short16_rte(int16);\n" "short16 __ovld __cnfn convert_short16_sat_rte(int16);\n" "short16 __ovld __cnfn convert_short16_rtz(int16);\n" "short16 __ovld __cnfn convert_short16_sat_rtz(int16);\n" "short16 __ovld __cnfn convert_short16_rtp(int16);\n" "short16 __ovld __cnfn convert_short16_sat_rtp(int16);\n" "short16 __ovld __cnfn convert_short16_rtn(int16);\n" "short16 __ovld __cnfn convert_short16_sat_rtn(int16);\n" "short16 __ovld __cnfn convert_short16(int16);\n" "short16 __ovld __cnfn convert_short16_sat(int16);\n" "short16 __ovld __cnfn convert_short16_rte(uint16);\n" "short16 __ovld __cnfn convert_short16_sat_rte(uint16);\n" "short16 __ovld __cnfn convert_short16_rtz(uint16);\n" "short16 __ovld __cnfn convert_short16_sat_rtz(uint16);\n" "short16 __ovld __cnfn convert_short16_rtp(uint16);\n" "short16 __ovld __cnfn convert_short16_sat_rtp(uint16);\n" "short16 __ovld __cnfn convert_short16_rtn(uint16);\n" "short16 __ovld __cnfn 
convert_short16_sat_rtn(uint16);\n" "short16 __ovld __cnfn convert_short16(uint16);\n" "short16 __ovld __cnfn convert_short16_sat(uint16);\n" "short16 __ovld __cnfn convert_short16_rte(long16);\n" "short16 __ovld __cnfn convert_short16_sat_rte(long16);\n" "short16 __ovld __cnfn convert_short16_rtz(long16);\n" "short16 __ovld __cnfn convert_short16_sat_rtz(long16);\n" "short16 __ovld __cnfn convert_short16_rtp(long16);\n" "short16 __ovld __cnfn convert_short16_sat_rtp(long16);\n" "short16 __ovld __cnfn convert_short16_rtn(long16);\n" "short16 __ovld __cnfn convert_short16_sat_rtn(long16);\n" "short16 __ovld __cnfn convert_short16(long16);\n" "short16 __ovld __cnfn convert_short16_sat(long16);\n" "short16 __ovld __cnfn convert_short16_rte(ulong16);\n" "short16 __ovld __cnfn convert_short16_sat_rte(ulong16);\n" "short16 __ovld __cnfn convert_short16_rtz(ulong16);\n" "short16 __ovld __cnfn convert_short16_sat_rtz(ulong16);\n" "short16 __ovld __cnfn convert_short16_rtp(ulong16);\n" "short16 __ovld __cnfn convert_short16_sat_rtp(ulong16);\n" "short16 __ovld __cnfn convert_short16_rtn(ulong16);\n" "short16 __ovld __cnfn convert_short16_sat_rtn(ulong16);\n" "short16 __ovld __cnfn convert_short16(ulong16);\n" "short16 __ovld __cnfn convert_short16_sat(ulong16);\n" "short16 __ovld __cnfn convert_short16_rte(float16);\n" "short16 __ovld __cnfn convert_short16_sat_rte(float16);\n" "short16 __ovld __cnfn convert_short16_rtz(float16);\n" "short16 __ovld __cnfn convert_short16_sat_rtz(float16);\n" "short16 __ovld __cnfn convert_short16_rtp(float16);\n" "short16 __ovld __cnfn convert_short16_sat_rtp(float16);\n" "short16 __ovld __cnfn convert_short16_rtn(float16);\n" "short16 __ovld __cnfn convert_short16_sat_rtn(float16);\n" "short16 __ovld __cnfn convert_short16(float16);\n" "short16 __ovld __cnfn convert_short16_sat(float16);\n" "ushort16 __ovld __cnfn convert_ushort16_rte(char16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rte(char16);\n" "ushort16 __ovld __cnfn 
convert_ushort16_rtz(char16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rtz(char16);\n" "ushort16 __ovld __cnfn convert_ushort16_rtp(char16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rtp(char16);\n" "ushort16 __ovld __cnfn convert_ushort16_rtn(char16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rtn(char16);\n" "ushort16 __ovld __cnfn convert_ushort16(char16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat(char16);\n" "ushort16 __ovld __cnfn convert_ushort16_rte(uchar16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rte(uchar16);\n" "ushort16 __ovld __cnfn convert_ushort16_rtz(uchar16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rtz(uchar16);\n" "ushort16 __ovld __cnfn convert_ushort16_rtp(uchar16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rtp(uchar16);\n" "ushort16 __ovld __cnfn convert_ushort16_rtn(uchar16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rtn(uchar16);\n" "ushort16 __ovld __cnfn convert_ushort16(uchar16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat(uchar16);\n" "ushort16 __ovld __cnfn convert_ushort16_rte(short16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rte(short16);\n" "ushort16 __ovld __cnfn convert_ushort16_rtz(short16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rtz(short16);\n" "ushort16 __ovld __cnfn convert_ushort16_rtp(short16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rtp(short16);\n" "ushort16 __ovld __cnfn convert_ushort16_rtn(short16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rtn(short16);\n" "ushort16 __ovld __cnfn convert_ushort16(short16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat(short16);\n" "ushort16 __ovld __cnfn convert_ushort16_rte(ushort16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rte(ushort16);\n" "ushort16 __ovld __cnfn convert_ushort16_rtz(ushort16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rtz(ushort16);\n" "ushort16 __ovld __cnfn convert_ushort16_rtp(ushort16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rtp(ushort16);\n" "ushort16 
__ovld __cnfn convert_ushort16_rtn(ushort16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rtn(ushort16);\n" "ushort16 __ovld __cnfn convert_ushort16(ushort16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat(ushort16);\n" "ushort16 __ovld __cnfn convert_ushort16_rte(int16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rte(int16);\n" "ushort16 __ovld __cnfn convert_ushort16_rtz(int16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rtz(int16);\n" "ushort16 __ovld __cnfn convert_ushort16_rtp(int16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rtp(int16);\n" "ushort16 __ovld __cnfn convert_ushort16_rtn(int16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rtn(int16);\n" "ushort16 __ovld __cnfn convert_ushort16(int16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat(int16);\n" "ushort16 __ovld __cnfn convert_ushort16_rte(uint16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rte(uint16);\n" "ushort16 __ovld __cnfn convert_ushort16_rtz(uint16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rtz(uint16);\n" "ushort16 __ovld __cnfn convert_ushort16_rtp(uint16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rtp(uint16);\n" "ushort16 __ovld __cnfn convert_ushort16_rtn(uint16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rtn(uint16);\n" "ushort16 __ovld __cnfn convert_ushort16(uint16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat(uint16);\n" "ushort16 __ovld __cnfn convert_ushort16_rte(long16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rte(long16);\n" "ushort16 __ovld __cnfn convert_ushort16_rtz(long16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rtz(long16);\n" "ushort16 __ovld __cnfn convert_ushort16_rtp(long16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rtp(long16);\n" "ushort16 __ovld __cnfn convert_ushort16_rtn(long16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rtn(long16);\n" "ushort16 __ovld __cnfn convert_ushort16(long16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat(long16);\n" "ushort16 __ovld __cnfn 
convert_ushort16_rte(ulong16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rte(ulong16);\n" "ushort16 __ovld __cnfn convert_ushort16_rtz(ulong16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rtz(ulong16);\n" "ushort16 __ovld __cnfn convert_ushort16_rtp(ulong16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rtp(ulong16);\n" "ushort16 __ovld __cnfn convert_ushort16_rtn(ulong16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rtn(ulong16);\n" "ushort16 __ovld __cnfn convert_ushort16(ulong16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat(ulong16);\n" "ushort16 __ovld __cnfn convert_ushort16_rte(float16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rte(float16);\n" "ushort16 __ovld __cnfn convert_ushort16_rtz(float16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rtz(float16);\n" "ushort16 __ovld __cnfn convert_ushort16_rtp(float16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rtp(float16);\n" "ushort16 __ovld __cnfn convert_ushort16_rtn(float16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rtn(float16);\n" "ushort16 __ovld __cnfn convert_ushort16(float16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat(float16);\n" "int16 __ovld __cnfn convert_int16_rte(char16);\n" "int16 __ovld __cnfn convert_int16_sat_rte(char16);\n" "int16 __ovld __cnfn convert_int16_rtz(char16);\n" "int16 __ovld __cnfn convert_int16_sat_rtz(char16);\n" "int16 __ovld __cnfn convert_int16_rtp(char16);\n" "int16 __ovld __cnfn convert_int16_sat_rtp(char16);\n" "int16 __ovld __cnfn convert_int16_rtn(char16);\n" "int16 __ovld __cnfn convert_int16_sat_rtn(char16);\n" "int16 __ovld __cnfn convert_int16(char16);\n" "int16 __ovld __cnfn convert_int16_sat(char16);\n" "int16 __ovld __cnfn convert_int16_rte(uchar16);\n" "int16 __ovld __cnfn convert_int16_sat_rte(uchar16);\n" "int16 __ovld __cnfn convert_int16_rtz(uchar16);\n" "int16 __ovld __cnfn convert_int16_sat_rtz(uchar16);\n" "int16 __ovld __cnfn convert_int16_rtp(uchar16);\n" "int16 __ovld __cnfn 
convert_int16_sat_rtp(uchar16);\n" "int16 __ovld __cnfn convert_int16_rtn(uchar16);\n" "int16 __ovld __cnfn convert_int16_sat_rtn(uchar16);\n" "int16 __ovld __cnfn convert_int16(uchar16);\n" "int16 __ovld __cnfn convert_int16_sat(uchar16);\n" "int16 __ovld __cnfn convert_int16_rte(short16);\n" "int16 __ovld __cnfn convert_int16_sat_rte(short16);\n" "int16 __ovld __cnfn convert_int16_rtz(short16);\n" "int16 __ovld __cnfn convert_int16_sat_rtz(short16);\n" "int16 __ovld __cnfn convert_int16_rtp(short16);\n" "int16 __ovld __cnfn convert_int16_sat_rtp(short16);\n" "int16 __ovld __cnfn convert_int16_rtn(short16);\n" "int16 __ovld __cnfn convert_int16_sat_rtn(short16);\n" "int16 __ovld __cnfn convert_int16(short16);\n" "int16 __ovld __cnfn convert_int16_sat(short16);\n" "int16 __ovld __cnfn convert_int16_rte(ushort16);\n" "int16 __ovld __cnfn convert_int16_sat_rte(ushort16);\n" "int16 __ovld __cnfn convert_int16_rtz(ushort16);\n" "int16 __ovld __cnfn convert_int16_sat_rtz(ushort16);\n" "int16 __ovld __cnfn convert_int16_rtp(ushort16);\n" "int16 __ovld __cnfn convert_int16_sat_rtp(ushort16);\n" "int16 __ovld __cnfn convert_int16_rtn(ushort16);\n" "int16 __ovld __cnfn convert_int16_sat_rtn(ushort16);\n" "int16 __ovld __cnfn convert_int16(ushort16);\n" "int16 __ovld __cnfn convert_int16_sat(ushort16);\n" "int16 __ovld __cnfn convert_int16_rte(int16);\n" "int16 __ovld __cnfn convert_int16_sat_rte(int16);\n" "int16 __ovld __cnfn convert_int16_rtz(int16);\n" "int16 __ovld __cnfn convert_int16_sat_rtz(int16);\n" "int16 __ovld __cnfn convert_int16_rtp(int16);\n" "int16 __ovld __cnfn convert_int16_sat_rtp(int16);\n" "int16 __ovld __cnfn convert_int16_rtn(int16);\n" "int16 __ovld __cnfn convert_int16_sat_rtn(int16);\n" "int16 __ovld __cnfn convert_int16(int16);\n" "int16 __ovld __cnfn convert_int16_sat(int16);\n" "int16 __ovld __cnfn convert_int16_rte(uint16);\n" "int16 __ovld __cnfn convert_int16_sat_rte(uint16);\n" "int16 __ovld __cnfn convert_int16_rtz(uint16);\n" "int16 __ovld 
__cnfn convert_int16_sat_rtz(uint16);\n" "int16 __ovld __cnfn convert_int16_rtp(uint16);\n" "int16 __ovld __cnfn convert_int16_sat_rtp(uint16);\n" "int16 __ovld __cnfn convert_int16_rtn(uint16);\n" "int16 __ovld __cnfn convert_int16_sat_rtn(uint16);\n" "int16 __ovld __cnfn convert_int16(uint16);\n" "int16 __ovld __cnfn convert_int16_sat(uint16);\n" "int16 __ovld __cnfn convert_int16_rte(long16);\n" "int16 __ovld __cnfn convert_int16_sat_rte(long16);\n" "int16 __ovld __cnfn convert_int16_rtz(long16);\n" "int16 __ovld __cnfn convert_int16_sat_rtz(long16);\n" "int16 __ovld __cnfn convert_int16_rtp(long16);\n" "int16 __ovld __cnfn convert_int16_sat_rtp(long16);\n" "int16 __ovld __cnfn convert_int16_rtn(long16);\n" "int16 __ovld __cnfn convert_int16_sat_rtn(long16);\n" "int16 __ovld __cnfn convert_int16(long16);\n" "int16 __ovld __cnfn convert_int16_sat(long16);\n" "int16 __ovld __cnfn convert_int16_rte(ulong16);\n" "int16 __ovld __cnfn convert_int16_sat_rte(ulong16);\n" "int16 __ovld __cnfn convert_int16_rtz(ulong16);\n" "int16 __ovld __cnfn convert_int16_sat_rtz(ulong16);\n" "int16 __ovld __cnfn convert_int16_rtp(ulong16);\n" "int16 __ovld __cnfn convert_int16_sat_rtp(ulong16);\n" "int16 __ovld __cnfn convert_int16_rtn(ulong16);\n" "int16 __ovld __cnfn convert_int16_sat_rtn(ulong16);\n" "int16 __ovld __cnfn convert_int16(ulong16);\n" "int16 __ovld __cnfn convert_int16_sat(ulong16);\n" "int16 __ovld __cnfn convert_int16_rte(float16);\n" "int16 __ovld __cnfn convert_int16_sat_rte(float16);\n" "int16 __ovld __cnfn convert_int16_rtz(float16);\n" "int16 __ovld __cnfn convert_int16_sat_rtz(float16);\n" "int16 __ovld __cnfn convert_int16_rtp(float16);\n" "int16 __ovld __cnfn convert_int16_sat_rtp(float16);\n" "int16 __ovld __cnfn convert_int16_rtn(float16);\n" "int16 __ovld __cnfn convert_int16_sat_rtn(float16);\n" "int16 __ovld __cnfn convert_int16(float16);\n" "int16 __ovld __cnfn convert_int16_sat(float16);\n" "uint16 __ovld __cnfn convert_uint16_rte(char16);\n" "uint16 
__ovld __cnfn convert_uint16_sat_rte(char16);\n" "uint16 __ovld __cnfn convert_uint16_rtz(char16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rtz(char16);\n" "uint16 __ovld __cnfn convert_uint16_rtp(char16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rtp(char16);\n" "uint16 __ovld __cnfn convert_uint16_rtn(char16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rtn(char16);\n" "uint16 __ovld __cnfn convert_uint16(char16);\n" "uint16 __ovld __cnfn convert_uint16_sat(char16);\n" "uint16 __ovld __cnfn convert_uint16_rte(uchar16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rte(uchar16);\n" "uint16 __ovld __cnfn convert_uint16_rtz(uchar16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rtz(uchar16);\n" "uint16 __ovld __cnfn convert_uint16_rtp(uchar16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rtp(uchar16);\n" "uint16 __ovld __cnfn convert_uint16_rtn(uchar16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rtn(uchar16);\n" "uint16 __ovld __cnfn convert_uint16(uchar16);\n" "uint16 __ovld __cnfn convert_uint16_sat(uchar16);\n" "uint16 __ovld __cnfn convert_uint16_rte(short16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rte(short16);\n" "uint16 __ovld __cnfn convert_uint16_rtz(short16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rtz(short16);\n" "uint16 __ovld __cnfn convert_uint16_rtp(short16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rtp(short16);\n" "uint16 __ovld __cnfn convert_uint16_rtn(short16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rtn(short16);\n" "uint16 __ovld __cnfn convert_uint16(short16);\n" "uint16 __ovld __cnfn convert_uint16_sat(short16);\n" "uint16 __ovld __cnfn convert_uint16_rte(ushort16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rte(ushort16);\n" "uint16 __ovld __cnfn convert_uint16_rtz(ushort16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rtz(ushort16);\n" "uint16 __ovld __cnfn convert_uint16_rtp(ushort16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rtp(ushort16);\n" "uint16 __ovld __cnfn convert_uint16_rtn(ushort16);\n" "uint16 __ovld 
__cnfn convert_uint16_sat_rtn(ushort16);\n" "uint16 __ovld __cnfn convert_uint16(ushort16);\n" "uint16 __ovld __cnfn convert_uint16_sat(ushort16);\n" "uint16 __ovld __cnfn convert_uint16_rte(int16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rte(int16);\n" "uint16 __ovld __cnfn convert_uint16_rtz(int16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rtz(int16);\n" "uint16 __ovld __cnfn convert_uint16_rtp(int16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rtp(int16);\n" "uint16 __ovld __cnfn convert_uint16_rtn(int16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rtn(int16);\n" "uint16 __ovld __cnfn convert_uint16(int16);\n" "uint16 __ovld __cnfn convert_uint16_sat(int16);\n" "uint16 __ovld __cnfn convert_uint16_rte(uint16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rte(uint16);\n" "uint16 __ovld __cnfn convert_uint16_rtz(uint16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rtz(uint16);\n" "uint16 __ovld __cnfn convert_uint16_rtp(uint16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rtp(uint16);\n" "uint16 __ovld __cnfn convert_uint16_rtn(uint16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rtn(uint16);\n" "uint16 __ovld __cnfn convert_uint16(uint16);\n" "uint16 __ovld __cnfn convert_uint16_sat(uint16);\n" "uint16 __ovld __cnfn convert_uint16_rte(long16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rte(long16);\n" "uint16 __ovld __cnfn convert_uint16_rtz(long16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rtz(long16);\n" "uint16 __ovld __cnfn convert_uint16_rtp(long16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rtp(long16);\n" "uint16 __ovld __cnfn convert_uint16_rtn(long16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rtn(long16);\n" "uint16 __ovld __cnfn convert_uint16(long16);\n" "uint16 __ovld __cnfn convert_uint16_sat(long16);\n" "uint16 __ovld __cnfn convert_uint16_rte(ulong16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rte(ulong16);\n" "uint16 __ovld __cnfn convert_uint16_rtz(ulong16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rtz(ulong16);\n" "uint16 
__ovld __cnfn convert_uint16_rtp(ulong16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rtp(ulong16);\n" "uint16 __ovld __cnfn convert_uint16_rtn(ulong16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rtn(ulong16);\n" "uint16 __ovld __cnfn convert_uint16(ulong16);\n" "uint16 __ovld __cnfn convert_uint16_sat(ulong16);\n" "uint16 __ovld __cnfn convert_uint16_rte(float16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rte(float16);\n" "uint16 __ovld __cnfn convert_uint16_rtz(float16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rtz(float16);\n" "uint16 __ovld __cnfn convert_uint16_rtp(float16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rtp(float16);\n" "uint16 __ovld __cnfn convert_uint16_rtn(float16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rtn(float16);\n" "uint16 __ovld __cnfn convert_uint16(float16);\n" "uint16 __ovld __cnfn convert_uint16_sat(float16);\n" "long16 __ovld __cnfn convert_long16_rte(char16);\n" "long16 __ovld __cnfn convert_long16_sat_rte(char16);\n" "long16 __ovld __cnfn convert_long16_rtz(char16);\n" "long16 __ovld __cnfn convert_long16_sat_rtz(char16);\n" "long16 __ovld __cnfn convert_long16_rtp(char16);\n" "long16 __ovld __cnfn convert_long16_sat_rtp(char16);\n" "long16 __ovld __cnfn convert_long16_rtn(char16);\n" "long16 __ovld __cnfn convert_long16_sat_rtn(char16);\n" "long16 __ovld __cnfn convert_long16(char16);\n" "long16 __ovld __cnfn convert_long16_sat(char16);\n" "long16 __ovld __cnfn convert_long16_rte(uchar16);\n" "long16 __ovld __cnfn convert_long16_sat_rte(uchar16);\n" "long16 __ovld __cnfn convert_long16_rtz(uchar16);\n" "long16 __ovld __cnfn convert_long16_sat_rtz(uchar16);\n" "long16 __ovld __cnfn convert_long16_rtp(uchar16);\n" "long16 __ovld __cnfn convert_long16_sat_rtp(uchar16);\n" "long16 __ovld __cnfn convert_long16_rtn(uchar16);\n" "long16 __ovld __cnfn convert_long16_sat_rtn(uchar16);\n" "long16 __ovld __cnfn convert_long16(uchar16);\n" "long16 __ovld __cnfn convert_long16_sat(uchar16);\n" "long16 __ovld __cnfn 
convert_long16_rte(short16);\n" "long16 __ovld __cnfn convert_long16_sat_rte(short16);\n" "long16 __ovld __cnfn convert_long16_rtz(short16);\n" "long16 __ovld __cnfn convert_long16_sat_rtz(short16);\n" "long16 __ovld __cnfn convert_long16_rtp(short16);\n" "long16 __ovld __cnfn convert_long16_sat_rtp(short16);\n" "long16 __ovld __cnfn convert_long16_rtn(short16);\n" "long16 __ovld __cnfn convert_long16_sat_rtn(short16);\n" "long16 __ovld __cnfn convert_long16(short16);\n" "long16 __ovld __cnfn convert_long16_sat(short16);\n" "long16 __ovld __cnfn convert_long16_rte(ushort16);\n" "long16 __ovld __cnfn convert_long16_sat_rte(ushort16);\n" "long16 __ovld __cnfn convert_long16_rtz(ushort16);\n" "long16 __ovld __cnfn convert_long16_sat_rtz(ushort16);\n" "long16 __ovld __cnfn convert_long16_rtp(ushort16);\n" "long16 __ovld __cnfn convert_long16_sat_rtp(ushort16);\n" "long16 __ovld __cnfn convert_long16_rtn(ushort16);\n" "long16 __ovld __cnfn convert_long16_sat_rtn(ushort16);\n" "long16 __ovld __cnfn convert_long16(ushort16);\n" "long16 __ovld __cnfn convert_long16_sat(ushort16);\n" "long16 __ovld __cnfn convert_long16_rte(int16);\n" "long16 __ovld __cnfn convert_long16_sat_rte(int16);\n" "long16 __ovld __cnfn convert_long16_rtz(int16);\n" "long16 __ovld __cnfn convert_long16_sat_rtz(int16);\n" "long16 __ovld __cnfn convert_long16_rtp(int16);\n" "long16 __ovld __cnfn convert_long16_sat_rtp(int16);\n" "long16 __ovld __cnfn convert_long16_rtn(int16);\n" "long16 __ovld __cnfn convert_long16_sat_rtn(int16);\n" "long16 __ovld __cnfn convert_long16(int16);\n" "long16 __ovld __cnfn convert_long16_sat(int16);\n" "long16 __ovld __cnfn convert_long16_rte(uint16);\n" "long16 __ovld __cnfn convert_long16_sat_rte(uint16);\n" "long16 __ovld __cnfn convert_long16_rtz(uint16);\n" "long16 __ovld __cnfn convert_long16_sat_rtz(uint16);\n" "long16 __ovld __cnfn convert_long16_rtp(uint16);\n" "long16 __ovld __cnfn convert_long16_sat_rtp(uint16);\n" "long16 __ovld __cnfn 
convert_long16_rtn(uint16);\n" "long16 __ovld __cnfn convert_long16_sat_rtn(uint16);\n" "long16 __ovld __cnfn convert_long16(uint16);\n" "long16 __ovld __cnfn convert_long16_sat(uint16);\n" "long16 __ovld __cnfn convert_long16_rte(long16);\n" "long16 __ovld __cnfn convert_long16_sat_rte(long16);\n" "long16 __ovld __cnfn convert_long16_rtz(long16);\n" "long16 __ovld __cnfn convert_long16_sat_rtz(long16);\n" "long16 __ovld __cnfn convert_long16_rtp(long16);\n" "long16 __ovld __cnfn convert_long16_sat_rtp(long16);\n" "long16 __ovld __cnfn convert_long16_rtn(long16);\n" "long16 __ovld __cnfn convert_long16_sat_rtn(long16);\n" "long16 __ovld __cnfn convert_long16(long16);\n" "long16 __ovld __cnfn convert_long16_sat(long16);\n" "long16 __ovld __cnfn convert_long16_rte(ulong16);\n" "long16 __ovld __cnfn convert_long16_sat_rte(ulong16);\n" "long16 __ovld __cnfn convert_long16_rtz(ulong16);\n" "long16 __ovld __cnfn convert_long16_sat_rtz(ulong16);\n" "long16 __ovld __cnfn convert_long16_rtp(ulong16);\n" "long16 __ovld __cnfn convert_long16_sat_rtp(ulong16);\n" "long16 __ovld __cnfn convert_long16_rtn(ulong16);\n" "long16 __ovld __cnfn convert_long16_sat_rtn(ulong16);\n" "long16 __ovld __cnfn convert_long16(ulong16);\n" "long16 __ovld __cnfn convert_long16_sat(ulong16);\n" "long16 __ovld __cnfn convert_long16_rte(float16);\n" "long16 __ovld __cnfn convert_long16_sat_rte(float16);\n" "long16 __ovld __cnfn convert_long16_rtz(float16);\n" "long16 __ovld __cnfn convert_long16_sat_rtz(float16);\n" "long16 __ovld __cnfn convert_long16_rtp(float16);\n" "long16 __ovld __cnfn convert_long16_sat_rtp(float16);\n" "long16 __ovld __cnfn convert_long16_rtn(float16);\n" "long16 __ovld __cnfn convert_long16_sat_rtn(float16);\n" "long16 __ovld __cnfn convert_long16(float16);\n" "long16 __ovld __cnfn convert_long16_sat(float16);\n" "ulong16 __ovld __cnfn convert_ulong16_rte(char16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rte(char16);\n" "ulong16 __ovld __cnfn 
convert_ulong16_rtz(char16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rtz(char16);\n" "ulong16 __ovld __cnfn convert_ulong16_rtp(char16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rtp(char16);\n" "ulong16 __ovld __cnfn convert_ulong16_rtn(char16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rtn(char16);\n" "ulong16 __ovld __cnfn convert_ulong16(char16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat(char16);\n" "ulong16 __ovld __cnfn convert_ulong16_rte(uchar16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rte(uchar16);\n" "ulong16 __ovld __cnfn convert_ulong16_rtz(uchar16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rtz(uchar16);\n" "ulong16 __ovld __cnfn convert_ulong16_rtp(uchar16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rtp(uchar16);\n" "ulong16 __ovld __cnfn convert_ulong16_rtn(uchar16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rtn(uchar16);\n" "ulong16 __ovld __cnfn convert_ulong16(uchar16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat(uchar16);\n" "ulong16 __ovld __cnfn convert_ulong16_rte(short16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rte(short16);\n" "ulong16 __ovld __cnfn convert_ulong16_rtz(short16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rtz(short16);\n" "ulong16 __ovld __cnfn convert_ulong16_rtp(short16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rtp(short16);\n" "ulong16 __ovld __cnfn convert_ulong16_rtn(short16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rtn(short16);\n" "ulong16 __ovld __cnfn convert_ulong16(short16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat(short16);\n" "ulong16 __ovld __cnfn convert_ulong16_rte(ushort16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rte(ushort16);\n" "ulong16 __ovld __cnfn convert_ulong16_rtz(ushort16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rtz(ushort16);\n" "ulong16 __ovld __cnfn convert_ulong16_rtp(ushort16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rtp(ushort16);\n" "ulong16 __ovld __cnfn convert_ulong16_rtn(ushort16);\n" "ulong16 __ovld 
__cnfn convert_ulong16_sat_rtn(ushort16);\n" "ulong16 __ovld __cnfn convert_ulong16(ushort16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat(ushort16);\n" "ulong16 __ovld __cnfn convert_ulong16_rte(int16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rte(int16);\n" "ulong16 __ovld __cnfn convert_ulong16_rtz(int16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rtz(int16);\n" "ulong16 __ovld __cnfn convert_ulong16_rtp(int16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rtp(int16);\n" "ulong16 __ovld __cnfn convert_ulong16_rtn(int16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rtn(int16);\n" "ulong16 __ovld __cnfn convert_ulong16(int16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat(int16);\n" "ulong16 __ovld __cnfn convert_ulong16_rte(uint16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rte(uint16);\n" "ulong16 __ovld __cnfn convert_ulong16_rtz(uint16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rtz(uint16);\n" "ulong16 __ovld __cnfn convert_ulong16_rtp(uint16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rtp(uint16);\n" "ulong16 __ovld __cnfn convert_ulong16_rtn(uint16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rtn(uint16);\n" "ulong16 __ovld __cnfn convert_ulong16(uint16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat(uint16);\n" "ulong16 __ovld __cnfn convert_ulong16_rte(long16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rte(long16);\n" "ulong16 __ovld __cnfn convert_ulong16_rtz(long16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rtz(long16);\n" "ulong16 __ovld __cnfn convert_ulong16_rtp(long16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rtp(long16);\n" "ulong16 __ovld __cnfn convert_ulong16_rtn(long16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rtn(long16);\n" "ulong16 __ovld __cnfn convert_ulong16(long16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat(long16);\n" "ulong16 __ovld __cnfn convert_ulong16_rte(ulong16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rte(ulong16);\n" "ulong16 __ovld __cnfn 
convert_ulong16_rtz(ulong16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rtz(ulong16);\n" "ulong16 __ovld __cnfn convert_ulong16_rtp(ulong16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rtp(ulong16);\n" "ulong16 __ovld __cnfn convert_ulong16_rtn(ulong16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rtn(ulong16);\n" "ulong16 __ovld __cnfn convert_ulong16(ulong16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat(ulong16);\n" "ulong16 __ovld __cnfn convert_ulong16_rte(float16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rte(float16);\n" "ulong16 __ovld __cnfn convert_ulong16_rtz(float16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rtz(float16);\n" "ulong16 __ovld __cnfn convert_ulong16_rtp(float16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rtp(float16);\n" "ulong16 __ovld __cnfn convert_ulong16_rtn(float16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rtn(float16);\n" "ulong16 __ovld __cnfn convert_ulong16(float16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat(float16);\n" "float16 __ovld __cnfn convert_float16_rte(char16);\n" "float16 __ovld __cnfn convert_float16_rtz(char16);\n" "float16 __ovld __cnfn convert_float16_rtp(char16);\n" "float16 __ovld __cnfn convert_float16_rtn(char16);\n" "float16 __ovld __cnfn convert_float16(char16);\n" "float16 __ovld __cnfn convert_float16_rte(uchar16);\n" "float16 __ovld __cnfn convert_float16_rtz(uchar16);\n" "float16 __ovld __cnfn convert_float16_rtp(uchar16);\n" "float16 __ovld __cnfn convert_float16_rtn(uchar16);\n" "float16 __ovld __cnfn convert_float16(uchar16);\n" "float16 __ovld __cnfn convert_float16_rte(short16);\n" "float16 __ovld __cnfn convert_float16_rtz(short16);\n" "float16 __ovld __cnfn convert_float16_rtp(short16);\n" "float16 __ovld __cnfn convert_float16_rtn(short16);\n" "float16 __ovld __cnfn convert_float16(short16);\n" "float16 __ovld __cnfn convert_float16_rte(ushort16);\n" "float16 __ovld __cnfn convert_float16_rtz(ushort16);\n" "float16 __ovld __cnfn convert_float16_rtp(ushort16);\n" 
"float16 __ovld __cnfn convert_float16_rtn(ushort16);\n" "float16 __ovld __cnfn convert_float16(ushort16);\n" "float16 __ovld __cnfn convert_float16_rte(int16);\n" "float16 __ovld __cnfn convert_float16_rtz(int16);\n" "float16 __ovld __cnfn convert_float16_rtp(int16);\n" "float16 __ovld __cnfn convert_float16_rtn(int16);\n" "float16 __ovld __cnfn convert_float16(int16);\n" "float16 __ovld __cnfn convert_float16_rte(uint16);\n" "float16 __ovld __cnfn convert_float16_rtz(uint16);\n" "float16 __ovld __cnfn convert_float16_rtp(uint16);\n" "float16 __ovld __cnfn convert_float16_rtn(uint16);\n" "float16 __ovld __cnfn convert_float16(uint16);\n" "float16 __ovld __cnfn convert_float16_rte(long16);\n" "float16 __ovld __cnfn convert_float16_rtz(long16);\n" "float16 __ovld __cnfn convert_float16_rtp(long16);\n" "float16 __ovld __cnfn convert_float16_rtn(long16);\n" "float16 __ovld __cnfn convert_float16(long16);\n" "float16 __ovld __cnfn convert_float16_rte(ulong16);\n" "float16 __ovld __cnfn convert_float16_rtz(ulong16);\n" "float16 __ovld __cnfn convert_float16_rtp(ulong16);\n" "float16 __ovld __cnfn convert_float16_rtn(ulong16);\n" "float16 __ovld __cnfn convert_float16(ulong16);\n" "float16 __ovld __cnfn convert_float16_rte(float16);\n" "float16 __ovld __cnfn convert_float16_rtz(float16);\n" "float16 __ovld __cnfn convert_float16_rtp(float16);\n" "float16 __ovld __cnfn convert_float16_rtn(float16);\n" "float16 __ovld __cnfn convert_float16(float16);\n" "\n" "// Conversions with double data type parameters or return value.\n" "\n" "#ifdef cl_khr_fp64\n" "#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n" "char __ovld __cnfn convert_char(double);\n" "char __ovld __cnfn convert_char_rte(double);\n" "char __ovld __cnfn convert_char_rtn(double);\n" "char __ovld __cnfn convert_char_rtp(double);\n" "char __ovld __cnfn convert_char_rtz(double);\n" "char __ovld __cnfn convert_char_sat(double);\n" "char __ovld __cnfn convert_char_sat_rte(double);\n" "char __ovld __cnfn 
convert_char_sat_rtn(double);\n" "char __ovld __cnfn convert_char_sat_rtp(double);\n" "char __ovld __cnfn convert_char_sat_rtz(double);\n" "char2 __ovld __cnfn convert_char2(double2);\n" "char2 __ovld __cnfn convert_char2_rte(double2);\n" "char2 __ovld __cnfn convert_char2_rtn(double2);\n" "char2 __ovld __cnfn convert_char2_rtp(double2);\n" "char2 __ovld __cnfn convert_char2_rtz(double2);\n" "char2 __ovld __cnfn convert_char2_sat(double2);\n" "char2 __ovld __cnfn convert_char2_sat_rte(double2);\n" "char2 __ovld __cnfn convert_char2_sat_rtn(double2);\n" "char2 __ovld __cnfn convert_char2_sat_rtp(double2);\n" "char2 __ovld __cnfn convert_char2_sat_rtz(double2);\n" "char3 __ovld __cnfn convert_char3(double3);\n" "char3 __ovld __cnfn convert_char3_rte(double3);\n" "char3 __ovld __cnfn convert_char3_rtn(double3);\n" "char3 __ovld __cnfn convert_char3_rtp(double3);\n" "char3 __ovld __cnfn convert_char3_rtz(double3);\n" "char3 __ovld __cnfn convert_char3_sat(double3);\n" "char3 __ovld __cnfn convert_char3_sat_rte(double3);\n" "char3 __ovld __cnfn convert_char3_sat_rtn(double3);\n" "char3 __ovld __cnfn convert_char3_sat_rtp(double3);\n" "char3 __ovld __cnfn convert_char3_sat_rtz(double3);\n" "char4 __ovld __cnfn convert_char4(double4);\n" "char4 __ovld __cnfn convert_char4_rte(double4);\n" "char4 __ovld __cnfn convert_char4_rtn(double4);\n" "char4 __ovld __cnfn convert_char4_rtp(double4);\n" "char4 __ovld __cnfn convert_char4_rtz(double4);\n" "char4 __ovld __cnfn convert_char4_sat(double4);\n" "char4 __ovld __cnfn convert_char4_sat_rte(double4);\n" "char4 __ovld __cnfn convert_char4_sat_rtn(double4);\n" "char4 __ovld __cnfn convert_char4_sat_rtp(double4);\n" "char4 __ovld __cnfn convert_char4_sat_rtz(double4);\n" "char8 __ovld __cnfn convert_char8(double8);\n" "char8 __ovld __cnfn convert_char8_rte(double8);\n" "char8 __ovld __cnfn convert_char8_rtn(double8);\n" "char8 __ovld __cnfn convert_char8_rtp(double8);\n" "char8 __ovld __cnfn convert_char8_rtz(double8);\n" "char8 
__ovld __cnfn convert_char8_sat(double8);\n" "char8 __ovld __cnfn convert_char8_sat_rte(double8);\n" "char8 __ovld __cnfn convert_char8_sat_rtn(double8);\n" "char8 __ovld __cnfn convert_char8_sat_rtp(double8);\n" "char8 __ovld __cnfn convert_char8_sat_rtz(double8);\n" "char16 __ovld __cnfn convert_char16(double16);\n" "char16 __ovld __cnfn convert_char16_rte(double16);\n" "char16 __ovld __cnfn convert_char16_rtn(double16);\n" "char16 __ovld __cnfn convert_char16_rtp(double16);\n" "char16 __ovld __cnfn convert_char16_rtz(double16);\n" "char16 __ovld __cnfn convert_char16_sat(double16);\n" "char16 __ovld __cnfn convert_char16_sat_rte(double16);\n" "char16 __ovld __cnfn convert_char16_sat_rtn(double16);\n" "char16 __ovld __cnfn convert_char16_sat_rtp(double16);\n" "char16 __ovld __cnfn convert_char16_sat_rtz(double16);\n" "\n" "uchar __ovld __cnfn convert_uchar(double);\n" "uchar __ovld __cnfn convert_uchar_rte(double);\n" "uchar __ovld __cnfn convert_uchar_rtn(double);\n" "uchar __ovld __cnfn convert_uchar_rtp(double);\n" "uchar __ovld __cnfn convert_uchar_rtz(double);\n" "uchar __ovld __cnfn convert_uchar_sat(double);\n" "uchar __ovld __cnfn convert_uchar_sat_rte(double);\n" "uchar __ovld __cnfn convert_uchar_sat_rtn(double);\n" "uchar __ovld __cnfn convert_uchar_sat_rtp(double);\n" "uchar __ovld __cnfn convert_uchar_sat_rtz(double);\n" "uchar2 __ovld __cnfn convert_uchar2(double2);\n" "uchar2 __ovld __cnfn convert_uchar2_rte(double2);\n" "uchar2 __ovld __cnfn convert_uchar2_rtn(double2);\n" "uchar2 __ovld __cnfn convert_uchar2_rtp(double2);\n" "uchar2 __ovld __cnfn convert_uchar2_rtz(double2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat(double2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rte(double2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rtn(double2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rtp(double2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rtz(double2);\n" "uchar3 __ovld __cnfn convert_uchar3(double3);\n" "uchar3 __ovld __cnfn 
convert_uchar3_rte(double3);\n" "uchar3 __ovld __cnfn convert_uchar3_rtn(double3);\n" "uchar3 __ovld __cnfn convert_uchar3_rtp(double3);\n" "uchar3 __ovld __cnfn convert_uchar3_rtz(double3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat(double3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rte(double3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rtn(double3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rtp(double3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rtz(double3);\n" "uchar4 __ovld __cnfn convert_uchar4(double4);\n" "uchar4 __ovld __cnfn convert_uchar4_rte(double4);\n" "uchar4 __ovld __cnfn convert_uchar4_rtn(double4);\n" "uchar4 __ovld __cnfn convert_uchar4_rtp(double4);\n" "uchar4 __ovld __cnfn convert_uchar4_rtz(double4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat(double4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rte(double4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rtn(double4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rtp(double4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rtz(double4);\n" "uchar8 __ovld __cnfn convert_uchar8(double8);\n" "uchar8 __ovld __cnfn convert_uchar8_rte(double8);\n" "uchar8 __ovld __cnfn convert_uchar8_rtn(double8);\n" "uchar8 __ovld __cnfn convert_uchar8_rtp(double8);\n" "uchar8 __ovld __cnfn convert_uchar8_rtz(double8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat(double8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rte(double8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rtn(double8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rtp(double8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rtz(double8);\n" "uchar16 __ovld __cnfn convert_uchar16(double16);\n" "uchar16 __ovld __cnfn convert_uchar16_rte(double16);\n" "uchar16 __ovld __cnfn convert_uchar16_rtn(double16);\n" "uchar16 __ovld __cnfn convert_uchar16_rtp(double16);\n" "uchar16 __ovld __cnfn convert_uchar16_rtz(double16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat(double16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat_rte(double16);\n" "uchar16 __ovld 
__cnfn convert_uchar16_sat_rtn(double16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat_rtp(double16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat_rtz(double16);\n" "\n" "short __ovld __cnfn convert_short(double);\n" "short __ovld __cnfn convert_short_rte(double);\n" "short __ovld __cnfn convert_short_rtn(double);\n" "short __ovld __cnfn convert_short_rtp(double);\n" "short __ovld __cnfn convert_short_rtz(double);\n" "short __ovld __cnfn convert_short_sat(double);\n" "short __ovld __cnfn convert_short_sat_rte(double);\n" "short __ovld __cnfn convert_short_sat_rtn(double);\n" "short __ovld __cnfn convert_short_sat_rtp(double);\n" "short __ovld __cnfn convert_short_sat_rtz(double);\n" "short2 __ovld __cnfn convert_short2(double2);\n" "short2 __ovld __cnfn convert_short2_rte(double2);\n" "short2 __ovld __cnfn convert_short2_rtn(double2);\n" "short2 __ovld __cnfn convert_short2_rtp(double2);\n" "short2 __ovld __cnfn convert_short2_rtz(double2);\n" "short2 __ovld __cnfn convert_short2_sat(double2);\n" "short2 __ovld __cnfn convert_short2_sat_rte(double2);\n" "short2 __ovld __cnfn convert_short2_sat_rtn(double2);\n" "short2 __ovld __cnfn convert_short2_sat_rtp(double2);\n" "short2 __ovld __cnfn convert_short2_sat_rtz(double2);\n" "short3 __ovld __cnfn convert_short3(double3);\n" "short3 __ovld __cnfn convert_short3_rte(double3);\n" "short3 __ovld __cnfn convert_short3_rtn(double3);\n" "short3 __ovld __cnfn convert_short3_rtp(double3);\n" "short3 __ovld __cnfn convert_short3_rtz(double3);\n" "short3 __ovld __cnfn convert_short3_sat(double3);\n" "short3 __ovld __cnfn convert_short3_sat_rte(double3);\n" "short3 __ovld __cnfn convert_short3_sat_rtn(double3);\n" "short3 __ovld __cnfn convert_short3_sat_rtp(double3);\n" "short3 __ovld __cnfn convert_short3_sat_rtz(double3);\n" "short4 __ovld __cnfn convert_short4(double4);\n" "short4 __ovld __cnfn convert_short4_rte(double4);\n" "short4 __ovld __cnfn convert_short4_rtn(double4);\n" "short4 __ovld __cnfn 
convert_short4_rtp(double4);\n" "short4 __ovld __cnfn convert_short4_rtz(double4);\n" "short4 __ovld __cnfn convert_short4_sat(double4);\n" "short4 __ovld __cnfn convert_short4_sat_rte(double4);\n" "short4 __ovld __cnfn convert_short4_sat_rtn(double4);\n" "short4 __ovld __cnfn convert_short4_sat_rtp(double4);\n" "short4 __ovld __cnfn convert_short4_sat_rtz(double4);\n" "short8 __ovld __cnfn convert_short8(double8);\n" "short8 __ovld __cnfn convert_short8_rte(double8);\n" "short8 __ovld __cnfn convert_short8_rtn(double8);\n" "short8 __ovld __cnfn convert_short8_rtp(double8);\n" "short8 __ovld __cnfn convert_short8_rtz(double8);\n" "short8 __ovld __cnfn convert_short8_sat(double8);\n" "short8 __ovld __cnfn convert_short8_sat_rte(double8);\n" "short8 __ovld __cnfn convert_short8_sat_rtn(double8);\n" "short8 __ovld __cnfn convert_short8_sat_rtp(double8);\n" "short8 __ovld __cnfn convert_short8_sat_rtz(double8);\n" "short16 __ovld __cnfn convert_short16(double16);\n" "short16 __ovld __cnfn convert_short16_rte(double16);\n" "short16 __ovld __cnfn convert_short16_rtn(double16);\n" "short16 __ovld __cnfn convert_short16_rtp(double16);\n" "short16 __ovld __cnfn convert_short16_rtz(double16);\n" "short16 __ovld __cnfn convert_short16_sat(double16);\n" "short16 __ovld __cnfn convert_short16_sat_rte(double16);\n" "short16 __ovld __cnfn convert_short16_sat_rtn(double16);\n" "short16 __ovld __cnfn convert_short16_sat_rtp(double16);\n" "short16 __ovld __cnfn convert_short16_sat_rtz(double16);\n" "\n" "ushort __ovld __cnfn convert_ushort(double);\n" "ushort __ovld __cnfn convert_ushort_rte(double);\n" "ushort __ovld __cnfn convert_ushort_rtn(double);\n" "ushort __ovld __cnfn convert_ushort_rtp(double);\n" "ushort __ovld __cnfn convert_ushort_rtz(double);\n" "ushort __ovld __cnfn convert_ushort_sat(double);\n" "ushort __ovld __cnfn convert_ushort_sat_rte(double);\n" "ushort __ovld __cnfn convert_ushort_sat_rtn(double);\n" "ushort __ovld __cnfn convert_ushort_sat_rtp(double);\n" 
"ushort __ovld __cnfn convert_ushort_sat_rtz(double);\n" "ushort2 __ovld __cnfn convert_ushort2(double2);\n" "ushort2 __ovld __cnfn convert_ushort2_rte(double2);\n" "ushort2 __ovld __cnfn convert_ushort2_rtn(double2);\n" "ushort2 __ovld __cnfn convert_ushort2_rtp(double2);\n" "ushort2 __ovld __cnfn convert_ushort2_rtz(double2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat(double2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat_rte(double2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat_rtn(double2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat_rtp(double2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat_rtz(double2);\n" "ushort3 __ovld __cnfn convert_ushort3(double3);\n" "ushort3 __ovld __cnfn convert_ushort3_rte(double3);\n" "ushort3 __ovld __cnfn convert_ushort3_rtn(double3);\n" "ushort3 __ovld __cnfn convert_ushort3_rtp(double3);\n" "ushort3 __ovld __cnfn convert_ushort3_rtz(double3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat(double3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rte(double3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rtn(double3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rtp(double3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rtz(double3);\n" "ushort4 __ovld __cnfn convert_ushort4(double4);\n" "ushort4 __ovld __cnfn convert_ushort4_rte(double4);\n" "ushort4 __ovld __cnfn convert_ushort4_rtn(double4);\n" "ushort4 __ovld __cnfn convert_ushort4_rtp(double4);\n" "ushort4 __ovld __cnfn convert_ushort4_rtz(double4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat(double4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rte(double4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rtn(double4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rtp(double4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rtz(double4);\n" "ushort8 __ovld __cnfn convert_ushort8(double8);\n" "ushort8 __ovld __cnfn convert_ushort8_rte(double8);\n" "ushort8 __ovld __cnfn convert_ushort8_rtn(double8);\n" "ushort8 __ovld __cnfn convert_ushort8_rtp(double8);\n" 
"ushort8 __ovld __cnfn convert_ushort8_rtz(double8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat(double8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rte(double8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rtn(double8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rtp(double8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rtz(double8);\n" "ushort16 __ovld __cnfn convert_ushort16(double16);\n" "ushort16 __ovld __cnfn convert_ushort16_rte(double16);\n" "ushort16 __ovld __cnfn convert_ushort16_rtn(double16);\n" "ushort16 __ovld __cnfn convert_ushort16_rtp(double16);\n" "ushort16 __ovld __cnfn convert_ushort16_rtz(double16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat(double16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rte(double16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rtn(double16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rtp(double16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rtz(double16);\n" "\n" "int __ovld __cnfn convert_int(double);\n" "int __ovld __cnfn convert_int_rte(double);\n" "int __ovld __cnfn convert_int_rtn(double);\n" "int __ovld __cnfn convert_int_rtp(double);\n" "int __ovld __cnfn convert_int_rtz(double);\n" "int __ovld __cnfn convert_int_sat(double);\n" "int __ovld __cnfn convert_int_sat_rte(double);\n" "int __ovld __cnfn convert_int_sat_rtn(double);\n" "int __ovld __cnfn convert_int_sat_rtp(double);\n" "int __ovld __cnfn convert_int_sat_rtz(double);\n" "int2 __ovld __cnfn convert_int2(double2);\n" "int2 __ovld __cnfn convert_int2_rte(double2);\n" "int2 __ovld __cnfn convert_int2_rtn(double2);\n" "int2 __ovld __cnfn convert_int2_rtp(double2);\n" "int2 __ovld __cnfn convert_int2_rtz(double2);\n" "int2 __ovld __cnfn convert_int2_sat(double2);\n" "int2 __ovld __cnfn convert_int2_sat_rte(double2);\n" "int2 __ovld __cnfn convert_int2_sat_rtn(double2);\n" "int2 __ovld __cnfn convert_int2_sat_rtp(double2);\n" "int2 __ovld __cnfn convert_int2_sat_rtz(double2);\n" "int3 __ovld __cnfn convert_int3(double3);\n" 
"int3 __ovld __cnfn convert_int3_rte(double3);\n" "int3 __ovld __cnfn convert_int3_rtn(double3);\n" "int3 __ovld __cnfn convert_int3_rtp(double3);\n" "int3 __ovld __cnfn convert_int3_rtz(double3);\n" "int3 __ovld __cnfn convert_int3_sat(double3);\n" "int3 __ovld __cnfn convert_int3_sat_rte(double3);\n" "int3 __ovld __cnfn convert_int3_sat_rtn(double3);\n" "int3 __ovld __cnfn convert_int3_sat_rtp(double3);\n" "int3 __ovld __cnfn convert_int3_sat_rtz(double3);\n" "int4 __ovld __cnfn convert_int4(double4);\n" "int4 __ovld __cnfn convert_int4_rte(double4);\n" "int4 __ovld __cnfn convert_int4_rtn(double4);\n" "int4 __ovld __cnfn convert_int4_rtp(double4);\n" "int4 __ovld __cnfn convert_int4_rtz(double4);\n" "int4 __ovld __cnfn convert_int4_sat(double4);\n" "int4 __ovld __cnfn convert_int4_sat_rte(double4);\n" "int4 __ovld __cnfn convert_int4_sat_rtn(double4);\n" "int4 __ovld __cnfn convert_int4_sat_rtp(double4);\n" "int4 __ovld __cnfn convert_int4_sat_rtz(double4);\n" "int8 __ovld __cnfn convert_int8(double8);\n" "int8 __ovld __cnfn convert_int8_rte(double8);\n" "int8 __ovld __cnfn convert_int8_rtn(double8);\n" "int8 __ovld __cnfn convert_int8_rtp(double8);\n" "int8 __ovld __cnfn convert_int8_rtz(double8);\n" "int8 __ovld __cnfn convert_int8_sat(double8);\n" "int8 __ovld __cnfn convert_int8_sat_rte(double8);\n" "int8 __ovld __cnfn convert_int8_sat_rtn(double8);\n" "int8 __ovld __cnfn convert_int8_sat_rtp(double8);\n" "int8 __ovld __cnfn convert_int8_sat_rtz(double8);\n" "int16 __ovld __cnfn convert_int16(double16);\n" "int16 __ovld __cnfn convert_int16_rte(double16);\n" "int16 __ovld __cnfn convert_int16_rtn(double16);\n" "int16 __ovld __cnfn convert_int16_rtp(double16);\n" "int16 __ovld __cnfn convert_int16_rtz(double16);\n" "int16 __ovld __cnfn convert_int16_sat(double16);\n" "int16 __ovld __cnfn convert_int16_sat_rte(double16);\n" "int16 __ovld __cnfn convert_int16_sat_rtn(double16);\n" "int16 __ovld __cnfn convert_int16_sat_rtp(double16);\n" "int16 __ovld __cnfn 
convert_int16_sat_rtz(double16);\n" "\n" "uint __ovld __cnfn convert_uint(double);\n" "uint __ovld __cnfn convert_uint_rte(double);\n" "uint __ovld __cnfn convert_uint_rtn(double);\n" "uint __ovld __cnfn convert_uint_rtp(double);\n" "uint __ovld __cnfn convert_uint_rtz(double);\n" "uint __ovld __cnfn convert_uint_sat(double);\n" "uint __ovld __cnfn convert_uint_sat_rte(double);\n" "uint __ovld __cnfn convert_uint_sat_rtn(double);\n" "uint __ovld __cnfn convert_uint_sat_rtp(double);\n" "uint __ovld __cnfn convert_uint_sat_rtz(double);\n" "uint2 __ovld __cnfn convert_uint2(double2);\n" "uint2 __ovld __cnfn convert_uint2_rte(double2);\n" "uint2 __ovld __cnfn convert_uint2_rtn(double2);\n" "uint2 __ovld __cnfn convert_uint2_rtp(double2);\n" "uint2 __ovld __cnfn convert_uint2_rtz(double2);\n" "uint2 __ovld __cnfn convert_uint2_sat(double2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rte(double2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rtn(double2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rtp(double2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rtz(double2);\n" "uint3 __ovld __cnfn convert_uint3(double3);\n" "uint3 __ovld __cnfn convert_uint3_rte(double3);\n" "uint3 __ovld __cnfn convert_uint3_rtn(double3);\n" "uint3 __ovld __cnfn convert_uint3_rtp(double3);\n" "uint3 __ovld __cnfn convert_uint3_rtz(double3);\n" "uint3 __ovld __cnfn convert_uint3_sat(double3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rte(double3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rtn(double3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rtp(double3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rtz(double3);\n" "uint4 __ovld __cnfn convert_uint4(double4);\n" "uint4 __ovld __cnfn convert_uint4_rte(double4);\n" "uint4 __ovld __cnfn convert_uint4_rtn(double4);\n" "uint4 __ovld __cnfn convert_uint4_rtp(double4);\n" "uint4 __ovld __cnfn convert_uint4_rtz(double4);\n" "uint4 __ovld __cnfn convert_uint4_sat(double4);\n" "uint4 __ovld __cnfn convert_uint4_sat_rte(double4);\n" "uint4 __ovld __cnfn 
convert_uint4_sat_rtn(double4);\n" "uint4 __ovld __cnfn convert_uint4_sat_rtp(double4);\n" "uint4 __ovld __cnfn convert_uint4_sat_rtz(double4);\n" "uint8 __ovld __cnfn convert_uint8(double8);\n" "uint8 __ovld __cnfn convert_uint8_rte(double8);\n" "uint8 __ovld __cnfn convert_uint8_rtn(double8);\n" "uint8 __ovld __cnfn convert_uint8_rtp(double8);\n" "uint8 __ovld __cnfn convert_uint8_rtz(double8);\n" "uint8 __ovld __cnfn convert_uint8_sat(double8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rte(double8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rtn(double8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rtp(double8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rtz(double8);\n" "uint16 __ovld __cnfn convert_uint16(double16);\n" "uint16 __ovld __cnfn convert_uint16_rte(double16);\n" "uint16 __ovld __cnfn convert_uint16_rtn(double16);\n" "uint16 __ovld __cnfn convert_uint16_rtp(double16);\n" "uint16 __ovld __cnfn convert_uint16_rtz(double16);\n" "uint16 __ovld __cnfn convert_uint16_sat(double16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rte(double16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rtn(double16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rtp(double16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rtz(double16);\n" "\n" "long __ovld __cnfn convert_long(double);\n" "long __ovld __cnfn convert_long_rte(double);\n" "long __ovld __cnfn convert_long_rtn(double);\n" "long __ovld __cnfn convert_long_rtp(double);\n" "long __ovld __cnfn convert_long_rtz(double);\n" "long __ovld __cnfn convert_long_sat(double);\n" "long __ovld __cnfn convert_long_sat_rte(double);\n" "long __ovld __cnfn convert_long_sat_rtn(double);\n" "long __ovld __cnfn convert_long_sat_rtp(double);\n" "long __ovld __cnfn convert_long_sat_rtz(double);\n" "long2 __ovld __cnfn convert_long2(double2);\n" "long2 __ovld __cnfn convert_long2_rte(double2);\n" "long2 __ovld __cnfn convert_long2_rtn(double2);\n" "long2 __ovld __cnfn convert_long2_rtp(double2);\n" "long2 __ovld __cnfn 
convert_long2_rtz(double2);\n" "long2 __ovld __cnfn convert_long2_sat(double2);\n" "long2 __ovld __cnfn convert_long2_sat_rte(double2);\n" "long2 __ovld __cnfn convert_long2_sat_rtn(double2);\n" "long2 __ovld __cnfn convert_long2_sat_rtp(double2);\n" "long2 __ovld __cnfn convert_long2_sat_rtz(double2);\n" "long3 __ovld __cnfn convert_long3(double3);\n" "long3 __ovld __cnfn convert_long3_rte(double3);\n" "long3 __ovld __cnfn convert_long3_rtn(double3);\n" "long3 __ovld __cnfn convert_long3_rtp(double3);\n" "long3 __ovld __cnfn convert_long3_rtz(double3);\n" "long3 __ovld __cnfn convert_long3_sat(double3);\n" "long3 __ovld __cnfn convert_long3_sat_rte(double3);\n" "long3 __ovld __cnfn convert_long3_sat_rtn(double3);\n" "long3 __ovld __cnfn convert_long3_sat_rtp(double3);\n" "long3 __ovld __cnfn convert_long3_sat_rtz(double3);\n" "long4 __ovld __cnfn convert_long4(double4);\n" "long4 __ovld __cnfn convert_long4_rte(double4);\n" "long4 __ovld __cnfn convert_long4_rtn(double4);\n" "long4 __ovld __cnfn convert_long4_rtp(double4);\n" "long4 __ovld __cnfn convert_long4_rtz(double4);\n" "long4 __ovld __cnfn convert_long4_sat(double4);\n" "long4 __ovld __cnfn convert_long4_sat_rte(double4);\n" "long4 __ovld __cnfn convert_long4_sat_rtn(double4);\n" "long4 __ovld __cnfn convert_long4_sat_rtp(double4);\n" "long4 __ovld __cnfn convert_long4_sat_rtz(double4);\n" "long8 __ovld __cnfn convert_long8(double8);\n" "long8 __ovld __cnfn convert_long8_rte(double8);\n" "long8 __ovld __cnfn convert_long8_rtn(double8);\n" "long8 __ovld __cnfn convert_long8_rtp(double8);\n" "long8 __ovld __cnfn convert_long8_rtz(double8);\n" "long8 __ovld __cnfn convert_long8_sat(double8);\n" "long8 __ovld __cnfn convert_long8_sat_rte(double8);\n" "long8 __ovld __cnfn convert_long8_sat_rtn(double8);\n" "long8 __ovld __cnfn convert_long8_sat_rtp(double8);\n" "long8 __ovld __cnfn convert_long8_sat_rtz(double8);\n" "long16 __ovld __cnfn convert_long16(double16);\n" "long16 __ovld __cnfn 
convert_long16_rte(double16);\n" "long16 __ovld __cnfn convert_long16_rtn(double16);\n" "long16 __ovld __cnfn convert_long16_rtp(double16);\n" "long16 __ovld __cnfn convert_long16_rtz(double16);\n" "long16 __ovld __cnfn convert_long16_sat(double16);\n" "long16 __ovld __cnfn convert_long16_sat_rte(double16);\n" "long16 __ovld __cnfn convert_long16_sat_rtn(double16);\n" "long16 __ovld __cnfn convert_long16_sat_rtp(double16);\n" "long16 __ovld __cnfn convert_long16_sat_rtz(double16);\n" "\n" "ulong __ovld __cnfn convert_ulong(double);\n" "ulong __ovld __cnfn convert_ulong_rte(double);\n" "ulong __ovld __cnfn convert_ulong_rtn(double);\n" "ulong __ovld __cnfn convert_ulong_rtp(double);\n" "ulong __ovld __cnfn convert_ulong_rtz(double);\n" "ulong __ovld __cnfn convert_ulong_sat(double);\n" "ulong __ovld __cnfn convert_ulong_sat_rte(double);\n" "ulong __ovld __cnfn convert_ulong_sat_rtn(double);\n" "ulong __ovld __cnfn convert_ulong_sat_rtp(double);\n" "ulong __ovld __cnfn convert_ulong_sat_rtz(double);\n" "ulong2 __ovld __cnfn convert_ulong2(double2);\n" "ulong2 __ovld __cnfn convert_ulong2_rte(double2);\n" "ulong2 __ovld __cnfn convert_ulong2_rtn(double2);\n" "ulong2 __ovld __cnfn convert_ulong2_rtp(double2);\n" "ulong2 __ovld __cnfn convert_ulong2_rtz(double2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat(double2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rte(double2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rtn(double2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rtp(double2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rtz(double2);\n" "ulong3 __ovld __cnfn convert_ulong3(double3);\n" "ulong3 __ovld __cnfn convert_ulong3_rte(double3);\n" "ulong3 __ovld __cnfn convert_ulong3_rtn(double3);\n" "ulong3 __ovld __cnfn convert_ulong3_rtp(double3);\n" "ulong3 __ovld __cnfn convert_ulong3_rtz(double3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat(double3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rte(double3);\n" "ulong3 __ovld __cnfn 
convert_ulong3_sat_rtn(double3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rtp(double3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rtz(double3);\n" "ulong4 __ovld __cnfn convert_ulong4(double4);\n" "ulong4 __ovld __cnfn convert_ulong4_rte(double4);\n" "ulong4 __ovld __cnfn convert_ulong4_rtn(double4);\n" "ulong4 __ovld __cnfn convert_ulong4_rtp(double4);\n" "ulong4 __ovld __cnfn convert_ulong4_rtz(double4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat(double4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rte(double4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rtn(double4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rtp(double4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rtz(double4);\n" "ulong8 __ovld __cnfn convert_ulong8(double8);\n" "ulong8 __ovld __cnfn convert_ulong8_rte(double8);\n" "ulong8 __ovld __cnfn convert_ulong8_rtn(double8);\n" "ulong8 __ovld __cnfn convert_ulong8_rtp(double8);\n" "ulong8 __ovld __cnfn convert_ulong8_rtz(double8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat(double8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rte(double8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rtn(double8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rtp(double8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rtz(double8);\n" "ulong16 __ovld __cnfn convert_ulong16(double16);\n" "ulong16 __ovld __cnfn convert_ulong16_rte(double16);\n" "ulong16 __ovld __cnfn convert_ulong16_rtn(double16);\n" "ulong16 __ovld __cnfn convert_ulong16_rtp(double16);\n" "ulong16 __ovld __cnfn convert_ulong16_rtz(double16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat(double16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rte(double16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rtn(double16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rtp(double16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rtz(double16);\n" "\n" "float __ovld __cnfn convert_float(double);\n" "float __ovld __cnfn convert_float_rte(double);\n" "float __ovld __cnfn convert_float_rtn(double);\n" "float 
__ovld __cnfn convert_float_rtp(double);\n" "float __ovld __cnfn convert_float_rtz(double);\n" "float2 __ovld __cnfn convert_float2(double2);\n" "float2 __ovld __cnfn convert_float2_rte(double2);\n" "float2 __ovld __cnfn convert_float2_rtn(double2);\n" "float2 __ovld __cnfn convert_float2_rtp(double2);\n" "float2 __ovld __cnfn convert_float2_rtz(double2);\n" "float3 __ovld __cnfn convert_float3(double3);\n" "float3 __ovld __cnfn convert_float3_rte(double3);\n" "float3 __ovld __cnfn convert_float3_rtn(double3);\n" "float3 __ovld __cnfn convert_float3_rtp(double3);\n" "float3 __ovld __cnfn convert_float3_rtz(double3);\n" "float4 __ovld __cnfn convert_float4(double4);\n" "float4 __ovld __cnfn convert_float4_rte(double4);\n" "float4 __ovld __cnfn convert_float4_rtn(double4);\n" "float4 __ovld __cnfn convert_float4_rtp(double4);\n" "float4 __ovld __cnfn convert_float4_rtz(double4);\n" "float8 __ovld __cnfn convert_float8(double8);\n" "float8 __ovld __cnfn convert_float8_rte(double8);\n" "float8 __ovld __cnfn convert_float8_rtn(double8);\n" "float8 __ovld __cnfn convert_float8_rtp(double8);\n" "float8 __ovld __cnfn convert_float8_rtz(double8);\n" "float16 __ovld __cnfn convert_float16(double16);\n" "float16 __ovld __cnfn convert_float16_rte(double16);\n" "float16 __ovld __cnfn convert_float16_rtn(double16);\n" "float16 __ovld __cnfn convert_float16_rtp(double16);\n" "float16 __ovld __cnfn convert_float16_rtz(double16);\n" "\n" "double __ovld __cnfn convert_double(char);\n" "double __ovld __cnfn convert_double(double);\n" "double __ovld __cnfn convert_double(float);\n" "double __ovld __cnfn convert_double(int);\n" "double __ovld __cnfn convert_double(long);\n" "double __ovld __cnfn convert_double(short);\n" "double __ovld __cnfn convert_double(uchar);\n" "double __ovld __cnfn convert_double(uint);\n" "double __ovld __cnfn convert_double(ulong);\n" "double __ovld __cnfn convert_double(ushort);\n" "double __ovld __cnfn convert_double_rte(char);\n" "double __ovld __cnfn 
convert_double_rte(double);\n" "double __ovld __cnfn convert_double_rte(float);\n" "double __ovld __cnfn convert_double_rte(int);\n" "double __ovld __cnfn convert_double_rte(long);\n" "double __ovld __cnfn convert_double_rte(short);\n" "double __ovld __cnfn convert_double_rte(uchar);\n" "double __ovld __cnfn convert_double_rte(uint);\n" "double __ovld __cnfn convert_double_rte(ulong);\n" "double __ovld __cnfn convert_double_rte(ushort);\n" "double __ovld __cnfn convert_double_rtn(char);\n" "double __ovld __cnfn convert_double_rtn(double);\n" "double __ovld __cnfn convert_double_rtn(float);\n" "double __ovld __cnfn convert_double_rtn(int);\n" "double __ovld __cnfn convert_double_rtn(long);\n" "double __ovld __cnfn convert_double_rtn(short);\n" "double __ovld __cnfn convert_double_rtn(uchar);\n" "double __ovld __cnfn convert_double_rtn(uint);\n" "double __ovld __cnfn convert_double_rtn(ulong);\n" "double __ovld __cnfn convert_double_rtn(ushort);\n" "double __ovld __cnfn convert_double_rtp(char);\n" "double __ovld __cnfn convert_double_rtp(double);\n" "double __ovld __cnfn convert_double_rtp(float);\n" "double __ovld __cnfn convert_double_rtp(int);\n" "double __ovld __cnfn convert_double_rtp(long);\n" "double __ovld __cnfn convert_double_rtp(short);\n" "double __ovld __cnfn convert_double_rtp(uchar);\n" "double __ovld __cnfn convert_double_rtp(uint);\n" "double __ovld __cnfn convert_double_rtp(ulong);\n" "double __ovld __cnfn convert_double_rtp(ushort);\n" "double __ovld __cnfn convert_double_rtz(char);\n" "double __ovld __cnfn convert_double_rtz(double);\n" "double __ovld __cnfn convert_double_rtz(float);\n" "double __ovld __cnfn convert_double_rtz(int);\n" "double __ovld __cnfn convert_double_rtz(long);\n" "double __ovld __cnfn convert_double_rtz(short);\n" "double __ovld __cnfn convert_double_rtz(uchar);\n" "double __ovld __cnfn convert_double_rtz(uint);\n" "double __ovld __cnfn convert_double_rtz(ulong);\n" "double __ovld __cnfn convert_double_rtz(ushort);\n" 
"double2 __ovld __cnfn convert_double2(char2);\n" "double2 __ovld __cnfn convert_double2(double2);\n" "double2 __ovld __cnfn convert_double2(float2);\n" "double2 __ovld __cnfn convert_double2(int2);\n" "double2 __ovld __cnfn convert_double2(long2);\n" "double2 __ovld __cnfn convert_double2(short2);\n" "double2 __ovld __cnfn convert_double2(uchar2);\n" "double2 __ovld __cnfn convert_double2(uint2);\n" "double2 __ovld __cnfn convert_double2(ulong2);\n" "double2 __ovld __cnfn convert_double2(ushort2);\n" "double2 __ovld __cnfn convert_double2_rte(char2);\n" "double2 __ovld __cnfn convert_double2_rte(double2);\n" "double2 __ovld __cnfn convert_double2_rte(float2);\n" "double2 __ovld __cnfn convert_double2_rte(int2);\n" "double2 __ovld __cnfn convert_double2_rte(long2);\n" "double2 __ovld __cnfn convert_double2_rte(short2);\n" "double2 __ovld __cnfn convert_double2_rte(uchar2);\n" "double2 __ovld __cnfn convert_double2_rte(uint2);\n" "double2 __ovld __cnfn convert_double2_rte(ulong2);\n" "double2 __ovld __cnfn convert_double2_rte(ushort2);\n" "double2 __ovld __cnfn convert_double2_rtn(char2);\n" "double2 __ovld __cnfn convert_double2_rtn(double2);\n" "double2 __ovld __cnfn convert_double2_rtn(float2);\n" "double2 __ovld __cnfn convert_double2_rtn(int2);\n" "double2 __ovld __cnfn convert_double2_rtn(long2);\n" "double2 __ovld __cnfn convert_double2_rtn(short2);\n" "double2 __ovld __cnfn convert_double2_rtn(uchar2);\n" "double2 __ovld __cnfn convert_double2_rtn(uint2);\n" "double2 __ovld __cnfn convert_double2_rtn(ulong2);\n" "double2 __ovld __cnfn convert_double2_rtn(ushort2);\n" "double2 __ovld __cnfn convert_double2_rtp(char2);\n" "double2 __ovld __cnfn convert_double2_rtp(double2);\n" "double2 __ovld __cnfn convert_double2_rtp(float2);\n" "double2 __ovld __cnfn convert_double2_rtp(int2);\n" "double2 __ovld __cnfn convert_double2_rtp(long2);\n" "double2 __ovld __cnfn convert_double2_rtp(short2);\n" "double2 __ovld __cnfn convert_double2_rtp(uchar2);\n" "double2 __ovld 
__cnfn convert_double2_rtp(uint2);\n" "double2 __ovld __cnfn convert_double2_rtp(ulong2);\n" "double2 __ovld __cnfn convert_double2_rtp(ushort2);\n" "double2 __ovld __cnfn convert_double2_rtz(char2);\n" "double2 __ovld __cnfn convert_double2_rtz(double2);\n" "double2 __ovld __cnfn convert_double2_rtz(float2);\n" "double2 __ovld __cnfn convert_double2_rtz(int2);\n" "double2 __ovld __cnfn convert_double2_rtz(long2);\n" "double2 __ovld __cnfn convert_double2_rtz(short2);\n" "double2 __ovld __cnfn convert_double2_rtz(uchar2);\n" "double2 __ovld __cnfn convert_double2_rtz(uint2);\n" "double2 __ovld __cnfn convert_double2_rtz(ulong2);\n" "double2 __ovld __cnfn convert_double2_rtz(ushort2);\n" "double3 __ovld __cnfn convert_double3(char3);\n" "double3 __ovld __cnfn convert_double3(double3);\n" "double3 __ovld __cnfn convert_double3(float3);\n" "double3 __ovld __cnfn convert_double3(int3);\n" "double3 __ovld __cnfn convert_double3(long3);\n" "double3 __ovld __cnfn convert_double3(short3);\n" "double3 __ovld __cnfn convert_double3(uchar3);\n" "double3 __ovld __cnfn convert_double3(uint3);\n" "double3 __ovld __cnfn convert_double3(ulong3);\n" "double3 __ovld __cnfn convert_double3(ushort3);\n" "double3 __ovld __cnfn convert_double3_rte(char3);\n" "double3 __ovld __cnfn convert_double3_rte(double3);\n" "double3 __ovld __cnfn convert_double3_rte(float3);\n" "double3 __ovld __cnfn convert_double3_rte(int3);\n" "double3 __ovld __cnfn convert_double3_rte(long3);\n" "double3 __ovld __cnfn convert_double3_rte(short3);\n" "double3 __ovld __cnfn convert_double3_rte(uchar3);\n" "double3 __ovld __cnfn convert_double3_rte(uint3);\n" "double3 __ovld __cnfn convert_double3_rte(ulong3);\n" "double3 __ovld __cnfn convert_double3_rte(ushort3);\n" "double3 __ovld __cnfn convert_double3_rtn(char3);\n" "double3 __ovld __cnfn convert_double3_rtn(double3);\n" "double3 __ovld __cnfn convert_double3_rtn(float3);\n" "double3 __ovld __cnfn convert_double3_rtn(int3);\n" "double3 __ovld __cnfn 
convert_double3_rtn(long3);\n" "double3 __ovld __cnfn convert_double3_rtn(short3);\n" "double3 __ovld __cnfn convert_double3_rtn(uchar3);\n" "double3 __ovld __cnfn convert_double3_rtn(uint3);\n" "double3 __ovld __cnfn convert_double3_rtn(ulong3);\n" "double3 __ovld __cnfn convert_double3_rtn(ushort3);\n" "double3 __ovld __cnfn convert_double3_rtp(char3);\n" "double3 __ovld __cnfn convert_double3_rtp(double3);\n" "double3 __ovld __cnfn convert_double3_rtp(float3);\n" "double3 __ovld __cnfn convert_double3_rtp(int3);\n" "double3 __ovld __cnfn convert_double3_rtp(long3);\n" "double3 __ovld __cnfn convert_double3_rtp(short3);\n" "double3 __ovld __cnfn convert_double3_rtp(uchar3);\n" "double3 __ovld __cnfn convert_double3_rtp(uint3);\n" "double3 __ovld __cnfn convert_double3_rtp(ulong3);\n" "double3 __ovld __cnfn convert_double3_rtp(ushort3);\n" "double3 __ovld __cnfn convert_double3_rtz(char3);\n" "double3 __ovld __cnfn convert_double3_rtz(double3);\n" "double3 __ovld __cnfn convert_double3_rtz(float3);\n" "double3 __ovld __cnfn convert_double3_rtz(int3);\n" "double3 __ovld __cnfn convert_double3_rtz(long3);\n" "double3 __ovld __cnfn convert_double3_rtz(short3);\n" "double3 __ovld __cnfn convert_double3_rtz(uchar3);\n" "double3 __ovld __cnfn convert_double3_rtz(uint3);\n" "double3 __ovld __cnfn convert_double3_rtz(ulong3);\n" "double3 __ovld __cnfn convert_double3_rtz(ushort3);\n" "double4 __ovld __cnfn convert_double4(char4);\n" "double4 __ovld __cnfn convert_double4(double4);\n" "double4 __ovld __cnfn convert_double4(float4);\n" "double4 __ovld __cnfn convert_double4(int4);\n" "double4 __ovld __cnfn convert_double4(long4);\n" "double4 __ovld __cnfn convert_double4(short4);\n" "double4 __ovld __cnfn convert_double4(uchar4);\n" "double4 __ovld __cnfn convert_double4(uint4);\n" "double4 __ovld __cnfn convert_double4(ulong4);\n" "double4 __ovld __cnfn convert_double4(ushort4);\n" "double4 __ovld __cnfn convert_double4_rte(char4);\n" "double4 __ovld __cnfn 
convert_double4_rte(double4);\n" "double4 __ovld __cnfn convert_double4_rte(float4);\n" "double4 __ovld __cnfn convert_double4_rte(int4);\n" "double4 __ovld __cnfn convert_double4_rte(long4);\n" "double4 __ovld __cnfn convert_double4_rte(short4);\n" "double4 __ovld __cnfn convert_double4_rte(uchar4);\n" "double4 __ovld __cnfn convert_double4_rte(uint4);\n" "double4 __ovld __cnfn convert_double4_rte(ulong4);\n" "double4 __ovld __cnfn convert_double4_rte(ushort4);\n" "double4 __ovld __cnfn convert_double4_rtn(char4);\n" "double4 __ovld __cnfn convert_double4_rtn(double4);\n" "double4 __ovld __cnfn convert_double4_rtn(float4);\n" "double4 __ovld __cnfn convert_double4_rtn(int4);\n" "double4 __ovld __cnfn convert_double4_rtn(long4);\n" "double4 __ovld __cnfn convert_double4_rtn(short4);\n" "double4 __ovld __cnfn convert_double4_rtn(uchar4);\n" "double4 __ovld __cnfn convert_double4_rtn(uint4);\n" "double4 __ovld __cnfn convert_double4_rtn(ulong4);\n" "double4 __ovld __cnfn convert_double4_rtn(ushort4);\n" "double4 __ovld __cnfn convert_double4_rtp(char4);\n" "double4 __ovld __cnfn convert_double4_rtp(double4);\n" "double4 __ovld __cnfn convert_double4_rtp(float4);\n" "double4 __ovld __cnfn convert_double4_rtp(int4);\n" "double4 __ovld __cnfn convert_double4_rtp(long4);\n" "double4 __ovld __cnfn convert_double4_rtp(short4);\n" "double4 __ovld __cnfn convert_double4_rtp(uchar4);\n" "double4 __ovld __cnfn convert_double4_rtp(uint4);\n" "double4 __ovld __cnfn convert_double4_rtp(ulong4);\n" "double4 __ovld __cnfn convert_double4_rtp(ushort4);\n" "double4 __ovld __cnfn convert_double4_rtz(char4);\n" "double4 __ovld __cnfn convert_double4_rtz(double4);\n" "double4 __ovld __cnfn convert_double4_rtz(float4);\n" "double4 __ovld __cnfn convert_double4_rtz(int4);\n" "double4 __ovld __cnfn convert_double4_rtz(long4);\n" "double4 __ovld __cnfn convert_double4_rtz(short4);\n" "double4 __ovld __cnfn convert_double4_rtz(uchar4);\n" "double4 __ovld __cnfn convert_double4_rtz(uint4);\n" 
"double4 __ovld __cnfn convert_double4_rtz(ulong4);\n" "double4 __ovld __cnfn convert_double4_rtz(ushort4);\n" "double8 __ovld __cnfn convert_double8(char8);\n" "double8 __ovld __cnfn convert_double8(double8);\n" "double8 __ovld __cnfn convert_double8(float8);\n" "double8 __ovld __cnfn convert_double8(int8);\n" "double8 __ovld __cnfn convert_double8(long8);\n" "double8 __ovld __cnfn convert_double8(short8);\n" "double8 __ovld __cnfn convert_double8(uchar8);\n" "double8 __ovld __cnfn convert_double8(uint8);\n" "double8 __ovld __cnfn convert_double8(ulong8);\n" "double8 __ovld __cnfn convert_double8(ushort8);\n" "double8 __ovld __cnfn convert_double8_rte(char8);\n" "double8 __ovld __cnfn convert_double8_rte(double8);\n" "double8 __ovld __cnfn convert_double8_rte(float8);\n" "double8 __ovld __cnfn convert_double8_rte(int8);\n" "double8 __ovld __cnfn convert_double8_rte(long8);\n" "double8 __ovld __cnfn convert_double8_rte(short8);\n" "double8 __ovld __cnfn convert_double8_rte(uchar8);\n" "double8 __ovld __cnfn convert_double8_rte(uint8);\n" "double8 __ovld __cnfn convert_double8_rte(ulong8);\n" "double8 __ovld __cnfn convert_double8_rte(ushort8);\n" "double8 __ovld __cnfn convert_double8_rtn(char8);\n" "double8 __ovld __cnfn convert_double8_rtn(double8);\n" "double8 __ovld __cnfn convert_double8_rtn(float8);\n" "double8 __ovld __cnfn convert_double8_rtn(int8);\n" "double8 __ovld __cnfn convert_double8_rtn(long8);\n" "double8 __ovld __cnfn convert_double8_rtn(short8);\n" "double8 __ovld __cnfn convert_double8_rtn(uchar8);\n" "double8 __ovld __cnfn convert_double8_rtn(uint8);\n" "double8 __ovld __cnfn convert_double8_rtn(ulong8);\n" "double8 __ovld __cnfn convert_double8_rtn(ushort8);\n" "double8 __ovld __cnfn convert_double8_rtp(char8);\n" "double8 __ovld __cnfn convert_double8_rtp(double8);\n" "double8 __ovld __cnfn convert_double8_rtp(float8);\n" "double8 __ovld __cnfn convert_double8_rtp(int8);\n" "double8 __ovld __cnfn convert_double8_rtp(long8);\n" "double8 __ovld 
__cnfn convert_double8_rtp(short8);\n" "double8 __ovld __cnfn convert_double8_rtp(uchar8);\n" "double8 __ovld __cnfn convert_double8_rtp(uint8);\n" "double8 __ovld __cnfn convert_double8_rtp(ulong8);\n" "double8 __ovld __cnfn convert_double8_rtp(ushort8);\n" "double8 __ovld __cnfn convert_double8_rtz(char8);\n" "double8 __ovld __cnfn convert_double8_rtz(double8);\n" "double8 __ovld __cnfn convert_double8_rtz(float8);\n" "double8 __ovld __cnfn convert_double8_rtz(int8);\n" "double8 __ovld __cnfn convert_double8_rtz(long8);\n" "double8 __ovld __cnfn convert_double8_rtz(short8);\n" "double8 __ovld __cnfn convert_double8_rtz(uchar8);\n" "double8 __ovld __cnfn convert_double8_rtz(uint8);\n" "double8 __ovld __cnfn convert_double8_rtz(ulong8);\n" "double8 __ovld __cnfn convert_double8_rtz(ushort8);\n" "double16 __ovld __cnfn convert_double16(char16);\n" "double16 __ovld __cnfn convert_double16(double16);\n" "double16 __ovld __cnfn convert_double16(float16);\n" "double16 __ovld __cnfn convert_double16(int16);\n" "double16 __ovld __cnfn convert_double16(long16);\n" "double16 __ovld __cnfn convert_double16(short16);\n" "double16 __ovld __cnfn convert_double16(uchar16);\n" "double16 __ovld __cnfn convert_double16(uint16);\n" "double16 __ovld __cnfn convert_double16(ulong16);\n" "double16 __ovld __cnfn convert_double16(ushort16);\n" "double16 __ovld __cnfn convert_double16_rte(char16);\n" "double16 __ovld __cnfn convert_double16_rte(double16);\n" "double16 __ovld __cnfn convert_double16_rte(float16);\n" "double16 __ovld __cnfn convert_double16_rte(int16);\n" "double16 __ovld __cnfn convert_double16_rte(long16);\n" "double16 __ovld __cnfn convert_double16_rte(short16);\n" "double16 __ovld __cnfn convert_double16_rte(uchar16);\n" "double16 __ovld __cnfn convert_double16_rte(uint16);\n" "double16 __ovld __cnfn convert_double16_rte(ulong16);\n" "double16 __ovld __cnfn convert_double16_rte(ushort16);\n" "double16 __ovld __cnfn convert_double16_rtn(char16);\n" "double16 __ovld 
__cnfn convert_double16_rtn(double16);\n" "double16 __ovld __cnfn convert_double16_rtn(float16);\n" "double16 __ovld __cnfn convert_double16_rtn(int16);\n" "double16 __ovld __cnfn convert_double16_rtn(long16);\n" "double16 __ovld __cnfn convert_double16_rtn(short16);\n" "double16 __ovld __cnfn convert_double16_rtn(uchar16);\n" "double16 __ovld __cnfn convert_double16_rtn(uint16);\n" "double16 __ovld __cnfn convert_double16_rtn(ulong16);\n" "double16 __ovld __cnfn convert_double16_rtn(ushort16);\n" "double16 __ovld __cnfn convert_double16_rtp(char16);\n" "double16 __ovld __cnfn convert_double16_rtp(double16);\n" "double16 __ovld __cnfn convert_double16_rtp(float16);\n" "double16 __ovld __cnfn convert_double16_rtp(int16);\n" "double16 __ovld __cnfn convert_double16_rtp(long16);\n" "double16 __ovld __cnfn convert_double16_rtp(short16);\n" "double16 __ovld __cnfn convert_double16_rtp(uchar16);\n" "double16 __ovld __cnfn convert_double16_rtp(uint16);\n" "double16 __ovld __cnfn convert_double16_rtp(ulong16);\n" "double16 __ovld __cnfn convert_double16_rtp(ushort16);\n" "double16 __ovld __cnfn convert_double16_rtz(char16);\n" "double16 __ovld __cnfn convert_double16_rtz(double16);\n" "double16 __ovld __cnfn convert_double16_rtz(float16);\n" "double16 __ovld __cnfn convert_double16_rtz(int16);\n" "double16 __ovld __cnfn convert_double16_rtz(long16);\n" "double16 __ovld __cnfn convert_double16_rtz(short16);\n" "double16 __ovld __cnfn convert_double16_rtz(uchar16);\n" "double16 __ovld __cnfn convert_double16_rtz(uint16);\n" "double16 __ovld __cnfn convert_double16_rtz(ulong16);\n" "double16 __ovld __cnfn convert_double16_rtz(ushort16);\n" "#endif //cl_khr_fp64\n" "\n" "#ifdef cl_khr_fp16\n" "#pragma OPENCL EXTENSION cl_khr_fp16 : enable\n" "// Convert half types to non-double types.\n" "uchar __ovld __cnfn convert_uchar(half);\n" "uchar __ovld __cnfn convert_uchar_rte(half);\n" "uchar __ovld __cnfn convert_uchar_rtp(half);\n" "uchar __ovld __cnfn convert_uchar_rtn(half);\n" 
"uchar __ovld __cnfn convert_uchar_rtz(half);\n" "uchar __ovld __cnfn convert_uchar_sat(half);\n" "uchar __ovld __cnfn convert_uchar_sat_rte(half);\n" "uchar __ovld __cnfn convert_uchar_sat_rtp(half);\n" "uchar __ovld __cnfn convert_uchar_sat_rtn(half);\n" "uchar __ovld __cnfn convert_uchar_sat_rtz(half);\n" "uchar2 __ovld __cnfn convert_uchar2(half2);\n" "uchar2 __ovld __cnfn convert_uchar2_rte(half2);\n" "uchar2 __ovld __cnfn convert_uchar2_rtp(half2);\n" "uchar2 __ovld __cnfn convert_uchar2_rtn(half2);\n" "uchar2 __ovld __cnfn convert_uchar2_rtz(half2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat(half2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rte(half2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rtp(half2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rtn(half2);\n" "uchar2 __ovld __cnfn convert_uchar2_sat_rtz(half2);\n" "uchar3 __ovld __cnfn convert_uchar3(half3);\n" "uchar3 __ovld __cnfn convert_uchar3_rte(half3);\n" "uchar3 __ovld __cnfn convert_uchar3_rtp(half3);\n" "uchar3 __ovld __cnfn convert_uchar3_rtn(half3);\n" "uchar3 __ovld __cnfn convert_uchar3_rtz(half3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat(half3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rte(half3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rtp(half3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rtn(half3);\n" "uchar3 __ovld __cnfn convert_uchar3_sat_rtz(half3);\n" "uchar4 __ovld __cnfn convert_uchar4(half4);\n" "uchar4 __ovld __cnfn convert_uchar4_rte(half4);\n" "uchar4 __ovld __cnfn convert_uchar4_rtp(half4);\n" "uchar4 __ovld __cnfn convert_uchar4_rtn(half4);\n" "uchar4 __ovld __cnfn convert_uchar4_rtz(half4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat(half4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rte(half4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rtp(half4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rtn(half4);\n" "uchar4 __ovld __cnfn convert_uchar4_sat_rtz(half4);\n" "uchar8 __ovld __cnfn convert_uchar8(half8);\n" "uchar8 __ovld __cnfn 
convert_uchar8_rte(half8);\n" "uchar8 __ovld __cnfn convert_uchar8_rtp(half8);\n" "uchar8 __ovld __cnfn convert_uchar8_rtn(half8);\n" "uchar8 __ovld __cnfn convert_uchar8_rtz(half8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat(half8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rte(half8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rtp(half8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rtn(half8);\n" "uchar8 __ovld __cnfn convert_uchar8_sat_rtz(half8);\n" "uchar16 __ovld __cnfn convert_uchar16(half16);\n" "uchar16 __ovld __cnfn convert_uchar16_rte(half16);\n" "uchar16 __ovld __cnfn convert_uchar16_rtp(half16);\n" "uchar16 __ovld __cnfn convert_uchar16_rtn(half16);\n" "uchar16 __ovld __cnfn convert_uchar16_rtz(half16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat(half16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat_rte(half16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat_rtp(half16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat_rtn(half16);\n" "uchar16 __ovld __cnfn convert_uchar16_sat_rtz(half16);\n" "ushort __ovld __cnfn convert_ushort(half);\n" "ushort __ovld __cnfn convert_ushort_rte(half);\n" "ushort __ovld __cnfn convert_ushort_rtp(half);\n" "ushort __ovld __cnfn convert_ushort_rtn(half);\n" "ushort __ovld __cnfn convert_ushort_rtz(half);\n" "ushort __ovld __cnfn convert_ushort_sat(half);\n" "ushort __ovld __cnfn convert_ushort_sat_rte(half);\n" "ushort __ovld __cnfn convert_ushort_sat_rtp(half);\n" "ushort __ovld __cnfn convert_ushort_sat_rtn(half);\n" "ushort __ovld __cnfn convert_ushort_sat_rtz(half);\n" "ushort2 __ovld __cnfn convert_ushort2(half2);\n" "ushort2 __ovld __cnfn convert_ushort2_rte(half2);\n" "ushort2 __ovld __cnfn convert_ushort2_rtp(half2);\n" "ushort2 __ovld __cnfn convert_ushort2_rtn(half2);\n" "ushort2 __ovld __cnfn convert_ushort2_rtz(half2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat(half2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat_rte(half2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat_rtp(half2);\n" "ushort2 __ovld 
__cnfn convert_ushort2_sat_rtn(half2);\n" "ushort2 __ovld __cnfn convert_ushort2_sat_rtz(half2);\n" "ushort3 __ovld __cnfn convert_ushort3(half3);\n" "ushort3 __ovld __cnfn convert_ushort3_rte(half3);\n" "ushort3 __ovld __cnfn convert_ushort3_rtp(half3);\n" "ushort3 __ovld __cnfn convert_ushort3_rtn(half3);\n" "ushort3 __ovld __cnfn convert_ushort3_rtz(half3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat(half3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rte(half3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rtp(half3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rtn(half3);\n" "ushort3 __ovld __cnfn convert_ushort3_sat_rtz(half3);\n" "ushort4 __ovld __cnfn convert_ushort4(half4);\n" "ushort4 __ovld __cnfn convert_ushort4_rte(half4);\n" "ushort4 __ovld __cnfn convert_ushort4_rtp(half4);\n" "ushort4 __ovld __cnfn convert_ushort4_rtn(half4);\n" "ushort4 __ovld __cnfn convert_ushort4_rtz(half4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat(half4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rte(half4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rtp(half4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rtn(half4);\n" "ushort4 __ovld __cnfn convert_ushort4_sat_rtz(half4);\n" "ushort8 __ovld __cnfn convert_ushort8(half8);\n" "ushort8 __ovld __cnfn convert_ushort8_rte(half8);\n" "ushort8 __ovld __cnfn convert_ushort8_rtp(half8);\n" "ushort8 __ovld __cnfn convert_ushort8_rtn(half8);\n" "ushort8 __ovld __cnfn convert_ushort8_rtz(half8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat(half8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rte(half8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rtp(half8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rtn(half8);\n" "ushort8 __ovld __cnfn convert_ushort8_sat_rtz(half8);\n" "ushort16 __ovld __cnfn convert_ushort16(half16);\n" "ushort16 __ovld __cnfn convert_ushort16_rte(half16);\n" "ushort16 __ovld __cnfn convert_ushort16_rtp(half16);\n" "ushort16 __ovld __cnfn convert_ushort16_rtn(half16);\n" "ushort16 __ovld 
__cnfn convert_ushort16_rtz(half16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat(half16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rte(half16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rtp(half16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rtn(half16);\n" "ushort16 __ovld __cnfn convert_ushort16_sat_rtz(half16);\n" "uint __ovld __cnfn convert_uint(half);\n" "uint __ovld __cnfn convert_uint_rte(half);\n" "uint __ovld __cnfn convert_uint_rtp(half);\n" "uint __ovld __cnfn convert_uint_rtn(half);\n" "uint __ovld __cnfn convert_uint_rtz(half);\n" "uint __ovld __cnfn convert_uint_sat(half);\n" "uint __ovld __cnfn convert_uint_sat_rte(half);\n" "uint __ovld __cnfn convert_uint_sat_rtp(half);\n" "uint __ovld __cnfn convert_uint_sat_rtn(half);\n" "uint __ovld __cnfn convert_uint_sat_rtz(half);\n" "uint2 __ovld __cnfn convert_uint2(half2);\n" "uint2 __ovld __cnfn convert_uint2_rte(half2);\n" "uint2 __ovld __cnfn convert_uint2_rtp(half2);\n" "uint2 __ovld __cnfn convert_uint2_rtn(half2);\n" "uint2 __ovld __cnfn convert_uint2_rtz(half2);\n" "uint2 __ovld __cnfn convert_uint2_sat(half2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rte(half2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rtp(half2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rtn(half2);\n" "uint2 __ovld __cnfn convert_uint2_sat_rtz(half2);\n" "uint3 __ovld __cnfn convert_uint3(half3);\n" "uint3 __ovld __cnfn convert_uint3_rte(half3);\n" "uint3 __ovld __cnfn convert_uint3_rtp(half3);\n" "uint3 __ovld __cnfn convert_uint3_rtn(half3);\n" "uint3 __ovld __cnfn convert_uint3_rtz(half3);\n" "uint3 __ovld __cnfn convert_uint3_sat(half3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rte(half3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rtp(half3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rtn(half3);\n" "uint3 __ovld __cnfn convert_uint3_sat_rtz(half3);\n" "uint4 __ovld __cnfn convert_uint4(half4);\n" "uint4 __ovld __cnfn convert_uint4_rte(half4);\n" "uint4 __ovld __cnfn convert_uint4_rtp(half4);\n" "uint4 
__ovld __cnfn convert_uint4_rtn(half4);\n" "uint4 __ovld __cnfn convert_uint4_rtz(half4);\n" "uint4 __ovld __cnfn convert_uint4_sat(half4);\n" "uint4 __ovld __cnfn convert_uint4_sat_rte(half4);\n" "uint4 __ovld __cnfn convert_uint4_sat_rtp(half4);\n" "uint4 __ovld __cnfn convert_uint4_sat_rtn(half4);\n" "uint4 __ovld __cnfn convert_uint4_sat_rtz(half4);\n" "uint8 __ovld __cnfn convert_uint8(half8);\n" "uint8 __ovld __cnfn convert_uint8_rte(half8);\n" "uint8 __ovld __cnfn convert_uint8_rtp(half8);\n" "uint8 __ovld __cnfn convert_uint8_rtn(half8);\n" "uint8 __ovld __cnfn convert_uint8_rtz(half8);\n" "uint8 __ovld __cnfn convert_uint8_sat(half8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rte(half8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rtp(half8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rtn(half8);\n" "uint8 __ovld __cnfn convert_uint8_sat_rtz(half8);\n" "uint16 __ovld __cnfn convert_uint16(half16);\n" "uint16 __ovld __cnfn convert_uint16_rte(half16);\n" "uint16 __ovld __cnfn convert_uint16_rtp(half16);\n" "uint16 __ovld __cnfn convert_uint16_rtn(half16);\n" "uint16 __ovld __cnfn convert_uint16_rtz(half16);\n" "uint16 __ovld __cnfn convert_uint16_sat(half16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rte(half16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rtp(half16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rtn(half16);\n" "uint16 __ovld __cnfn convert_uint16_sat_rtz(half16);\n" "ulong __ovld __cnfn convert_ulong(half);\n" "ulong __ovld __cnfn convert_ulong_rte(half);\n" "ulong __ovld __cnfn convert_ulong_rtp(half);\n" "ulong __ovld __cnfn convert_ulong_rtn(half);\n" "ulong __ovld __cnfn convert_ulong_rtz(half);\n" "ulong __ovld __cnfn convert_ulong_sat(half);\n" "ulong __ovld __cnfn convert_ulong_sat_rte(half);\n" "ulong __ovld __cnfn convert_ulong_sat_rtp(half);\n" "ulong __ovld __cnfn convert_ulong_sat_rtn(half);\n" "ulong __ovld __cnfn convert_ulong_sat_rtz(half);\n" "ulong2 __ovld __cnfn convert_ulong2(half2);\n" "ulong2 __ovld __cnfn 
convert_ulong2_rte(half2);\n" "ulong2 __ovld __cnfn convert_ulong2_rtp(half2);\n" "ulong2 __ovld __cnfn convert_ulong2_rtn(half2);\n" "ulong2 __ovld __cnfn convert_ulong2_rtz(half2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat(half2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rte(half2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rtp(half2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rtn(half2);\n" "ulong2 __ovld __cnfn convert_ulong2_sat_rtz(half2);\n" "ulong3 __ovld __cnfn convert_ulong3(half3);\n" "ulong3 __ovld __cnfn convert_ulong3_rte(half3);\n" "ulong3 __ovld __cnfn convert_ulong3_rtp(half3);\n" "ulong3 __ovld __cnfn convert_ulong3_rtn(half3);\n" "ulong3 __ovld __cnfn convert_ulong3_rtz(half3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat(half3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rte(half3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rtp(half3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rtn(half3);\n" "ulong3 __ovld __cnfn convert_ulong3_sat_rtz(half3);\n" "ulong4 __ovld __cnfn convert_ulong4(half4);\n" "ulong4 __ovld __cnfn convert_ulong4_rte(half4);\n" "ulong4 __ovld __cnfn convert_ulong4_rtp(half4);\n" "ulong4 __ovld __cnfn convert_ulong4_rtn(half4);\n" "ulong4 __ovld __cnfn convert_ulong4_rtz(half4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat(half4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rte(half4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rtp(half4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rtn(half4);\n" "ulong4 __ovld __cnfn convert_ulong4_sat_rtz(half4);\n" "ulong8 __ovld __cnfn convert_ulong8(half8);\n" "ulong8 __ovld __cnfn convert_ulong8_rte(half8);\n" "ulong8 __ovld __cnfn convert_ulong8_rtp(half8);\n" "ulong8 __ovld __cnfn convert_ulong8_rtn(half8);\n" "ulong8 __ovld __cnfn convert_ulong8_rtz(half8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat(half8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rte(half8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rtp(half8);\n" "ulong8 __ovld __cnfn 
convert_ulong8_sat_rtn(half8);\n" "ulong8 __ovld __cnfn convert_ulong8_sat_rtz(half8);\n" "ulong16 __ovld __cnfn convert_ulong16(half16);\n" "ulong16 __ovld __cnfn convert_ulong16_rte(half16);\n" "ulong16 __ovld __cnfn convert_ulong16_rtp(half16);\n" "ulong16 __ovld __cnfn convert_ulong16_rtn(half16);\n" "ulong16 __ovld __cnfn convert_ulong16_rtz(half16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat(half16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rte(half16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rtp(half16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rtn(half16);\n" "ulong16 __ovld __cnfn convert_ulong16_sat_rtz(half16);\n" "char __ovld __cnfn convert_char(half);\n" "char __ovld __cnfn convert_char_rte(half);\n" "char __ovld __cnfn convert_char_rtp(half);\n" "char __ovld __cnfn convert_char_rtn(half);\n" "char __ovld __cnfn convert_char_rtz(half);\n" "char __ovld __cnfn convert_char_sat(half);\n" "char __ovld __cnfn convert_char_sat_rte(half);\n" "char __ovld __cnfn convert_char_sat_rtp(half);\n" "char __ovld __cnfn convert_char_sat_rtn(half);\n" "char __ovld __cnfn convert_char_sat_rtz(half);\n" "char2 __ovld __cnfn convert_char2(half2);\n" "char2 __ovld __cnfn convert_char2_rte(half2);\n" "char2 __ovld __cnfn convert_char2_rtp(half2);\n" "char2 __ovld __cnfn convert_char2_rtn(half2);\n" "char2 __ovld __cnfn convert_char2_rtz(half2);\n" "char2 __ovld __cnfn convert_char2_sat(half2);\n" "char2 __ovld __cnfn convert_char2_sat_rte(half2);\n" "char2 __ovld __cnfn convert_char2_sat_rtp(half2);\n" "char2 __ovld __cnfn convert_char2_sat_rtn(half2);\n" "char2 __ovld __cnfn convert_char2_sat_rtz(half2);\n" "char3 __ovld __cnfn convert_char3(half3);\n" "char3 __ovld __cnfn convert_char3_rte(half3);\n" "char3 __ovld __cnfn convert_char3_rtp(half3);\n" "char3 __ovld __cnfn convert_char3_rtn(half3);\n" "char3 __ovld __cnfn convert_char3_rtz(half3);\n" "char3 __ovld __cnfn convert_char3_sat(half3);\n" "char3 __ovld __cnfn convert_char3_sat_rte(half3);\n" 
"char3 __ovld __cnfn convert_char3_sat_rtp(half3);\n" "char3 __ovld __cnfn convert_char3_sat_rtn(half3);\n" "char3 __ovld __cnfn convert_char3_sat_rtz(half3);\n" "char4 __ovld __cnfn convert_char4(half4);\n" "char4 __ovld __cnfn convert_char4_rte(half4);\n" "char4 __ovld __cnfn convert_char4_rtp(half4);\n" "char4 __ovld __cnfn convert_char4_rtn(half4);\n" "char4 __ovld __cnfn convert_char4_rtz(half4);\n" "char4 __ovld __cnfn convert_char4_sat(half4);\n" "char4 __ovld __cnfn convert_char4_sat_rte(half4);\n" "char4 __ovld __cnfn convert_char4_sat_rtp(half4);\n" "char4 __ovld __cnfn convert_char4_sat_rtn(half4);\n" "char4 __ovld __cnfn convert_char4_sat_rtz(half4);\n" "char8 __ovld __cnfn convert_char8(half8);\n" "char8 __ovld __cnfn convert_char8_rte(half8);\n" "char8 __ovld __cnfn convert_char8_rtp(half8);\n" "char8 __ovld __cnfn convert_char8_rtn(half8);\n" "char8 __ovld __cnfn convert_char8_rtz(half8);\n" "char8 __ovld __cnfn convert_char8_sat(half8);\n" "char8 __ovld __cnfn convert_char8_sat_rte(half8);\n" "char8 __ovld __cnfn convert_char8_sat_rtp(half8);\n" "char8 __ovld __cnfn convert_char8_sat_rtn(half8);\n" "char8 __ovld __cnfn convert_char8_sat_rtz(half8);\n" "char16 __ovld __cnfn convert_char16(half16);\n" "char16 __ovld __cnfn convert_char16_rte(half16);\n" "char16 __ovld __cnfn convert_char16_rtp(half16);\n" "char16 __ovld __cnfn convert_char16_rtn(half16);\n" "char16 __ovld __cnfn convert_char16_rtz(half16);\n" "char16 __ovld __cnfn convert_char16_sat(half16);\n" "char16 __ovld __cnfn convert_char16_sat_rte(half16);\n" "char16 __ovld __cnfn convert_char16_sat_rtp(half16);\n" "char16 __ovld __cnfn convert_char16_sat_rtn(half16);\n" "char16 __ovld __cnfn convert_char16_sat_rtz(half16);\n" "short __ovld __cnfn convert_short(half);\n" "short __ovld __cnfn convert_short_rte(half);\n" "short __ovld __cnfn convert_short_rtp(half);\n" "short __ovld __cnfn convert_short_rtn(half);\n" "short __ovld __cnfn convert_short_rtz(half);\n" "short __ovld __cnfn 
convert_short_sat(half);\n" "short __ovld __cnfn convert_short_sat_rte(half);\n" "short __ovld __cnfn convert_short_sat_rtp(half);\n" "short __ovld __cnfn convert_short_sat_rtn(half);\n" "short __ovld __cnfn convert_short_sat_rtz(half);\n" "short2 __ovld __cnfn convert_short2(half2);\n" "short2 __ovld __cnfn convert_short2_rte(half2);\n" "short2 __ovld __cnfn convert_short2_rtp(half2);\n" "short2 __ovld __cnfn convert_short2_rtn(half2);\n" "short2 __ovld __cnfn convert_short2_rtz(half2);\n" "short2 __ovld __cnfn convert_short2_sat(half2);\n" "short2 __ovld __cnfn convert_short2_sat_rte(half2);\n" "short2 __ovld __cnfn convert_short2_sat_rtp(half2);\n" "short2 __ovld __cnfn convert_short2_sat_rtn(half2);\n" "short2 __ovld __cnfn convert_short2_sat_rtz(half2);\n" "short3 __ovld __cnfn convert_short3(half3);\n" "short3 __ovld __cnfn convert_short3_rte(half3);\n" "short3 __ovld __cnfn convert_short3_rtp(half3);\n" "short3 __ovld __cnfn convert_short3_rtn(half3);\n" "short3 __ovld __cnfn convert_short3_rtz(half3);\n" "short3 __ovld __cnfn convert_short3_sat(half3);\n" "short3 __ovld __cnfn convert_short3_sat_rte(half3);\n" "short3 __ovld __cnfn convert_short3_sat_rtp(half3);\n" "short3 __ovld __cnfn convert_short3_sat_rtn(half3);\n" "short3 __ovld __cnfn convert_short3_sat_rtz(half3);\n" "short4 __ovld __cnfn convert_short4(half4);\n" "short4 __ovld __cnfn convert_short4_rte(half4);\n" "short4 __ovld __cnfn convert_short4_rtp(half4);\n" "short4 __ovld __cnfn convert_short4_rtn(half4);\n" "short4 __ovld __cnfn convert_short4_rtz(half4);\n" "short4 __ovld __cnfn convert_short4_sat(half4);\n" "short4 __ovld __cnfn convert_short4_sat_rte(half4);\n" "short4 __ovld __cnfn convert_short4_sat_rtp(half4);\n" "short4 __ovld __cnfn convert_short4_sat_rtn(half4);\n" "short4 __ovld __cnfn convert_short4_sat_rtz(half4);\n" "short8 __ovld __cnfn convert_short8(half8);\n" "short8 __ovld __cnfn convert_short8_rte(half8);\n" "short8 __ovld __cnfn convert_short8_rtp(half8);\n" "short8 
__ovld __cnfn convert_short8_rtn(half8);\n" "short8 __ovld __cnfn convert_short8_rtz(half8);\n" "short8 __ovld __cnfn convert_short8_sat(half8);\n" "short8 __ovld __cnfn convert_short8_sat_rte(half8);\n" "short8 __ovld __cnfn convert_short8_sat_rtp(half8);\n" "short8 __ovld __cnfn convert_short8_sat_rtn(half8);\n" "short8 __ovld __cnfn convert_short8_sat_rtz(half8);\n" "short16 __ovld __cnfn convert_short16(half16);\n" "short16 __ovld __cnfn convert_short16_rte(half16);\n" "short16 __ovld __cnfn convert_short16_rtp(half16);\n" "short16 __ovld __cnfn convert_short16_rtn(half16);\n" "short16 __ovld __cnfn convert_short16_rtz(half16);\n" "short16 __ovld __cnfn convert_short16_sat(half16);\n" "short16 __ovld __cnfn convert_short16_sat_rte(half16);\n" "short16 __ovld __cnfn convert_short16_sat_rtp(half16);\n" "short16 __ovld __cnfn convert_short16_sat_rtn(half16);\n" "short16 __ovld __cnfn convert_short16_sat_rtz(half16);\n" "int __ovld __cnfn convert_int(half);\n" "int __ovld __cnfn convert_int_rte(half);\n" "int __ovld __cnfn convert_int_rtp(half);\n" "int __ovld __cnfn convert_int_rtn(half);\n" "int __ovld __cnfn convert_int_rtz(half);\n" "int __ovld __cnfn convert_int_sat(half);\n" "int __ovld __cnfn convert_int_sat_rte(half);\n" "int __ovld __cnfn convert_int_sat_rtp(half);\n" "int __ovld __cnfn convert_int_sat_rtn(half);\n" "int __ovld __cnfn convert_int_sat_rtz(half);\n" "int2 __ovld __cnfn convert_int2(half2);\n" "int2 __ovld __cnfn convert_int2_rte(half2);\n" "int2 __ovld __cnfn convert_int2_rtp(half2);\n" "int2 __ovld __cnfn convert_int2_rtn(half2);\n" "int2 __ovld __cnfn convert_int2_rtz(half2);\n" "int2 __ovld __cnfn convert_int2_sat(half2);\n" "int2 __ovld __cnfn convert_int2_sat_rte(half2);\n" "int2 __ovld __cnfn convert_int2_sat_rtp(half2);\n" "int2 __ovld __cnfn convert_int2_sat_rtn(half2);\n" "int2 __ovld __cnfn convert_int2_sat_rtz(half2);\n" "int3 __ovld __cnfn convert_int3(half3);\n" "int3 __ovld __cnfn convert_int3_rte(half3);\n" "int3 __ovld __cnfn 
convert_int3_rtp(half3);\n" "int3 __ovld __cnfn convert_int3_rtn(half3);\n" "int3 __ovld __cnfn convert_int3_rtz(half3);\n" "int3 __ovld __cnfn convert_int3_sat(half3);\n" "int3 __ovld __cnfn convert_int3_sat_rte(half3);\n" "int3 __ovld __cnfn convert_int3_sat_rtp(half3);\n" "int3 __ovld __cnfn convert_int3_sat_rtn(half3);\n" "int3 __ovld __cnfn convert_int3_sat_rtz(half3);\n" "int4 __ovld __cnfn convert_int4(half4);\n" "int4 __ovld __cnfn convert_int4_rte(half4);\n" "int4 __ovld __cnfn convert_int4_rtp(half4);\n" "int4 __ovld __cnfn convert_int4_rtn(half4);\n" "int4 __ovld __cnfn convert_int4_rtz(half4);\n" "int4 __ovld __cnfn convert_int4_sat(half4);\n" "int4 __ovld __cnfn convert_int4_sat_rte(half4);\n" "int4 __ovld __cnfn convert_int4_sat_rtp(half4);\n" "int4 __ovld __cnfn convert_int4_sat_rtn(half4);\n" "int4 __ovld __cnfn convert_int4_sat_rtz(half4);\n" "int8 __ovld __cnfn convert_int8(half8);\n" "int8 __ovld __cnfn convert_int8_rte(half8);\n" "int8 __ovld __cnfn convert_int8_rtp(half8);\n" "int8 __ovld __cnfn convert_int8_rtn(half8);\n" "int8 __ovld __cnfn convert_int8_rtz(half8);\n" "int8 __ovld __cnfn convert_int8_sat(half8);\n" "int8 __ovld __cnfn convert_int8_sat_rte(half8);\n" "int8 __ovld __cnfn convert_int8_sat_rtp(half8);\n" "int8 __ovld __cnfn convert_int8_sat_rtn(half8);\n" "int8 __ovld __cnfn convert_int8_sat_rtz(half8);\n" "int16 __ovld __cnfn convert_int16(half16);\n" "int16 __ovld __cnfn convert_int16_rte(half16);\n" "int16 __ovld __cnfn convert_int16_rtp(half16);\n" "int16 __ovld __cnfn convert_int16_rtn(half16);\n" "int16 __ovld __cnfn convert_int16_rtz(half16);\n" "int16 __ovld __cnfn convert_int16_sat(half16);\n" "int16 __ovld __cnfn convert_int16_sat_rte(half16);\n" "int16 __ovld __cnfn convert_int16_sat_rtp(half16);\n" "int16 __ovld __cnfn convert_int16_sat_rtn(half16);\n" "int16 __ovld __cnfn convert_int16_sat_rtz(half16);\n" "long __ovld __cnfn convert_long(half);\n" "long __ovld __cnfn convert_long_rte(half);\n" "long __ovld __cnfn 
convert_long_rtp(half);\n" "long __ovld __cnfn convert_long_rtn(half);\n" "long __ovld __cnfn convert_long_rtz(half);\n" "long __ovld __cnfn convert_long_sat(half);\n" "long __ovld __cnfn convert_long_sat_rte(half);\n" "long __ovld __cnfn convert_long_sat_rtp(half);\n" "long __ovld __cnfn convert_long_sat_rtn(half);\n" "long __ovld __cnfn convert_long_sat_rtz(half);\n" "long2 __ovld __cnfn convert_long2(half2);\n" "long2 __ovld __cnfn convert_long2_rte(half2);\n" "long2 __ovld __cnfn convert_long2_rtp(half2);\n" "long2 __ovld __cnfn convert_long2_rtn(half2);\n" "long2 __ovld __cnfn convert_long2_rtz(half2);\n" "long2 __ovld __cnfn convert_long2_sat(half2);\n" "long2 __ovld __cnfn convert_long2_sat_rte(half2);\n" "long2 __ovld __cnfn convert_long2_sat_rtp(half2);\n" "long2 __ovld __cnfn convert_long2_sat_rtn(half2);\n" "long2 __ovld __cnfn convert_long2_sat_rtz(half2);\n" "long3 __ovld __cnfn convert_long3(half3);\n" "long3 __ovld __cnfn convert_long3_rte(half3);\n" "long3 __ovld __cnfn convert_long3_rtp(half3);\n" "long3 __ovld __cnfn convert_long3_rtn(half3);\n" "long3 __ovld __cnfn convert_long3_rtz(half3);\n" "long3 __ovld __cnfn convert_long3_sat(half3);\n" "long3 __ovld __cnfn convert_long3_sat_rte(half3);\n" "long3 __ovld __cnfn convert_long3_sat_rtp(half3);\n" "long3 __ovld __cnfn convert_long3_sat_rtn(half3);\n" "long3 __ovld __cnfn convert_long3_sat_rtz(half3);\n" "long4 __ovld __cnfn convert_long4(half4);\n" "long4 __ovld __cnfn convert_long4_rte(half4);\n" "long4 __ovld __cnfn convert_long4_rtp(half4);\n" "long4 __ovld __cnfn convert_long4_rtn(half4);\n" "long4 __ovld __cnfn convert_long4_rtz(half4);\n" "long4 __ovld __cnfn convert_long4_sat(half4);\n" "long4 __ovld __cnfn convert_long4_sat_rte(half4);\n" "long4 __ovld __cnfn convert_long4_sat_rtp(half4);\n" "long4 __ovld __cnfn convert_long4_sat_rtn(half4);\n" "long4 __ovld __cnfn convert_long4_sat_rtz(half4);\n" "long8 __ovld __cnfn convert_long8(half8);\n" "long8 __ovld __cnfn 
convert_long8_rte(half8);\n" "long8 __ovld __cnfn convert_long8_rtp(half8);\n" "long8 __ovld __cnfn convert_long8_rtn(half8);\n" "long8 __ovld __cnfn convert_long8_rtz(half8);\n" "long8 __ovld __cnfn convert_long8_sat(half8);\n" "long8 __ovld __cnfn convert_long8_sat_rte(half8);\n" "long8 __ovld __cnfn convert_long8_sat_rtp(half8);\n" "long8 __ovld __cnfn convert_long8_sat_rtn(half8);\n" "long8 __ovld __cnfn convert_long8_sat_rtz(half8);\n" "long16 __ovld __cnfn convert_long16(half16);\n" "long16 __ovld __cnfn convert_long16_rte(half16);\n" "long16 __ovld __cnfn convert_long16_rtp(half16);\n" "long16 __ovld __cnfn convert_long16_rtn(half16);\n" "long16 __ovld __cnfn convert_long16_rtz(half16);\n" "long16 __ovld __cnfn convert_long16_sat(half16);\n" "long16 __ovld __cnfn convert_long16_sat_rte(half16);\n" "long16 __ovld __cnfn convert_long16_sat_rtp(half16);\n" "long16 __ovld __cnfn convert_long16_sat_rtn(half16);\n" "long16 __ovld __cnfn convert_long16_sat_rtz(half16);\n" "float __ovld __cnfn convert_float(half);\n" "float __ovld __cnfn convert_float_rte(half);\n" "float __ovld __cnfn convert_float_rtp(half);\n" "float __ovld __cnfn convert_float_rtn(half);\n" "float __ovld __cnfn convert_float_rtz(half);\n" "float2 __ovld __cnfn convert_float2(half2);\n" "float2 __ovld __cnfn convert_float2_rte(half2);\n" "float2 __ovld __cnfn convert_float2_rtp(half2);\n" "float2 __ovld __cnfn convert_float2_rtn(half2);\n" "float2 __ovld __cnfn convert_float2_rtz(half2);\n" "float3 __ovld __cnfn convert_float3(half3);\n" "float3 __ovld __cnfn convert_float3_rte(half3);\n" "float3 __ovld __cnfn convert_float3_rtp(half3);\n" "float3 __ovld __cnfn convert_float3_rtn(half3);\n" "float3 __ovld __cnfn convert_float3_rtz(half3);\n" "float4 __ovld __cnfn convert_float4(half4);\n" "float4 __ovld __cnfn convert_float4_rte(half4);\n" "float4 __ovld __cnfn convert_float4_rtp(half4);\n" "float4 __ovld __cnfn convert_float4_rtn(half4);\n" "float4 __ovld __cnfn convert_float4_rtz(half4);\n" 
"float8 __ovld __cnfn convert_float8(half8);\n" "float8 __ovld __cnfn convert_float8_rte(half8);\n" "float8 __ovld __cnfn convert_float8_rtp(half8);\n" "float8 __ovld __cnfn convert_float8_rtn(half8);\n" "float8 __ovld __cnfn convert_float8_rtz(half8);\n" "float16 __ovld __cnfn convert_float16(half16);\n" "float16 __ovld __cnfn convert_float16_rte(half16);\n" "float16 __ovld __cnfn convert_float16_rtp(half16);\n" "float16 __ovld __cnfn convert_float16_rtn(half16);\n" "float16 __ovld __cnfn convert_float16_rtz(half16);\n" "\n" "// Convert non-double types to half types.\n" "half __ovld __cnfn convert_half(uchar);\n" "half __ovld __cnfn convert_half(ushort);\n" "half __ovld __cnfn convert_half(uint);\n" "half __ovld __cnfn convert_half(ulong);\n" "half __ovld __cnfn convert_half(char);\n" "half __ovld __cnfn convert_half(short);\n" "half __ovld __cnfn convert_half(int);\n" "half __ovld __cnfn convert_half(long);\n" "half __ovld __cnfn convert_half(float);\n" "half __ovld __cnfn convert_half(half);\n" "half __ovld __cnfn convert_half_rte(uchar);\n" "half __ovld __cnfn convert_half_rte(ushort);\n" "half __ovld __cnfn convert_half_rte(uint);\n" "half __ovld __cnfn convert_half_rte(ulong);\n" "half __ovld __cnfn convert_half_rte(char);\n" "half __ovld __cnfn convert_half_rte(short);\n" "half __ovld __cnfn convert_half_rte(int);\n" "half __ovld __cnfn convert_half_rte(long);\n" "half __ovld __cnfn convert_half_rte(float);\n" "half __ovld __cnfn convert_half_rte(half);\n" "half __ovld __cnfn convert_half_rtp(uchar);\n" "half __ovld __cnfn convert_half_rtp(ushort);\n" "half __ovld __cnfn convert_half_rtp(uint);\n" "half __ovld __cnfn convert_half_rtp(ulong);\n" "half __ovld __cnfn convert_half_rtp(char);\n" "half __ovld __cnfn convert_half_rtp(short);\n" "half __ovld __cnfn convert_half_rtp(int);\n" "half __ovld __cnfn convert_half_rtp(long);\n" "half __ovld __cnfn convert_half_rtp(float);\n" "half __ovld __cnfn convert_half_rtp(half);\n" "half __ovld __cnfn 
convert_half_rtn(uchar);\n" "half __ovld __cnfn convert_half_rtn(ushort);\n" "half __ovld __cnfn convert_half_rtn(uint);\n" "half __ovld __cnfn convert_half_rtn(ulong);\n" "half __ovld __cnfn convert_half_rtn(char);\n" "half __ovld __cnfn convert_half_rtn(short);\n" "half __ovld __cnfn convert_half_rtn(int);\n" "half __ovld __cnfn convert_half_rtn(long);\n" "half __ovld __cnfn convert_half_rtn(float);\n" "half __ovld __cnfn convert_half_rtn(half);\n" "half __ovld __cnfn convert_half_rtz(uchar);\n" "half __ovld __cnfn convert_half_rtz(ushort);\n" "half __ovld __cnfn convert_half_rtz(uint);\n" "half __ovld __cnfn convert_half_rtz(ulong);\n" "half __ovld __cnfn convert_half_rtz(char);\n" "half __ovld __cnfn convert_half_rtz(short);\n" "half __ovld __cnfn convert_half_rtz(int);\n" "half __ovld __cnfn convert_half_rtz(long);\n" "half __ovld __cnfn convert_half_rtz(float);\n" "half __ovld __cnfn convert_half_rtz(half);\n" "half2 __ovld __cnfn convert_half2(char2);\n" "half2 __ovld __cnfn convert_half2(uchar2);\n" "half2 __ovld __cnfn convert_half2(short2);\n" "half2 __ovld __cnfn convert_half2(ushort2);\n" "half2 __ovld __cnfn convert_half2(int2);\n" "half2 __ovld __cnfn convert_half2(uint2);\n" "half2 __ovld __cnfn convert_half2(long2);\n" "half2 __ovld __cnfn convert_half2(ulong2);\n" "half2 __ovld __cnfn convert_half2(float2);\n" "half2 __ovld __cnfn convert_half2(half2);\n" "half2 __ovld __cnfn convert_half2_rte(char2);\n" "half2 __ovld __cnfn convert_half2_rte(uchar2);\n" "half2 __ovld __cnfn convert_half2_rte(short2);\n" "half2 __ovld __cnfn convert_half2_rte(ushort2);\n" "half2 __ovld __cnfn convert_half2_rte(int2);\n" "half2 __ovld __cnfn convert_half2_rte(uint2);\n" "half2 __ovld __cnfn convert_half2_rte(long2);\n" "half2 __ovld __cnfn convert_half2_rte(ulong2);\n" "half2 __ovld __cnfn convert_half2_rte(float2);\n" "half2 __ovld __cnfn convert_half2_rte(half2);\n" "half2 __ovld __cnfn convert_half2_rtp(char2);\n" "half2 __ovld __cnfn 
convert_half2_rtp(uchar2);\n" "half2 __ovld __cnfn convert_half2_rtp(short2);\n" "half2 __ovld __cnfn convert_half2_rtp(ushort2);\n" "half2 __ovld __cnfn convert_half2_rtp(int2);\n" "half2 __ovld __cnfn convert_half2_rtp(uint2);\n" "half2 __ovld __cnfn convert_half2_rtp(long2);\n" "half2 __ovld __cnfn convert_half2_rtp(ulong2);\n" "half2 __ovld __cnfn convert_half2_rtp(float2);\n" "half2 __ovld __cnfn convert_half2_rtp(half2);\n" "half2 __ovld __cnfn convert_half2_rtn(char2);\n" "half2 __ovld __cnfn convert_half2_rtn(uchar2);\n" "half2 __ovld __cnfn convert_half2_rtn(short2);\n" "half2 __ovld __cnfn convert_half2_rtn(ushort2);\n" "half2 __ovld __cnfn convert_half2_rtn(int2);\n" "half2 __ovld __cnfn convert_half2_rtn(uint2);\n" "half2 __ovld __cnfn convert_half2_rtn(long2);\n" "half2 __ovld __cnfn convert_half2_rtn(ulong2);\n" "half2 __ovld __cnfn convert_half2_rtn(float2);\n" "half2 __ovld __cnfn convert_half2_rtn(half2);\n" "half2 __ovld __cnfn convert_half2_rtz(char2);\n" "half2 __ovld __cnfn convert_half2_rtz(uchar2);\n" "half2 __ovld __cnfn convert_half2_rtz(short2);\n" "half2 __ovld __cnfn convert_half2_rtz(ushort2);\n" "half2 __ovld __cnfn convert_half2_rtz(int2);\n" "half2 __ovld __cnfn convert_half2_rtz(uint2);\n" "half2 __ovld __cnfn convert_half2_rtz(long2);\n" "half2 __ovld __cnfn convert_half2_rtz(ulong2);\n" "half2 __ovld __cnfn convert_half2_rtz(float2);\n" "half2 __ovld __cnfn convert_half2_rtz(half2);\n" "half3 __ovld __cnfn convert_half3(char3);\n" "half3 __ovld __cnfn convert_half3(uchar3);\n" "half3 __ovld __cnfn convert_half3(short3);\n" "half3 __ovld __cnfn convert_half3(ushort3);\n" "half3 __ovld __cnfn convert_half3(int3);\n" "half3 __ovld __cnfn convert_half3(uint3);\n" "half3 __ovld __cnfn convert_half3(long3);\n" "half3 __ovld __cnfn convert_half3(ulong3);\n" "half3 __ovld __cnfn convert_half3(float3);\n" "half3 __ovld __cnfn convert_half3(half3);\n" "half3 __ovld __cnfn convert_half3_rte(char3);\n" "half3 __ovld __cnfn 
convert_half3_rte(uchar3);\n" "half3 __ovld __cnfn convert_half3_rte(short3);\n" "half3 __ovld __cnfn convert_half3_rte(ushort3);\n" "half3 __ovld __cnfn convert_half3_rte(int3);\n" "half3 __ovld __cnfn convert_half3_rte(uint3);\n" "half3 __ovld __cnfn convert_half3_rte(long3);\n" "half3 __ovld __cnfn convert_half3_rte(ulong3);\n" "half3 __ovld __cnfn convert_half3_rte(float3);\n" "half3 __ovld __cnfn convert_half3_rte(half3);\n" "half3 __ovld __cnfn convert_half3_rtp(char3);\n" "half3 __ovld __cnfn convert_half3_rtp(uchar3);\n" "half3 __ovld __cnfn convert_half3_rtp(short3);\n" "half3 __ovld __cnfn convert_half3_rtp(ushort3);\n" "half3 __ovld __cnfn convert_half3_rtp(int3);\n" "half3 __ovld __cnfn convert_half3_rtp(uint3);\n" "half3 __ovld __cnfn convert_half3_rtp(long3);\n" "half3 __ovld __cnfn convert_half3_rtp(ulong3);\n" "half3 __ovld __cnfn convert_half3_rtp(float3);\n" "half3 __ovld __cnfn convert_half3_rtp(half3);\n" "half3 __ovld __cnfn convert_half3_rtn(char3);\n" "half3 __ovld __cnfn convert_half3_rtn(uchar3);\n" "half3 __ovld __cnfn convert_half3_rtn(short3);\n" "half3 __ovld __cnfn convert_half3_rtn(ushort3);\n" "half3 __ovld __cnfn convert_half3_rtn(int3);\n" "half3 __ovld __cnfn convert_half3_rtn(uint3);\n" "half3 __ovld __cnfn convert_half3_rtn(long3);\n" "half3 __ovld __cnfn convert_half3_rtn(ulong3);\n" "half3 __ovld __cnfn convert_half3_rtn(float3);\n" "half3 __ovld __cnfn convert_half3_rtn(half3);\n" "half3 __ovld __cnfn convert_half3_rtz(char3);\n" "half3 __ovld __cnfn convert_half3_rtz(uchar3);\n" "half3 __ovld __cnfn convert_half3_rtz(short3);\n" "half3 __ovld __cnfn convert_half3_rtz(ushort3);\n" "half3 __ovld __cnfn convert_half3_rtz(int3);\n" "half3 __ovld __cnfn convert_half3_rtz(uint3);\n" "half3 __ovld __cnfn convert_half3_rtz(long3);\n" "half3 __ovld __cnfn convert_half3_rtz(ulong3);\n" "half3 __ovld __cnfn convert_half3_rtz(float3);\n" "half3 __ovld __cnfn convert_half3_rtz(half3);\n" "half4 __ovld __cnfn convert_half4(char4);\n" 
"half4 __ovld __cnfn convert_half4(uchar4);\n" "half4 __ovld __cnfn convert_half4(short4);\n" "half4 __ovld __cnfn convert_half4(ushort4);\n" "half4 __ovld __cnfn convert_half4(int4);\n" "half4 __ovld __cnfn convert_half4(uint4);\n" "half4 __ovld __cnfn convert_half4(long4);\n" "half4 __ovld __cnfn convert_half4(ulong4);\n" "half4 __ovld __cnfn convert_half4(float4);\n" "half4 __ovld __cnfn convert_half4(half4);\n" "half4 __ovld __cnfn convert_half4_rte(char4);\n" "half4 __ovld __cnfn convert_half4_rte(uchar4);\n" "half4 __ovld __cnfn convert_half4_rte(short4);\n" "half4 __ovld __cnfn convert_half4_rte(ushort4);\n" "half4 __ovld __cnfn convert_half4_rte(int4);\n" "half4 __ovld __cnfn convert_half4_rte(uint4);\n" "half4 __ovld __cnfn convert_half4_rte(long4);\n" "half4 __ovld __cnfn convert_half4_rte(ulong4);\n" "half4 __ovld __cnfn convert_half4_rte(float4);\n" "half4 __ovld __cnfn convert_half4_rte(half4);\n" "half4 __ovld __cnfn convert_half4_rtp(char4);\n" "half4 __ovld __cnfn convert_half4_rtp(uchar4);\n" "half4 __ovld __cnfn convert_half4_rtp(short4);\n" "half4 __ovld __cnfn convert_half4_rtp(ushort4);\n" "half4 __ovld __cnfn convert_half4_rtp(int4);\n" "half4 __ovld __cnfn convert_half4_rtp(uint4);\n" "half4 __ovld __cnfn convert_half4_rtp(long4);\n" "half4 __ovld __cnfn convert_half4_rtp(ulong4);\n" "half4 __ovld __cnfn convert_half4_rtp(float4);\n" "half4 __ovld __cnfn convert_half4_rtp(half4);\n" "half4 __ovld __cnfn convert_half4_rtn(char4);\n" "half4 __ovld __cnfn convert_half4_rtn(uchar4);\n" "half4 __ovld __cnfn convert_half4_rtn(short4);\n" "half4 __ovld __cnfn convert_half4_rtn(ushort4);\n" "half4 __ovld __cnfn convert_half4_rtn(int4);\n" "half4 __ovld __cnfn convert_half4_rtn(uint4);\n" "half4 __ovld __cnfn convert_half4_rtn(long4);\n" "half4 __ovld __cnfn convert_half4_rtn(ulong4);\n" "half4 __ovld __cnfn convert_half4_rtn(float4);\n" "half4 __ovld __cnfn convert_half4_rtn(half4);\n" "half4 __ovld __cnfn convert_half4_rtz(char4);\n" "half4 __ovld 
__cnfn convert_half4_rtz(uchar4);\n" "half4 __ovld __cnfn convert_half4_rtz(short4);\n" "half4 __ovld __cnfn convert_half4_rtz(ushort4);\n" "half4 __ovld __cnfn convert_half4_rtz(int4);\n" "half4 __ovld __cnfn convert_half4_rtz(uint4);\n" "half4 __ovld __cnfn convert_half4_rtz(long4);\n" "half4 __ovld __cnfn convert_half4_rtz(ulong4);\n" "half4 __ovld __cnfn convert_half4_rtz(float4);\n" "half4 __ovld __cnfn convert_half4_rtz(half4);\n" "half8 __ovld __cnfn convert_half8(char8);\n" "half8 __ovld __cnfn convert_half8(uchar8);\n" "half8 __ovld __cnfn convert_half8(short8);\n" "half8 __ovld __cnfn convert_half8(ushort8);\n" "half8 __ovld __cnfn convert_half8(int8);\n" "half8 __ovld __cnfn convert_half8(uint8);\n" "half8 __ovld __cnfn convert_half8(long8);\n" "half8 __ovld __cnfn convert_half8(ulong8);\n" "half8 __ovld __cnfn convert_half8(float8);\n" "half8 __ovld __cnfn convert_half8(half8);\n" "half8 __ovld __cnfn convert_half8_rte(char8);\n" "half8 __ovld __cnfn convert_half8_rte(uchar8);\n" "half8 __ovld __cnfn convert_half8_rte(short8);\n" "half8 __ovld __cnfn convert_half8_rte(ushort8);\n" "half8 __ovld __cnfn convert_half8_rte(int8);\n" "half8 __ovld __cnfn convert_half8_rte(uint8);\n" "half8 __ovld __cnfn convert_half8_rte(long8);\n" "half8 __ovld __cnfn convert_half8_rte(ulong8);\n" "half8 __ovld __cnfn convert_half8_rte(float8);\n" "half8 __ovld __cnfn convert_half8_rte(half8);\n" "half8 __ovld __cnfn convert_half8_rtp(char8);\n" "half8 __ovld __cnfn convert_half8_rtp(uchar8);\n" "half8 __ovld __cnfn convert_half8_rtp(short8);\n" "half8 __ovld __cnfn convert_half8_rtp(ushort8);\n" "half8 __ovld __cnfn convert_half8_rtp(int8);\n" "half8 __ovld __cnfn convert_half8_rtp(uint8);\n" "half8 __ovld __cnfn convert_half8_rtp(long8);\n" "half8 __ovld __cnfn convert_half8_rtp(ulong8);\n" "half8 __ovld __cnfn convert_half8_rtp(float8);\n" "half8 __ovld __cnfn convert_half8_rtp(half8);\n" "half8 __ovld __cnfn convert_half8_rtn(char8);\n" "half8 __ovld __cnfn 
convert_half8_rtn(uchar8);\n" "half8 __ovld __cnfn convert_half8_rtn(short8);\n" "half8 __ovld __cnfn convert_half8_rtn(ushort8);\n" "half8 __ovld __cnfn convert_half8_rtn(int8);\n" "half8 __ovld __cnfn convert_half8_rtn(uint8);\n" "half8 __ovld __cnfn convert_half8_rtn(long8);\n" "half8 __ovld __cnfn convert_half8_rtn(ulong8);\n" "half8 __ovld __cnfn convert_half8_rtn(float8);\n" "half8 __ovld __cnfn convert_half8_rtn(half8);\n" "half8 __ovld __cnfn convert_half8_rtz(char8);\n" "half8 __ovld __cnfn convert_half8_rtz(uchar8);\n" "half8 __ovld __cnfn convert_half8_rtz(short8);\n" "half8 __ovld __cnfn convert_half8_rtz(ushort8);\n" "half8 __ovld __cnfn convert_half8_rtz(int8);\n" "half8 __ovld __cnfn convert_half8_rtz(uint8);\n" "half8 __ovld __cnfn convert_half8_rtz(long8);\n" "half8 __ovld __cnfn convert_half8_rtz(ulong8);\n" "half8 __ovld __cnfn convert_half8_rtz(float8);\n" "half8 __ovld __cnfn convert_half8_rtz(half8);\n" "half16 __ovld __cnfn convert_half16(char16);\n" "half16 __ovld __cnfn convert_half16(uchar16);\n" "half16 __ovld __cnfn convert_half16(short16);\n" "half16 __ovld __cnfn convert_half16(ushort16);\n" "half16 __ovld __cnfn convert_half16(int16);\n" "half16 __ovld __cnfn convert_half16(uint16);\n" "half16 __ovld __cnfn convert_half16(long16);\n" "half16 __ovld __cnfn convert_half16(ulong16);\n" "half16 __ovld __cnfn convert_half16(float16);\n" "half16 __ovld __cnfn convert_half16(half16);\n" "half16 __ovld __cnfn convert_half16_rte(char16);\n" "half16 __ovld __cnfn convert_half16_rte(uchar16);\n" "half16 __ovld __cnfn convert_half16_rte(short16);\n" "half16 __ovld __cnfn convert_half16_rte(ushort16);\n" "half16 __ovld __cnfn convert_half16_rte(int16);\n" "half16 __ovld __cnfn convert_half16_rte(uint16);\n" "half16 __ovld __cnfn convert_half16_rte(long16);\n" "half16 __ovld __cnfn convert_half16_rte(ulong16);\n" "half16 __ovld __cnfn convert_half16_rte(float16);\n" "half16 __ovld __cnfn convert_half16_rte(half16);\n" "half16 __ovld __cnfn 
convert_half16_rtp(char16);\n" "half16 __ovld __cnfn convert_half16_rtp(uchar16);\n" "half16 __ovld __cnfn convert_half16_rtp(short16);\n" "half16 __ovld __cnfn convert_half16_rtp(ushort16);\n" "half16 __ovld __cnfn convert_half16_rtp(int16);\n" "half16 __ovld __cnfn convert_half16_rtp(uint16);\n" "half16 __ovld __cnfn convert_half16_rtp(long16);\n" "half16 __ovld __cnfn convert_half16_rtp(ulong16);\n" "half16 __ovld __cnfn convert_half16_rtp(float16);\n" "half16 __ovld __cnfn convert_half16_rtp(half16);\n" "half16 __ovld __cnfn convert_half16_rtn(char16);\n" "half16 __ovld __cnfn convert_half16_rtn(uchar16);\n" "half16 __ovld __cnfn convert_half16_rtn(short16);\n" "half16 __ovld __cnfn convert_half16_rtn(ushort16);\n" "half16 __ovld __cnfn convert_half16_rtn(int16);\n" "half16 __ovld __cnfn convert_half16_rtn(uint16);\n" "half16 __ovld __cnfn convert_half16_rtn(long16);\n" "half16 __ovld __cnfn convert_half16_rtn(ulong16);\n" "half16 __ovld __cnfn convert_half16_rtn(float16);\n" "half16 __ovld __cnfn convert_half16_rtn(half16);\n" "half16 __ovld __cnfn convert_half16_rtz(char16);\n" "half16 __ovld __cnfn convert_half16_rtz(uchar16);\n" "half16 __ovld __cnfn convert_half16_rtz(short16);\n" "half16 __ovld __cnfn convert_half16_rtz(ushort16);\n" "half16 __ovld __cnfn convert_half16_rtz(int16);\n" "half16 __ovld __cnfn convert_half16_rtz(uint16);\n" "half16 __ovld __cnfn convert_half16_rtz(long16);\n" "half16 __ovld __cnfn convert_half16_rtz(ulong16);\n" "half16 __ovld __cnfn convert_half16_rtz(float16);\n" "half16 __ovld __cnfn convert_half16_rtz(half16);\n" "\n" "// Convert half types to double types.\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn convert_double(half);\n" "double __ovld __cnfn convert_double_rte(half);\n" "double __ovld __cnfn convert_double_rtp(half);\n" "double __ovld __cnfn convert_double_rtn(half);\n" "double __ovld __cnfn convert_double_rtz(half);\n" "double2 __ovld __cnfn convert_double2(half2);\n" "double2 __ovld __cnfn 
convert_double2_rte(half2);\n" "double2 __ovld __cnfn convert_double2_rtp(half2);\n" "double2 __ovld __cnfn convert_double2_rtn(half2);\n" "double2 __ovld __cnfn convert_double2_rtz(half2);\n" "double3 __ovld __cnfn convert_double3(half3);\n" "double3 __ovld __cnfn convert_double3_rte(half3);\n" "double3 __ovld __cnfn convert_double3_rtp(half3);\n" "double3 __ovld __cnfn convert_double3_rtn(half3);\n" "double3 __ovld __cnfn convert_double3_rtz(half3);\n" "double4 __ovld __cnfn convert_double4(half4);\n" "double4 __ovld __cnfn convert_double4_rte(half4);\n" "double4 __ovld __cnfn convert_double4_rtp(half4);\n" "double4 __ovld __cnfn convert_double4_rtn(half4);\n" "double4 __ovld __cnfn convert_double4_rtz(half4);\n" "double8 __ovld __cnfn convert_double8(half8);\n" "double8 __ovld __cnfn convert_double8_rte(half8);\n" "double8 __ovld __cnfn convert_double8_rtp(half8);\n" "double8 __ovld __cnfn convert_double8_rtn(half8);\n" "double8 __ovld __cnfn convert_double8_rtz(half8);\n" "double16 __ovld __cnfn convert_double16(half16);\n" "double16 __ovld __cnfn convert_double16_rte(half16);\n" "double16 __ovld __cnfn convert_double16_rtp(half16);\n" "double16 __ovld __cnfn convert_double16_rtn(half16);\n" "double16 __ovld __cnfn convert_double16_rtz(half16);\n" "\n" "// Convert double types to half types.\n" "half __ovld __cnfn convert_half(double);\n" "half __ovld __cnfn convert_half_rte(double);\n" "half __ovld __cnfn convert_half_rtp(double);\n" "half __ovld __cnfn convert_half_rtn(double);\n" "half __ovld __cnfn convert_half_rtz(double);\n" "half2 __ovld __cnfn convert_half2(double2);\n" "half2 __ovld __cnfn convert_half2_rte(double2);\n" "half2 __ovld __cnfn convert_half2_rtp(double2);\n" "half2 __ovld __cnfn convert_half2_rtn(double2);\n" "half2 __ovld __cnfn convert_half2_rtz(double2);\n" "half3 __ovld __cnfn convert_half3(double3);\n" "half3 __ovld __cnfn convert_half3_rte(double3);\n" "half3 __ovld __cnfn convert_half3_rtp(double3);\n" "half3 __ovld __cnfn 
convert_half3_rtn(double3);\n" "half3 __ovld __cnfn convert_half3_rtz(double3);\n" "half4 __ovld __cnfn convert_half4(double4);\n" "half4 __ovld __cnfn convert_half4_rte(double4);\n" "half4 __ovld __cnfn convert_half4_rtp(double4);\n" "half4 __ovld __cnfn convert_half4_rtn(double4);\n" "half4 __ovld __cnfn convert_half4_rtz(double4);\n" "half8 __ovld __cnfn convert_half8(double8);\n" "half8 __ovld __cnfn convert_half8_rte(double8);\n" "half8 __ovld __cnfn convert_half8_rtp(double8);\n" "half8 __ovld __cnfn convert_half8_rtn(double8);\n" "half8 __ovld __cnfn convert_half8_rtz(double8);\n" "half16 __ovld __cnfn convert_half16(double16);\n" "half16 __ovld __cnfn convert_half16_rte(double16);\n" "half16 __ovld __cnfn convert_half16_rtp(double16);\n" "half16 __ovld __cnfn convert_half16_rtn(double16);\n" "half16 __ovld __cnfn convert_half16_rtz(double16);\n" "#endif //cl_khr_fp64\n" "\n" "#endif // cl_khr_fp16\n" "\n" "// OpenCL v1.1 s6.11.1, v1.2 s6.12.1, v2.0 s6.13.1 - Work-item Functions\n" "\n" "/**\n" " * Returns the number of dimensions in use. This is the\n" " * value given to the work_dim argument specified in\n" " * clEnqueueNDRangeKernel.\n" " * For clEnqueueTask, this returns 1.\n" " */\n" "uint __ovld __cnfn get_work_dim(void);\n" "\n" "/**\n" " * Returns the number of global work-items specified for\n" " * dimension identified by dimindx. This value is given by\n" " * the global_work_size argument to\n" " * clEnqueueNDRangeKernel. Valid values of dimindx\n" " * are 0 to get_work_dim() - 1. For other values of\n" " * dimindx, get_global_size() returns 1.\n" " * For clEnqueueTask, this always returns 1.\n" " */\n" "size_t __ovld __cnfn get_global_size(uint);\n" "\n" "/**\n" " * Returns the unique global work-item ID value for\n" " * dimension identified by dimindx. The global work-item\n" " * ID specifies the work-item ID based on the number of\n" " * global work-items specified to execute the kernel. 
Valid\n" " * values of dimindx are 0 to get_work_dim() - 1. For\n" " * other values of dimindx, get_global_id() returns 0.\n" " * For clEnqueueTask, this returns 0.\n" " */\n" "size_t __ovld __cnfn get_global_id(uint);\n" "\n" "/**\n" " * Returns the number of local work-items specified in\n" " * dimension identified by dimindx. This value is given by\n" " * the local_work_size argument to\n" " * clEnqueueNDRangeKernel if local_work_size is not\n" " * NULL; otherwise the OpenCL implementation chooses\n" " * an appropriate local_work_size value which is returned\n" " * by this function. Valid values of dimindx are 0 to\n" " * get_work_dim() - 1. For other values of dimindx,\n" " * get_local_size() returns 1.\n" " * For clEnqueueTask, this always returns 1.\n" " */\n" "size_t __ovld __cnfn get_local_size(uint);\n" "\n" "/**\n" " * Returns the unique local work-item ID i.e. a work-item\n" " * within a specific work-group for dimension identified by\n" " * dimindx. Valid values of dimindx are 0 to\n" " * get_work_dim() - 1. For other values of dimindx,\n" " * get_local_id() returns 0.\n" " * For clEnqueueTask, this returns 0.\n" " */\n" "size_t __ovld __cnfn get_local_id(uint);\n" "\n" "/**\n" " * Returns the number of work-groups that will execute a\n" " * kernel for dimension identified by dimindx.\n" " * Valid values of dimindx are 0 to get_work_dim() - 1.\n" " * For other values of dimindx, get_num_groups() returns 1.\n" " * For clEnqueueTask, this always returns 1.\n" " */\n" "size_t __ovld __cnfn get_num_groups(uint);\n" "\n" "/**\n" " * get_group_id returns the work-group ID which is a\n" " * number from 0 .. 
get_num_groups(dimindx) - 1.\n" " * Valid values of dimindx are 0 to get_work_dim() - 1.\n" " * For other values, get_group_id() returns 0.\n" " * For clEnqueueTask, this returns 0.\n" " */\n" "size_t __ovld __cnfn get_group_id(uint);\n" "\n" "/**\n" " * get_global_offset returns the offset values specified in\n" " * global_work_offset argument to\n" " * clEnqueueNDRangeKernel.\n" " * Valid values of dimindx are 0 to get_work_dim() - 1.\n" " * For other values, get_global_offset() returns 0.\n" " * For clEnqueueTask, this returns 0.\n" " */\n" "size_t __ovld __cnfn get_global_offset(uint);\n" "\n" "#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)\n" "size_t __ovld get_enqueued_local_size(uint);\n" "size_t __ovld get_global_linear_id(void);\n" "size_t __ovld get_local_linear_id(void);\n" "#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)\n" "\n" "// OpenCL v1.1 s6.11.2, v1.2 s6.12.2, v2.0 s6.13.2 - Math functions\n" "\n" "/**\n" " * Arc cosine function.\n" " */\n" "float __ovld __cnfn acos(float);\n" "float2 __ovld __cnfn acos(float2);\n" "float3 __ovld __cnfn acos(float3);\n" "float4 __ovld __cnfn acos(float4);\n" "float8 __ovld __cnfn acos(float8);\n" "float16 __ovld __cnfn acos(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn acos(double);\n" "double2 __ovld __cnfn acos(double2);\n" "double3 __ovld __cnfn acos(double3);\n" "double4 __ovld __cnfn acos(double4);\n" "double8 __ovld __cnfn acos(double8);\n" "double16 __ovld __cnfn acos(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn acos(half);\n" "half2 __ovld __cnfn acos(half2);\n" "half3 __ovld __cnfn acos(half3);\n" "half4 __ovld __cnfn acos(half4);\n" "half8 __ovld __cnfn acos(half8);\n" "half16 __ovld __cnfn acos(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Inverse hyperbolic cosine.\n" " */\n" "float __ovld __cnfn acosh(float);\n" "float2 __ovld __cnfn acosh(float2);\n" "float3 __ovld __cnfn 
acosh(float3);\n" "float4 __ovld __cnfn acosh(float4);\n" "float8 __ovld __cnfn acosh(float8);\n" "float16 __ovld __cnfn acosh(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn acosh(double);\n" "double2 __ovld __cnfn acosh(double2);\n" "double3 __ovld __cnfn acosh(double3);\n" "double4 __ovld __cnfn acosh(double4);\n" "double8 __ovld __cnfn acosh(double8);\n" "double16 __ovld __cnfn acosh(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn acosh(half);\n" "half2 __ovld __cnfn acosh(half2);\n" "half3 __ovld __cnfn acosh(half3);\n" "half4 __ovld __cnfn acosh(half4);\n" "half8 __ovld __cnfn acosh(half8);\n" "half16 __ovld __cnfn acosh(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Compute acos (x) / PI.\n" " */\n" "float __ovld __cnfn acospi(float);\n" "float2 __ovld __cnfn acospi(float2);\n" "float3 __ovld __cnfn acospi(float3);\n" "float4 __ovld __cnfn acospi(float4);\n" "float8 __ovld __cnfn acospi(float8);\n" "float16 __ovld __cnfn acospi(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn acospi(double);\n" "double2 __ovld __cnfn acospi(double2);\n" "double3 __ovld __cnfn acospi(double3);\n" "double4 __ovld __cnfn acospi(double4);\n" "double8 __ovld __cnfn acospi(double8);\n" "double16 __ovld __cnfn acospi(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn acospi(half);\n" "half2 __ovld __cnfn acospi(half2);\n" "half3 __ovld __cnfn acospi(half3);\n" "half4 __ovld __cnfn acospi(half4);\n" "half8 __ovld __cnfn acospi(half8);\n" "half16 __ovld __cnfn acospi(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Arc sine function.\n" " */\n" "float __ovld __cnfn asin(float);\n" "float2 __ovld __cnfn asin(float2);\n" "float3 __ovld __cnfn asin(float3);\n" "float4 __ovld __cnfn asin(float4);\n" "float8 __ovld __cnfn asin(float8);\n" "float16 __ovld __cnfn asin(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn asin(double);\n" "double2 __ovld __cnfn asin(double2);\n" "double3 
__ovld __cnfn asin(double3);\n" "double4 __ovld __cnfn asin(double4);\n" "double8 __ovld __cnfn asin(double8);\n" "double16 __ovld __cnfn asin(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn asin(half);\n" "half2 __ovld __cnfn asin(half2);\n" "half3 __ovld __cnfn asin(half3);\n" "half4 __ovld __cnfn asin(half4);\n" "half8 __ovld __cnfn asin(half8);\n" "half16 __ovld __cnfn asin(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Inverse hyperbolic sine.\n" " */\n" "float __ovld __cnfn asinh(float);\n" "float2 __ovld __cnfn asinh(float2);\n" "float3 __ovld __cnfn asinh(float3);\n" "float4 __ovld __cnfn asinh(float4);\n" "float8 __ovld __cnfn asinh(float8);\n" "float16 __ovld __cnfn asinh(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn asinh(double);\n" "double2 __ovld __cnfn asinh(double2);\n" "double3 __ovld __cnfn asinh(double3);\n" "double4 __ovld __cnfn asinh(double4);\n" "double8 __ovld __cnfn asinh(double8);\n" "double16 __ovld __cnfn asinh(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn asinh(half);\n" "half2 __ovld __cnfn asinh(half2);\n" "half3 __ovld __cnfn asinh(half3);\n" "half4 __ovld __cnfn asinh(half4);\n" "half8 __ovld __cnfn asinh(half8);\n" "half16 __ovld __cnfn asinh(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Compute asin (x) / PI.\n" " */\n" "float __ovld __cnfn asinpi(float);\n" "float2 __ovld __cnfn asinpi(float2);\n" "float3 __ovld __cnfn asinpi(float3);\n" "float4 __ovld __cnfn asinpi(float4);\n" "float8 __ovld __cnfn asinpi(float8);\n" "float16 __ovld __cnfn asinpi(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn asinpi(double);\n" "double2 __ovld __cnfn asinpi(double2);\n" "double3 __ovld __cnfn asinpi(double3);\n" "double4 __ovld __cnfn asinpi(double4);\n" "double8 __ovld __cnfn asinpi(double8);\n" "double16 __ovld __cnfn asinpi(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn asinpi(half);\n" "half2 __ovld 
__cnfn asinpi(half2);\n" "half3 __ovld __cnfn asinpi(half3);\n" "half4 __ovld __cnfn asinpi(half4);\n" "half8 __ovld __cnfn asinpi(half8);\n" "half16 __ovld __cnfn asinpi(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Arc tangent function.\n" " */\n" "float __ovld __cnfn atan(float);\n" "float2 __ovld __cnfn atan(float2);\n" "float3 __ovld __cnfn atan(float3);\n" "float4 __ovld __cnfn atan(float4);\n" "float8 __ovld __cnfn atan(float8);\n" "float16 __ovld __cnfn atan(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn atan(double);\n" "double2 __ovld __cnfn atan(double2);\n" "double3 __ovld __cnfn atan(double3);\n" "double4 __ovld __cnfn atan(double4);\n" "double8 __ovld __cnfn atan(double8);\n" "double16 __ovld __cnfn atan(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn atan(half);\n" "half2 __ovld __cnfn atan(half2);\n" "half3 __ovld __cnfn atan(half3);\n" "half4 __ovld __cnfn atan(half4);\n" "half8 __ovld __cnfn atan(half8);\n" "half16 __ovld __cnfn atan(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Arc tangent of y / x.\n" " */\n" "float __ovld __cnfn atan2(float, float);\n" "float2 __ovld __cnfn atan2(float2, float2);\n" "float3 __ovld __cnfn atan2(float3, float3);\n" "float4 __ovld __cnfn atan2(float4, float4);\n" "float8 __ovld __cnfn atan2(float8, float8);\n" "float16 __ovld __cnfn atan2(float16, float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn atan2(double, double);\n" "double2 __ovld __cnfn atan2(double2, double2);\n" "double3 __ovld __cnfn atan2(double3, double3);\n" "double4 __ovld __cnfn atan2(double4, double4);\n" "double8 __ovld __cnfn atan2(double8, double8);\n" "double16 __ovld __cnfn atan2(double16, double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn atan2(half, half);\n" "half2 __ovld __cnfn atan2(half2, half2);\n" "half3 __ovld __cnfn atan2(half3, half3);\n" "half4 __ovld __cnfn atan2(half4, half4);\n" "half8 __ovld __cnfn atan2(half8, half8);\n" 
"half16 __ovld __cnfn atan2(half16, half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Hyperbolic arc tangent.\n" " */\n" "float __ovld __cnfn atanh(float);\n" "float2 __ovld __cnfn atanh(float2);\n" "float3 __ovld __cnfn atanh(float3);\n" "float4 __ovld __cnfn atanh(float4);\n" "float8 __ovld __cnfn atanh(float8);\n" "float16 __ovld __cnfn atanh(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn atanh(double);\n" "double2 __ovld __cnfn atanh(double2);\n" "double3 __ovld __cnfn atanh(double3);\n" "double4 __ovld __cnfn atanh(double4);\n" "double8 __ovld __cnfn atanh(double8);\n" "double16 __ovld __cnfn atanh(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn atanh(half);\n" "half2 __ovld __cnfn atanh(half2);\n" "half3 __ovld __cnfn atanh(half3);\n" "half4 __ovld __cnfn atanh(half4);\n" "half8 __ovld __cnfn atanh(half8);\n" "half16 __ovld __cnfn atanh(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Compute atan (x) / PI.\n" " */\n" "float __ovld __cnfn atanpi(float);\n" "float2 __ovld __cnfn atanpi(float2);\n" "float3 __ovld __cnfn atanpi(float3);\n" "float4 __ovld __cnfn atanpi(float4);\n" "float8 __ovld __cnfn atanpi(float8);\n" "float16 __ovld __cnfn atanpi(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn atanpi(double);\n" "double2 __ovld __cnfn atanpi(double2);\n" "double3 __ovld __cnfn atanpi(double3);\n" "double4 __ovld __cnfn atanpi(double4);\n" "double8 __ovld __cnfn atanpi(double8);\n" "double16 __ovld __cnfn atanpi(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn atanpi(half);\n" "half2 __ovld __cnfn atanpi(half2);\n" "half3 __ovld __cnfn atanpi(half3);\n" "half4 __ovld __cnfn atanpi(half4);\n" "half8 __ovld __cnfn atanpi(half8);\n" "half16 __ovld __cnfn atanpi(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Compute atan2 (y, x) / PI.\n" " */\n" "float __ovld __cnfn atan2pi(float, float);\n" "float2 __ovld __cnfn atan2pi(float2, float2);\n" "float3 __ovld __cnfn 
atan2pi(float3, float3);\n" "float4 __ovld __cnfn atan2pi(float4, float4);\n" "float8 __ovld __cnfn atan2pi(float8, float8);\n" "float16 __ovld __cnfn atan2pi(float16, float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn atan2pi(double, double);\n" "double2 __ovld __cnfn atan2pi(double2, double2);\n" "double3 __ovld __cnfn atan2pi(double3, double3);\n" "double4 __ovld __cnfn atan2pi(double4, double4);\n" "double8 __ovld __cnfn atan2pi(double8, double8);\n" "double16 __ovld __cnfn atan2pi(double16, double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn atan2pi(half, half);\n" "half2 __ovld __cnfn atan2pi(half2, half2);\n" "half3 __ovld __cnfn atan2pi(half3, half3);\n" "half4 __ovld __cnfn atan2pi(half4, half4);\n" "half8 __ovld __cnfn atan2pi(half8, half8);\n" "half16 __ovld __cnfn atan2pi(half16, half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Compute cube-root.\n" " */\n" "float __ovld __cnfn cbrt(float);\n" "float2 __ovld __cnfn cbrt(float2);\n" "float3 __ovld __cnfn cbrt(float3);\n" "float4 __ovld __cnfn cbrt(float4);\n" "float8 __ovld __cnfn cbrt(float8);\n" "float16 __ovld __cnfn cbrt(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn cbrt(double);\n" "double2 __ovld __cnfn cbrt(double2);\n" "double3 __ovld __cnfn cbrt(double3);\n" "double4 __ovld __cnfn cbrt(double4);\n" "double8 __ovld __cnfn cbrt(double8);\n" "double16 __ovld __cnfn cbrt(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn cbrt(half);\n" "half2 __ovld __cnfn cbrt(half2);\n" "half3 __ovld __cnfn cbrt(half3);\n" "half4 __ovld __cnfn cbrt(half4);\n" "half8 __ovld __cnfn cbrt(half8);\n" "half16 __ovld __cnfn cbrt(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Round to integral value using the round to positive\n" " * infinity rounding mode.\n" " */\n" "float __ovld __cnfn ceil(float);\n" "float2 __ovld __cnfn ceil(float2);\n" "float3 __ovld __cnfn ceil(float3);\n" "float4 __ovld __cnfn ceil(float4);\n" "float8 
__ovld __cnfn ceil(float8);\n" "float16 __ovld __cnfn ceil(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn ceil(double);\n" "double2 __ovld __cnfn ceil(double2);\n" "double3 __ovld __cnfn ceil(double3);\n" "double4 __ovld __cnfn ceil(double4);\n" "double8 __ovld __cnfn ceil(double8);\n" "double16 __ovld __cnfn ceil(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn ceil(half);\n" "half2 __ovld __cnfn ceil(half2);\n" "half3 __ovld __cnfn ceil(half3);\n" "half4 __ovld __cnfn ceil(half4);\n" "half8 __ovld __cnfn ceil(half8);\n" "half16 __ovld __cnfn ceil(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Returns x with its sign changed to match the sign of y.\n" " */\n" "float __ovld __cnfn copysign(float, float);\n" "float2 __ovld __cnfn copysign(float2, float2);\n" "float3 __ovld __cnfn copysign(float3, float3);\n" "float4 __ovld __cnfn copysign(float4, float4);\n" "float8 __ovld __cnfn copysign(float8, float8);\n" "float16 __ovld __cnfn copysign(float16, float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn copysign(double, double);\n" "double2 __ovld __cnfn copysign(double2, double2);\n" "double3 __ovld __cnfn copysign(double3, double3);\n" "double4 __ovld __cnfn copysign(double4, double4);\n" "double8 __ovld __cnfn copysign(double8, double8);\n" "double16 __ovld __cnfn copysign(double16, double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn copysign(half, half);\n" "half2 __ovld __cnfn copysign(half2, half2);\n" "half3 __ovld __cnfn copysign(half3, half3);\n" "half4 __ovld __cnfn copysign(half4, half4);\n" "half8 __ovld __cnfn copysign(half8, half8);\n" "half16 __ovld __cnfn copysign(half16, half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Compute cosine.\n" " */\n" "float __ovld __cnfn cos(float);\n" "float2 __ovld __cnfn cos(float2);\n" "float3 __ovld __cnfn cos(float3);\n" "float4 __ovld __cnfn cos(float4);\n" "float8 __ovld __cnfn cos(float8);\n" "float16 __ovld __cnfn 
cos(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn cos(double);\n" "double2 __ovld __cnfn cos(double2);\n" "double3 __ovld __cnfn cos(double3);\n" "double4 __ovld __cnfn cos(double4);\n" "double8 __ovld __cnfn cos(double8);\n" "double16 __ovld __cnfn cos(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn cos(half);\n" "half2 __ovld __cnfn cos(half2);\n" "half3 __ovld __cnfn cos(half3);\n" "half4 __ovld __cnfn cos(half4);\n" "half8 __ovld __cnfn cos(half8);\n" "half16 __ovld __cnfn cos(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Compute hyperbolic cosine.\n" " */\n" "float __ovld __cnfn cosh(float);\n" "float2 __ovld __cnfn cosh(float2);\n" "float3 __ovld __cnfn cosh(float3);\n" "float4 __ovld __cnfn cosh(float4);\n" "float8 __ovld __cnfn cosh(float8);\n" "float16 __ovld __cnfn cosh(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn cosh(double);\n" "double2 __ovld __cnfn cosh(double2);\n" "double3 __ovld __cnfn cosh(double3);\n" "double4 __ovld __cnfn cosh(double4);\n" "double8 __ovld __cnfn cosh(double8);\n" "double16 __ovld __cnfn cosh(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn cosh(half);\n" "half2 __ovld __cnfn cosh(half2);\n" "half3 __ovld __cnfn cosh(half3);\n" "half4 __ovld __cnfn cosh(half4);\n" "half8 __ovld __cnfn cosh(half8);\n" "half16 __ovld __cnfn cosh(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Compute cos (PI * x).\n" " */\n" "float __ovld __cnfn cospi(float);\n" "float2 __ovld __cnfn cospi(float2);\n" "float3 __ovld __cnfn cospi(float3);\n" "float4 __ovld __cnfn cospi(float4);\n" "float8 __ovld __cnfn cospi(float8);\n" "float16 __ovld __cnfn cospi(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn cospi(double);\n" "double2 __ovld __cnfn cospi(double2);\n" "double3 __ovld __cnfn cospi(double3);\n" "double4 __ovld __cnfn cospi(double4);\n" "double8 __ovld __cnfn cospi(double8);\n" "double16 __ovld __cnfn cospi(double16);\n" "#endif 
//cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn cospi(half);\n" "half2 __ovld __cnfn cospi(half2);\n" "half3 __ovld __cnfn cospi(half3);\n" "half4 __ovld __cnfn cospi(half4);\n" "half8 __ovld __cnfn cospi(half8);\n" "half16 __ovld __cnfn cospi(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Complementary error function.\n" " */\n" "float __ovld __cnfn erfc(float);\n" "float2 __ovld __cnfn erfc(float2);\n" "float3 __ovld __cnfn erfc(float3);\n" "float4 __ovld __cnfn erfc(float4);\n" "float8 __ovld __cnfn erfc(float8);\n" "float16 __ovld __cnfn erfc(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn erfc(double);\n" "double2 __ovld __cnfn erfc(double2);\n" "double3 __ovld __cnfn erfc(double3);\n" "double4 __ovld __cnfn erfc(double4);\n" "double8 __ovld __cnfn erfc(double8);\n" "double16 __ovld __cnfn erfc(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn erfc(half);\n" "half2 __ovld __cnfn erfc(half2);\n" "half3 __ovld __cnfn erfc(half3);\n" "half4 __ovld __cnfn erfc(half4);\n" "half8 __ovld __cnfn erfc(half8);\n" "half16 __ovld __cnfn erfc(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Error function encountered in integrating the\n" " * normal distribution.\n" " */\n" "float __ovld __cnfn erf(float);\n" "float2 __ovld __cnfn erf(float2);\n" "float3 __ovld __cnfn erf(float3);\n" "float4 __ovld __cnfn erf(float4);\n" "float8 __ovld __cnfn erf(float8);\n" "float16 __ovld __cnfn erf(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn erf(double);\n" "double2 __ovld __cnfn erf(double2);\n" "double3 __ovld __cnfn erf(double3);\n" "double4 __ovld __cnfn erf(double4);\n" "double8 __ovld __cnfn erf(double8);\n" "double16 __ovld __cnfn erf(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn erf(half);\n" "half2 __ovld __cnfn erf(half2);\n" "half3 __ovld __cnfn erf(half3);\n" "half4 __ovld __cnfn erf(half4);\n" "half8 __ovld __cnfn erf(half8);\n" "half16 __ovld __cnfn 
erf(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Compute the base e exponential function of x.\n" " */\n" "float __ovld __cnfn exp(float);\n" "float2 __ovld __cnfn exp(float2);\n" "float3 __ovld __cnfn exp(float3);\n" "float4 __ovld __cnfn exp(float4);\n" "float8 __ovld __cnfn exp(float8);\n" "float16 __ovld __cnfn exp(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn exp(double);\n" "double2 __ovld __cnfn exp(double2);\n" "double3 __ovld __cnfn exp(double3);\n" "double4 __ovld __cnfn exp(double4);\n" "double8 __ovld __cnfn exp(double8);\n" "double16 __ovld __cnfn exp(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn exp(half);\n" "half2 __ovld __cnfn exp(half2);\n" "half3 __ovld __cnfn exp(half3);\n" "half4 __ovld __cnfn exp(half4);\n" "half8 __ovld __cnfn exp(half8);\n" "half16 __ovld __cnfn exp(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Exponential base 2 function.\n" " */\n" "float __ovld __cnfn exp2(float);\n" "float2 __ovld __cnfn exp2(float2);\n" "float3 __ovld __cnfn exp2(float3);\n" "float4 __ovld __cnfn exp2(float4);\n" "float8 __ovld __cnfn exp2(float8);\n" "float16 __ovld __cnfn exp2(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn exp2(double);\n" "double2 __ovld __cnfn exp2(double2);\n" "double3 __ovld __cnfn exp2(double3);\n" "double4 __ovld __cnfn exp2(double4);\n" "double8 __ovld __cnfn exp2(double8);\n" "double16 __ovld __cnfn exp2(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn exp2(half);\n" "half2 __ovld __cnfn exp2(half2);\n" "half3 __ovld __cnfn exp2(half3);\n" "half4 __ovld __cnfn exp2(half4);\n" "half8 __ovld __cnfn exp2(half8);\n" "half16 __ovld __cnfn exp2(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Exponential base 10 function.\n" " */\n" "float __ovld __cnfn exp10(float);\n" "float2 __ovld __cnfn exp10(float2);\n" "float3 __ovld __cnfn exp10(float3);\n" "float4 __ovld __cnfn exp10(float4);\n" "float8 __ovld __cnfn 
exp10(float8);\n" "float16 __ovld __cnfn exp10(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn exp10(double);\n" "double2 __ovld __cnfn exp10(double2);\n" "double3 __ovld __cnfn exp10(double3);\n" "double4 __ovld __cnfn exp10(double4);\n" "double8 __ovld __cnfn exp10(double8);\n" "double16 __ovld __cnfn exp10(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn exp10(half);\n" "half2 __ovld __cnfn exp10(half2);\n" "half3 __ovld __cnfn exp10(half3);\n" "half4 __ovld __cnfn exp10(half4);\n" "half8 __ovld __cnfn exp10(half8);\n" "half16 __ovld __cnfn exp10(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Compute e^x- 1.0.\n" " */\n" "float __ovld __cnfn expm1(float);\n" "float2 __ovld __cnfn expm1(float2);\n" "float3 __ovld __cnfn expm1(float3);\n" "float4 __ovld __cnfn expm1(float4);\n" "float8 __ovld __cnfn expm1(float8);\n" "float16 __ovld __cnfn expm1(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn expm1(double);\n" "double2 __ovld __cnfn expm1(double2);\n" "double3 __ovld __cnfn expm1(double3);\n" "double4 __ovld __cnfn expm1(double4);\n" "double8 __ovld __cnfn expm1(double8);\n" "double16 __ovld __cnfn expm1(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn expm1(half);\n" "half2 __ovld __cnfn expm1(half2);\n" "half3 __ovld __cnfn expm1(half3);\n" "half4 __ovld __cnfn expm1(half4);\n" "half8 __ovld __cnfn expm1(half8);\n" "half16 __ovld __cnfn expm1(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Compute absolute value of a floating-point number.\n" " */\n" "float __ovld __cnfn fabs(float);\n" "float2 __ovld __cnfn fabs(float2);\n" "float3 __ovld __cnfn fabs(float3);\n" "float4 __ovld __cnfn fabs(float4);\n" "float8 __ovld __cnfn fabs(float8);\n" "float16 __ovld __cnfn fabs(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn fabs(double);\n" "double2 __ovld __cnfn fabs(double2);\n" "double3 __ovld __cnfn fabs(double3);\n" "double4 __ovld __cnfn fabs(double4);\n" 
"double8 __ovld __cnfn fabs(double8);\n" "double16 __ovld __cnfn fabs(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn fabs(half);\n" "half2 __ovld __cnfn fabs(half2);\n" "half3 __ovld __cnfn fabs(half3);\n" "half4 __ovld __cnfn fabs(half4);\n" "half8 __ovld __cnfn fabs(half8);\n" "half16 __ovld __cnfn fabs(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * x - y if x > y, +0 if x is less than or equal to y.\n" " */\n" "float __ovld __cnfn fdim(float, float);\n" "float2 __ovld __cnfn fdim(float2, float2);\n" "float3 __ovld __cnfn fdim(float3, float3);\n" "float4 __ovld __cnfn fdim(float4, float4);\n" "float8 __ovld __cnfn fdim(float8, float8);\n" "float16 __ovld __cnfn fdim(float16, float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn fdim(double, double);\n" "double2 __ovld __cnfn fdim(double2, double2);\n" "double3 __ovld __cnfn fdim(double3, double3);\n" "double4 __ovld __cnfn fdim(double4, double4);\n" "double8 __ovld __cnfn fdim(double8, double8);\n" "double16 __ovld __cnfn fdim(double16, double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn fdim(half, half);\n" "half2 __ovld __cnfn fdim(half2, half2);\n" "half3 __ovld __cnfn fdim(half3, half3);\n" "half4 __ovld __cnfn fdim(half4, half4);\n" "half8 __ovld __cnfn fdim(half8, half8);\n" "half16 __ovld __cnfn fdim(half16, half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Round to integral value using the round to -ve\n" " * infinity rounding mode.\n" " */\n" "float __ovld __cnfn floor(float);\n" "float2 __ovld __cnfn floor(float2);\n" "float3 __ovld __cnfn floor(float3);\n" "float4 __ovld __cnfn floor(float4);\n" "float8 __ovld __cnfn floor(float8);\n" "float16 __ovld __cnfn floor(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn floor(double);\n" "double2 __ovld __cnfn floor(double2);\n" "double3 __ovld __cnfn floor(double3);\n" "double4 __ovld __cnfn floor(double4);\n" "double8 __ovld __cnfn floor(double8);\n" "double16 __ovld 
__cnfn floor(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn floor(half);\n" "half2 __ovld __cnfn floor(half2);\n" "half3 __ovld __cnfn floor(half3);\n" "half4 __ovld __cnfn floor(half4);\n" "half8 __ovld __cnfn floor(half8);\n" "half16 __ovld __cnfn floor(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Returns the correctly rounded floating-point\n" " * representation of the sum of c with the infinitely\n" " * precise product of a and b. Rounding of\n" " * intermediate products shall not occur. Edge case\n" " * behavior is per the IEEE 754-2008 standard.\n" " */\n" "float __ovld __cnfn fma(float, float, float);\n" "float2 __ovld __cnfn fma(float2, float2, float2);\n" "float3 __ovld __cnfn fma(float3, float3, float3);\n" "float4 __ovld __cnfn fma(float4, float4, float4);\n" "float8 __ovld __cnfn fma(float8, float8, float8);\n" "float16 __ovld __cnfn fma(float16, float16, float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn fma(double, double, double);\n" "double2 __ovld __cnfn fma(double2, double2, double2);\n" "double3 __ovld __cnfn fma(double3, double3, double3);\n" "double4 __ovld __cnfn fma(double4, double4, double4);\n" "double8 __ovld __cnfn fma(double8, double8, double8);\n" "double16 __ovld __cnfn fma(double16, double16, double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn fma(half, half, half);\n" "half2 __ovld __cnfn fma(half2, half2, half2);\n" "half3 __ovld __cnfn fma(half3, half3, half3);\n" "half4 __ovld __cnfn fma(half4, half4, half4);\n" "half8 __ovld __cnfn fma(half8, half8, half8);\n" "half16 __ovld __cnfn fma(half16, half16, half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Returns y if x < y, otherwise it returns x. If one\n" " * argument is a NaN, fmax() returns the other\n" " * argument. 
If both arguments are NaNs, fmax()\n" " * returns a NaN.\n" " */\n" "float __ovld __cnfn fmax(float, float);\n" "float2 __ovld __cnfn fmax(float2, float2);\n" "float3 __ovld __cnfn fmax(float3, float3);\n" "float4 __ovld __cnfn fmax(float4, float4);\n" "float8 __ovld __cnfn fmax(float8, float8);\n" "float16 __ovld __cnfn fmax(float16, float16);\n" "float2 __ovld __cnfn fmax(float2, float);\n" "float3 __ovld __cnfn fmax(float3, float);\n" "float4 __ovld __cnfn fmax(float4, float);\n" "float8 __ovld __cnfn fmax(float8, float);\n" "float16 __ovld __cnfn fmax(float16, float);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn fmax(double, double);\n" "double2 __ovld __cnfn fmax(double2, double2);\n" "double3 __ovld __cnfn fmax(double3, double3);\n" "double4 __ovld __cnfn fmax(double4, double4);\n" "double8 __ovld __cnfn fmax(double8, double8);\n" "double16 __ovld __cnfn fmax(double16, double16);\n" "double2 __ovld __cnfn fmax(double2, double);\n" "double3 __ovld __cnfn fmax(double3, double);\n" "double4 __ovld __cnfn fmax(double4, double);\n" "double8 __ovld __cnfn fmax(double8, double);\n" "double16 __ovld __cnfn fmax(double16, double);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn fmax(half, half);\n" "half2 __ovld __cnfn fmax(half2, half2);\n" "half3 __ovld __cnfn fmax(half3, half3);\n" "half4 __ovld __cnfn fmax(half4, half4);\n" "half8 __ovld __cnfn fmax(half8, half8);\n" "half16 __ovld __cnfn fmax(half16, half16);\n" "half2 __ovld __cnfn fmax(half2, half);\n" "half3 __ovld __cnfn fmax(half3, half);\n" "half4 __ovld __cnfn fmax(half4, half);\n" "half8 __ovld __cnfn fmax(half8, half);\n" "half16 __ovld __cnfn fmax(half16, half);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Returns y if y < x, otherwise it returns x. If one\n" " * argument is a NaN, fmin() returns the other\n" " * argument. 
If both arguments are NaNs, fmin()\n" " * returns a NaN.\n" " */\n" "float __ovld __cnfn fmin(float, float);\n" "float2 __ovld __cnfn fmin(float2, float2);\n" "float3 __ovld __cnfn fmin(float3, float3);\n" "float4 __ovld __cnfn fmin(float4, float4);\n" "float8 __ovld __cnfn fmin(float8, float8);\n" "float16 __ovld __cnfn fmin(float16, float16);\n" "float2 __ovld __cnfn fmin(float2, float);\n" "float3 __ovld __cnfn fmin(float3, float);\n" "float4 __ovld __cnfn fmin(float4, float);\n" "float8 __ovld __cnfn fmin(float8, float);\n" "float16 __ovld __cnfn fmin(float16, float);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn fmin(double, double);\n" "double2 __ovld __cnfn fmin(double2, double2);\n" "double3 __ovld __cnfn fmin(double3, double3);\n" "double4 __ovld __cnfn fmin(double4, double4);\n" "double8 __ovld __cnfn fmin(double8, double8);\n" "double16 __ovld __cnfn fmin(double16, double16);\n" "double2 __ovld __cnfn fmin(double2, double);\n" "double3 __ovld __cnfn fmin(double3, double);\n" "double4 __ovld __cnfn fmin(double4, double);\n" "double8 __ovld __cnfn fmin(double8, double);\n" "double16 __ovld __cnfn fmin(double16, double);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn fmin(half, half);\n" "half2 __ovld __cnfn fmin(half2, half2);\n" "half3 __ovld __cnfn fmin(half3, half3);\n" "half4 __ovld __cnfn fmin(half4, half4);\n" "half8 __ovld __cnfn fmin(half8, half8);\n" "half16 __ovld __cnfn fmin(half16, half16);\n" "half2 __ovld __cnfn fmin(half2, half);\n" "half3 __ovld __cnfn fmin(half3, half);\n" "half4 __ovld __cnfn fmin(half4, half);\n" "half8 __ovld __cnfn fmin(half8, half);\n" "half16 __ovld __cnfn fmin(half16, half);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Modulus. 
Returns x - y * trunc (x/y).\n" " */\n" "float __ovld __cnfn fmod(float, float);\n" "float2 __ovld __cnfn fmod(float2, float2);\n" "float3 __ovld __cnfn fmod(float3, float3);\n" "float4 __ovld __cnfn fmod(float4, float4);\n" "float8 __ovld __cnfn fmod(float8, float8);\n" "float16 __ovld __cnfn fmod(float16, float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn fmod(double, double);\n" "double2 __ovld __cnfn fmod(double2, double2);\n" "double3 __ovld __cnfn fmod(double3, double3);\n" "double4 __ovld __cnfn fmod(double4, double4);\n" "double8 __ovld __cnfn fmod(double8, double8);\n" "double16 __ovld __cnfn fmod(double16, double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn fmod(half, half);\n" "half2 __ovld __cnfn fmod(half2, half2);\n" "half3 __ovld __cnfn fmod(half3, half3);\n" "half4 __ovld __cnfn fmod(half4, half4);\n" "half8 __ovld __cnfn fmod(half8, half8);\n" "half16 __ovld __cnfn fmod(half16, half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Returns fmin(x - floor (x), 0x1.fffffep-1f ).\n" " * floor(x) is returned in iptr.\n" " */\n" "#if defined(__opencl_c_generic_address_space)\n" "float __ovld fract(float, float *);\n" "float2 __ovld fract(float2, float2 *);\n" "float3 __ovld fract(float3, float3 *);\n" "float4 __ovld fract(float4, float4 *);\n" "float8 __ovld fract(float8, float8 *);\n" "float16 __ovld fract(float16, float16 *);\n" "#ifdef cl_khr_fp64\n" "double __ovld fract(double, double *);\n" "double2 __ovld fract(double2, double2 *);\n" "double3 __ovld fract(double3, double3 *);\n" "double4 __ovld fract(double4, double4 *);\n" "double8 __ovld fract(double8, double8 *);\n" "double16 __ovld fract(double16, double16 *);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld fract(half, half *);\n" "half2 __ovld fract(half2, half2 *);\n" "half3 __ovld fract(half3, half3 *);\n" "half4 __ovld fract(half4, half4 *);\n" "half8 __ovld fract(half8, half8 *);\n" "half16 __ovld fract(half16, half16 *);\n" "#endif 
//cl_khr_fp16\n" "#endif //defined(__opencl_c_generic_address_space)\n" "\n" "#if defined(__opencl_c_named_address_space_builtins)\n" "float __ovld fract(float, __global float *);\n" "float2 __ovld fract(float2, __global float2 *);\n" "float3 __ovld fract(float3, __global float3 *);\n" "float4 __ovld fract(float4, __global float4 *);\n" "float8 __ovld fract(float8, __global float8 *);\n" "float16 __ovld fract(float16, __global float16 *);\n" "float __ovld fract(float, __local float *);\n" "float2 __ovld fract(float2, __local float2 *);\n" "float3 __ovld fract(float3, __local float3 *);\n" "float4 __ovld fract(float4, __local float4 *);\n" "float8 __ovld fract(float8, __local float8 *);\n" "float16 __ovld fract(float16, __local float16 *);\n" "float __ovld fract(float, __private float *);\n" "float2 __ovld fract(float2, __private float2 *);\n" "float3 __ovld fract(float3, __private float3 *);\n" "float4 __ovld fract(float4, __private float4 *);\n" "float8 __ovld fract(float8, __private float8 *);\n" "float16 __ovld fract(float16, __private float16 *);\n" "#ifdef cl_khr_fp64\n" "double __ovld fract(double, __global double *);\n" "double2 __ovld fract(double2, __global double2 *);\n" "double3 __ovld fract(double3, __global double3 *);\n" "double4 __ovld fract(double4, __global double4 *);\n" "double8 __ovld fract(double8, __global double8 *);\n" "double16 __ovld fract(double16, __global double16 *);\n" "double __ovld fract(double, __local double *);\n" "double2 __ovld fract(double2, __local double2 *);\n" "double3 __ovld fract(double3, __local double3 *);\n" "double4 __ovld fract(double4, __local double4 *);\n" "double8 __ovld fract(double8, __local double8 *);\n" "double16 __ovld fract(double16, __local double16 *);\n" "double __ovld fract(double, __private double *);\n" "double2 __ovld fract(double2, __private double2 *);\n" "double3 __ovld fract(double3, __private double3 *);\n" "double4 __ovld fract(double4, __private double4 *);\n" "double8 __ovld fract(double8, 
__private double8 *);\n" "double16 __ovld fract(double16, __private double16 *);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld fract(half, __global half *);\n" "half2 __ovld fract(half2, __global half2 *);\n" "half3 __ovld fract(half3, __global half3 *);\n" "half4 __ovld fract(half4, __global half4 *);\n" "half8 __ovld fract(half8, __global half8 *);\n" "half16 __ovld fract(half16, __global half16 *);\n" "half __ovld fract(half, __local half *);\n" "half2 __ovld fract(half2, __local half2 *);\n" "half3 __ovld fract(half3, __local half3 *);\n" "half4 __ovld fract(half4, __local half4 *);\n" "half8 __ovld fract(half8, __local half8 *);\n" "half16 __ovld fract(half16, __local half16 *);\n" "half __ovld fract(half, __private half *);\n" "half2 __ovld fract(half2, __private half2 *);\n" "half3 __ovld fract(half3, __private half3 *);\n" "half4 __ovld fract(half4, __private half4 *);\n" "half8 __ovld fract(half8, __private half8 *);\n" "half16 __ovld fract(half16, __private half16 *);\n" "#endif //cl_khr_fp16\n" "#endif //defined(__opencl_c_named_address_space_builtins)\n" "\n" "/**\n" " * Extract mantissa and exponent from x. For each\n" " * component the mantissa returned is a float with\n" " * magnitude in the interval [1/2, 1) or 0. 
Each\n" " * component of x equals mantissa returned * 2^exp.\n" " */\n" "#if defined(__opencl_c_generic_address_space)\n" "float __ovld frexp(float, int *);\n" "float2 __ovld frexp(float2, int2 *);\n" "float3 __ovld frexp(float3, int3 *);\n" "float4 __ovld frexp(float4, int4 *);\n" "float8 __ovld frexp(float8, int8 *);\n" "float16 __ovld frexp(float16, int16 *);\n" "#ifdef cl_khr_fp64\n" "double __ovld frexp(double, int *);\n" "double2 __ovld frexp(double2, int2 *);\n" "double3 __ovld frexp(double3, int3 *);\n" "double4 __ovld frexp(double4, int4 *);\n" "double8 __ovld frexp(double8, int8 *);\n" "double16 __ovld frexp(double16, int16 *);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld frexp(half, int *);\n" "half2 __ovld frexp(half2, int2 *);\n" "half3 __ovld frexp(half3, int3 *);\n" "half4 __ovld frexp(half4, int4 *);\n" "half8 __ovld frexp(half8, int8 *);\n" "half16 __ovld frexp(half16, int16 *);\n" "#endif //cl_khr_fp16\n" "#endif //defined(__opencl_c_generic_address_space)\n" "\n" "#if defined(__opencl_c_named_address_space_builtins)\n" "float __ovld frexp(float, __global int *);\n" "float2 __ovld frexp(float2, __global int2 *);\n" "float3 __ovld frexp(float3, __global int3 *);\n" "float4 __ovld frexp(float4, __global int4 *);\n" "float8 __ovld frexp(float8, __global int8 *);\n" "float16 __ovld frexp(float16, __global int16 *);\n" "float __ovld frexp(float, __local int *);\n" "float2 __ovld frexp(float2, __local int2 *);\n" "float3 __ovld frexp(float3, __local int3 *);\n" "float4 __ovld frexp(float4, __local int4 *);\n" "float8 __ovld frexp(float8, __local int8 *);\n" "float16 __ovld frexp(float16, __local int16 *);\n" "float __ovld frexp(float, __private int *);\n" "float2 __ovld frexp(float2, __private int2 *);\n" "float3 __ovld frexp(float3, __private int3 *);\n" "float4 __ovld frexp(float4, __private int4 *);\n" "float8 __ovld frexp(float8, __private int8 *);\n" "float16 __ovld frexp(float16, __private int16 *);\n" "#ifdef cl_khr_fp64\n" 
"double __ovld frexp(double, __global int *);\n" "double2 __ovld frexp(double2, __global int2 *);\n" "double3 __ovld frexp(double3, __global int3 *);\n" "double4 __ovld frexp(double4, __global int4 *);\n" "double8 __ovld frexp(double8, __global int8 *);\n" "double16 __ovld frexp(double16, __global int16 *);\n" "double __ovld frexp(double, __local int *);\n" "double2 __ovld frexp(double2, __local int2 *);\n" "double3 __ovld frexp(double3, __local int3 *);\n" "double4 __ovld frexp(double4, __local int4 *);\n" "double8 __ovld frexp(double8, __local int8 *);\n" "double16 __ovld frexp(double16, __local int16 *);\n" "double __ovld frexp(double, __private int *);\n" "double2 __ovld frexp(double2, __private int2 *);\n" "double3 __ovld frexp(double3, __private int3 *);\n" "double4 __ovld frexp(double4, __private int4 *);\n" "double8 __ovld frexp(double8, __private int8 *);\n" "double16 __ovld frexp(double16, __private int16 *);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld frexp(half, __global int *);\n" "half2 __ovld frexp(half2, __global int2 *);\n" "half3 __ovld frexp(half3, __global int3 *);\n" "half4 __ovld frexp(half4, __global int4 *);\n" "half8 __ovld frexp(half8, __global int8 *);\n" "half16 __ovld frexp(half16, __global int16 *);\n" "half __ovld frexp(half, __local int *);\n" "half2 __ovld frexp(half2, __local int2 *);\n" "half3 __ovld frexp(half3, __local int3 *);\n" "half4 __ovld frexp(half4, __local int4 *);\n" "half8 __ovld frexp(half8, __local int8 *);\n" "half16 __ovld frexp(half16, __local int16 *);\n" "half __ovld frexp(half, __private int *);\n" "half2 __ovld frexp(half2, __private int2 *);\n" "half3 __ovld frexp(half3, __private int3 *);\n" "half4 __ovld frexp(half4, __private int4 *);\n" "half8 __ovld frexp(half8, __private int8 *);\n" "half16 __ovld frexp(half16, __private int16 *);\n" "#endif //cl_khr_fp16\n" "#endif //defined(__opencl_c_named_address_space_builtins)\n" "\n" "/**\n" " * Compute the value of the square root of x^2 + 
y^2\n" " * without undue overflow or underflow.\n" " */\n" "float __ovld __cnfn hypot(float, float);\n" "float2 __ovld __cnfn hypot(float2, float2);\n" "float3 __ovld __cnfn hypot(float3, float3);\n" "float4 __ovld __cnfn hypot(float4, float4);\n" "float8 __ovld __cnfn hypot(float8, float8);\n" "float16 __ovld __cnfn hypot(float16, float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn hypot(double, double);\n" "double2 __ovld __cnfn hypot(double2, double2);\n" "double3 __ovld __cnfn hypot(double3, double3);\n" "double4 __ovld __cnfn hypot(double4, double4);\n" "double8 __ovld __cnfn hypot(double8, double8);\n" "double16 __ovld __cnfn hypot(double16, double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn hypot(half, half);\n" "half2 __ovld __cnfn hypot(half2, half2);\n" "half3 __ovld __cnfn hypot(half3, half3);\n" "half4 __ovld __cnfn hypot(half4, half4);\n" "half8 __ovld __cnfn hypot(half8, half8);\n" "half16 __ovld __cnfn hypot(half16, half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Return the exponent as an integer value.\n" " */\n" "int __ovld __cnfn ilogb(float);\n" "int2 __ovld __cnfn ilogb(float2);\n" "int3 __ovld __cnfn ilogb(float3);\n" "int4 __ovld __cnfn ilogb(float4);\n" "int8 __ovld __cnfn ilogb(float8);\n" "int16 __ovld __cnfn ilogb(float16);\n" "#ifdef cl_khr_fp64\n" "int __ovld __cnfn ilogb(double);\n" "int2 __ovld __cnfn ilogb(double2);\n" "int3 __ovld __cnfn ilogb(double3);\n" "int4 __ovld __cnfn ilogb(double4);\n" "int8 __ovld __cnfn ilogb(double8);\n" "int16 __ovld __cnfn ilogb(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "int __ovld __cnfn ilogb(half);\n" "int2 __ovld __cnfn ilogb(half2);\n" "int3 __ovld __cnfn ilogb(half3);\n" "int4 __ovld __cnfn ilogb(half4);\n" "int8 __ovld __cnfn ilogb(half8);\n" "int16 __ovld __cnfn ilogb(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Multiply x by 2 to the power n.\n" " */\n" "float __ovld __cnfn ldexp(float, int);\n" "float2 __ovld __cnfn 
ldexp(float2, int2);\n" "float3 __ovld __cnfn ldexp(float3, int3);\n" "float4 __ovld __cnfn ldexp(float4, int4);\n" "float8 __ovld __cnfn ldexp(float8, int8);\n" "float16 __ovld __cnfn ldexp(float16, int16);\n" "float2 __ovld __cnfn ldexp(float2, int);\n" "float3 __ovld __cnfn ldexp(float3, int);\n" "float4 __ovld __cnfn ldexp(float4, int);\n" "float8 __ovld __cnfn ldexp(float8, int);\n" "float16 __ovld __cnfn ldexp(float16, int);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn ldexp(double, int);\n" "double2 __ovld __cnfn ldexp(double2, int2);\n" "double3 __ovld __cnfn ldexp(double3, int3);\n" "double4 __ovld __cnfn ldexp(double4, int4);\n" "double8 __ovld __cnfn ldexp(double8, int8);\n" "double16 __ovld __cnfn ldexp(double16, int16);\n" "double2 __ovld __cnfn ldexp(double2, int);\n" "double3 __ovld __cnfn ldexp(double3, int);\n" "double4 __ovld __cnfn ldexp(double4, int);\n" "double8 __ovld __cnfn ldexp(double8, int);\n" "double16 __ovld __cnfn ldexp(double16, int);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn ldexp(half, int);\n" "half2 __ovld __cnfn ldexp(half2, int2);\n" "half3 __ovld __cnfn ldexp(half3, int3);\n" "half4 __ovld __cnfn ldexp(half4, int4);\n" "half8 __ovld __cnfn ldexp(half8, int8);\n" "half16 __ovld __cnfn ldexp(half16, int16);\n" "half2 __ovld __cnfn ldexp(half2, int);\n" "half3 __ovld __cnfn ldexp(half3, int);\n" "half4 __ovld __cnfn ldexp(half4, int);\n" "half8 __ovld __cnfn ldexp(half8, int);\n" "half16 __ovld __cnfn ldexp(half16, int);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Log gamma function. Returns the natural\n" " * logarithm of the absolute value of the gamma\n" " * function. 
The sign of the gamma function is\n" " * returned in the signp argument of lgamma_r.\n" " */\n" "float __ovld __cnfn lgamma(float);\n" "float2 __ovld __cnfn lgamma(float2);\n" "float3 __ovld __cnfn lgamma(float3);\n" "float4 __ovld __cnfn lgamma(float4);\n" "float8 __ovld __cnfn lgamma(float8);\n" "float16 __ovld __cnfn lgamma(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn lgamma(double);\n" "double2 __ovld __cnfn lgamma(double2);\n" "double3 __ovld __cnfn lgamma(double3);\n" "double4 __ovld __cnfn lgamma(double4);\n" "double8 __ovld __cnfn lgamma(double8);\n" "double16 __ovld __cnfn lgamma(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn lgamma(half);\n" "half2 __ovld __cnfn lgamma(half2);\n" "half3 __ovld __cnfn lgamma(half3);\n" "half4 __ovld __cnfn lgamma(half4);\n" "half8 __ovld __cnfn lgamma(half8);\n" "half16 __ovld __cnfn lgamma(half16);\n" "#endif //cl_khr_fp16\n" "\n" "#if defined(__opencl_c_generic_address_space)\n" "float __ovld lgamma_r(float, int *);\n" "float2 __ovld lgamma_r(float2, int2 *);\n" "float3 __ovld lgamma_r(float3, int3 *);\n" "float4 __ovld lgamma_r(float4, int4 *);\n" "float8 __ovld lgamma_r(float8, int8 *);\n" "float16 __ovld lgamma_r(float16, int16 *);\n" "#ifdef cl_khr_fp64\n" "double __ovld lgamma_r(double, int *);\n" "double2 __ovld lgamma_r(double2, int2 *);\n" "double3 __ovld lgamma_r(double3, int3 *);\n" "double4 __ovld lgamma_r(double4, int4 *);\n" "double8 __ovld lgamma_r(double8, int8 *);\n" "double16 __ovld lgamma_r(double16, int16 *);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld lgamma_r(half, int *);\n" "half2 __ovld lgamma_r(half2, int2 *);\n" "half3 __ovld lgamma_r(half3, int3 *);\n" "half4 __ovld lgamma_r(half4, int4 *);\n" "half8 __ovld lgamma_r(half8, int8 *);\n" "half16 __ovld lgamma_r(half16, int16 *);\n" "#endif //cl_khr_fp16\n" "#endif //defined(__opencl_c_generic_address_space)\n" "\n" "#if defined(__opencl_c_named_address_space_builtins)\n" "float 
__ovld lgamma_r(float, __global int *);\n" "float2 __ovld lgamma_r(float2, __global int2 *);\n" "float3 __ovld lgamma_r(float3, __global int3 *);\n" "float4 __ovld lgamma_r(float4, __global int4 *);\n" "float8 __ovld lgamma_r(float8, __global int8 *);\n" "float16 __ovld lgamma_r(float16, __global int16 *);\n" "float __ovld lgamma_r(float, __local int *);\n" "float2 __ovld lgamma_r(float2, __local int2 *);\n" "float3 __ovld lgamma_r(float3, __local int3 *);\n" "float4 __ovld lgamma_r(float4, __local int4 *);\n" "float8 __ovld lgamma_r(float8, __local int8 *);\n" "float16 __ovld lgamma_r(float16, __local int16 *);\n" "float __ovld lgamma_r(float, __private int *);\n" "float2 __ovld lgamma_r(float2, __private int2 *);\n" "float3 __ovld lgamma_r(float3, __private int3 *);\n" "float4 __ovld lgamma_r(float4, __private int4 *);\n" "float8 __ovld lgamma_r(float8, __private int8 *);\n" "float16 __ovld lgamma_r(float16, __private int16 *);\n" "#ifdef cl_khr_fp64\n" "double __ovld lgamma_r(double, __global int *);\n" "double2 __ovld lgamma_r(double2, __global int2 *);\n" "double3 __ovld lgamma_r(double3, __global int3 *);\n" "double4 __ovld lgamma_r(double4, __global int4 *);\n" "double8 __ovld lgamma_r(double8, __global int8 *);\n" "double16 __ovld lgamma_r(double16, __global int16 *);\n" "double __ovld lgamma_r(double, __local int *);\n" "double2 __ovld lgamma_r(double2, __local int2 *);\n" "double3 __ovld lgamma_r(double3, __local int3 *);\n" "double4 __ovld lgamma_r(double4, __local int4 *);\n" "double8 __ovld lgamma_r(double8, __local int8 *);\n" "double16 __ovld lgamma_r(double16, __local int16 *);\n" "double __ovld lgamma_r(double, __private int *);\n" "double2 __ovld lgamma_r(double2, __private int2 *);\n" "double3 __ovld lgamma_r(double3, __private int3 *);\n" "double4 __ovld lgamma_r(double4, __private int4 *);\n" "double8 __ovld lgamma_r(double8, __private int8 *);\n" "double16 __ovld lgamma_r(double16, __private int16 *);\n" "#endif //cl_khr_fp64\n" "#ifdef 
cl_khr_fp16\n" "half __ovld lgamma_r(half, __global int *);\n" "half2 __ovld lgamma_r(half2, __global int2 *);\n" "half3 __ovld lgamma_r(half3, __global int3 *);\n" "half4 __ovld lgamma_r(half4, __global int4 *);\n" "half8 __ovld lgamma_r(half8, __global int8 *);\n" "half16 __ovld lgamma_r(half16, __global int16 *);\n" "half __ovld lgamma_r(half, __local int *);\n" "half2 __ovld lgamma_r(half2, __local int2 *);\n" "half3 __ovld lgamma_r(half3, __local int3 *);\n" "half4 __ovld lgamma_r(half4, __local int4 *);\n" "half8 __ovld lgamma_r(half8, __local int8 *);\n" "half16 __ovld lgamma_r(half16, __local int16 *);\n" "half __ovld lgamma_r(half, __private int *);\n" "half2 __ovld lgamma_r(half2, __private int2 *);\n" "half3 __ovld lgamma_r(half3, __private int3 *);\n" "half4 __ovld lgamma_r(half4, __private int4 *);\n" "half8 __ovld lgamma_r(half8, __private int8 *);\n" "half16 __ovld lgamma_r(half16, __private int16 *);\n" "#endif //cl_khr_fp16\n" "#endif //defined(__opencl_c_named_address_space_builtins)\n" "\n" "/**\n" " * Compute natural logarithm.\n" " */\n" "float __ovld __cnfn log(float);\n" "float2 __ovld __cnfn log(float2);\n" "float3 __ovld __cnfn log(float3);\n" "float4 __ovld __cnfn log(float4);\n" "float8 __ovld __cnfn log(float8);\n" "float16 __ovld __cnfn log(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn log(double);\n" "double2 __ovld __cnfn log(double2);\n" "double3 __ovld __cnfn log(double3);\n" "double4 __ovld __cnfn log(double4);\n" "double8 __ovld __cnfn log(double8);\n" "double16 __ovld __cnfn log(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn log(half);\n" "half2 __ovld __cnfn log(half2);\n" "half3 __ovld __cnfn log(half3);\n" "half4 __ovld __cnfn log(half4);\n" "half8 __ovld __cnfn log(half8);\n" "half16 __ovld __cnfn log(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Compute a base 2 logarithm.\n" " */\n" "float __ovld __cnfn log2(float);\n" "float2 __ovld __cnfn log2(float2);\n" "float3 
__ovld __cnfn log2(float3);\n" "float4 __ovld __cnfn log2(float4);\n" "float8 __ovld __cnfn log2(float8);\n" "float16 __ovld __cnfn log2(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn log2(double);\n" "double2 __ovld __cnfn log2(double2);\n" "double3 __ovld __cnfn log2(double3);\n" "double4 __ovld __cnfn log2(double4);\n" "double8 __ovld __cnfn log2(double8);\n" "double16 __ovld __cnfn log2(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn log2(half);\n" "half2 __ovld __cnfn log2(half2);\n" "half3 __ovld __cnfn log2(half3);\n" "half4 __ovld __cnfn log2(half4);\n" "half8 __ovld __cnfn log2(half8);\n" "half16 __ovld __cnfn log2(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Compute a base 10 logarithm.\n" " */\n" "float __ovld __cnfn log10(float);\n" "float2 __ovld __cnfn log10(float2);\n" "float3 __ovld __cnfn log10(float3);\n" "float4 __ovld __cnfn log10(float4);\n" "float8 __ovld __cnfn log10(float8);\n" "float16 __ovld __cnfn log10(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn log10(double);\n" "double2 __ovld __cnfn log10(double2);\n" "double3 __ovld __cnfn log10(double3);\n" "double4 __ovld __cnfn log10(double4);\n" "double8 __ovld __cnfn log10(double8);\n" "double16 __ovld __cnfn log10(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn log10(half);\n" "half2 __ovld __cnfn log10(half2);\n" "half3 __ovld __cnfn log10(half3);\n" "half4 __ovld __cnfn log10(half4);\n" "half8 __ovld __cnfn log10(half8);\n" "half16 __ovld __cnfn log10(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Compute a base e logarithm of (1.0 + x).\n" " */\n" "float __ovld __cnfn log1p(float);\n" "float2 __ovld __cnfn log1p(float2);\n" "float3 __ovld __cnfn log1p(float3);\n" "float4 __ovld __cnfn log1p(float4);\n" "float8 __ovld __cnfn log1p(float8);\n" "float16 __ovld __cnfn log1p(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn log1p(double);\n" "double2 __ovld __cnfn 
log1p(double2);\n" "double3 __ovld __cnfn log1p(double3);\n" "double4 __ovld __cnfn log1p(double4);\n" "double8 __ovld __cnfn log1p(double8);\n" "double16 __ovld __cnfn log1p(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn log1p(half);\n" "half2 __ovld __cnfn log1p(half2);\n" "half3 __ovld __cnfn log1p(half3);\n" "half4 __ovld __cnfn log1p(half4);\n" "half8 __ovld __cnfn log1p(half8);\n" "half16 __ovld __cnfn log1p(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Compute the exponent of x, which is the integral\n" " * part of logr | x |.\n" " */\n" "float __ovld __cnfn logb(float);\n" "float2 __ovld __cnfn logb(float2);\n" "float3 __ovld __cnfn logb(float3);\n" "float4 __ovld __cnfn logb(float4);\n" "float8 __ovld __cnfn logb(float8);\n" "float16 __ovld __cnfn logb(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn logb(double);\n" "double2 __ovld __cnfn logb(double2);\n" "double3 __ovld __cnfn logb(double3);\n" "double4 __ovld __cnfn logb(double4);\n" "double8 __ovld __cnfn logb(double8);\n" "double16 __ovld __cnfn logb(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn logb(half);\n" "half2 __ovld __cnfn logb(half2);\n" "half3 __ovld __cnfn logb(half3);\n" "half4 __ovld __cnfn logb(half4);\n" "half8 __ovld __cnfn logb(half8);\n" "half16 __ovld __cnfn logb(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * mad approximates a * b + c. Whether or how the\n" " * product of a * b is rounded and how supernormal or\n" " * subnormal intermediate products are handled is not\n" " * defined. 
mad is intended to be used where speed is\n" " * preferred over accuracy.\n" " */\n" "float __ovld __cnfn mad(float, float, float);\n" "float2 __ovld __cnfn mad(float2, float2, float2);\n" "float3 __ovld __cnfn mad(float3, float3, float3);\n" "float4 __ovld __cnfn mad(float4, float4, float4);\n" "float8 __ovld __cnfn mad(float8, float8, float8);\n" "float16 __ovld __cnfn mad(float16, float16, float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn mad(double, double, double);\n" "double2 __ovld __cnfn mad(double2, double2, double2);\n" "double3 __ovld __cnfn mad(double3, double3, double3);\n" "double4 __ovld __cnfn mad(double4, double4, double4);\n" "double8 __ovld __cnfn mad(double8, double8, double8);\n" "double16 __ovld __cnfn mad(double16, double16, double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn mad(half, half, half);\n" "half2 __ovld __cnfn mad(half2, half2, half2);\n" "half3 __ovld __cnfn mad(half3, half3, half3);\n" "half4 __ovld __cnfn mad(half4, half4, half4);\n" "half8 __ovld __cnfn mad(half8, half8, half8);\n" "half16 __ovld __cnfn mad(half16, half16, half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Returns x if | x | > | y |, y if | y | > | x |, otherwise\n" " * fmax(x, y).\n" " */\n" "float __ovld __cnfn maxmag(float, float);\n" "float2 __ovld __cnfn maxmag(float2, float2);\n" "float3 __ovld __cnfn maxmag(float3, float3);\n" "float4 __ovld __cnfn maxmag(float4, float4);\n" "float8 __ovld __cnfn maxmag(float8, float8);\n" "float16 __ovld __cnfn maxmag(float16, float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn maxmag(double, double);\n" "double2 __ovld __cnfn maxmag(double2, double2);\n" "double3 __ovld __cnfn maxmag(double3, double3);\n" "double4 __ovld __cnfn maxmag(double4, double4);\n" "double8 __ovld __cnfn maxmag(double8, double8);\n" "double16 __ovld __cnfn maxmag(double16, double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn maxmag(half, half);\n" "half2 __ovld __cnfn 
maxmag(half2, half2);\n" "half3 __ovld __cnfn maxmag(half3, half3);\n" "half4 __ovld __cnfn maxmag(half4, half4);\n" "half8 __ovld __cnfn maxmag(half8, half8);\n" "half16 __ovld __cnfn maxmag(half16, half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Returns x if | x | < | y |, y if | y | < | x |, otherwise\n" " * fmin(x, y).\n" " */\n" "float __ovld __cnfn minmag(float, float);\n" "float2 __ovld __cnfn minmag(float2, float2);\n" "float3 __ovld __cnfn minmag(float3, float3);\n" "float4 __ovld __cnfn minmag(float4, float4);\n" "float8 __ovld __cnfn minmag(float8, float8);\n" "float16 __ovld __cnfn minmag(float16, float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn minmag(double, double);\n" "double2 __ovld __cnfn minmag(double2, double2);\n" "double3 __ovld __cnfn minmag(double3, double3);\n" "double4 __ovld __cnfn minmag(double4, double4);\n" "double8 __ovld __cnfn minmag(double8, double8);\n" "double16 __ovld __cnfn minmag(double16, double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn minmag(half, half);\n" "half2 __ovld __cnfn minmag(half2, half2);\n" "half3 __ovld __cnfn minmag(half3, half3);\n" "half4 __ovld __cnfn minmag(half4, half4);\n" "half8 __ovld __cnfn minmag(half8, half8);\n" "half16 __ovld __cnfn minmag(half16, half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Decompose a floating-point number. The modf\n" " * function breaks the argument x into integral and\n" " * fractional parts, each of which has the same sign as\n" " * the argument. 
It stores the integral part in the object\n" " * pointed to by iptr.\n" " */\n" "#if defined(__opencl_c_generic_address_space)\n" "float __ovld modf(float, float *);\n" "float2 __ovld modf(float2, float2 *);\n" "float3 __ovld modf(float3, float3 *);\n" "float4 __ovld modf(float4, float4 *);\n" "float8 __ovld modf(float8, float8 *);\n" "float16 __ovld modf(float16, float16 *);\n" "#ifdef cl_khr_fp64\n" "double __ovld modf(double, double *);\n" "double2 __ovld modf(double2, double2 *);\n" "double3 __ovld modf(double3, double3 *);\n" "double4 __ovld modf(double4, double4 *);\n" "double8 __ovld modf(double8, double8 *);\n" "double16 __ovld modf(double16, double16 *);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld modf(half, half *);\n" "half2 __ovld modf(half2, half2 *);\n" "half3 __ovld modf(half3, half3 *);\n" "half4 __ovld modf(half4, half4 *);\n" "half8 __ovld modf(half8, half8 *);\n" "half16 __ovld modf(half16, half16 *);\n" "#endif //cl_khr_fp16\n" "#endif //defined(__opencl_c_generic_address_space)\n" "\n" "#if defined(__opencl_c_named_address_space_builtins)\n" "float __ovld modf(float, __global float *);\n" "float2 __ovld modf(float2, __global float2 *);\n" "float3 __ovld modf(float3, __global float3 *);\n" "float4 __ovld modf(float4, __global float4 *);\n" "float8 __ovld modf(float8, __global float8 *);\n" "float16 __ovld modf(float16, __global float16 *);\n" "float __ovld modf(float, __local float *);\n" "float2 __ovld modf(float2, __local float2 *);\n" "float3 __ovld modf(float3, __local float3 *);\n" "float4 __ovld modf(float4, __local float4 *);\n" "float8 __ovld modf(float8, __local float8 *);\n" "float16 __ovld modf(float16, __local float16 *);\n" "float __ovld modf(float, __private float *);\n" "float2 __ovld modf(float2, __private float2 *);\n" "float3 __ovld modf(float3, __private float3 *);\n" "float4 __ovld modf(float4, __private float4 *);\n" "float8 __ovld modf(float8, __private float8 *);\n" "float16 __ovld modf(float16, 
__private float16 *);\n" "#ifdef cl_khr_fp64\n" "double __ovld modf(double, __global double *);\n" "double2 __ovld modf(double2, __global double2 *);\n" "double3 __ovld modf(double3, __global double3 *);\n" "double4 __ovld modf(double4, __global double4 *);\n" "double8 __ovld modf(double8, __global double8 *);\n" "double16 __ovld modf(double16, __global double16 *);\n" "double __ovld modf(double, __local double *);\n" "double2 __ovld modf(double2, __local double2 *);\n" "double3 __ovld modf(double3, __local double3 *);\n" "double4 __ovld modf(double4, __local double4 *);\n" "double8 __ovld modf(double8, __local double8 *);\n" "double16 __ovld modf(double16, __local double16 *);\n" "double __ovld modf(double, __private double *);\n" "double2 __ovld modf(double2, __private double2 *);\n" "double3 __ovld modf(double3, __private double3 *);\n" "double4 __ovld modf(double4, __private double4 *);\n" "double8 __ovld modf(double8, __private double8 *);\n" "double16 __ovld modf(double16, __private double16 *);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld modf(half, __global half *);\n" "half2 __ovld modf(half2, __global half2 *);\n" "half3 __ovld modf(half3, __global half3 *);\n" "half4 __ovld modf(half4, __global half4 *);\n" "half8 __ovld modf(half8, __global half8 *);\n" "half16 __ovld modf(half16, __global half16 *);\n" "half __ovld modf(half, __local half *);\n" "half2 __ovld modf(half2, __local half2 *);\n" "half3 __ovld modf(half3, __local half3 *);\n" "half4 __ovld modf(half4, __local half4 *);\n" "half8 __ovld modf(half8, __local half8 *);\n" "half16 __ovld modf(half16, __local half16 *);\n" "half __ovld modf(half, __private half *);\n" "half2 __ovld modf(half2, __private half2 *);\n" "half3 __ovld modf(half3, __private half3 *);\n" "half4 __ovld modf(half4, __private half4 *);\n" "half8 __ovld modf(half8, __private half8 *);\n" "half16 __ovld modf(half16, __private half16 *);\n" "#endif //cl_khr_fp16\n" "#endif 
//defined(__opencl_c_named_address_space_builtins)\n" "\n" "/**\n" " * Returns a quiet NaN. The nancode may be placed\n" " * in the significand of the resulting NaN.\n" " */\n" "float __ovld __cnfn nan(uint);\n" "float2 __ovld __cnfn nan(uint2);\n" "float3 __ovld __cnfn nan(uint3);\n" "float4 __ovld __cnfn nan(uint4);\n" "float8 __ovld __cnfn nan(uint8);\n" "float16 __ovld __cnfn nan(uint16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn nan(ulong);\n" "double2 __ovld __cnfn nan(ulong2);\n" "double3 __ovld __cnfn nan(ulong3);\n" "double4 __ovld __cnfn nan(ulong4);\n" "double8 __ovld __cnfn nan(ulong8);\n" "double16 __ovld __cnfn nan(ulong16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn nan(ushort);\n" "half2 __ovld __cnfn nan(ushort2);\n" "half3 __ovld __cnfn nan(ushort3);\n" "half4 __ovld __cnfn nan(ushort4);\n" "half8 __ovld __cnfn nan(ushort8);\n" "half16 __ovld __cnfn nan(ushort16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Computes the next representable single-precision\n" " * floating-point value following x in the direction of\n" " * y. 
Thus, if y is less than x, nextafter() returns the\n" " * largest representable floating-point number less\n" " * than x.\n" " */\n" "float __ovld __cnfn nextafter(float, float);\n" "float2 __ovld __cnfn nextafter(float2, float2);\n" "float3 __ovld __cnfn nextafter(float3, float3);\n" "float4 __ovld __cnfn nextafter(float4, float4);\n" "float8 __ovld __cnfn nextafter(float8, float8);\n" "float16 __ovld __cnfn nextafter(float16, float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn nextafter(double, double);\n" "double2 __ovld __cnfn nextafter(double2, double2);\n" "double3 __ovld __cnfn nextafter(double3, double3);\n" "double4 __ovld __cnfn nextafter(double4, double4);\n" "double8 __ovld __cnfn nextafter(double8, double8);\n" "double16 __ovld __cnfn nextafter(double16, double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn nextafter(half, half);\n" "half2 __ovld __cnfn nextafter(half2, half2);\n" "half3 __ovld __cnfn nextafter(half3, half3);\n" "half4 __ovld __cnfn nextafter(half4, half4);\n" "half8 __ovld __cnfn nextafter(half8, half8);\n" "half16 __ovld __cnfn nextafter(half16, half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Compute x to the power y.\n" " */\n" "float __ovld __cnfn pow(float, float);\n" "float2 __ovld __cnfn pow(float2, float2);\n" "float3 __ovld __cnfn pow(float3, float3);\n" "float4 __ovld __cnfn pow(float4, float4);\n" "float8 __ovld __cnfn pow(float8, float8);\n" "float16 __ovld __cnfn pow(float16, float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn pow(double, double);\n" "double2 __ovld __cnfn pow(double2, double2);\n" "double3 __ovld __cnfn pow(double3, double3);\n" "double4 __ovld __cnfn pow(double4, double4);\n" "double8 __ovld __cnfn pow(double8, double8);\n" "double16 __ovld __cnfn pow(double16, double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn pow(half, half);\n" "half2 __ovld __cnfn pow(half2, half2);\n" "half3 __ovld __cnfn pow(half3, half3);\n" "half4 __ovld 
__cnfn pow(half4, half4);\n" "half8 __ovld __cnfn pow(half8, half8);\n" "half16 __ovld __cnfn pow(half16, half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Compute x to the power y, where y is an integer.\n" " */\n" "float __ovld __cnfn pown(float, int);\n" "float2 __ovld __cnfn pown(float2, int2);\n" "float3 __ovld __cnfn pown(float3, int3);\n" "float4 __ovld __cnfn pown(float4, int4);\n" "float8 __ovld __cnfn pown(float8, int8);\n" "float16 __ovld __cnfn pown(float16, int16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn pown(double, int);\n" "double2 __ovld __cnfn pown(double2, int2);\n" "double3 __ovld __cnfn pown(double3, int3);\n" "double4 __ovld __cnfn pown(double4, int4);\n" "double8 __ovld __cnfn pown(double8, int8);\n" "double16 __ovld __cnfn pown(double16, int16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn pown(half, int);\n" "half2 __ovld __cnfn pown(half2, int2);\n" "half3 __ovld __cnfn pown(half3, int3);\n" "half4 __ovld __cnfn pown(half4, int4);\n" "half8 __ovld __cnfn pown(half8, int8);\n" "half16 __ovld __cnfn pown(half16, int16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Compute x to the power y, where x is >= 0.\n" " */\n" "float __ovld __cnfn powr(float, float);\n" "float2 __ovld __cnfn powr(float2, float2);\n" "float3 __ovld __cnfn powr(float3, float3);\n" "float4 __ovld __cnfn powr(float4, float4);\n" "float8 __ovld __cnfn powr(float8, float8);\n" "float16 __ovld __cnfn powr(float16, float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn powr(double, double);\n" "double2 __ovld __cnfn powr(double2, double2);\n" "double3 __ovld __cnfn powr(double3, double3);\n" "double4 __ovld __cnfn powr(double4, double4);\n" "double8 __ovld __cnfn powr(double8, double8);\n" "double16 __ovld __cnfn powr(double16, double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn powr(half, half);\n" "half2 __ovld __cnfn powr(half2, half2);\n" "half3 __ovld __cnfn powr(half3, half3);\n" "half4 __ovld __cnfn 
powr(half4, half4);\n" "half8 __ovld __cnfn powr(half8, half8);\n" "half16 __ovld __cnfn powr(half16, half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Compute the value r such that r = x - n*y, where n\n" " * is the integer nearest the exact value of x/y. If there\n" " * are two integers closest to x/y, n shall be the even\n" " * one. If r is zero, it is given the same sign as x.\n" " */\n" "float __ovld __cnfn remainder(float, float);\n" "float2 __ovld __cnfn remainder(float2, float2);\n" "float3 __ovld __cnfn remainder(float3, float3);\n" "float4 __ovld __cnfn remainder(float4, float4);\n" "float8 __ovld __cnfn remainder(float8, float8);\n" "float16 __ovld __cnfn remainder(float16, float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn remainder(double, double);\n" "double2 __ovld __cnfn remainder(double2, double2);\n" "double3 __ovld __cnfn remainder(double3, double3);\n" "double4 __ovld __cnfn remainder(double4, double4);\n" "double8 __ovld __cnfn remainder(double8, double8);\n" "double16 __ovld __cnfn remainder(double16, double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn remainder(half, half);\n" "half2 __ovld __cnfn remainder(half2, half2);\n" "half3 __ovld __cnfn remainder(half3, half3);\n" "half4 __ovld __cnfn remainder(half4, half4);\n" "half8 __ovld __cnfn remainder(half8, half8);\n" "half16 __ovld __cnfn remainder(half16, half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * The remquo function computes the value r such\n" " * that r = x - n*y, where n is the integer nearest the\n" " * exact value of x/y. If there are two integers closest\n" " * to x/y, n shall be the even one. If r is zero, it is\n" " * given the same sign as x. This is the same value\n" " * that is returned by the remainder function.\n" " * remquo also calculates the lower seven bits of the\n" " * integral quotient x/y, and gives that value the same\n" " * sign as x/y. 
It stores this signed value in the object\n" " * pointed to by quo.\n" " */\n" "#if defined(__opencl_c_generic_address_space)\n" "float __ovld remquo(float, float, int *);\n" "float2 __ovld remquo(float2, float2, int2 *);\n" "float3 __ovld remquo(float3, float3, int3 *);\n" "float4 __ovld remquo(float4, float4, int4 *);\n" "float8 __ovld remquo(float8, float8, int8 *);\n" "float16 __ovld remquo(float16, float16, int16 *);\n" "#ifdef cl_khr_fp64\n" "double __ovld remquo(double, double, int *);\n" "double2 __ovld remquo(double2, double2, int2 *);\n" "double3 __ovld remquo(double3, double3, int3 *);\n" "double4 __ovld remquo(double4, double4, int4 *);\n" "double8 __ovld remquo(double8, double8, int8 *);\n" "double16 __ovld remquo(double16, double16, int16 *);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld remquo(half, half, int *);\n" "half2 __ovld remquo(half2, half2, int2 *);\n" "half3 __ovld remquo(half3, half3, int3 *);\n" "half4 __ovld remquo(half4, half4, int4 *);\n" "half8 __ovld remquo(half8, half8, int8 *);\n" "half16 __ovld remquo(half16, half16, int16 *);\n" "#endif //cl_khr_fp16\n" "#endif //defined(__opencl_c_generic_address_space)\n" "\n" "#if defined(__opencl_c_named_address_space_builtins)\n" "float __ovld remquo(float, float, __global int *);\n" "float2 __ovld remquo(float2, float2, __global int2 *);\n" "float3 __ovld remquo(float3, float3, __global int3 *);\n" "float4 __ovld remquo(float4, float4, __global int4 *);\n" "float8 __ovld remquo(float8, float8, __global int8 *);\n" "float16 __ovld remquo(float16, float16, __global int16 *);\n" "float __ovld remquo(float, float, __local int *);\n" "float2 __ovld remquo(float2, float2, __local int2 *);\n" "float3 __ovld remquo(float3, float3, __local int3 *);\n" "float4 __ovld remquo(float4, float4, __local int4 *);\n" "float8 __ovld remquo(float8, float8, __local int8 *);\n" "float16 __ovld remquo(float16, float16, __local int16 *);\n" "float __ovld remquo(float, float, __private int *);\n" 
"float2 __ovld remquo(float2, float2, __private int2 *);\n" "float3 __ovld remquo(float3, float3, __private int3 *);\n" "float4 __ovld remquo(float4, float4, __private int4 *);\n" "float8 __ovld remquo(float8, float8, __private int8 *);\n" "float16 __ovld remquo(float16, float16, __private int16 *);\n" "#ifdef cl_khr_fp64\n" "double __ovld remquo(double, double, __global int *);\n" "double2 __ovld remquo(double2, double2, __global int2 *);\n" "double3 __ovld remquo(double3, double3, __global int3 *);\n" "double4 __ovld remquo(double4, double4, __global int4 *);\n" "double8 __ovld remquo(double8, double8, __global int8 *);\n" "double16 __ovld remquo(double16, double16, __global int16 *);\n" "double __ovld remquo(double, double, __local int *);\n" "double2 __ovld remquo(double2, double2, __local int2 *);\n" "double3 __ovld remquo(double3, double3, __local int3 *);\n" "double4 __ovld remquo(double4, double4, __local int4 *);\n" "double8 __ovld remquo(double8, double8, __local int8 *);\n" "double16 __ovld remquo(double16, double16, __local int16 *);\n" "double __ovld remquo(double, double, __private int *);\n" "double2 __ovld remquo(double2, double2, __private int2 *);\n" "double3 __ovld remquo(double3, double3, __private int3 *);\n" "double4 __ovld remquo(double4, double4, __private int4 *);\n" "double8 __ovld remquo(double8, double8, __private int8 *);\n" "double16 __ovld remquo(double16, double16, __private int16 *);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld remquo(half, half, __global int *);\n" "half2 __ovld remquo(half2, half2, __global int2 *);\n" "half3 __ovld remquo(half3, half3, __global int3 *);\n" "half4 __ovld remquo(half4, half4, __global int4 *);\n" "half8 __ovld remquo(half8, half8, __global int8 *);\n" "half16 __ovld remquo(half16, half16, __global int16 *);\n" "half __ovld remquo(half, half, __local int *);\n" "half2 __ovld remquo(half2, half2, __local int2 *);\n" "half3 __ovld remquo(half3, half3, __local int3 *);\n" "half4 
__ovld remquo(half4, half4, __local int4 *);\n" "half8 __ovld remquo(half8, half8, __local int8 *);\n" "half16 __ovld remquo(half16, half16, __local int16 *);\n" "half __ovld remquo(half, half, __private int *);\n" "half2 __ovld remquo(half2, half2, __private int2 *);\n" "half3 __ovld remquo(half3, half3, __private int3 *);\n" "half4 __ovld remquo(half4, half4, __private int4 *);\n" "half8 __ovld remquo(half8, half8, __private int8 *);\n" "half16 __ovld remquo(half16, half16, __private int16 *);\n" "#endif //cl_khr_fp16\n" "#endif //defined(__opencl_c_named_address_space_builtins)\n" "/**\n" " * Round to integral value (using round to nearest\n" " * even rounding mode) in floating-point format.\n" " * Refer to section 7.1 for description of rounding\n" " * modes.\n" " */\n" "float __ovld __cnfn rint(float);\n" "float2 __ovld __cnfn rint(float2);\n" "float3 __ovld __cnfn rint(float3);\n" "float4 __ovld __cnfn rint(float4);\n" "float8 __ovld __cnfn rint(float8);\n" "float16 __ovld __cnfn rint(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn rint(double);\n" "double2 __ovld __cnfn rint(double2);\n" "double3 __ovld __cnfn rint(double3);\n" "double4 __ovld __cnfn rint(double4);\n" "double8 __ovld __cnfn rint(double8);\n" "double16 __ovld __cnfn rint(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn rint(half);\n" "half2 __ovld __cnfn rint(half2);\n" "half3 __ovld __cnfn rint(half3);\n" "half4 __ovld __cnfn rint(half4);\n" "half8 __ovld __cnfn rint(half8);\n" "half16 __ovld __cnfn rint(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Compute x to the power 1/y.\n" " */\n" "float __ovld __cnfn rootn(float, int);\n" "float2 __ovld __cnfn rootn(float2, int2);\n" "float3 __ovld __cnfn rootn(float3, int3);\n" "float4 __ovld __cnfn rootn(float4, int4);\n" "float8 __ovld __cnfn rootn(float8, int8);\n" "float16 __ovld __cnfn rootn(float16, int16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn rootn(double, int);\n" "double2 __ovld 
__cnfn rootn(double2, int2);\n" "double3 __ovld __cnfn rootn(double3, int3);\n" "double4 __ovld __cnfn rootn(double4, int4);\n" "double8 __ovld __cnfn rootn(double8, int8);\n" "double16 __ovld __cnfn rootn(double16, int16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn rootn(half, int);\n" "half2 __ovld __cnfn rootn(half2, int2);\n" "half3 __ovld __cnfn rootn(half3, int3);\n" "half4 __ovld __cnfn rootn(half4, int4);\n" "half8 __ovld __cnfn rootn(half8, int8);\n" "half16 __ovld __cnfn rootn(half16, int16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Return the integral value nearest to x rounding\n" " * halfway cases away from zero, regardless of the\n" " * current rounding direction.\n" " */\n" "float __ovld __cnfn round(float);\n" "float2 __ovld __cnfn round(float2);\n" "float3 __ovld __cnfn round(float3);\n" "float4 __ovld __cnfn round(float4);\n" "float8 __ovld __cnfn round(float8);\n" "float16 __ovld __cnfn round(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn round(double);\n" "double2 __ovld __cnfn round(double2);\n" "double3 __ovld __cnfn round(double3);\n" "double4 __ovld __cnfn round(double4);\n" "double8 __ovld __cnfn round(double8);\n" "double16 __ovld __cnfn round(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn round(half);\n" "half2 __ovld __cnfn round(half2);\n" "half3 __ovld __cnfn round(half3);\n" "half4 __ovld __cnfn round(half4);\n" "half8 __ovld __cnfn round(half8);\n" "half16 __ovld __cnfn round(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Compute inverse square root.\n" " */\n" "float __ovld __cnfn rsqrt(float);\n" "float2 __ovld __cnfn rsqrt(float2);\n" "float3 __ovld __cnfn rsqrt(float3);\n" "float4 __ovld __cnfn rsqrt(float4);\n" "float8 __ovld __cnfn rsqrt(float8);\n" "float16 __ovld __cnfn rsqrt(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn rsqrt(double);\n" "double2 __ovld __cnfn rsqrt(double2);\n" "double3 __ovld __cnfn rsqrt(double3);\n" "double4 
__ovld __cnfn rsqrt(double4);\n" "double8 __ovld __cnfn rsqrt(double8);\n" "double16 __ovld __cnfn rsqrt(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn rsqrt(half);\n" "half2 __ovld __cnfn rsqrt(half2);\n" "half3 __ovld __cnfn rsqrt(half3);\n" "half4 __ovld __cnfn rsqrt(half4);\n" "half8 __ovld __cnfn rsqrt(half8);\n" "half16 __ovld __cnfn rsqrt(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Compute sine.\n" " */\n" "float __ovld __cnfn sin(float);\n" "float2 __ovld __cnfn sin(float2);\n" "float3 __ovld __cnfn sin(float3);\n" "float4 __ovld __cnfn sin(float4);\n" "float8 __ovld __cnfn sin(float8);\n" "float16 __ovld __cnfn sin(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn sin(double);\n" "double2 __ovld __cnfn sin(double2);\n" "double3 __ovld __cnfn sin(double3);\n" "double4 __ovld __cnfn sin(double4);\n" "double8 __ovld __cnfn sin(double8);\n" "double16 __ovld __cnfn sin(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn sin(half);\n" "half2 __ovld __cnfn sin(half2);\n" "half3 __ovld __cnfn sin(half3);\n" "half4 __ovld __cnfn sin(half4);\n" "half8 __ovld __cnfn sin(half8);\n" "half16 __ovld __cnfn sin(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Compute sine and cosine of x. 
The computed sine\n" " * is the return value and computed cosine is returned\n" " * in cosval.\n" " */\n" "#if defined(__opencl_c_generic_address_space)\n" "float __ovld sincos(float, float *);\n" "float2 __ovld sincos(float2, float2 *);\n" "float3 __ovld sincos(float3, float3 *);\n" "float4 __ovld sincos(float4, float4 *);\n" "float8 __ovld sincos(float8, float8 *);\n" "float16 __ovld sincos(float16, float16 *);\n" "#ifdef cl_khr_fp64\n" "double __ovld sincos(double, double *);\n" "double2 __ovld sincos(double2, double2 *);\n" "double3 __ovld sincos(double3, double3 *);\n" "double4 __ovld sincos(double4, double4 *);\n" "double8 __ovld sincos(double8, double8 *);\n" "double16 __ovld sincos(double16, double16 *);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld sincos(half, half *);\n" "half2 __ovld sincos(half2, half2 *);\n" "half3 __ovld sincos(half3, half3 *);\n" "half4 __ovld sincos(half4, half4 *);\n" "half8 __ovld sincos(half8, half8 *);\n" "half16 __ovld sincos(half16, half16 *);\n" "#endif //cl_khr_fp16\n" "#endif //defined(__opencl_c_generic_address_space)\n" "\n" "#if defined(__opencl_c_named_address_space_builtins)\n" "float __ovld sincos(float, __global float *);\n" "float2 __ovld sincos(float2, __global float2 *);\n" "float3 __ovld sincos(float3, __global float3 *);\n" "float4 __ovld sincos(float4, __global float4 *);\n" "float8 __ovld sincos(float8, __global float8 *);\n" "float16 __ovld sincos(float16, __global float16 *);\n" "float __ovld sincos(float, __local float *);\n" "float2 __ovld sincos(float2, __local float2 *);\n" "float3 __ovld sincos(float3, __local float3 *);\n" "float4 __ovld sincos(float4, __local float4 *);\n" "float8 __ovld sincos(float8, __local float8 *);\n" "float16 __ovld sincos(float16, __local float16 *);\n" "float __ovld sincos(float, __private float *);\n" "float2 __ovld sincos(float2, __private float2 *);\n" "float3 __ovld sincos(float3, __private float3 *);\n" "float4 __ovld sincos(float4, __private float4 
*);\n" "float8 __ovld sincos(float8, __private float8 *);\n" "float16 __ovld sincos(float16, __private float16 *);\n" "#ifdef cl_khr_fp64\n" "double __ovld sincos(double, __global double *);\n" "double2 __ovld sincos(double2, __global double2 *);\n" "double3 __ovld sincos(double3, __global double3 *);\n" "double4 __ovld sincos(double4, __global double4 *);\n" "double8 __ovld sincos(double8, __global double8 *);\n" "double16 __ovld sincos(double16, __global double16 *);\n" "double __ovld sincos(double, __local double *);\n" "double2 __ovld sincos(double2, __local double2 *);\n" "double3 __ovld sincos(double3, __local double3 *);\n" "double4 __ovld sincos(double4, __local double4 *);\n" "double8 __ovld sincos(double8, __local double8 *);\n" "double16 __ovld sincos(double16, __local double16 *);\n" "double __ovld sincos(double, __private double *);\n" "double2 __ovld sincos(double2, __private double2 *);\n" "double3 __ovld sincos(double3, __private double3 *);\n" "double4 __ovld sincos(double4, __private double4 *);\n" "double8 __ovld sincos(double8, __private double8 *);\n" "double16 __ovld sincos(double16, __private double16 *);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld sincos(half, __global half *);\n" "half2 __ovld sincos(half2, __global half2 *);\n" "half3 __ovld sincos(half3, __global half3 *);\n" "half4 __ovld sincos(half4, __global half4 *);\n" "half8 __ovld sincos(half8, __global half8 *);\n" "half16 __ovld sincos(half16, __global half16 *);\n" "half __ovld sincos(half, __local half *);\n" "half2 __ovld sincos(half2, __local half2 *);\n" "half3 __ovld sincos(half3, __local half3 *);\n" "half4 __ovld sincos(half4, __local half4 *);\n" "half8 __ovld sincos(half8, __local half8 *);\n" "half16 __ovld sincos(half16, __local half16 *);\n" "half __ovld sincos(half, __private half *);\n" "half2 __ovld sincos(half2, __private half2 *);\n" "half3 __ovld sincos(half3, __private half3 *);\n" "half4 __ovld sincos(half4, __private half4 *);\n" "half8 
__ovld sincos(half8, __private half8 *);\n" "half16 __ovld sincos(half16, __private half16 *);\n" "#endif //cl_khr_fp16\n" "#endif //defined(__opencl_c_named_address_space_builtins)\n" "\n" "/**\n" " * Compute hyperbolic sine.\n" " */\n" "float __ovld __cnfn sinh(float);\n" "float2 __ovld __cnfn sinh(float2);\n" "float3 __ovld __cnfn sinh(float3);\n" "float4 __ovld __cnfn sinh(float4);\n" "float8 __ovld __cnfn sinh(float8);\n" "float16 __ovld __cnfn sinh(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn sinh(double);\n" "double2 __ovld __cnfn sinh(double2);\n" "double3 __ovld __cnfn sinh(double3);\n" "double4 __ovld __cnfn sinh(double4);\n" "double8 __ovld __cnfn sinh(double8);\n" "double16 __ovld __cnfn sinh(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn sinh(half);\n" "half2 __ovld __cnfn sinh(half2);\n" "half3 __ovld __cnfn sinh(half3);\n" "half4 __ovld __cnfn sinh(half4);\n" "half8 __ovld __cnfn sinh(half8);\n" "half16 __ovld __cnfn sinh(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Compute sin (PI * x).\n" " */\n" "float __ovld __cnfn sinpi(float);\n" "float2 __ovld __cnfn sinpi(float2);\n" "float3 __ovld __cnfn sinpi(float3);\n" "float4 __ovld __cnfn sinpi(float4);\n" "float8 __ovld __cnfn sinpi(float8);\n" "float16 __ovld __cnfn sinpi(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn sinpi(double);\n" "double2 __ovld __cnfn sinpi(double2);\n" "double3 __ovld __cnfn sinpi(double3);\n" "double4 __ovld __cnfn sinpi(double4);\n" "double8 __ovld __cnfn sinpi(double8);\n" "double16 __ovld __cnfn sinpi(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn sinpi(half);\n" "half2 __ovld __cnfn sinpi(half2);\n" "half3 __ovld __cnfn sinpi(half3);\n" "half4 __ovld __cnfn sinpi(half4);\n" "half8 __ovld __cnfn sinpi(half8);\n" "half16 __ovld __cnfn sinpi(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Compute square root.\n" " */\n" "float __ovld __cnfn sqrt(float);\n" "float2 
__ovld __cnfn sqrt(float2);\n" "float3 __ovld __cnfn sqrt(float3);\n" "float4 __ovld __cnfn sqrt(float4);\n" "float8 __ovld __cnfn sqrt(float8);\n" "float16 __ovld __cnfn sqrt(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn sqrt(double);\n" "double2 __ovld __cnfn sqrt(double2);\n" "double3 __ovld __cnfn sqrt(double3);\n" "double4 __ovld __cnfn sqrt(double4);\n" "double8 __ovld __cnfn sqrt(double8);\n" "double16 __ovld __cnfn sqrt(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn sqrt(half);\n" "half2 __ovld __cnfn sqrt(half2);\n" "half3 __ovld __cnfn sqrt(half3);\n" "half4 __ovld __cnfn sqrt(half4);\n" "half8 __ovld __cnfn sqrt(half8);\n" "half16 __ovld __cnfn sqrt(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Compute tangent.\n" " */\n" "float __ovld __cnfn tan(float);\n" "float2 __ovld __cnfn tan(float2);\n" "float3 __ovld __cnfn tan(float3);\n" "float4 __ovld __cnfn tan(float4);\n" "float8 __ovld __cnfn tan(float8);\n" "float16 __ovld __cnfn tan(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn tan(double);\n" "double2 __ovld __cnfn tan(double2);\n" "double3 __ovld __cnfn tan(double3);\n" "double4 __ovld __cnfn tan(double4);\n" "double8 __ovld __cnfn tan(double8);\n" "double16 __ovld __cnfn tan(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn tan(half);\n" "half2 __ovld __cnfn tan(half2);\n" "half3 __ovld __cnfn tan(half3);\n" "half4 __ovld __cnfn tan(half4);\n" "half8 __ovld __cnfn tan(half8);\n" "half16 __ovld __cnfn tan(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Compute hyperbolic tangent.\n" " */\n" "float __ovld __cnfn tanh(float);\n" "float2 __ovld __cnfn tanh(float2);\n" "float3 __ovld __cnfn tanh(float3);\n" "float4 __ovld __cnfn tanh(float4);\n" "float8 __ovld __cnfn tanh(float8);\n" "float16 __ovld __cnfn tanh(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn tanh(double);\n" "double2 __ovld __cnfn tanh(double2);\n" "double3 __ovld __cnfn 
tanh(double3);\n" "double4 __ovld __cnfn tanh(double4);\n" "double8 __ovld __cnfn tanh(double8);\n" "double16 __ovld __cnfn tanh(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn tanh(half);\n" "half2 __ovld __cnfn tanh(half2);\n" "half3 __ovld __cnfn tanh(half3);\n" "half4 __ovld __cnfn tanh(half4);\n" "half8 __ovld __cnfn tanh(half8);\n" "half16 __ovld __cnfn tanh(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Compute tan (PI * x).\n" " */\n" "float __ovld __cnfn tanpi(float);\n" "float2 __ovld __cnfn tanpi(float2);\n" "float3 __ovld __cnfn tanpi(float3);\n" "float4 __ovld __cnfn tanpi(float4);\n" "float8 __ovld __cnfn tanpi(float8);\n" "float16 __ovld __cnfn tanpi(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn tanpi(double);\n" "double2 __ovld __cnfn tanpi(double2);\n" "double3 __ovld __cnfn tanpi(double3);\n" "double4 __ovld __cnfn tanpi(double4);\n" "double8 __ovld __cnfn tanpi(double8);\n" "double16 __ovld __cnfn tanpi(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn tanpi(half);\n" "half2 __ovld __cnfn tanpi(half2);\n" "half3 __ovld __cnfn tanpi(half3);\n" "half4 __ovld __cnfn tanpi(half4);\n" "half8 __ovld __cnfn tanpi(half8);\n" "half16 __ovld __cnfn tanpi(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Compute the gamma function.\n" " */\n" "float __ovld __cnfn tgamma(float);\n" "float2 __ovld __cnfn tgamma(float2);\n" "float3 __ovld __cnfn tgamma(float3);\n" "float4 __ovld __cnfn tgamma(float4);\n" "float8 __ovld __cnfn tgamma(float8);\n" "float16 __ovld __cnfn tgamma(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn tgamma(double);\n" "double2 __ovld __cnfn tgamma(double2);\n" "double3 __ovld __cnfn tgamma(double3);\n" "double4 __ovld __cnfn tgamma(double4);\n" "double8 __ovld __cnfn tgamma(double8);\n" "double16 __ovld __cnfn tgamma(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn tgamma(half);\n" "half2 __ovld __cnfn 
tgamma(half2);\n" "half3 __ovld __cnfn tgamma(half3);\n" "half4 __ovld __cnfn tgamma(half4);\n" "half8 __ovld __cnfn tgamma(half8);\n" "half16 __ovld __cnfn tgamma(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Round to integral value using the round to zero\n" " * rounding mode.\n" " */\n" "float __ovld __cnfn trunc(float);\n" "float2 __ovld __cnfn trunc(float2);\n" "float3 __ovld __cnfn trunc(float3);\n" "float4 __ovld __cnfn trunc(float4);\n" "float8 __ovld __cnfn trunc(float8);\n" "float16 __ovld __cnfn trunc(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn trunc(double);\n" "double2 __ovld __cnfn trunc(double2);\n" "double3 __ovld __cnfn trunc(double3);\n" "double4 __ovld __cnfn trunc(double4);\n" "double8 __ovld __cnfn trunc(double8);\n" "double16 __ovld __cnfn trunc(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn trunc(half);\n" "half2 __ovld __cnfn trunc(half2);\n" "half3 __ovld __cnfn trunc(half3);\n" "half4 __ovld __cnfn trunc(half4);\n" "half8 __ovld __cnfn trunc(half8);\n" "half16 __ovld __cnfn trunc(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Compute cosine. x must be in the range -2^16 ... 
+2^16.\n" " */\n" "float __ovld __cnfn half_cos(float);\n" "float2 __ovld __cnfn half_cos(float2);\n" "float3 __ovld __cnfn half_cos(float3);\n" "float4 __ovld __cnfn half_cos(float4);\n" "float8 __ovld __cnfn half_cos(float8);\n" "float16 __ovld __cnfn half_cos(float16);\n" "\n" "/**\n" " * Compute x / y.\n" " */\n" "float __ovld __cnfn half_divide(float, float);\n" "float2 __ovld __cnfn half_divide(float2, float2);\n" "float3 __ovld __cnfn half_divide(float3, float3);\n" "float4 __ovld __cnfn half_divide(float4, float4);\n" "float8 __ovld __cnfn half_divide(float8, float8);\n" "float16 __ovld __cnfn half_divide(float16, float16);\n" "\n" "/**\n" " * Compute the base- e exponential of x.\n" " */\n" "float __ovld __cnfn half_exp(float);\n" "float2 __ovld __cnfn half_exp(float2);\n" "float3 __ovld __cnfn half_exp(float3);\n" "float4 __ovld __cnfn half_exp(float4);\n" "float8 __ovld __cnfn half_exp(float8);\n" "float16 __ovld __cnfn half_exp(float16);\n" "\n" "/**\n" " * Compute the base- 2 exponential of x.\n" " */\n" "float __ovld __cnfn half_exp2(float);\n" "float2 __ovld __cnfn half_exp2(float2);\n" "float3 __ovld __cnfn half_exp2(float3);\n" "float4 __ovld __cnfn half_exp2(float4);\n" "float8 __ovld __cnfn half_exp2(float8);\n" "float16 __ovld __cnfn half_exp2(float16);\n" "\n" "/**\n" " * Compute the base- 10 exponential of x.\n" " */\n" "float __ovld __cnfn half_exp10(float);\n" "float2 __ovld __cnfn half_exp10(float2);\n" "float3 __ovld __cnfn half_exp10(float3);\n" "float4 __ovld __cnfn half_exp10(float4);\n" "float8 __ovld __cnfn half_exp10(float8);\n" "float16 __ovld __cnfn half_exp10(float16);\n" "\n" "/**\n" " * Compute natural logarithm.\n" " */\n" "float __ovld __cnfn half_log(float);\n" "float2 __ovld __cnfn half_log(float2);\n" "float3 __ovld __cnfn half_log(float3);\n" "float4 __ovld __cnfn half_log(float4);\n" "float8 __ovld __cnfn half_log(float8);\n" "float16 __ovld __cnfn half_log(float16);\n" "\n" "/**\n" " * Compute a base 2 logarithm.\n" " 
*/\n" "float __ovld __cnfn half_log2(float);\n" "float2 __ovld __cnfn half_log2(float2);\n" "float3 __ovld __cnfn half_log2(float3);\n" "float4 __ovld __cnfn half_log2(float4);\n" "float8 __ovld __cnfn half_log2(float8);\n" "float16 __ovld __cnfn half_log2(float16);\n" "\n" "/**\n" " * Compute a base 10 logarithm.\n" " */\n" "float __ovld __cnfn half_log10(float);\n" "float2 __ovld __cnfn half_log10(float2);\n" "float3 __ovld __cnfn half_log10(float3);\n" "float4 __ovld __cnfn half_log10(float4);\n" "float8 __ovld __cnfn half_log10(float8);\n" "float16 __ovld __cnfn half_log10(float16);\n" "\n" "/**\n" " * Compute x to the power y, where x is >= 0.\n" " */\n" "float __ovld __cnfn half_powr(float, float);\n" "float2 __ovld __cnfn half_powr(float2, float2);\n" "float3 __ovld __cnfn half_powr(float3, float3);\n" "float4 __ovld __cnfn half_powr(float4, float4);\n" "float8 __ovld __cnfn half_powr(float8, float8);\n" "float16 __ovld __cnfn half_powr(float16, float16);\n" "\n" "/**\n" " * Compute reciprocal.\n" " */\n" "float __ovld __cnfn half_recip(float);\n" "float2 __ovld __cnfn half_recip(float2);\n" "float3 __ovld __cnfn half_recip(float3);\n" "float4 __ovld __cnfn half_recip(float4);\n" "float8 __ovld __cnfn half_recip(float8);\n" "float16 __ovld __cnfn half_recip(float16);\n" "\n" "/**\n" " * Compute inverse square root.\n" " */\n" "float __ovld __cnfn half_rsqrt(float);\n" "float2 __ovld __cnfn half_rsqrt(float2);\n" "float3 __ovld __cnfn half_rsqrt(float3);\n" "float4 __ovld __cnfn half_rsqrt(float4);\n" "float8 __ovld __cnfn half_rsqrt(float8);\n" "float16 __ovld __cnfn half_rsqrt(float16);\n" "\n" "/**\n" " * Compute sine. x must be in the range -2^16 ... 
+2^16.\n" " */\n" "float __ovld __cnfn half_sin(float);\n" "float2 __ovld __cnfn half_sin(float2);\n" "float3 __ovld __cnfn half_sin(float3);\n" "float4 __ovld __cnfn half_sin(float4);\n" "float8 __ovld __cnfn half_sin(float8);\n" "float16 __ovld __cnfn half_sin(float16);\n" "\n" "/**\n" " * Compute square root.\n" " */\n" "float __ovld __cnfn half_sqrt(float);\n" "float2 __ovld __cnfn half_sqrt(float2);\n" "float3 __ovld __cnfn half_sqrt(float3);\n" "float4 __ovld __cnfn half_sqrt(float4);\n" "float8 __ovld __cnfn half_sqrt(float8);\n" "float16 __ovld __cnfn half_sqrt(float16);\n" "\n" "/**\n" " * Compute tangent. x must be in the range -216 ... +216.\n" " */\n" "float __ovld __cnfn half_tan(float);\n" "float2 __ovld __cnfn half_tan(float2);\n" "float3 __ovld __cnfn half_tan(float3);\n" "float4 __ovld __cnfn half_tan(float4);\n" "float8 __ovld __cnfn half_tan(float8);\n" "float16 __ovld __cnfn half_tan(float16);\n" "\n" "/**\n" " * Compute cosine over an implementation-defined range.\n" " * The maximum error is implementation-defined.\n" " */\n" "float __ovld __cnfn native_cos(float);\n" "float2 __ovld __cnfn native_cos(float2);\n" "float3 __ovld __cnfn native_cos(float3);\n" "float4 __ovld __cnfn native_cos(float4);\n" "float8 __ovld __cnfn native_cos(float8);\n" "float16 __ovld __cnfn native_cos(float16);\n" "\n" "/**\n" " * Compute x / y over an implementation-defined range.\n" " * The maximum error is implementation-defined.\n" " */\n" "float __ovld __cnfn native_divide(float, float);\n" "float2 __ovld __cnfn native_divide(float2, float2);\n" "float3 __ovld __cnfn native_divide(float3, float3);\n" "float4 __ovld __cnfn native_divide(float4, float4);\n" "float8 __ovld __cnfn native_divide(float8, float8);\n" "float16 __ovld __cnfn native_divide(float16, float16);\n" "\n" "/**\n" " * Compute the base- e exponential of x over an\n" " * implementation-defined range. 
The maximum error is\n" " * implementation-defined.\n" " */\n" "float __ovld __cnfn native_exp(float);\n" "float2 __ovld __cnfn native_exp(float2);\n" "float3 __ovld __cnfn native_exp(float3);\n" "float4 __ovld __cnfn native_exp(float4);\n" "float8 __ovld __cnfn native_exp(float8);\n" "float16 __ovld __cnfn native_exp(float16);\n" "\n" "/**\n" " * Compute the base- 2 exponential of x over an\n" " * implementation-defined range. The maximum error is\n" " * implementation-defined.\n" " */\n" "float __ovld __cnfn native_exp2(float);\n" "float2 __ovld __cnfn native_exp2(float2);\n" "float3 __ovld __cnfn native_exp2(float3);\n" "float4 __ovld __cnfn native_exp2(float4);\n" "float8 __ovld __cnfn native_exp2(float8);\n" "float16 __ovld __cnfn native_exp2(float16);\n" "\n" "/**\n" " * Compute the base- 10 exponential of x over an\n" " * implementation-defined range. The maximum error is\n" " * implementation-defined.\n" " */\n" "float __ovld __cnfn native_exp10(float);\n" "float2 __ovld __cnfn native_exp10(float2);\n" "float3 __ovld __cnfn native_exp10(float3);\n" "float4 __ovld __cnfn native_exp10(float4);\n" "float8 __ovld __cnfn native_exp10(float8);\n" "float16 __ovld __cnfn native_exp10(float16);\n" "\n" "/**\n" " * Compute natural logarithm over an implementationdefined\n" " * range. The maximum error is implementation\n" " * defined.\n" " */\n" "float __ovld __cnfn native_log(float);\n" "float2 __ovld __cnfn native_log(float2);\n" "float3 __ovld __cnfn native_log(float3);\n" "float4 __ovld __cnfn native_log(float4);\n" "float8 __ovld __cnfn native_log(float8);\n" "float16 __ovld __cnfn native_log(float16);\n" "\n" "/**\n" " * Compute a base 2 logarithm over an implementationdefined\n" " * range. 
The maximum error is implementationdefined.\n" " */\n" "float __ovld __cnfn native_log2(float);\n" "float2 __ovld __cnfn native_log2(float2);\n" "float3 __ovld __cnfn native_log2(float3);\n" "float4 __ovld __cnfn native_log2(float4);\n" "float8 __ovld __cnfn native_log2(float8);\n" "float16 __ovld __cnfn native_log2(float16);\n" "\n" "/**\n" " * Compute a base 10 logarithm over an implementationdefined\n" " * range. The maximum error is implementationdefined.\n" " */\n" "float __ovld __cnfn native_log10(float);\n" "float2 __ovld __cnfn native_log10(float2);\n" "float3 __ovld __cnfn native_log10(float3);\n" "float4 __ovld __cnfn native_log10(float4);\n" "float8 __ovld __cnfn native_log10(float8);\n" "float16 __ovld __cnfn native_log10(float16);\n" "\n" "/**\n" " * Compute x to the power y, where x is >= 0. The range of\n" " * x and y are implementation-defined. The maximum error\n" " * is implementation-defined.\n" " */\n" "float __ovld __cnfn native_powr(float, float);\n" "float2 __ovld __cnfn native_powr(float2, float2);\n" "float3 __ovld __cnfn native_powr(float3, float3);\n" "float4 __ovld __cnfn native_powr(float4, float4);\n" "float8 __ovld __cnfn native_powr(float8, float8);\n" "float16 __ovld __cnfn native_powr(float16, float16);\n" "\n" "/**\n" " * Compute reciprocal over an implementation-defined\n" " * range. The maximum error is implementation-defined.\n" " */\n" "float __ovld __cnfn native_recip(float);\n" "float2 __ovld __cnfn native_recip(float2);\n" "float3 __ovld __cnfn native_recip(float3);\n" "float4 __ovld __cnfn native_recip(float4);\n" "float8 __ovld __cnfn native_recip(float8);\n" "float16 __ovld __cnfn native_recip(float16);\n" "\n" "/**\n" " * Compute inverse square root over an implementationdefined\n" " * range. 
The maximum error is implementationdefined.\n" " */\n" "float __ovld __cnfn native_rsqrt(float);\n" "float2 __ovld __cnfn native_rsqrt(float2);\n" "float3 __ovld __cnfn native_rsqrt(float3);\n" "float4 __ovld __cnfn native_rsqrt(float4);\n" "float8 __ovld __cnfn native_rsqrt(float8);\n" "float16 __ovld __cnfn native_rsqrt(float16);\n" "\n" "/**\n" " * Compute sine over an implementation-defined range.\n" " * The maximum error is implementation-defined.\n" " */\n" "float __ovld __cnfn native_sin(float);\n" "float2 __ovld __cnfn native_sin(float2);\n" "float3 __ovld __cnfn native_sin(float3);\n" "float4 __ovld __cnfn native_sin(float4);\n" "float8 __ovld __cnfn native_sin(float8);\n" "float16 __ovld __cnfn native_sin(float16);\n" "\n" "/**\n" " * Compute square root over an implementation-defined\n" " * range. The maximum error is implementation-defined.\n" " */\n" "float __ovld __cnfn native_sqrt(float);\n" "float2 __ovld __cnfn native_sqrt(float2);\n" "float3 __ovld __cnfn native_sqrt(float3);\n" "float4 __ovld __cnfn native_sqrt(float4);\n" "float8 __ovld __cnfn native_sqrt(float8);\n" "float16 __ovld __cnfn native_sqrt(float16);\n" "\n" "/**\n" " * Compute tangent over an implementation-defined range.\n" " * The maximum error is implementation-defined.\n" " */\n" "float __ovld __cnfn native_tan(float);\n" "float2 __ovld __cnfn native_tan(float2);\n" "float3 __ovld __cnfn native_tan(float3);\n" "float4 __ovld __cnfn native_tan(float4);\n" "float8 __ovld __cnfn native_tan(float8);\n" "float16 __ovld __cnfn native_tan(float16);\n" "\n" "// OpenCL v1.1 s6.11.3, v1.2 s6.12.3, v2.0 s6.13.3 - Integer Functions\n" "\n" "/**\n" " * Returns | x |.\n" " */\n" "uchar __ovld __cnfn abs(char);\n" "uchar __ovld __cnfn abs(uchar);\n" "uchar2 __ovld __cnfn abs(char2);\n" "uchar2 __ovld __cnfn abs(uchar2);\n" "uchar3 __ovld __cnfn abs(char3);\n" "uchar3 __ovld __cnfn abs(uchar3);\n" "uchar4 __ovld __cnfn abs(char4);\n" "uchar4 __ovld __cnfn abs(uchar4);\n" "uchar8 __ovld __cnfn 
abs(char8);\n" "uchar8 __ovld __cnfn abs(uchar8);\n" "uchar16 __ovld __cnfn abs(char16);\n" "uchar16 __ovld __cnfn abs(uchar16);\n" "ushort __ovld __cnfn abs(short);\n" "ushort __ovld __cnfn abs(ushort);\n" "ushort2 __ovld __cnfn abs(short2);\n" "ushort2 __ovld __cnfn abs(ushort2);\n" "ushort3 __ovld __cnfn abs(short3);\n" "ushort3 __ovld __cnfn abs(ushort3);\n" "ushort4 __ovld __cnfn abs(short4);\n" "ushort4 __ovld __cnfn abs(ushort4);\n" "ushort8 __ovld __cnfn abs(short8);\n" "ushort8 __ovld __cnfn abs(ushort8);\n" "ushort16 __ovld __cnfn abs(short16);\n" "ushort16 __ovld __cnfn abs(ushort16);\n" "uint __ovld __cnfn abs(int);\n" "uint __ovld __cnfn abs(uint);\n" "uint2 __ovld __cnfn abs(int2);\n" "uint2 __ovld __cnfn abs(uint2);\n" "uint3 __ovld __cnfn abs(int3);\n" "uint3 __ovld __cnfn abs(uint3);\n" "uint4 __ovld __cnfn abs(int4);\n" "uint4 __ovld __cnfn abs(uint4);\n" "uint8 __ovld __cnfn abs(int8);\n" "uint8 __ovld __cnfn abs(uint8);\n" "uint16 __ovld __cnfn abs(int16);\n" "uint16 __ovld __cnfn abs(uint16);\n" "ulong __ovld __cnfn abs(long);\n" "ulong __ovld __cnfn abs(ulong);\n" "ulong2 __ovld __cnfn abs(long2);\n" "ulong2 __ovld __cnfn abs(ulong2);\n" "ulong3 __ovld __cnfn abs(long3);\n" "ulong3 __ovld __cnfn abs(ulong3);\n" "ulong4 __ovld __cnfn abs(long4);\n" "ulong4 __ovld __cnfn abs(ulong4);\n" "ulong8 __ovld __cnfn abs(long8);\n" "ulong8 __ovld __cnfn abs(ulong8);\n" "ulong16 __ovld __cnfn abs(long16);\n" "ulong16 __ovld __cnfn abs(ulong16);\n" "\n" "/**\n" " * Returns | x - y | without modulo overflow.\n" " */\n" "uchar __ovld __cnfn abs_diff(char, char);\n" "uchar __ovld __cnfn abs_diff(uchar, uchar);\n" "uchar2 __ovld __cnfn abs_diff(char2, char2);\n" "uchar2 __ovld __cnfn abs_diff(uchar2, uchar2);\n" "uchar3 __ovld __cnfn abs_diff(char3, char3);\n" "uchar3 __ovld __cnfn abs_diff(uchar3, uchar3);\n" "uchar4 __ovld __cnfn abs_diff(char4, char4);\n" "uchar4 __ovld __cnfn abs_diff(uchar4, uchar4);\n" "uchar8 __ovld __cnfn abs_diff(char8, char8);\n" 
"uchar8 __ovld __cnfn abs_diff(uchar8, uchar8);\n" "uchar16 __ovld __cnfn abs_diff(char16, char16);\n" "uchar16 __ovld __cnfn abs_diff(uchar16, uchar16);\n" "ushort __ovld __cnfn abs_diff(short, short);\n" "ushort __ovld __cnfn abs_diff(ushort, ushort);\n" "ushort2 __ovld __cnfn abs_diff(short2, short2);\n" "ushort2 __ovld __cnfn abs_diff(ushort2, ushort2);\n" "ushort3 __ovld __cnfn abs_diff(short3, short3);\n" "ushort3 __ovld __cnfn abs_diff(ushort3, ushort3);\n" "ushort4 __ovld __cnfn abs_diff(short4, short4);\n" "ushort4 __ovld __cnfn abs_diff(ushort4, ushort4);\n" "ushort8 __ovld __cnfn abs_diff(short8, short8);\n" "ushort8 __ovld __cnfn abs_diff(ushort8, ushort8);\n" "ushort16 __ovld __cnfn abs_diff(short16, short16);\n" "ushort16 __ovld __cnfn abs_diff(ushort16, ushort16);\n" "uint __ovld __cnfn abs_diff(int, int);\n" "uint __ovld __cnfn abs_diff(uint, uint);\n" "uint2 __ovld __cnfn abs_diff(int2, int2);\n" "uint2 __ovld __cnfn abs_diff(uint2, uint2);\n" "uint3 __ovld __cnfn abs_diff(int3, int3);\n" "uint3 __ovld __cnfn abs_diff(uint3, uint3);\n" "uint4 __ovld __cnfn abs_diff(int4, int4);\n" "uint4 __ovld __cnfn abs_diff(uint4, uint4);\n" "uint8 __ovld __cnfn abs_diff(int8, int8);\n" "uint8 __ovld __cnfn abs_diff(uint8, uint8);\n" "uint16 __ovld __cnfn abs_diff(int16, int16);\n" "uint16 __ovld __cnfn abs_diff(uint16, uint16);\n" "ulong __ovld __cnfn abs_diff(long, long);\n" "ulong __ovld __cnfn abs_diff(ulong, ulong);\n" "ulong2 __ovld __cnfn abs_diff(long2, long2);\n" "ulong2 __ovld __cnfn abs_diff(ulong2, ulong2);\n" "ulong3 __ovld __cnfn abs_diff(long3, long3);\n" "ulong3 __ovld __cnfn abs_diff(ulong3, ulong3);\n" "ulong4 __ovld __cnfn abs_diff(long4, long4);\n" "ulong4 __ovld __cnfn abs_diff(ulong4, ulong4);\n" "ulong8 __ovld __cnfn abs_diff(long8, long8);\n" "ulong8 __ovld __cnfn abs_diff(ulong8, ulong8);\n" "ulong16 __ovld __cnfn abs_diff(long16, long16);\n" "ulong16 __ovld __cnfn abs_diff(ulong16, ulong16);\n" "\n" "/**\n" " * Returns x + y and 
saturates the result.\n" " */\n" "char __ovld __cnfn add_sat(char, char);\n" "uchar __ovld __cnfn add_sat(uchar, uchar);\n" "char2 __ovld __cnfn add_sat(char2, char2);\n" "uchar2 __ovld __cnfn add_sat(uchar2, uchar2);\n" "char3 __ovld __cnfn add_sat(char3, char3);\n" "uchar3 __ovld __cnfn add_sat(uchar3, uchar3);\n" "char4 __ovld __cnfn add_sat(char4, char4);\n" "uchar4 __ovld __cnfn add_sat(uchar4, uchar4);\n" "char8 __ovld __cnfn add_sat(char8, char8);\n" "uchar8 __ovld __cnfn add_sat(uchar8, uchar8);\n" "char16 __ovld __cnfn add_sat(char16, char16);\n" "uchar16 __ovld __cnfn add_sat(uchar16, uchar16);\n" "short __ovld __cnfn add_sat(short, short);\n" "ushort __ovld __cnfn add_sat(ushort, ushort);\n" "short2 __ovld __cnfn add_sat(short2, short2);\n" "ushort2 __ovld __cnfn add_sat(ushort2, ushort2);\n" "short3 __ovld __cnfn add_sat(short3, short3);\n" "ushort3 __ovld __cnfn add_sat(ushort3, ushort3);\n" "short4 __ovld __cnfn add_sat(short4, short4);\n" "ushort4 __ovld __cnfn add_sat(ushort4, ushort4);\n" "short8 __ovld __cnfn add_sat(short8, short8);\n" "ushort8 __ovld __cnfn add_sat(ushort8, ushort8);\n" "short16 __ovld __cnfn add_sat(short16, short16);\n" "ushort16 __ovld __cnfn add_sat(ushort16, ushort16);\n" "int __ovld __cnfn add_sat(int, int);\n" "uint __ovld __cnfn add_sat(uint, uint);\n" "int2 __ovld __cnfn add_sat(int2, int2);\n" "uint2 __ovld __cnfn add_sat(uint2, uint2);\n" "int3 __ovld __cnfn add_sat(int3, int3);\n" "uint3 __ovld __cnfn add_sat(uint3, uint3);\n" "int4 __ovld __cnfn add_sat(int4, int4);\n" "uint4 __ovld __cnfn add_sat(uint4, uint4);\n" "int8 __ovld __cnfn add_sat(int8, int8);\n" "uint8 __ovld __cnfn add_sat(uint8, uint8);\n" "int16 __ovld __cnfn add_sat(int16, int16);\n" "uint16 __ovld __cnfn add_sat(uint16, uint16);\n" "long __ovld __cnfn add_sat(long, long);\n" "ulong __ovld __cnfn add_sat(ulong, ulong);\n" "long2 __ovld __cnfn add_sat(long2, long2);\n" "ulong2 __ovld __cnfn add_sat(ulong2, ulong2);\n" "long3 __ovld __cnfn 
add_sat(long3, long3);\n" "ulong3 __ovld __cnfn add_sat(ulong3, ulong3);\n" "long4 __ovld __cnfn add_sat(long4, long4);\n" "ulong4 __ovld __cnfn add_sat(ulong4, ulong4);\n" "long8 __ovld __cnfn add_sat(long8, long8);\n" "ulong8 __ovld __cnfn add_sat(ulong8, ulong8);\n" "long16 __ovld __cnfn add_sat(long16, long16);\n" "ulong16 __ovld __cnfn add_sat(ulong16, ulong16);\n" "\n" "/**\n" " * Returns (x + y) >> 1. The intermediate sum does\n" " * not modulo overflow.\n" " */\n" "char __ovld __cnfn hadd(char, char);\n" "uchar __ovld __cnfn hadd(uchar, uchar);\n" "char2 __ovld __cnfn hadd(char2, char2);\n" "uchar2 __ovld __cnfn hadd(uchar2, uchar2);\n" "char3 __ovld __cnfn hadd(char3, char3);\n" "uchar3 __ovld __cnfn hadd(uchar3, uchar3);\n" "char4 __ovld __cnfn hadd(char4, char4);\n" "uchar4 __ovld __cnfn hadd(uchar4, uchar4);\n" "char8 __ovld __cnfn hadd(char8, char8);\n" "uchar8 __ovld __cnfn hadd(uchar8, uchar8);\n" "char16 __ovld __cnfn hadd(char16, char16);\n" "uchar16 __ovld __cnfn hadd(uchar16, uchar16);\n" "short __ovld __cnfn hadd(short, short);\n" "ushort __ovld __cnfn hadd(ushort, ushort);\n" "short2 __ovld __cnfn hadd(short2, short2);\n" "ushort2 __ovld __cnfn hadd(ushort2, ushort2);\n" "short3 __ovld __cnfn hadd(short3, short3);\n" "ushort3 __ovld __cnfn hadd(ushort3, ushort3);\n" "short4 __ovld __cnfn hadd(short4, short4);\n" "ushort4 __ovld __cnfn hadd(ushort4, ushort4);\n" "short8 __ovld __cnfn hadd(short8, short8);\n" "ushort8 __ovld __cnfn hadd(ushort8, ushort8);\n" "short16 __ovld __cnfn hadd(short16, short16);\n" "ushort16 __ovld __cnfn hadd(ushort16, ushort16);\n" "int __ovld __cnfn hadd(int, int);\n" "uint __ovld __cnfn hadd(uint, uint);\n" "int2 __ovld __cnfn hadd(int2, int2);\n" "uint2 __ovld __cnfn hadd(uint2, uint2);\n" "int3 __ovld __cnfn hadd(int3, int3);\n" "uint3 __ovld __cnfn hadd(uint3, uint3);\n" "int4 __ovld __cnfn hadd(int4, int4);\n" "uint4 __ovld __cnfn hadd(uint4, uint4);\n" "int8 __ovld __cnfn hadd(int8, int8);\n" "uint8 __ovld 
__cnfn hadd(uint8, uint8);\n" "int16 __ovld __cnfn hadd(int16, int16);\n" "uint16 __ovld __cnfn hadd(uint16, uint16);\n" "long __ovld __cnfn hadd(long, long);\n" "ulong __ovld __cnfn hadd(ulong, ulong);\n" "long2 __ovld __cnfn hadd(long2, long2);\n" "ulong2 __ovld __cnfn hadd(ulong2, ulong2);\n" "long3 __ovld __cnfn hadd(long3, long3);\n" "ulong3 __ovld __cnfn hadd(ulong3, ulong3);\n" "long4 __ovld __cnfn hadd(long4, long4);\n" "ulong4 __ovld __cnfn hadd(ulong4, ulong4);\n" "long8 __ovld __cnfn hadd(long8, long8);\n" "ulong8 __ovld __cnfn hadd(ulong8, ulong8);\n" "long16 __ovld __cnfn hadd(long16, long16);\n" "ulong16 __ovld __cnfn hadd(ulong16, ulong16);\n" "\n" "/**\n" " * Returns (x + y + 1) >> 1. The intermediate sum\n" " * does not modulo overflow.\n" " */\n" "char __ovld __cnfn rhadd(char, char);\n" "uchar __ovld __cnfn rhadd(uchar, uchar);\n" "char2 __ovld __cnfn rhadd(char2, char2);\n" "uchar2 __ovld __cnfn rhadd(uchar2, uchar2);\n" "char3 __ovld __cnfn rhadd(char3, char3);\n" "uchar3 __ovld __cnfn rhadd(uchar3, uchar3);\n" "char4 __ovld __cnfn rhadd(char4, char4);\n" "uchar4 __ovld __cnfn rhadd(uchar4, uchar4);\n" "char8 __ovld __cnfn rhadd(char8, char8);\n" "uchar8 __ovld __cnfn rhadd(uchar8, uchar8);\n" "char16 __ovld __cnfn rhadd(char16, char16);\n" "uchar16 __ovld __cnfn rhadd(uchar16, uchar16);\n" "short __ovld __cnfn rhadd(short, short);\n" "ushort __ovld __cnfn rhadd(ushort, ushort);\n" "short2 __ovld __cnfn rhadd(short2, short2);\n" "ushort2 __ovld __cnfn rhadd(ushort2, ushort2);\n" "short3 __ovld __cnfn rhadd(short3, short3);\n" "ushort3 __ovld __cnfn rhadd(ushort3, ushort3);\n" "short4 __ovld __cnfn rhadd(short4, short4);\n" "ushort4 __ovld __cnfn rhadd(ushort4, ushort4);\n" "short8 __ovld __cnfn rhadd(short8, short8);\n" "ushort8 __ovld __cnfn rhadd(ushort8, ushort8);\n" "short16 __ovld __cnfn rhadd(short16, short16);\n" "ushort16 __ovld __cnfn rhadd(ushort16, ushort16);\n" "int __ovld __cnfn rhadd(int, int);\n" "uint __ovld __cnfn rhadd(uint, 
uint);\n" "int2 __ovld __cnfn rhadd(int2, int2);\n" "uint2 __ovld __cnfn rhadd(uint2, uint2);\n" "int3 __ovld __cnfn rhadd(int3, int3);\n" "uint3 __ovld __cnfn rhadd(uint3, uint3);\n" "int4 __ovld __cnfn rhadd(int4, int4);\n" "uint4 __ovld __cnfn rhadd(uint4, uint4);\n" "int8 __ovld __cnfn rhadd(int8, int8);\n" "uint8 __ovld __cnfn rhadd(uint8, uint8);\n" "int16 __ovld __cnfn rhadd(int16, int16);\n" "uint16 __ovld __cnfn rhadd(uint16, uint16);\n" "long __ovld __cnfn rhadd(long, long);\n" "ulong __ovld __cnfn rhadd(ulong, ulong);\n" "long2 __ovld __cnfn rhadd(long2, long2);\n" "ulong2 __ovld __cnfn rhadd(ulong2, ulong2);\n" "long3 __ovld __cnfn rhadd(long3, long3);\n" "ulong3 __ovld __cnfn rhadd(ulong3, ulong3);\n" "long4 __ovld __cnfn rhadd(long4, long4);\n" "ulong4 __ovld __cnfn rhadd(ulong4, ulong4);\n" "long8 __ovld __cnfn rhadd(long8, long8);\n" "ulong8 __ovld __cnfn rhadd(ulong8, ulong8);\n" "long16 __ovld __cnfn rhadd(long16, long16);\n" "ulong16 __ovld __cnfn rhadd(ulong16, ulong16);\n" "\n" "/**\n" " * Returns min(max(x, minval), maxval).\n" " * Results are undefined if minval > maxval.\n" " */\n" "char __ovld __cnfn clamp(char, char, char);\n" "uchar __ovld __cnfn clamp(uchar, uchar, uchar);\n" "char2 __ovld __cnfn clamp(char2, char2, char2);\n" "uchar2 __ovld __cnfn clamp(uchar2, uchar2, uchar2);\n" "char3 __ovld __cnfn clamp(char3, char3, char3);\n" "uchar3 __ovld __cnfn clamp(uchar3, uchar3, uchar3);\n" "char4 __ovld __cnfn clamp(char4, char4, char4);\n" "uchar4 __ovld __cnfn clamp(uchar4, uchar4, uchar4);\n" "char8 __ovld __cnfn clamp(char8, char8, char8);\n" "uchar8 __ovld __cnfn clamp(uchar8, uchar8, uchar8);\n" "char16 __ovld __cnfn clamp(char16, char16, char16);\n" "uchar16 __ovld __cnfn clamp(uchar16, uchar16, uchar16);\n" "short __ovld __cnfn clamp(short, short, short);\n" "ushort __ovld __cnfn clamp(ushort, ushort, ushort);\n" "short2 __ovld __cnfn clamp(short2, short2, short2);\n" "ushort2 __ovld __cnfn clamp(ushort2, ushort2, ushort2);\n" 
"short3 __ovld __cnfn clamp(short3, short3, short3);\n" "ushort3 __ovld __cnfn clamp(ushort3, ushort3, ushort3);\n" "short4 __ovld __cnfn clamp(short4, short4, short4);\n" "ushort4 __ovld __cnfn clamp(ushort4, ushort4, ushort4);\n" "short8 __ovld __cnfn clamp(short8, short8, short8);\n" "ushort8 __ovld __cnfn clamp(ushort8, ushort8, ushort8);\n" "short16 __ovld __cnfn clamp(short16, short16, short16);\n" "ushort16 __ovld __cnfn clamp(ushort16, ushort16, ushort16);\n" "int __ovld __cnfn clamp(int, int, int);\n" "uint __ovld __cnfn clamp(uint, uint, uint);\n" "int2 __ovld __cnfn clamp(int2, int2, int2);\n" "uint2 __ovld __cnfn clamp(uint2, uint2, uint2);\n" "int3 __ovld __cnfn clamp(int3, int3, int3);\n" "uint3 __ovld __cnfn clamp(uint3, uint3, uint3);\n" "int4 __ovld __cnfn clamp(int4, int4, int4);\n" "uint4 __ovld __cnfn clamp(uint4, uint4, uint4);\n" "int8 __ovld __cnfn clamp(int8, int8, int8);\n" "uint8 __ovld __cnfn clamp(uint8, uint8, uint8);\n" "int16 __ovld __cnfn clamp(int16, int16, int16);\n" "uint16 __ovld __cnfn clamp(uint16, uint16, uint16);\n" "long __ovld __cnfn clamp(long, long, long);\n" "ulong __ovld __cnfn clamp(ulong, ulong, ulong);\n" "long2 __ovld __cnfn clamp(long2, long2, long2);\n" "ulong2 __ovld __cnfn clamp(ulong2, ulong2, ulong2);\n" "long3 __ovld __cnfn clamp(long3, long3, long3);\n" "ulong3 __ovld __cnfn clamp(ulong3, ulong3, ulong3);\n" "long4 __ovld __cnfn clamp(long4, long4, long4);\n" "ulong4 __ovld __cnfn clamp(ulong4, ulong4, ulong4);\n" "long8 __ovld __cnfn clamp(long8, long8, long8);\n" "ulong8 __ovld __cnfn clamp(ulong8, ulong8, ulong8);\n" "long16 __ovld __cnfn clamp(long16, long16, long16);\n" "ulong16 __ovld __cnfn clamp(ulong16, ulong16, ulong16);\n" "char2 __ovld __cnfn clamp(char2, char, char);\n" "uchar2 __ovld __cnfn clamp(uchar2, uchar, uchar);\n" "char3 __ovld __cnfn clamp(char3, char, char);\n" "uchar3 __ovld __cnfn clamp(uchar3, uchar, uchar);\n" "char4 __ovld __cnfn clamp(char4, char, char);\n" "uchar4 __ovld __cnfn 
clamp(uchar4, uchar, uchar);\n" "char8 __ovld __cnfn clamp(char8, char, char);\n" "uchar8 __ovld __cnfn clamp(uchar8, uchar, uchar);\n" "char16 __ovld __cnfn clamp(char16, char, char);\n" "uchar16 __ovld __cnfn clamp(uchar16, uchar, uchar);\n" "short2 __ovld __cnfn clamp(short2, short, short);\n" "ushort2 __ovld __cnfn clamp(ushort2, ushort, ushort);\n" "short3 __ovld __cnfn clamp(short3, short, short);\n" "ushort3 __ovld __cnfn clamp(ushort3, ushort, ushort);\n" "short4 __ovld __cnfn clamp(short4, short, short);\n" "ushort4 __ovld __cnfn clamp(ushort4, ushort, ushort);\n" "short8 __ovld __cnfn clamp(short8, short, short);\n" "ushort8 __ovld __cnfn clamp(ushort8, ushort, ushort);\n" "short16 __ovld __cnfn clamp(short16, short, short);\n" "ushort16 __ovld __cnfn clamp(ushort16, ushort, ushort);\n" "int2 __ovld __cnfn clamp(int2, int, int);\n" "uint2 __ovld __cnfn clamp(uint2, uint, uint);\n" "int3 __ovld __cnfn clamp(int3, int, int);\n" "uint3 __ovld __cnfn clamp(uint3, uint, uint);\n" "int4 __ovld __cnfn clamp(int4, int, int);\n" "uint4 __ovld __cnfn clamp(uint4, uint, uint);\n" "int8 __ovld __cnfn clamp(int8, int, int);\n" "uint8 __ovld __cnfn clamp(uint8, uint, uint);\n" "int16 __ovld __cnfn clamp(int16, int, int);\n" "uint16 __ovld __cnfn clamp(uint16, uint, uint);\n" "long2 __ovld __cnfn clamp(long2, long, long);\n" "ulong2 __ovld __cnfn clamp(ulong2, ulong, ulong);\n" "long3 __ovld __cnfn clamp(long3, long, long);\n" "ulong3 __ovld __cnfn clamp(ulong3, ulong, ulong);\n" "long4 __ovld __cnfn clamp(long4, long, long);\n" "ulong4 __ovld __cnfn clamp(ulong4, ulong, ulong);\n" "long8 __ovld __cnfn clamp(long8, long, long);\n" "ulong8 __ovld __cnfn clamp(ulong8, ulong, ulong);\n" "long16 __ovld __cnfn clamp(long16, long, long);\n" "ulong16 __ovld __cnfn clamp(ulong16, ulong, ulong);\n" "\n" "/**\n" " * Returns the number of leading 0-bits in x, starting\n" " * at the most significant bit position.\n" " */\n" "char __ovld __cnfn clz(char);\n" "uchar __ovld __cnfn 
clz(uchar);\n" "char2 __ovld __cnfn clz(char2);\n" "uchar2 __ovld __cnfn clz(uchar2);\n" "char3 __ovld __cnfn clz(char3);\n" "uchar3 __ovld __cnfn clz(uchar3);\n" "char4 __ovld __cnfn clz(char4);\n" "uchar4 __ovld __cnfn clz(uchar4);\n" "char8 __ovld __cnfn clz(char8);\n" "uchar8 __ovld __cnfn clz(uchar8);\n" "char16 __ovld __cnfn clz(char16);\n" "uchar16 __ovld __cnfn clz(uchar16);\n" "short __ovld __cnfn clz(short);\n" "ushort __ovld __cnfn clz(ushort);\n" "short2 __ovld __cnfn clz(short2);\n" "ushort2 __ovld __cnfn clz(ushort2);\n" "short3 __ovld __cnfn clz(short3);\n" "ushort3 __ovld __cnfn clz(ushort3);\n" "short4 __ovld __cnfn clz(short4);\n" "ushort4 __ovld __cnfn clz(ushort4);\n" "short8 __ovld __cnfn clz(short8);\n" "ushort8 __ovld __cnfn clz(ushort8);\n" "short16 __ovld __cnfn clz(short16);\n" "ushort16 __ovld __cnfn clz(ushort16);\n" "int __ovld __cnfn clz(int);\n" "uint __ovld __cnfn clz(uint);\n" "int2 __ovld __cnfn clz(int2);\n" "uint2 __ovld __cnfn clz(uint2);\n" "int3 __ovld __cnfn clz(int3);\n" "uint3 __ovld __cnfn clz(uint3);\n" "int4 __ovld __cnfn clz(int4);\n" "uint4 __ovld __cnfn clz(uint4);\n" "int8 __ovld __cnfn clz(int8);\n" "uint8 __ovld __cnfn clz(uint8);\n" "int16 __ovld __cnfn clz(int16);\n" "uint16 __ovld __cnfn clz(uint16);\n" "long __ovld __cnfn clz(long);\n" "ulong __ovld __cnfn clz(ulong);\n" "long2 __ovld __cnfn clz(long2);\n" "ulong2 __ovld __cnfn clz(ulong2);\n" "long3 __ovld __cnfn clz(long3);\n" "ulong3 __ovld __cnfn clz(ulong3);\n" "long4 __ovld __cnfn clz(long4);\n" "ulong4 __ovld __cnfn clz(ulong4);\n" "long8 __ovld __cnfn clz(long8);\n" "ulong8 __ovld __cnfn clz(ulong8);\n" "long16 __ovld __cnfn clz(long16);\n" "ulong16 __ovld __cnfn clz(ulong16);\n" "\n" "/**\n" " * Returns the count of trailing 0-bits in x. 
If x is 0,\n" " * returns the size in bits of the type of x or\n" " * component type of x, if x is a vector.\n" " */\n" "#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)\n" "char __ovld __cnfn ctz(char);\n" "uchar __ovld __cnfn ctz(uchar);\n" "char2 __ovld __cnfn ctz(char2);\n" "uchar2 __ovld __cnfn ctz(uchar2);\n" "char3 __ovld __cnfn ctz(char3);\n" "uchar3 __ovld __cnfn ctz(uchar3);\n" "char4 __ovld __cnfn ctz(char4);\n" "uchar4 __ovld __cnfn ctz(uchar4);\n" "char8 __ovld __cnfn ctz(char8);\n" "uchar8 __ovld __cnfn ctz(uchar8);\n" "char16 __ovld __cnfn ctz(char16);\n" "uchar16 __ovld __cnfn ctz(uchar16);\n" "short __ovld __cnfn ctz(short);\n" "ushort __ovld __cnfn ctz(ushort);\n" "short2 __ovld __cnfn ctz(short2);\n" "ushort2 __ovld __cnfn ctz(ushort2);\n" "short3 __ovld __cnfn ctz(short3);\n" "ushort3 __ovld __cnfn ctz(ushort3);\n" "short4 __ovld __cnfn ctz(short4);\n" "ushort4 __ovld __cnfn ctz(ushort4);\n" "short8 __ovld __cnfn ctz(short8);\n" "ushort8 __ovld __cnfn ctz(ushort8);\n" "short16 __ovld __cnfn ctz(short16);\n" "ushort16 __ovld __cnfn ctz(ushort16);\n" "int __ovld __cnfn ctz(int);\n" "uint __ovld __cnfn ctz(uint);\n" "int2 __ovld __cnfn ctz(int2);\n" "uint2 __ovld __cnfn ctz(uint2);\n" "int3 __ovld __cnfn ctz(int3);\n" "uint3 __ovld __cnfn ctz(uint3);\n" "int4 __ovld __cnfn ctz(int4);\n" "uint4 __ovld __cnfn ctz(uint4);\n" "int8 __ovld __cnfn ctz(int8);\n" "uint8 __ovld __cnfn ctz(uint8);\n" "int16 __ovld __cnfn ctz(int16);\n" "uint16 __ovld __cnfn ctz(uint16);\n" "long __ovld __cnfn ctz(long);\n" "ulong __ovld __cnfn ctz(ulong);\n" "long2 __ovld __cnfn ctz(long2);\n" "ulong2 __ovld __cnfn ctz(ulong2);\n" "long3 __ovld __cnfn ctz(long3);\n" "ulong3 __ovld __cnfn ctz(ulong3);\n" "long4 __ovld __cnfn ctz(long4);\n" "ulong4 __ovld __cnfn ctz(ulong4);\n" "long8 __ovld __cnfn ctz(long8);\n" "ulong8 __ovld __cnfn ctz(ulong8);\n" "long16 __ovld __cnfn ctz(long16);\n" "ulong16 __ovld __cnfn ctz(ulong16);\n" "#endif 
//defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)\n" "\n" "/**\n" " * Returns mul_hi(a, b) + c.\n" " */\n" "char __ovld __cnfn mad_hi(char, char, char);\n" "uchar __ovld __cnfn mad_hi(uchar, uchar, uchar);\n" "char2 __ovld __cnfn mad_hi(char2, char2, char2);\n" "uchar2 __ovld __cnfn mad_hi(uchar2, uchar2, uchar2);\n" "char3 __ovld __cnfn mad_hi(char3, char3, char3);\n" "uchar3 __ovld __cnfn mad_hi(uchar3, uchar3, uchar3);\n" "char4 __ovld __cnfn mad_hi(char4, char4, char4);\n" "uchar4 __ovld __cnfn mad_hi(uchar4, uchar4, uchar4);\n" "char8 __ovld __cnfn mad_hi(char8, char8, char8);\n" "uchar8 __ovld __cnfn mad_hi(uchar8, uchar8, uchar8);\n" "char16 __ovld __cnfn mad_hi(char16, char16, char16);\n" "uchar16 __ovld __cnfn mad_hi(uchar16, uchar16, uchar16);\n" "short __ovld __cnfn mad_hi(short, short, short);\n" "ushort __ovld __cnfn mad_hi(ushort, ushort, ushort);\n" "short2 __ovld __cnfn mad_hi(short2, short2, short2);\n" "ushort2 __ovld __cnfn mad_hi(ushort2, ushort2, ushort2);\n" "short3 __ovld __cnfn mad_hi(short3, short3, short3);\n" "ushort3 __ovld __cnfn mad_hi(ushort3, ushort3, ushort3);\n" "short4 __ovld __cnfn mad_hi(short4, short4, short4);\n" "ushort4 __ovld __cnfn mad_hi(ushort4, ushort4, ushort4);\n" "short8 __ovld __cnfn mad_hi(short8, short8, short8);\n" "ushort8 __ovld __cnfn mad_hi(ushort8, ushort8, ushort8);\n" "short16 __ovld __cnfn mad_hi(short16, short16, short16);\n" "ushort16 __ovld __cnfn mad_hi(ushort16, ushort16, ushort16);\n" "int __ovld __cnfn mad_hi(int, int, int);\n" "uint __ovld __cnfn mad_hi(uint, uint, uint);\n" "int2 __ovld __cnfn mad_hi(int2, int2, int2);\n" "uint2 __ovld __cnfn mad_hi(uint2, uint2, uint2);\n" "int3 __ovld __cnfn mad_hi(int3, int3, int3);\n" "uint3 __ovld __cnfn mad_hi(uint3, uint3, uint3);\n" "int4 __ovld __cnfn mad_hi(int4, int4, int4);\n" "uint4 __ovld __cnfn mad_hi(uint4, uint4, uint4);\n" "int8 __ovld __cnfn mad_hi(int8, int8, int8);\n" "uint8 __ovld __cnfn mad_hi(uint8, uint8, 
uint8);\n" "int16 __ovld __cnfn mad_hi(int16, int16, int16);\n" "uint16 __ovld __cnfn mad_hi(uint16, uint16, uint16);\n" "long __ovld __cnfn mad_hi(long, long, long);\n" "ulong __ovld __cnfn mad_hi(ulong, ulong, ulong);\n" "long2 __ovld __cnfn mad_hi(long2, long2, long2);\n" "ulong2 __ovld __cnfn mad_hi(ulong2, ulong2, ulong2);\n" "long3 __ovld __cnfn mad_hi(long3, long3, long3);\n" "ulong3 __ovld __cnfn mad_hi(ulong3, ulong3, ulong3);\n" "long4 __ovld __cnfn mad_hi(long4, long4, long4);\n" "ulong4 __ovld __cnfn mad_hi(ulong4, ulong4, ulong4);\n" "long8 __ovld __cnfn mad_hi(long8, long8, long8);\n" "ulong8 __ovld __cnfn mad_hi(ulong8, ulong8, ulong8);\n" "long16 __ovld __cnfn mad_hi(long16, long16, long16);\n" "ulong16 __ovld __cnfn mad_hi(ulong16, ulong16, ulong16);\n" "\n" "/**\n" " * Returns a * b + c and saturates the result.\n" " */\n" "char __ovld __cnfn mad_sat(char, char, char);\n" "uchar __ovld __cnfn mad_sat(uchar, uchar, uchar);\n" "char2 __ovld __cnfn mad_sat(char2, char2, char2);\n" "uchar2 __ovld __cnfn mad_sat(uchar2, uchar2, uchar2);\n" "char3 __ovld __cnfn mad_sat(char3, char3, char3);\n" "uchar3 __ovld __cnfn mad_sat(uchar3, uchar3, uchar3);\n" "char4 __ovld __cnfn mad_sat(char4, char4, char4);\n" "uchar4 __ovld __cnfn mad_sat(uchar4, uchar4, uchar4);\n" "char8 __ovld __cnfn mad_sat(char8, char8, char8);\n" "uchar8 __ovld __cnfn mad_sat(uchar8, uchar8, uchar8);\n" "char16 __ovld __cnfn mad_sat(char16, char16, char16);\n" "uchar16 __ovld __cnfn mad_sat(uchar16, uchar16, uchar16);\n" "short __ovld __cnfn mad_sat(short, short, short);\n" "ushort __ovld __cnfn mad_sat(ushort, ushort, ushort);\n" "short2 __ovld __cnfn mad_sat(short2, short2, short2);\n" "ushort2 __ovld __cnfn mad_sat(ushort2, ushort2, ushort2);\n" "short3 __ovld __cnfn mad_sat(short3, short3, short3);\n" "ushort3 __ovld __cnfn mad_sat(ushort3, ushort3, ushort3);\n" "short4 __ovld __cnfn mad_sat(short4, short4, short4);\n" "ushort4 __ovld __cnfn mad_sat(ushort4, ushort4, ushort4);\n" 
"short8 __ovld __cnfn mad_sat(short8, short8, short8);\n" "ushort8 __ovld __cnfn mad_sat(ushort8, ushort8, ushort8);\n" "short16 __ovld __cnfn mad_sat(short16, short16, short16);\n" "ushort16 __ovld __cnfn mad_sat(ushort16, ushort16, ushort16);\n" "int __ovld __cnfn mad_sat(int, int, int);\n" "uint __ovld __cnfn mad_sat(uint, uint, uint);\n" "int2 __ovld __cnfn mad_sat(int2, int2, int2);\n" "uint2 __ovld __cnfn mad_sat(uint2, uint2, uint2);\n" "int3 __ovld __cnfn mad_sat(int3, int3, int3);\n" "uint3 __ovld __cnfn mad_sat(uint3, uint3, uint3);\n" "int4 __ovld __cnfn mad_sat(int4, int4, int4);\n" "uint4 __ovld __cnfn mad_sat(uint4, uint4, uint4);\n" "int8 __ovld __cnfn mad_sat(int8, int8, int8);\n" "uint8 __ovld __cnfn mad_sat(uint8, uint8, uint8);\n" "int16 __ovld __cnfn mad_sat(int16, int16, int16);\n" "uint16 __ovld __cnfn mad_sat(uint16, uint16, uint16);\n" "long __ovld __cnfn mad_sat(long, long, long);\n" "ulong __ovld __cnfn mad_sat(ulong, ulong, ulong);\n" "long2 __ovld __cnfn mad_sat(long2, long2, long2);\n" "ulong2 __ovld __cnfn mad_sat(ulong2, ulong2, ulong2);\n" "long3 __ovld __cnfn mad_sat(long3, long3, long3);\n" "ulong3 __ovld __cnfn mad_sat(ulong3, ulong3, ulong3);\n" "long4 __ovld __cnfn mad_sat(long4, long4, long4);\n" "ulong4 __ovld __cnfn mad_sat(ulong4, ulong4, ulong4);\n" "long8 __ovld __cnfn mad_sat(long8, long8, long8);\n" "ulong8 __ovld __cnfn mad_sat(ulong8, ulong8, ulong8);\n" "long16 __ovld __cnfn mad_sat(long16, long16, long16);\n" "ulong16 __ovld __cnfn mad_sat(ulong16, ulong16, ulong16);\n" "\n" "/**\n" " * Returns y if x < y, otherwise it returns x.\n" " */\n" "char __ovld __cnfn max(char, char);\n" "uchar __ovld __cnfn max(uchar, uchar);\n" "char2 __ovld __cnfn max(char2, char2);\n" "uchar2 __ovld __cnfn max(uchar2, uchar2);\n" "char3 __ovld __cnfn max(char3, char3);\n" "uchar3 __ovld __cnfn max(uchar3, uchar3);\n" "char4 __ovld __cnfn max(char4, char4);\n" "uchar4 __ovld __cnfn max(uchar4, uchar4);\n" "char8 __ovld __cnfn max(char8, 
char8);\n" "uchar8 __ovld __cnfn max(uchar8, uchar8);\n" "char16 __ovld __cnfn max(char16, char16);\n" "uchar16 __ovld __cnfn max(uchar16, uchar16);\n" "short __ovld __cnfn max(short, short);\n" "ushort __ovld __cnfn max(ushort, ushort);\n" "short2 __ovld __cnfn max(short2, short2);\n" "ushort2 __ovld __cnfn max(ushort2, ushort2);\n" "short3 __ovld __cnfn max(short3, short3);\n" "ushort3 __ovld __cnfn max(ushort3, ushort3);\n" "short4 __ovld __cnfn max(short4, short4);\n" "ushort4 __ovld __cnfn max(ushort4, ushort4);\n" "short8 __ovld __cnfn max(short8, short8);\n" "ushort8 __ovld __cnfn max(ushort8, ushort8);\n" "short16 __ovld __cnfn max(short16, short16);\n" "ushort16 __ovld __cnfn max(ushort16, ushort16);\n" "int __ovld __cnfn max(int, int);\n" "uint __ovld __cnfn max(uint, uint);\n" "int2 __ovld __cnfn max(int2, int2);\n" "uint2 __ovld __cnfn max(uint2, uint2);\n" "int3 __ovld __cnfn max(int3, int3);\n" "uint3 __ovld __cnfn max(uint3, uint3);\n" "int4 __ovld __cnfn max(int4, int4);\n" "uint4 __ovld __cnfn max(uint4, uint4);\n" "int8 __ovld __cnfn max(int8, int8);\n" "uint8 __ovld __cnfn max(uint8, uint8);\n" "int16 __ovld __cnfn max(int16, int16);\n" "uint16 __ovld __cnfn max(uint16, uint16);\n" "long __ovld __cnfn max(long, long);\n" "ulong __ovld __cnfn max(ulong, ulong);\n" "long2 __ovld __cnfn max(long2, long2);\n" "ulong2 __ovld __cnfn max(ulong2, ulong2);\n" "long3 __ovld __cnfn max(long3, long3);\n" "ulong3 __ovld __cnfn max(ulong3, ulong3);\n" "long4 __ovld __cnfn max(long4, long4);\n" "ulong4 __ovld __cnfn max(ulong4, ulong4);\n" "long8 __ovld __cnfn max(long8, long8);\n" "ulong8 __ovld __cnfn max(ulong8, ulong8);\n" "long16 __ovld __cnfn max(long16, long16);\n" "ulong16 __ovld __cnfn max(ulong16, ulong16);\n" "char2 __ovld __cnfn max(char2, char);\n" "uchar2 __ovld __cnfn max(uchar2, uchar);\n" "char3 __ovld __cnfn max(char3, char);\n" "uchar3 __ovld __cnfn max(uchar3, uchar);\n" "char4 __ovld __cnfn max(char4, char);\n" "uchar4 __ovld __cnfn 
max(uchar4, uchar);\n" "char8 __ovld __cnfn max(char8, char);\n" "uchar8 __ovld __cnfn max(uchar8, uchar);\n" "char16 __ovld __cnfn max(char16, char);\n" "uchar16 __ovld __cnfn max(uchar16, uchar);\n" "short2 __ovld __cnfn max(short2, short);\n" "ushort2 __ovld __cnfn max(ushort2, ushort);\n" "short3 __ovld __cnfn max(short3, short);\n" "ushort3 __ovld __cnfn max(ushort3, ushort);\n" "short4 __ovld __cnfn max(short4, short);\n" "ushort4 __ovld __cnfn max(ushort4, ushort);\n" "short8 __ovld __cnfn max(short8, short);\n" "ushort8 __ovld __cnfn max(ushort8, ushort);\n" "short16 __ovld __cnfn max(short16, short);\n" "ushort16 __ovld __cnfn max(ushort16, ushort);\n" "int2 __ovld __cnfn max(int2, int);\n" "uint2 __ovld __cnfn max(uint2, uint);\n" "int3 __ovld __cnfn max(int3, int);\n" "uint3 __ovld __cnfn max(uint3, uint);\n" "int4 __ovld __cnfn max(int4, int);\n" "uint4 __ovld __cnfn max(uint4, uint);\n" "int8 __ovld __cnfn max(int8, int);\n" "uint8 __ovld __cnfn max(uint8, uint);\n" "int16 __ovld __cnfn max(int16, int);\n" "uint16 __ovld __cnfn max(uint16, uint);\n" "long2 __ovld __cnfn max(long2, long);\n" "ulong2 __ovld __cnfn max(ulong2, ulong);\n" "long3 __ovld __cnfn max(long3, long);\n" "ulong3 __ovld __cnfn max(ulong3, ulong);\n" "long4 __ovld __cnfn max(long4, long);\n" "ulong4 __ovld __cnfn max(ulong4, ulong);\n" "long8 __ovld __cnfn max(long8, long);\n" "ulong8 __ovld __cnfn max(ulong8, ulong);\n" "long16 __ovld __cnfn max(long16, long);\n" "ulong16 __ovld __cnfn max(ulong16, ulong);\n" "\n" "/**\n" " * Returns y if y < x, otherwise it returns x.\n" " */\n" "char __ovld __cnfn min(char, char);\n" "uchar __ovld __cnfn min(uchar, uchar);\n" "char2 __ovld __cnfn min(char2, char2);\n" "uchar2 __ovld __cnfn min(uchar2, uchar2);\n" "char3 __ovld __cnfn min(char3, char3);\n" "uchar3 __ovld __cnfn min(uchar3, uchar3);\n" "char4 __ovld __cnfn min(char4, char4);\n" "uchar4 __ovld __cnfn min(uchar4, uchar4);\n" "char8 __ovld __cnfn min(char8, char8);\n" "uchar8 __ovld 
__cnfn min(uchar8, uchar8);\n" "char16 __ovld __cnfn min(char16, char16);\n" "uchar16 __ovld __cnfn min(uchar16, uchar16);\n" "short __ovld __cnfn min(short, short);\n" "ushort __ovld __cnfn min(ushort, ushort);\n" "short2 __ovld __cnfn min(short2, short2);\n" "ushort2 __ovld __cnfn min(ushort2, ushort2);\n" "short3 __ovld __cnfn min(short3, short3);\n" "ushort3 __ovld __cnfn min(ushort3, ushort3);\n" "short4 __ovld __cnfn min(short4, short4);\n" "ushort4 __ovld __cnfn min(ushort4, ushort4);\n" "short8 __ovld __cnfn min(short8, short8);\n" "ushort8 __ovld __cnfn min(ushort8, ushort8);\n" "short16 __ovld __cnfn min(short16, short16);\n" "ushort16 __ovld __cnfn min(ushort16, ushort16);\n" "int __ovld __cnfn min(int, int);\n" "uint __ovld __cnfn min(uint, uint);\n" "int2 __ovld __cnfn min(int2, int2);\n" "uint2 __ovld __cnfn min(uint2, uint2);\n" "int3 __ovld __cnfn min(int3, int3);\n" "uint3 __ovld __cnfn min(uint3, uint3);\n" "int4 __ovld __cnfn min(int4, int4);\n" "uint4 __ovld __cnfn min(uint4, uint4);\n" "int8 __ovld __cnfn min(int8, int8);\n" "uint8 __ovld __cnfn min(uint8, uint8);\n" "int16 __ovld __cnfn min(int16, int16);\n" "uint16 __ovld __cnfn min(uint16, uint16);\n" "long __ovld __cnfn min(long, long);\n" "ulong __ovld __cnfn min(ulong, ulong);\n" "long2 __ovld __cnfn min(long2, long2);\n" "ulong2 __ovld __cnfn min(ulong2, ulong2);\n" "long3 __ovld __cnfn min(long3, long3);\n" "ulong3 __ovld __cnfn min(ulong3, ulong3);\n" "long4 __ovld __cnfn min(long4, long4);\n" "ulong4 __ovld __cnfn min(ulong4, ulong4);\n" "long8 __ovld __cnfn min(long8, long8);\n" "ulong8 __ovld __cnfn min(ulong8, ulong8);\n" "long16 __ovld __cnfn min(long16, long16);\n" "ulong16 __ovld __cnfn min(ulong16, ulong16);\n" "char2 __ovld __cnfn min(char2, char);\n" "uchar2 __ovld __cnfn min(uchar2, uchar);\n" "char3 __ovld __cnfn min(char3, char);\n" "uchar3 __ovld __cnfn min(uchar3, uchar);\n" "char4 __ovld __cnfn min(char4, char);\n" "uchar4 __ovld __cnfn min(uchar4, uchar);\n" "char8 
__ovld __cnfn min(char8, char);\n" "uchar8 __ovld __cnfn min(uchar8, uchar);\n" "char16 __ovld __cnfn min(char16, char);\n" "uchar16 __ovld __cnfn min(uchar16, uchar);\n" "short2 __ovld __cnfn min(short2, short);\n" "ushort2 __ovld __cnfn min(ushort2, ushort);\n" "short3 __ovld __cnfn min(short3, short);\n" "ushort3 __ovld __cnfn min(ushort3, ushort);\n" "short4 __ovld __cnfn min(short4, short);\n" "ushort4 __ovld __cnfn min(ushort4, ushort);\n" "short8 __ovld __cnfn min(short8, short);\n" "ushort8 __ovld __cnfn min(ushort8, ushort);\n" "short16 __ovld __cnfn min(short16, short);\n" "ushort16 __ovld __cnfn min(ushort16, ushort);\n" "int2 __ovld __cnfn min(int2, int);\n" "uint2 __ovld __cnfn min(uint2, uint);\n" "int3 __ovld __cnfn min(int3, int);\n" "uint3 __ovld __cnfn min(uint3, uint);\n" "int4 __ovld __cnfn min(int4, int);\n" "uint4 __ovld __cnfn min(uint4, uint);\n" "int8 __ovld __cnfn min(int8, int);\n" "uint8 __ovld __cnfn min(uint8, uint);\n" "int16 __ovld __cnfn min(int16, int);\n" "uint16 __ovld __cnfn min(uint16, uint);\n" "long2 __ovld __cnfn min(long2, long);\n" "ulong2 __ovld __cnfn min(ulong2, ulong);\n" "long3 __ovld __cnfn min(long3, long);\n" "ulong3 __ovld __cnfn min(ulong3, ulong);\n" "long4 __ovld __cnfn min(long4, long);\n" "ulong4 __ovld __cnfn min(ulong4, ulong);\n" "long8 __ovld __cnfn min(long8, long);\n" "ulong8 __ovld __cnfn min(ulong8, ulong);\n" "long16 __ovld __cnfn min(long16, long);\n" "ulong16 __ovld __cnfn min(ulong16, ulong);\n" "\n" "/**\n" " * Computes x * y and returns the high half of the\n" " * product of x and y.\n" " */\n" "char __ovld __cnfn mul_hi(char, char);\n" "uchar __ovld __cnfn mul_hi(uchar, uchar);\n" "char2 __ovld __cnfn mul_hi(char2, char2);\n" "uchar2 __ovld __cnfn mul_hi(uchar2, uchar2);\n" "char3 __ovld __cnfn mul_hi(char3, char3);\n" "uchar3 __ovld __cnfn mul_hi(uchar3, uchar3);\n" "char4 __ovld __cnfn mul_hi(char4, char4);\n" "uchar4 __ovld __cnfn mul_hi(uchar4, uchar4);\n" "char8 __ovld __cnfn mul_hi(char8, 
char8);\n" "uchar8 __ovld __cnfn mul_hi(uchar8, uchar8);\n" "char16 __ovld __cnfn mul_hi(char16, char16);\n" "uchar16 __ovld __cnfn mul_hi(uchar16, uchar16);\n" "short __ovld __cnfn mul_hi(short, short);\n" "ushort __ovld __cnfn mul_hi(ushort, ushort);\n" "short2 __ovld __cnfn mul_hi(short2, short2);\n" "ushort2 __ovld __cnfn mul_hi(ushort2, ushort2);\n" "short3 __ovld __cnfn mul_hi(short3, short3);\n" "ushort3 __ovld __cnfn mul_hi(ushort3, ushort3);\n" "short4 __ovld __cnfn mul_hi(short4, short4);\n" "ushort4 __ovld __cnfn mul_hi(ushort4, ushort4);\n" "short8 __ovld __cnfn mul_hi(short8, short8);\n" "ushort8 __ovld __cnfn mul_hi(ushort8, ushort8);\n" "short16 __ovld __cnfn mul_hi(short16, short16);\n" "ushort16 __ovld __cnfn mul_hi(ushort16, ushort16);\n" "int __ovld __cnfn mul_hi(int, int);\n" "uint __ovld __cnfn mul_hi(uint, uint);\n" "int2 __ovld __cnfn mul_hi(int2, int2);\n" "uint2 __ovld __cnfn mul_hi(uint2, uint2);\n" "int3 __ovld __cnfn mul_hi(int3, int3);\n" "uint3 __ovld __cnfn mul_hi(uint3, uint3);\n" "int4 __ovld __cnfn mul_hi(int4, int4);\n" "uint4 __ovld __cnfn mul_hi(uint4, uint4);\n" "int8 __ovld __cnfn mul_hi(int8, int8);\n" "uint8 __ovld __cnfn mul_hi(uint8, uint8);\n" "int16 __ovld __cnfn mul_hi(int16, int16);\n" "uint16 __ovld __cnfn mul_hi(uint16, uint16);\n" "long __ovld __cnfn mul_hi(long, long);\n" "ulong __ovld __cnfn mul_hi(ulong, ulong);\n" "long2 __ovld __cnfn mul_hi(long2, long2);\n" "ulong2 __ovld __cnfn mul_hi(ulong2, ulong2);\n" "long3 __ovld __cnfn mul_hi(long3, long3);\n" "ulong3 __ovld __cnfn mul_hi(ulong3, ulong3);\n" "long4 __ovld __cnfn mul_hi(long4, long4);\n" "ulong4 __ovld __cnfn mul_hi(ulong4, ulong4);\n" "long8 __ovld __cnfn mul_hi(long8, long8);\n" "ulong8 __ovld __cnfn mul_hi(ulong8, ulong8);\n" "long16 __ovld __cnfn mul_hi(long16, long16);\n" "ulong16 __ovld __cnfn mul_hi(ulong16, ulong16);\n" "\n" "/**\n" " * For each element in v, the bits are shifted left by\n" " * the number of bits given by the corresponding\n" " * 
element in i (subject to usual shift modulo rules\n" " * described in section 6.3). Bits shifted off the left\n" " * side of the element are shifted back in from the\n" " * right.\n" " */\n" "char __ovld __cnfn rotate(char, char);\n" "uchar __ovld __cnfn rotate(uchar, uchar);\n" "char2 __ovld __cnfn rotate(char2, char2);\n" "uchar2 __ovld __cnfn rotate(uchar2, uchar2);\n" "char3 __ovld __cnfn rotate(char3, char3);\n" "uchar3 __ovld __cnfn rotate(uchar3, uchar3);\n" "char4 __ovld __cnfn rotate(char4, char4);\n" "uchar4 __ovld __cnfn rotate(uchar4, uchar4);\n" "char8 __ovld __cnfn rotate(char8, char8);\n" "uchar8 __ovld __cnfn rotate(uchar8, uchar8);\n" "char16 __ovld __cnfn rotate(char16, char16);\n" "uchar16 __ovld __cnfn rotate(uchar16, uchar16);\n" "short __ovld __cnfn rotate(short, short);\n" "ushort __ovld __cnfn rotate(ushort, ushort);\n" "short2 __ovld __cnfn rotate(short2, short2);\n" "ushort2 __ovld __cnfn rotate(ushort2, ushort2);\n" "short3 __ovld __cnfn rotate(short3, short3);\n" "ushort3 __ovld __cnfn rotate(ushort3, ushort3);\n" "short4 __ovld __cnfn rotate(short4, short4);\n" "ushort4 __ovld __cnfn rotate(ushort4, ushort4);\n" "short8 __ovld __cnfn rotate(short8, short8);\n" "ushort8 __ovld __cnfn rotate(ushort8, ushort8);\n" "short16 __ovld __cnfn rotate(short16, short16);\n" "ushort16 __ovld __cnfn rotate(ushort16, ushort16);\n" "int __ovld __cnfn rotate(int, int);\n" "uint __ovld __cnfn rotate(uint, uint);\n" "int2 __ovld __cnfn rotate(int2, int2);\n" "uint2 __ovld __cnfn rotate(uint2, uint2);\n" "int3 __ovld __cnfn rotate(int3, int3);\n" "uint3 __ovld __cnfn rotate(uint3, uint3);\n" "int4 __ovld __cnfn rotate(int4, int4);\n" "uint4 __ovld __cnfn rotate(uint4, uint4);\n" "int8 __ovld __cnfn rotate(int8, int8);\n" "uint8 __ovld __cnfn rotate(uint8, uint8);\n" "int16 __ovld __cnfn rotate(int16, int16);\n" "uint16 __ovld __cnfn rotate(uint16, uint16);\n" "long __ovld __cnfn rotate(long, long);\n" "ulong __ovld __cnfn rotate(ulong, ulong);\n" "long2 
__ovld __cnfn rotate(long2, long2);\n" "ulong2 __ovld __cnfn rotate(ulong2, ulong2);\n" "long3 __ovld __cnfn rotate(long3, long3);\n" "ulong3 __ovld __cnfn rotate(ulong3, ulong3);\n" "long4 __ovld __cnfn rotate(long4, long4);\n" "ulong4 __ovld __cnfn rotate(ulong4, ulong4);\n" "long8 __ovld __cnfn rotate(long8, long8);\n" "ulong8 __ovld __cnfn rotate(ulong8, ulong8);\n" "long16 __ovld __cnfn rotate(long16, long16);\n" "ulong16 __ovld __cnfn rotate(ulong16, ulong16);\n" "\n" "/**\n" " * Returns x - y and saturates the result.\n" " */\n" "char __ovld __cnfn sub_sat(char, char);\n" "uchar __ovld __cnfn sub_sat(uchar, uchar);\n" "char2 __ovld __cnfn sub_sat(char2, char2);\n" "uchar2 __ovld __cnfn sub_sat(uchar2, uchar2);\n" "char3 __ovld __cnfn sub_sat(char3, char3);\n" "uchar3 __ovld __cnfn sub_sat(uchar3, uchar3);\n" "char4 __ovld __cnfn sub_sat(char4, char4);\n" "uchar4 __ovld __cnfn sub_sat(uchar4, uchar4);\n" "char8 __ovld __cnfn sub_sat(char8, char8);\n" "uchar8 __ovld __cnfn sub_sat(uchar8, uchar8);\n" "char16 __ovld __cnfn sub_sat(char16, char16);\n" "uchar16 __ovld __cnfn sub_sat(uchar16, uchar16);\n" "short __ovld __cnfn sub_sat(short, short);\n" "ushort __ovld __cnfn sub_sat(ushort, ushort);\n" "short2 __ovld __cnfn sub_sat(short2, short2);\n" "ushort2 __ovld __cnfn sub_sat(ushort2, ushort2);\n" "short3 __ovld __cnfn sub_sat(short3, short3);\n" "ushort3 __ovld __cnfn sub_sat(ushort3, ushort3);\n" "short4 __ovld __cnfn sub_sat(short4, short4);\n" "ushort4 __ovld __cnfn sub_sat(ushort4, ushort4);\n" "short8 __ovld __cnfn sub_sat(short8, short8);\n" "ushort8 __ovld __cnfn sub_sat(ushort8, ushort8);\n" "short16 __ovld __cnfn sub_sat(short16, short16);\n" "ushort16 __ovld __cnfn sub_sat(ushort16, ushort16);\n" "int __ovld __cnfn sub_sat(int, int);\n" "uint __ovld __cnfn sub_sat(uint, uint);\n" "int2 __ovld __cnfn sub_sat(int2, int2);\n" "uint2 __ovld __cnfn sub_sat(uint2, uint2);\n" "int3 __ovld __cnfn sub_sat(int3, int3);\n" "uint3 __ovld __cnfn sub_sat(uint3, 
uint3);\n" "int4 __ovld __cnfn sub_sat(int4, int4);\n" "uint4 __ovld __cnfn sub_sat(uint4, uint4);\n" "int8 __ovld __cnfn sub_sat(int8, int8);\n" "uint8 __ovld __cnfn sub_sat(uint8, uint8);\n" "int16 __ovld __cnfn sub_sat(int16, int16);\n" "uint16 __ovld __cnfn sub_sat(uint16, uint16);\n" "long __ovld __cnfn sub_sat(long, long);\n" "ulong __ovld __cnfn sub_sat(ulong, ulong);\n" "long2 __ovld __cnfn sub_sat(long2, long2);\n" "ulong2 __ovld __cnfn sub_sat(ulong2, ulong2);\n" "long3 __ovld __cnfn sub_sat(long3, long3);\n" "ulong3 __ovld __cnfn sub_sat(ulong3, ulong3);\n" "long4 __ovld __cnfn sub_sat(long4, long4);\n" "ulong4 __ovld __cnfn sub_sat(ulong4, ulong4);\n" "long8 __ovld __cnfn sub_sat(long8, long8);\n" "ulong8 __ovld __cnfn sub_sat(ulong8, ulong8);\n" "long16 __ovld __cnfn sub_sat(long16, long16);\n" "ulong16 __ovld __cnfn sub_sat(ulong16, ulong16);\n" "\n" "/**\n" " * result[i] = ((short)hi[i] << 8) | lo[i]\n" " * result[i] = ((ushort)hi[i] << 8) | lo[i]\n" " */\n" "short __ovld __cnfn upsample(char, uchar);\n" "ushort __ovld __cnfn upsample(uchar, uchar);\n" "short2 __ovld __cnfn upsample(char2, uchar2);\n" "short3 __ovld __cnfn upsample(char3, uchar3);\n" "short4 __ovld __cnfn upsample(char4, uchar4);\n" "short8 __ovld __cnfn upsample(char8, uchar8);\n" "short16 __ovld __cnfn upsample(char16, uchar16);\n" "ushort2 __ovld __cnfn upsample(uchar2, uchar2);\n" "ushort3 __ovld __cnfn upsample(uchar3, uchar3);\n" "ushort4 __ovld __cnfn upsample(uchar4, uchar4);\n" "ushort8 __ovld __cnfn upsample(uchar8, uchar8);\n" "ushort16 __ovld __cnfn upsample(uchar16, uchar16);\n" "\n" "/**\n" " * result[i] = ((int)hi[i] << 16) | lo[i]\n" " * result[i] = ((uint)hi[i] << 16) | lo[i]\n" " */\n" "int __ovld __cnfn upsample(short, ushort);\n" "uint __ovld __cnfn upsample(ushort, ushort);\n" "int2 __ovld __cnfn upsample(short2, ushort2);\n" "int3 __ovld __cnfn upsample(short3, ushort3);\n" "int4 __ovld __cnfn upsample(short4, ushort4);\n" "int8 __ovld __cnfn upsample(short8, 
ushort8);\n" "int16 __ovld __cnfn upsample(short16, ushort16);\n" "uint2 __ovld __cnfn upsample(ushort2, ushort2);\n" "uint3 __ovld __cnfn upsample(ushort3, ushort3);\n" "uint4 __ovld __cnfn upsample(ushort4, ushort4);\n" "uint8 __ovld __cnfn upsample(ushort8, ushort8);\n" "uint16 __ovld __cnfn upsample(ushort16, ushort16);\n" "/**\n" " * result[i] = ((long)hi[i] << 32) | lo[i]\n" " * result[i] = ((ulong)hi[i] << 32) | lo[i]\n" " */\n" "long __ovld __cnfn upsample(int, uint);\n" "ulong __ovld __cnfn upsample(uint, uint);\n" "long2 __ovld __cnfn upsample(int2, uint2);\n" "long3 __ovld __cnfn upsample(int3, uint3);\n" "long4 __ovld __cnfn upsample(int4, uint4);\n" "long8 __ovld __cnfn upsample(int8, uint8);\n" "long16 __ovld __cnfn upsample(int16, uint16);\n" "ulong2 __ovld __cnfn upsample(uint2, uint2);\n" "ulong3 __ovld __cnfn upsample(uint3, uint3);\n" "ulong4 __ovld __cnfn upsample(uint4, uint4);\n" "ulong8 __ovld __cnfn upsample(uint8, uint8);\n" "ulong16 __ovld __cnfn upsample(uint16, uint16);\n" "\n" "/*\n" " * popcount(x): returns the number of set bit in x\n" " */\n" "#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)\n" "char __ovld __cnfn popcount(char);\n" "uchar __ovld __cnfn popcount(uchar);\n" "char2 __ovld __cnfn popcount(char2);\n" "uchar2 __ovld __cnfn popcount(uchar2);\n" "char3 __ovld __cnfn popcount(char3);\n" "uchar3 __ovld __cnfn popcount(uchar3);\n" "char4 __ovld __cnfn popcount(char4);\n" "uchar4 __ovld __cnfn popcount(uchar4);\n" "char8 __ovld __cnfn popcount(char8);\n" "uchar8 __ovld __cnfn popcount(uchar8);\n" "char16 __ovld __cnfn popcount(char16);\n" "uchar16 __ovld __cnfn popcount(uchar16);\n" "short __ovld __cnfn popcount(short);\n" "ushort __ovld __cnfn popcount(ushort);\n" "short2 __ovld __cnfn popcount(short2);\n" "ushort2 __ovld __cnfn popcount(ushort2);\n" "short3 __ovld __cnfn popcount(short3);\n" "ushort3 __ovld __cnfn popcount(ushort3);\n" "short4 __ovld __cnfn popcount(short4);\n" "ushort4 __ovld 
__cnfn popcount(ushort4);\n" "short8 __ovld __cnfn popcount(short8);\n" "ushort8 __ovld __cnfn popcount(ushort8);\n" "short16 __ovld __cnfn popcount(short16);\n" "ushort16 __ovld __cnfn popcount(ushort16);\n" "int __ovld __cnfn popcount(int);\n" "uint __ovld __cnfn popcount(uint);\n" "int2 __ovld __cnfn popcount(int2);\n" "uint2 __ovld __cnfn popcount(uint2);\n" "int3 __ovld __cnfn popcount(int3);\n" "uint3 __ovld __cnfn popcount(uint3);\n" "int4 __ovld __cnfn popcount(int4);\n" "uint4 __ovld __cnfn popcount(uint4);\n" "int8 __ovld __cnfn popcount(int8);\n" "uint8 __ovld __cnfn popcount(uint8);\n" "int16 __ovld __cnfn popcount(int16);\n" "uint16 __ovld __cnfn popcount(uint16);\n" "long __ovld __cnfn popcount(long);\n" "ulong __ovld __cnfn popcount(ulong);\n" "long2 __ovld __cnfn popcount(long2);\n" "ulong2 __ovld __cnfn popcount(ulong2);\n" "long3 __ovld __cnfn popcount(long3);\n" "ulong3 __ovld __cnfn popcount(ulong3);\n" "long4 __ovld __cnfn popcount(long4);\n" "ulong4 __ovld __cnfn popcount(ulong4);\n" "long8 __ovld __cnfn popcount(long8);\n" "ulong8 __ovld __cnfn popcount(ulong8);\n" "long16 __ovld __cnfn popcount(long16);\n" "ulong16 __ovld __cnfn popcount(ulong16);\n" "#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)\n" "\n" "/**\n" " * Multiply two 24-bit integer values x and y and add\n" " * the 32-bit integer result to the 32-bit integer z.\n" " * Refer to definition of mul24 to see how the 24-bit\n" " * integer multiplication is performed.\n" " */\n" "int __ovld __cnfn mad24(int, int, int);\n" "uint __ovld __cnfn mad24(uint, uint, uint);\n" "int2 __ovld __cnfn mad24(int2, int2, int2);\n" "uint2 __ovld __cnfn mad24(uint2, uint2, uint2);\n" "int3 __ovld __cnfn mad24(int3, int3, int3);\n" "uint3 __ovld __cnfn mad24(uint3, uint3, uint3);\n" "int4 __ovld __cnfn mad24(int4, int4, int4);\n" "uint4 __ovld __cnfn mad24(uint4, uint4, uint4);\n" "int8 __ovld __cnfn mad24(int8, int8, int8);\n" "uint8 __ovld __cnfn mad24(uint8, 
uint8, uint8);\n" "int16 __ovld __cnfn mad24(int16, int16, int16);\n" "uint16 __ovld __cnfn mad24(uint16, uint16, uint16);\n" "\n" "/**\n" " * Multiply two 24-bit integer values x and y. x and y\n" " * are 32-bit integers but only the low 24-bits are used\n" " * to perform the multiplication. mul24 should only\n" " * be used when values in x and y are in the range [-\n" " * 2^23, 2^23-1] if x and y are signed integers and in the\n" " * range [0, 2^24-1] if x and y are unsigned integers. If\n" " * x and y are not in this range, the multiplication\n" " * result is implementation-defined.\n" " */\n" "int __ovld __cnfn mul24(int, int);\n" "uint __ovld __cnfn mul24(uint, uint);\n" "int2 __ovld __cnfn mul24(int2, int2);\n" "uint2 __ovld __cnfn mul24(uint2, uint2);\n" "int3 __ovld __cnfn mul24(int3, int3);\n" "uint3 __ovld __cnfn mul24(uint3, uint3);\n" "int4 __ovld __cnfn mul24(int4, int4);\n" "uint4 __ovld __cnfn mul24(uint4, uint4);\n" "int8 __ovld __cnfn mul24(int8, int8);\n" "uint8 __ovld __cnfn mul24(uint8, uint8);\n" "int16 __ovld __cnfn mul24(int16, int16);\n" "uint16 __ovld __cnfn mul24(uint16, uint16);\n" "\n" "// OpenCL v1.1 s6.11.4, v1.2 s6.12.4, v2.0 s6.13.4 - Common Functions\n" "\n" "/**\n" " * Returns fmin(fmax(x, minval), maxval).\n" " * Results are undefined if minval > maxval.\n" " */\n" "float __ovld __cnfn clamp(float, float, float);\n" "float2 __ovld __cnfn clamp(float2, float2, float2);\n" "float3 __ovld __cnfn clamp(float3, float3, float3);\n" "float4 __ovld __cnfn clamp(float4, float4, float4);\n" "float8 __ovld __cnfn clamp(float8, float8, float8);\n" "float16 __ovld __cnfn clamp(float16, float16, float16);\n" "float2 __ovld __cnfn clamp(float2, float, float);\n" "float3 __ovld __cnfn clamp(float3, float, float);\n" "float4 __ovld __cnfn clamp(float4, float, float);\n" "float8 __ovld __cnfn clamp(float8, float, float);\n" "float16 __ovld __cnfn clamp(float16, float, float);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn clamp(double, double, 
double);\n" "double2 __ovld __cnfn clamp(double2, double2, double2);\n" "double3 __ovld __cnfn clamp(double3, double3, double3);\n" "double4 __ovld __cnfn clamp(double4, double4, double4);\n" "double8 __ovld __cnfn clamp(double8, double8, double8);\n" "double16 __ovld __cnfn clamp(double16, double16, double16);\n" "double2 __ovld __cnfn clamp(double2, double, double);\n" "double3 __ovld __cnfn clamp(double3, double, double);\n" "double4 __ovld __cnfn clamp(double4, double, double);\n" "double8 __ovld __cnfn clamp(double8, double, double);\n" "double16 __ovld __cnfn clamp(double16, double, double);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn clamp(half, half, half);\n" "half2 __ovld __cnfn clamp(half2, half2, half2);\n" "half3 __ovld __cnfn clamp(half3, half3, half3);\n" "half4 __ovld __cnfn clamp(half4, half4, half4);\n" "half8 __ovld __cnfn clamp(half8, half8, half8);\n" "half16 __ovld __cnfn clamp(half16, half16, half16);\n" "half2 __ovld __cnfn clamp(half2, half, half);\n" "half3 __ovld __cnfn clamp(half3, half, half);\n" "half4 __ovld __cnfn clamp(half4, half, half);\n" "half8 __ovld __cnfn clamp(half8, half, half);\n" "half16 __ovld __cnfn clamp(half16, half, half);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Converts radians to degrees, i.e. 
(180 / PI) *\n" " * radians.\n" " */\n" "float __ovld __cnfn degrees(float);\n" "float2 __ovld __cnfn degrees(float2);\n" "float3 __ovld __cnfn degrees(float3);\n" "float4 __ovld __cnfn degrees(float4);\n" "float8 __ovld __cnfn degrees(float8);\n" "float16 __ovld __cnfn degrees(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn degrees(double);\n" "double2 __ovld __cnfn degrees(double2);\n" "double3 __ovld __cnfn degrees(double3);\n" "double4 __ovld __cnfn degrees(double4);\n" "double8 __ovld __cnfn degrees(double8);\n" "double16 __ovld __cnfn degrees(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn degrees(half);\n" "half2 __ovld __cnfn degrees(half2);\n" "half3 __ovld __cnfn degrees(half3);\n" "half4 __ovld __cnfn degrees(half4);\n" "half8 __ovld __cnfn degrees(half8);\n" "half16 __ovld __cnfn degrees(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Returns y if x < y, otherwise it returns x. If x and y\n" " * are infinite or NaN, the return values are undefined.\n" " */\n" "float __ovld __cnfn max(float, float);\n" "float2 __ovld __cnfn max(float2, float2);\n" "float3 __ovld __cnfn max(float3, float3);\n" "float4 __ovld __cnfn max(float4, float4);\n" "float8 __ovld __cnfn max(float8, float8);\n" "float16 __ovld __cnfn max(float16, float16);\n" "float2 __ovld __cnfn max(float2, float);\n" "float3 __ovld __cnfn max(float3, float);\n" "float4 __ovld __cnfn max(float4, float);\n" "float8 __ovld __cnfn max(float8, float);\n" "float16 __ovld __cnfn max(float16, float);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn max(double, double);\n" "double2 __ovld __cnfn max(double2, double2);\n" "double3 __ovld __cnfn max(double3, double3);\n" "double4 __ovld __cnfn max(double4, double4);\n" "double8 __ovld __cnfn max(double8, double8);\n" "double16 __ovld __cnfn max(double16, double16);\n" "double2 __ovld __cnfn max(double2, double);\n" "double3 __ovld __cnfn max(double3, double);\n" "double4 __ovld __cnfn max(double4, double);\n" 
"double8 __ovld __cnfn max(double8, double);\n" "double16 __ovld __cnfn max(double16, double);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn max(half, half);\n" "half2 __ovld __cnfn max(half2, half2);\n" "half3 __ovld __cnfn max(half3, half3);\n" "half4 __ovld __cnfn max(half4, half4);\n" "half8 __ovld __cnfn max(half8, half8);\n" "half16 __ovld __cnfn max(half16, half16);\n" "half2 __ovld __cnfn max(half2, half);\n" "half3 __ovld __cnfn max(half3, half);\n" "half4 __ovld __cnfn max(half4, half);\n" "half8 __ovld __cnfn max(half8, half);\n" "half16 __ovld __cnfn max(half16, half);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Returns y if y < x, otherwise it returns x. If x and y\n" " * are infinite or NaN, the return values are undefined.\n" " */\n" "float __ovld __cnfn min(float, float);\n" "float2 __ovld __cnfn min(float2, float2);\n" "float3 __ovld __cnfn min(float3, float3);\n" "float4 __ovld __cnfn min(float4, float4);\n" "float8 __ovld __cnfn min(float8, float8);\n" "float16 __ovld __cnfn min(float16, float16);\n" "float2 __ovld __cnfn min(float2, float);\n" "float3 __ovld __cnfn min(float3, float);\n" "float4 __ovld __cnfn min(float4, float);\n" "float8 __ovld __cnfn min(float8, float);\n" "float16 __ovld __cnfn min(float16, float);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn min(double, double);\n" "double2 __ovld __cnfn min(double2, double2);\n" "double3 __ovld __cnfn min(double3, double3);\n" "double4 __ovld __cnfn min(double4, double4);\n" "double8 __ovld __cnfn min(double8, double8);\n" "double16 __ovld __cnfn min(double16, double16);\n" "double2 __ovld __cnfn min(double2, double);\n" "double3 __ovld __cnfn min(double3, double);\n" "double4 __ovld __cnfn min(double4, double);\n" "double8 __ovld __cnfn min(double8, double);\n" "double16 __ovld __cnfn min(double16, double);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn min(half, half);\n" "half2 __ovld __cnfn min(half2, half2);\n" "half3 __ovld __cnfn 
min(half3, half3);\n" "half4 __ovld __cnfn min(half4, half4);\n" "half8 __ovld __cnfn min(half8, half8);\n" "half16 __ovld __cnfn min(half16, half16);\n" "half2 __ovld __cnfn min(half2, half);\n" "half3 __ovld __cnfn min(half3, half);\n" "half4 __ovld __cnfn min(half4, half);\n" "half8 __ovld __cnfn min(half8, half);\n" "half16 __ovld __cnfn min(half16, half);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Returns the linear blend of x & y implemented as:\n" " * x + (y - x) * a\n" " * a must be a value in the range 0.0 ... 1.0. If a is not\n" " * in the range 0.0 ... 1.0, the return values are\n" " * undefined.\n" " */\n" "float __ovld __cnfn mix(float, float, float);\n" "float2 __ovld __cnfn mix(float2, float2, float2);\n" "float3 __ovld __cnfn mix(float3, float3, float3);\n" "float4 __ovld __cnfn mix(float4, float4, float4);\n" "float8 __ovld __cnfn mix(float8, float8, float8);\n" "float16 __ovld __cnfn mix(float16, float16, float16);\n" "float2 __ovld __cnfn mix(float2, float2, float);\n" "float3 __ovld __cnfn mix(float3, float3, float);\n" "float4 __ovld __cnfn mix(float4, float4, float);\n" "float8 __ovld __cnfn mix(float8, float8, float);\n" "float16 __ovld __cnfn mix(float16, float16, float);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn mix(double, double, double);\n" "double2 __ovld __cnfn mix(double2, double2, double2);\n" "double3 __ovld __cnfn mix(double3, double3, double3);\n" "double4 __ovld __cnfn mix(double4, double4, double4);\n" "double8 __ovld __cnfn mix(double8, double8, double8);\n" "double16 __ovld __cnfn mix(double16, double16, double16);\n" "double2 __ovld __cnfn mix(double2, double2, double);\n" "double3 __ovld __cnfn mix(double3, double3, double);\n" "double4 __ovld __cnfn mix(double4, double4, double);\n" "double8 __ovld __cnfn mix(double8, double8, double);\n" "double16 __ovld __cnfn mix(double16, double16, double);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn mix(half, half, half);\n" "half2 __ovld __cnfn 
mix(half2, half2, half2);\n" "half3 __ovld __cnfn mix(half3, half3, half3);\n" "half4 __ovld __cnfn mix(half4, half4, half4);\n" "half8 __ovld __cnfn mix(half8, half8, half8);\n" "half16 __ovld __cnfn mix(half16, half16, half16);\n" "half2 __ovld __cnfn mix(half2, half2, half);\n" "half3 __ovld __cnfn mix(half3, half3, half);\n" "half4 __ovld __cnfn mix(half4, half4, half);\n" "half8 __ovld __cnfn mix(half8, half8, half);\n" "half16 __ovld __cnfn mix(half16, half16, half);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Converts degrees to radians, i.e. (PI / 180) *\n" " * degrees.\n" " */\n" "float __ovld __cnfn radians(float);\n" "float2 __ovld __cnfn radians(float2);\n" "float3 __ovld __cnfn radians(float3);\n" "float4 __ovld __cnfn radians(float4);\n" "float8 __ovld __cnfn radians(float8);\n" "float16 __ovld __cnfn radians(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn radians(double);\n" "double2 __ovld __cnfn radians(double2);\n" "double3 __ovld __cnfn radians(double3);\n" "double4 __ovld __cnfn radians(double4);\n" "double8 __ovld __cnfn radians(double8);\n" "double16 __ovld __cnfn radians(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn radians(half);\n" "half2 __ovld __cnfn radians(half2);\n" "half3 __ovld __cnfn radians(half3);\n" "half4 __ovld __cnfn radians(half4);\n" "half8 __ovld __cnfn radians(half8);\n" "half16 __ovld __cnfn radians(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Returns 0.0 if x < edge, otherwise it returns 1.0.\n" " */\n" "float __ovld __cnfn step(float, float);\n" "float2 __ovld __cnfn step(float2, float2);\n" "float3 __ovld __cnfn step(float3, float3);\n" "float4 __ovld __cnfn step(float4, float4);\n" "float8 __ovld __cnfn step(float8, float8);\n" "float16 __ovld __cnfn step(float16, float16);\n" "float2 __ovld __cnfn step(float, float2);\n" "float3 __ovld __cnfn step(float, float3);\n" "float4 __ovld __cnfn step(float, float4);\n" "float8 __ovld __cnfn step(float, float8);\n" 
"float16 __ovld __cnfn step(float, float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn step(double, double);\n" "double2 __ovld __cnfn step(double2, double2);\n" "double3 __ovld __cnfn step(double3, double3);\n" "double4 __ovld __cnfn step(double4, double4);\n" "double8 __ovld __cnfn step(double8, double8);\n" "double16 __ovld __cnfn step(double16, double16);\n" "double2 __ovld __cnfn step(double, double2);\n" "double3 __ovld __cnfn step(double, double3);\n" "double4 __ovld __cnfn step(double, double4);\n" "double8 __ovld __cnfn step(double, double8);\n" "double16 __ovld __cnfn step(double, double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn step(half, half);\n" "half2 __ovld __cnfn step(half2, half2);\n" "half3 __ovld __cnfn step(half3, half3);\n" "half4 __ovld __cnfn step(half4, half4);\n" "half8 __ovld __cnfn step(half8, half8);\n" "half16 __ovld __cnfn step(half16, half16);\n" "half2 __ovld __cnfn step(half, half2);\n" "half3 __ovld __cnfn step(half, half3);\n" "half4 __ovld __cnfn step(half, half4);\n" "half8 __ovld __cnfn step(half, half8);\n" "half16 __ovld __cnfn step(half, half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Returns 0.0 if x <= edge0 and 1.0 if x >= edge1 and\n" " * performs smooth Hermite interpolation between 0\n" " * and 1when edge0 < x < edge1. 
This is useful in\n" " * cases where you would want a threshold function\n" " * with a smooth transition.\n" " * This is equivalent to:\n" " * gentype t;\n" " * t = clamp ((x - edge0) / (edge1 - edge0), 0, 1);\n" " * return t * t * (3 - 2 * t);\n" " * Results are undefined if edge0 >= edge1 or if x,\n" " * edge0 or edge1 is a NaN.\n" " */\n" "float __ovld __cnfn smoothstep(float, float, float);\n" "float2 __ovld __cnfn smoothstep(float2, float2, float2);\n" "float3 __ovld __cnfn smoothstep(float3, float3, float3);\n" "float4 __ovld __cnfn smoothstep(float4, float4, float4);\n" "float8 __ovld __cnfn smoothstep(float8, float8, float8);\n" "float16 __ovld __cnfn smoothstep(float16, float16, float16);\n" "float2 __ovld __cnfn smoothstep(float, float, float2);\n" "float3 __ovld __cnfn smoothstep(float, float, float3);\n" "float4 __ovld __cnfn smoothstep(float, float, float4);\n" "float8 __ovld __cnfn smoothstep(float, float, float8);\n" "float16 __ovld __cnfn smoothstep(float, float, float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn smoothstep(double, double, double);\n" "double2 __ovld __cnfn smoothstep(double2, double2, double2);\n" "double3 __ovld __cnfn smoothstep(double3, double3, double3);\n" "double4 __ovld __cnfn smoothstep(double4, double4, double4);\n" "double8 __ovld __cnfn smoothstep(double8, double8, double8);\n" "double16 __ovld __cnfn smoothstep(double16, double16, double16);\n" "double2 __ovld __cnfn smoothstep(double, double, double2);\n" "double3 __ovld __cnfn smoothstep(double, double, double3);\n" "double4 __ovld __cnfn smoothstep(double, double, double4);\n" "double8 __ovld __cnfn smoothstep(double, double, double8);\n" "double16 __ovld __cnfn smoothstep(double, double, double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn smoothstep(half, half, half);\n" "half2 __ovld __cnfn smoothstep(half2, half2, half2);\n" "half3 __ovld __cnfn smoothstep(half3, half3, half3);\n" "half4 __ovld __cnfn smoothstep(half4, half4, 
half4);\n" "half8 __ovld __cnfn smoothstep(half8, half8, half8);\n" "half16 __ovld __cnfn smoothstep(half16, half16, half16);\n" "half2 __ovld __cnfn smoothstep(half, half, half2);\n" "half3 __ovld __cnfn smoothstep(half, half, half3);\n" "half4 __ovld __cnfn smoothstep(half, half, half4);\n" "half8 __ovld __cnfn smoothstep(half, half, half8);\n" "half16 __ovld __cnfn smoothstep(half, half, half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Returns 1.0 if x > 0, -0.0 if x = -0.0, +0.0 if x =\n" " * +0.0, or -1.0 if x < 0. Returns 0.0 if x is a NaN.\n" " */\n" "float __ovld __cnfn sign(float);\n" "float2 __ovld __cnfn sign(float2);\n" "float3 __ovld __cnfn sign(float3);\n" "float4 __ovld __cnfn sign(float4);\n" "float8 __ovld __cnfn sign(float8);\n" "float16 __ovld __cnfn sign(float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn sign(double);\n" "double2 __ovld __cnfn sign(double2);\n" "double3 __ovld __cnfn sign(double3);\n" "double4 __ovld __cnfn sign(double4);\n" "double8 __ovld __cnfn sign(double8);\n" "double16 __ovld __cnfn sign(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn sign(half);\n" "half2 __ovld __cnfn sign(half2);\n" "half3 __ovld __cnfn sign(half3);\n" "half4 __ovld __cnfn sign(half4);\n" "half8 __ovld __cnfn sign(half8);\n" "half16 __ovld __cnfn sign(half16);\n" "#endif //cl_khr_fp16\n" "\n" "// OpenCL v1.1 s6.11.5, v1.2 s6.12.5, v2.0 s6.13.5 - Geometric Functions\n" "\n" "/**\n" " * Returns the cross product of p0.xyz and p1.xyz. 
The\n" " * w component of float4 result returned will be 0.0.\n" " */\n" "float4 __ovld __cnfn cross(float4, float4);\n" "float3 __ovld __cnfn cross(float3, float3);\n" "#ifdef cl_khr_fp64\n" "double4 __ovld __cnfn cross(double4, double4);\n" "double3 __ovld __cnfn cross(double3, double3);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half4 __ovld __cnfn cross(half4, half4);\n" "half3 __ovld __cnfn cross(half3, half3);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Compute dot product.\n" " */\n" "float __ovld __cnfn dot(float, float);\n" "float __ovld __cnfn dot(float2, float2);\n" "float __ovld __cnfn dot(float3, float3);\n" "float __ovld __cnfn dot(float4, float4);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn dot(double, double);\n" "double __ovld __cnfn dot(double2, double2);\n" "double __ovld __cnfn dot(double3, double3);\n" "double __ovld __cnfn dot(double4, double4);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn dot(half, half);\n" "half __ovld __cnfn dot(half2, half2);\n" "half __ovld __cnfn dot(half3, half3);\n" "half __ovld __cnfn dot(half4, half4);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Returns the distance between p0 and p1. 
This is\n" " * calculated as length(p0 - p1).\n" " */\n" "float __ovld __cnfn distance(float, float);\n" "float __ovld __cnfn distance(float2, float2);\n" "float __ovld __cnfn distance(float3, float3);\n" "float __ovld __cnfn distance(float4, float4);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn distance(double, double);\n" "double __ovld __cnfn distance(double2, double2);\n" "double __ovld __cnfn distance(double3, double3);\n" "double __ovld __cnfn distance(double4, double4);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn distance(half, half);\n" "half __ovld __cnfn distance(half2, half2);\n" "half __ovld __cnfn distance(half3, half3);\n" "half __ovld __cnfn distance(half4, half4);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Return the length of vector p, i.e.,\n" " * sqrt(p.x2 + p.y 2 + ...)\n" " */\n" "float __ovld __cnfn length(float);\n" "float __ovld __cnfn length(float2);\n" "float __ovld __cnfn length(float3);\n" "float __ovld __cnfn length(float4);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn length(double);\n" "double __ovld __cnfn length(double2);\n" "double __ovld __cnfn length(double3);\n" "double __ovld __cnfn length(double4);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn length(half);\n" "half __ovld __cnfn length(half2);\n" "half __ovld __cnfn length(half3);\n" "half __ovld __cnfn length(half4);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Returns a vector in the same direction as p but with a\n" " * length of 1.\n" " */\n" "float __ovld __cnfn normalize(float);\n" "float2 __ovld __cnfn normalize(float2);\n" "float3 __ovld __cnfn normalize(float3);\n" "float4 __ovld __cnfn normalize(float4);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn normalize(double);\n" "double2 __ovld __cnfn normalize(double2);\n" "double3 __ovld __cnfn normalize(double3);\n" "double4 __ovld __cnfn normalize(double4);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn normalize(half);\n" "half2 
__ovld __cnfn normalize(half2);\n" "half3 __ovld __cnfn normalize(half3);\n" "half4 __ovld __cnfn normalize(half4);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Returns fast_length(p0 - p1).\n" " */\n" "float __ovld __cnfn fast_distance(float, float);\n" "float __ovld __cnfn fast_distance(float2, float2);\n" "float __ovld __cnfn fast_distance(float3, float3);\n" "float __ovld __cnfn fast_distance(float4, float4);\n" "\n" "/**\n" " * Returns the length of vector p computed as:\n" " * half_sqrt(p.x2 + p.y2 + ...)\n" " */\n" "float __ovld __cnfn fast_length(float);\n" "float __ovld __cnfn fast_length(float2);\n" "float __ovld __cnfn fast_length(float3);\n" "float __ovld __cnfn fast_length(float4);\n" "\n" "/**\n" " * Returns a vector in the same direction as p but with a\n" " * length of 1. fast_normalize is computed as:\n" " * p * half_rsqrt (p.x^2 + p.y^2 + ... )\n" " * The result shall be within 8192 ulps error from the\n" " * infinitely precise result of\n" " * if (all(p == 0.0f))\n" " * result = p;\n" " * else\n" " * result = p / sqrt (p.x^2 + p.y^2 + ...);\n" " * with the following exceptions:\n" " * 1) If the sum of squares is greater than FLT_MAX\n" " * then the value of the floating-point values in the\n" " * result vector are undefined.\n" " * 2) If the sum of squares is less than FLT_MIN then\n" " * the implementation may return back p.\n" " * 3) If the device is in \"denorms are flushed to zero\"\n" " * mode, individual operand elements with magnitude\n" " * less than sqrt(FLT_MIN) may be flushed to zero\n" " * before proceeding with the calculation.\n" " */\n" "float __ovld __cnfn fast_normalize(float);\n" "float2 __ovld __cnfn fast_normalize(float2);\n" "float3 __ovld __cnfn fast_normalize(float3);\n" "float4 __ovld __cnfn fast_normalize(float4);\n" "\n" "// OpenCL v1.1 s6.11.6, v1.2 s6.12.6, v2.0 s6.13.6 - Relational Functions\n" "\n" "/**\n" " * intn isequal (floatn x, floatn y)\n" " * Returns the component-wise compare of x == y.\n" " */\n" "int 
__ovld __cnfn isequal(float, float);\n" "int2 __ovld __cnfn isequal(float2, float2);\n" "int3 __ovld __cnfn isequal(float3, float3);\n" "int4 __ovld __cnfn isequal(float4, float4);\n" "int8 __ovld __cnfn isequal(float8, float8);\n" "int16 __ovld __cnfn isequal(float16, float16);\n" "#ifdef cl_khr_fp64\n" "int __ovld __cnfn isequal(double, double);\n" "long2 __ovld __cnfn isequal(double2, double2);\n" "long3 __ovld __cnfn isequal(double3, double3);\n" "long4 __ovld __cnfn isequal(double4, double4);\n" "long8 __ovld __cnfn isequal(double8, double8);\n" "long16 __ovld __cnfn isequal(double16, double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "int __ovld __cnfn isequal(half, half);\n" "short2 __ovld __cnfn isequal(half2, half2);\n" "short3 __ovld __cnfn isequal(half3, half3);\n" "short4 __ovld __cnfn isequal(half4, half4);\n" "short8 __ovld __cnfn isequal(half8, half8);\n" "short16 __ovld __cnfn isequal(half16, half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Returns the component-wise compare of x != y.\n" " */\n" "int __ovld __cnfn isnotequal(float, float);\n" "int2 __ovld __cnfn isnotequal(float2, float2);\n" "int3 __ovld __cnfn isnotequal(float3, float3);\n" "int4 __ovld __cnfn isnotequal(float4, float4);\n" "int8 __ovld __cnfn isnotequal(float8, float8);\n" "int16 __ovld __cnfn isnotequal(float16, float16);\n" "#ifdef cl_khr_fp64\n" "int __ovld __cnfn isnotequal(double, double);\n" "long2 __ovld __cnfn isnotequal(double2, double2);\n" "long3 __ovld __cnfn isnotequal(double3, double3);\n" "long4 __ovld __cnfn isnotequal(double4, double4);\n" "long8 __ovld __cnfn isnotequal(double8, double8);\n" "long16 __ovld __cnfn isnotequal(double16, double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "int __ovld __cnfn isnotequal(half, half);\n" "short2 __ovld __cnfn isnotequal(half2, half2);\n" "short3 __ovld __cnfn isnotequal(half3, half3);\n" "short4 __ovld __cnfn isnotequal(half4, half4);\n" "short8 __ovld __cnfn isnotequal(half8, half8);\n" 
"short16 __ovld __cnfn isnotequal(half16, half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Returns the component-wise compare of x > y.\n" " */\n" "int __ovld __cnfn isgreater(float, float);\n" "int2 __ovld __cnfn isgreater(float2, float2);\n" "int3 __ovld __cnfn isgreater(float3, float3);\n" "int4 __ovld __cnfn isgreater(float4, float4);\n" "int8 __ovld __cnfn isgreater(float8, float8);\n" "int16 __ovld __cnfn isgreater(float16, float16);\n" "#ifdef cl_khr_fp64\n" "int __ovld __cnfn isgreater(double, double);\n" "long2 __ovld __cnfn isgreater(double2, double2);\n" "long3 __ovld __cnfn isgreater(double3, double3);\n" "long4 __ovld __cnfn isgreater(double4, double4);\n" "long8 __ovld __cnfn isgreater(double8, double8);\n" "long16 __ovld __cnfn isgreater(double16, double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "int __ovld __cnfn isgreater(half, half);\n" "short2 __ovld __cnfn isgreater(half2, half2);\n" "short3 __ovld __cnfn isgreater(half3, half3);\n" "short4 __ovld __cnfn isgreater(half4, half4);\n" "short8 __ovld __cnfn isgreater(half8, half8);\n" "short16 __ovld __cnfn isgreater(half16, half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Returns the component-wise compare of x >= y.\n" " */\n" "int __ovld __cnfn isgreaterequal(float, float);\n" "int2 __ovld __cnfn isgreaterequal(float2, float2);\n" "int3 __ovld __cnfn isgreaterequal(float3, float3);\n" "int4 __ovld __cnfn isgreaterequal(float4, float4);\n" "int8 __ovld __cnfn isgreaterequal(float8, float8);\n" "int16 __ovld __cnfn isgreaterequal(float16, float16);\n" "#ifdef cl_khr_fp64\n" "int __ovld __cnfn isgreaterequal(double, double);\n" "long2 __ovld __cnfn isgreaterequal(double2, double2);\n" "long3 __ovld __cnfn isgreaterequal(double3, double3);\n" "long4 __ovld __cnfn isgreaterequal(double4, double4);\n" "long8 __ovld __cnfn isgreaterequal(double8, double8);\n" "long16 __ovld __cnfn isgreaterequal(double16, double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "int __ovld 
__cnfn isgreaterequal(half, half);\n" "short2 __ovld __cnfn isgreaterequal(half2, half2);\n" "short3 __ovld __cnfn isgreaterequal(half3, half3);\n" "short4 __ovld __cnfn isgreaterequal(half4, half4);\n" "short8 __ovld __cnfn isgreaterequal(half8, half8);\n" "short16 __ovld __cnfn isgreaterequal(half16, half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Returns the component-wise compare of x < y.\n" " */\n" "int __ovld __cnfn isless(float, float);\n" "int2 __ovld __cnfn isless(float2, float2);\n" "int3 __ovld __cnfn isless(float3, float3);\n" "int4 __ovld __cnfn isless(float4, float4);\n" "int8 __ovld __cnfn isless(float8, float8);\n" "int16 __ovld __cnfn isless(float16, float16);\n" "#ifdef cl_khr_fp64\n" "int __ovld __cnfn isless(double, double);\n" "long2 __ovld __cnfn isless(double2, double2);\n" "long3 __ovld __cnfn isless(double3, double3);\n" "long4 __ovld __cnfn isless(double4, double4);\n" "long8 __ovld __cnfn isless(double8, double8);\n" "long16 __ovld __cnfn isless(double16, double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "int __ovld __cnfn isless(half, half);\n" "short2 __ovld __cnfn isless(half2, half2);\n" "short3 __ovld __cnfn isless(half3, half3);\n" "short4 __ovld __cnfn isless(half4, half4);\n" "short8 __ovld __cnfn isless(half8, half8);\n" "short16 __ovld __cnfn isless(half16, half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Returns the component-wise compare of x <= y.\n" " */\n" "int __ovld __cnfn islessequal(float, float);\n" "int2 __ovld __cnfn islessequal(float2, float2);\n" "int3 __ovld __cnfn islessequal(float3, float3);\n" "int4 __ovld __cnfn islessequal(float4, float4);\n" "int8 __ovld __cnfn islessequal(float8, float8);\n" "int16 __ovld __cnfn islessequal(float16, float16);\n" "#ifdef cl_khr_fp64\n" "int __ovld __cnfn islessequal(double, double);\n" "long2 __ovld __cnfn islessequal(double2, double2);\n" "long3 __ovld __cnfn islessequal(double3, double3);\n" "long4 __ovld __cnfn islessequal(double4, double4);\n" 
"long8 __ovld __cnfn islessequal(double8, double8);\n" "long16 __ovld __cnfn islessequal(double16, double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "int __ovld __cnfn islessequal(half, half);\n" "short2 __ovld __cnfn islessequal(half2, half2);\n" "short3 __ovld __cnfn islessequal(half3, half3);\n" "short4 __ovld __cnfn islessequal(half4, half4);\n" "short8 __ovld __cnfn islessequal(half8, half8);\n" "short16 __ovld __cnfn islessequal(half16, half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Returns the component-wise compare of\n" " * (x < y) || (x > y) .\n" " */\n" "int __ovld __cnfn islessgreater(float, float);\n" "int2 __ovld __cnfn islessgreater(float2, float2);\n" "int3 __ovld __cnfn islessgreater(float3, float3);\n" "int4 __ovld __cnfn islessgreater(float4, float4);\n" "int8 __ovld __cnfn islessgreater(float8, float8);\n" "int16 __ovld __cnfn islessgreater(float16, float16);\n" "#ifdef cl_khr_fp64\n" "int __ovld __cnfn islessgreater(double, double);\n" "long2 __ovld __cnfn islessgreater(double2, double2);\n" "long3 __ovld __cnfn islessgreater(double3, double3);\n" "long4 __ovld __cnfn islessgreater(double4, double4);\n" "long8 __ovld __cnfn islessgreater(double8, double8);\n" "long16 __ovld __cnfn islessgreater(double16, double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "int __ovld __cnfn islessgreater(half, half);\n" "short2 __ovld __cnfn islessgreater(half2, half2);\n" "short3 __ovld __cnfn islessgreater(half3, half3);\n" "short4 __ovld __cnfn islessgreater(half4, half4);\n" "short8 __ovld __cnfn islessgreater(half8, half8);\n" "short16 __ovld __cnfn islessgreater(half16, half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Test for finite value.\n" " */\n" "int __ovld __cnfn isfinite(float);\n" "int2 __ovld __cnfn isfinite(float2);\n" "int3 __ovld __cnfn isfinite(float3);\n" "int4 __ovld __cnfn isfinite(float4);\n" "int8 __ovld __cnfn isfinite(float8);\n" "int16 __ovld __cnfn isfinite(float16);\n" "#ifdef cl_khr_fp64\n" "int 
__ovld __cnfn isfinite(double);\n" "long2 __ovld __cnfn isfinite(double2);\n" "long3 __ovld __cnfn isfinite(double3);\n" "long4 __ovld __cnfn isfinite(double4);\n" "long8 __ovld __cnfn isfinite(double8);\n" "long16 __ovld __cnfn isfinite(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "int __ovld __cnfn isfinite(half);\n" "short2 __ovld __cnfn isfinite(half2);\n" "short3 __ovld __cnfn isfinite(half3);\n" "short4 __ovld __cnfn isfinite(half4);\n" "short8 __ovld __cnfn isfinite(half8);\n" "short16 __ovld __cnfn isfinite(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Test for infinity value (+ve or -ve) .\n" " */\n" "int __ovld __cnfn isinf(float);\n" "int2 __ovld __cnfn isinf(float2);\n" "int3 __ovld __cnfn isinf(float3);\n" "int4 __ovld __cnfn isinf(float4);\n" "int8 __ovld __cnfn isinf(float8);\n" "int16 __ovld __cnfn isinf(float16);\n" "#ifdef cl_khr_fp64\n" "int __ovld __cnfn isinf(double);\n" "long2 __ovld __cnfn isinf(double2);\n" "long3 __ovld __cnfn isinf(double3);\n" "long4 __ovld __cnfn isinf(double4);\n" "long8 __ovld __cnfn isinf(double8);\n" "long16 __ovld __cnfn isinf(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "int __ovld __cnfn isinf(half);\n" "short2 __ovld __cnfn isinf(half2);\n" "short3 __ovld __cnfn isinf(half3);\n" "short4 __ovld __cnfn isinf(half4);\n" "short8 __ovld __cnfn isinf(half8);\n" "short16 __ovld __cnfn isinf(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Test for a NaN.\n" " */\n" "int __ovld __cnfn isnan(float);\n" "int2 __ovld __cnfn isnan(float2);\n" "int3 __ovld __cnfn isnan(float3);\n" "int4 __ovld __cnfn isnan(float4);\n" "int8 __ovld __cnfn isnan(float8);\n" "int16 __ovld __cnfn isnan(float16);\n" "#ifdef cl_khr_fp64\n" "int __ovld __cnfn isnan(double);\n" "long2 __ovld __cnfn isnan(double2);\n" "long3 __ovld __cnfn isnan(double3);\n" "long4 __ovld __cnfn isnan(double4);\n" "long8 __ovld __cnfn isnan(double8);\n" "long16 __ovld __cnfn isnan(double16);\n" "#endif //cl_khr_fp64\n" 
"#ifdef cl_khr_fp16\n" "int __ovld __cnfn isnan(half);\n" "short2 __ovld __cnfn isnan(half2);\n" "short3 __ovld __cnfn isnan(half3);\n" "short4 __ovld __cnfn isnan(half4);\n" "short8 __ovld __cnfn isnan(half8);\n" "short16 __ovld __cnfn isnan(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Test for a normal value.\n" " */\n" "int __ovld __cnfn isnormal(float);\n" "int2 __ovld __cnfn isnormal(float2);\n" "int3 __ovld __cnfn isnormal(float3);\n" "int4 __ovld __cnfn isnormal(float4);\n" "int8 __ovld __cnfn isnormal(float8);\n" "int16 __ovld __cnfn isnormal(float16);\n" "#ifdef cl_khr_fp64\n" "int __ovld __cnfn isnormal(double);\n" "long2 __ovld __cnfn isnormal(double2);\n" "long3 __ovld __cnfn isnormal(double3);\n" "long4 __ovld __cnfn isnormal(double4);\n" "long8 __ovld __cnfn isnormal(double8);\n" "long16 __ovld __cnfn isnormal(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "int __ovld __cnfn isnormal(half);\n" "short2 __ovld __cnfn isnormal(half2);\n" "short3 __ovld __cnfn isnormal(half3);\n" "short4 __ovld __cnfn isnormal(half4);\n" "short8 __ovld __cnfn isnormal(half8);\n" "short16 __ovld __cnfn isnormal(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Test if arguments are ordered. 
isordered() takes\n" " * arguments x and y, and returns the result\n" " * isequal(x, x) && isequal(y, y).\n" " */\n" "int __ovld __cnfn isordered(float, float);\n" "int2 __ovld __cnfn isordered(float2, float2);\n" "int3 __ovld __cnfn isordered(float3, float3);\n" "int4 __ovld __cnfn isordered(float4, float4);\n" "int8 __ovld __cnfn isordered(float8, float8);\n" "int16 __ovld __cnfn isordered(float16, float16);\n" "#ifdef cl_khr_fp64\n" "int __ovld __cnfn isordered(double, double);\n" "long2 __ovld __cnfn isordered(double2, double2);\n" "long3 __ovld __cnfn isordered(double3, double3);\n" "long4 __ovld __cnfn isordered(double4, double4);\n" "long8 __ovld __cnfn isordered(double8, double8);\n" "long16 __ovld __cnfn isordered(double16, double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "int __ovld __cnfn isordered(half, half);\n" "short2 __ovld __cnfn isordered(half2, half2);\n" "short3 __ovld __cnfn isordered(half3, half3);\n" "short4 __ovld __cnfn isordered(half4, half4);\n" "short8 __ovld __cnfn isordered(half8, half8);\n" "short16 __ovld __cnfn isordered(half16, half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Test if arguments are unordered. 
isunordered()\n" " * takes arguments x and y, returning non-zero if x or y\n" " * is NaN, and zero otherwise.\n" " */\n" "int __ovld __cnfn isunordered(float, float);\n" "int2 __ovld __cnfn isunordered(float2, float2);\n" "int3 __ovld __cnfn isunordered(float3, float3);\n" "int4 __ovld __cnfn isunordered(float4, float4);\n" "int8 __ovld __cnfn isunordered(float8, float8);\n" "int16 __ovld __cnfn isunordered(float16, float16);\n" "#ifdef cl_khr_fp64\n" "int __ovld __cnfn isunordered(double, double);\n" "long2 __ovld __cnfn isunordered(double2, double2);\n" "long3 __ovld __cnfn isunordered(double3, double3);\n" "long4 __ovld __cnfn isunordered(double4, double4);\n" "long8 __ovld __cnfn isunordered(double8, double8);\n" "long16 __ovld __cnfn isunordered(double16, double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "int __ovld __cnfn isunordered(half, half);\n" "short2 __ovld __cnfn isunordered(half2, half2);\n" "short3 __ovld __cnfn isunordered(half3, half3);\n" "short4 __ovld __cnfn isunordered(half4, half4);\n" "short8 __ovld __cnfn isunordered(half8, half8);\n" "short16 __ovld __cnfn isunordered(half16, half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Test for sign bit. The scalar version of the function\n" " * returns a 1 if the sign bit in the float is set else returns\n" " * 0. 
The vector version of the function returns the\n" " * following for each component in floatn: a -1 if the\n" " * sign bit in the float is set else returns 0.\n" " */\n" "int __ovld __cnfn signbit(float);\n" "int2 __ovld __cnfn signbit(float2);\n" "int3 __ovld __cnfn signbit(float3);\n" "int4 __ovld __cnfn signbit(float4);\n" "int8 __ovld __cnfn signbit(float8);\n" "int16 __ovld __cnfn signbit(float16);\n" "#ifdef cl_khr_fp64\n" "int __ovld __cnfn signbit(double);\n" "long2 __ovld __cnfn signbit(double2);\n" "long3 __ovld __cnfn signbit(double3);\n" "long4 __ovld __cnfn signbit(double4);\n" "long8 __ovld __cnfn signbit(double8);\n" "long16 __ovld __cnfn signbit(double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "int __ovld __cnfn signbit(half);\n" "short2 __ovld __cnfn signbit(half2);\n" "short3 __ovld __cnfn signbit(half3);\n" "short4 __ovld __cnfn signbit(half4);\n" "short8 __ovld __cnfn signbit(half8);\n" "short16 __ovld __cnfn signbit(half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Returns 1 if the most significant bit in any component\n" " * of x is set; otherwise returns 0.\n" " */\n" "int __ovld __cnfn any(char);\n" "int __ovld __cnfn any(char2);\n" "int __ovld __cnfn any(char3);\n" "int __ovld __cnfn any(char4);\n" "int __ovld __cnfn any(char8);\n" "int __ovld __cnfn any(char16);\n" "int __ovld __cnfn any(short);\n" "int __ovld __cnfn any(short2);\n" "int __ovld __cnfn any(short3);\n" "int __ovld __cnfn any(short4);\n" "int __ovld __cnfn any(short8);\n" "int __ovld __cnfn any(short16);\n" "int __ovld __cnfn any(int);\n" "int __ovld __cnfn any(int2);\n" "int __ovld __cnfn any(int3);\n" "int __ovld __cnfn any(int4);\n" "int __ovld __cnfn any(int8);\n" "int __ovld __cnfn any(int16);\n" "int __ovld __cnfn any(long);\n" "int __ovld __cnfn any(long2);\n" "int __ovld __cnfn any(long3);\n" "int __ovld __cnfn any(long4);\n" "int __ovld __cnfn any(long8);\n" "int __ovld __cnfn any(long16);\n" "\n" "/**\n" " * Returns 1 if the most significant bit in 
all components\n" " * of x is set; otherwise returns 0.\n" " */\n" "int __ovld __cnfn all(char);\n" "int __ovld __cnfn all(char2);\n" "int __ovld __cnfn all(char3);\n" "int __ovld __cnfn all(char4);\n" "int __ovld __cnfn all(char8);\n" "int __ovld __cnfn all(char16);\n" "int __ovld __cnfn all(short);\n" "int __ovld __cnfn all(short2);\n" "int __ovld __cnfn all(short3);\n" "int __ovld __cnfn all(short4);\n" "int __ovld __cnfn all(short8);\n" "int __ovld __cnfn all(short16);\n" "int __ovld __cnfn all(int);\n" "int __ovld __cnfn all(int2);\n" "int __ovld __cnfn all(int3);\n" "int __ovld __cnfn all(int4);\n" "int __ovld __cnfn all(int8);\n" "int __ovld __cnfn all(int16);\n" "int __ovld __cnfn all(long);\n" "int __ovld __cnfn all(long2);\n" "int __ovld __cnfn all(long3);\n" "int __ovld __cnfn all(long4);\n" "int __ovld __cnfn all(long8);\n" "int __ovld __cnfn all(long16);\n" "\n" "/**\n" " * Each bit of the result is the corresponding bit of a if\n" " * the corresponding bit of c is 0. Otherwise it is the\n" " * corresponding bit of b.\n" " */\n" "char __ovld __cnfn bitselect(char, char, char);\n" "uchar __ovld __cnfn bitselect(uchar, uchar, uchar);\n" "char2 __ovld __cnfn bitselect(char2, char2, char2);\n" "uchar2 __ovld __cnfn bitselect(uchar2, uchar2, uchar2);\n" "char3 __ovld __cnfn bitselect(char3, char3, char3);\n" "uchar3 __ovld __cnfn bitselect(uchar3, uchar3, uchar3);\n" "char4 __ovld __cnfn bitselect(char4, char4, char4);\n" "uchar4 __ovld __cnfn bitselect(uchar4, uchar4, uchar4);\n" "char8 __ovld __cnfn bitselect(char8, char8, char8);\n" "uchar8 __ovld __cnfn bitselect(uchar8, uchar8, uchar8);\n" "char16 __ovld __cnfn bitselect(char16, char16, char16);\n" "uchar16 __ovld __cnfn bitselect(uchar16, uchar16, uchar16);\n" "short __ovld __cnfn bitselect(short, short, short);\n" "ushort __ovld __cnfn bitselect(ushort, ushort, ushort);\n" "short2 __ovld __cnfn bitselect(short2, short2, short2);\n" "ushort2 __ovld __cnfn bitselect(ushort2, ushort2, ushort2);\n" 
"short3 __ovld __cnfn bitselect(short3, short3, short3);\n" "ushort3 __ovld __cnfn bitselect(ushort3, ushort3, ushort3);\n" "short4 __ovld __cnfn bitselect(short4, short4, short4);\n" "ushort4 __ovld __cnfn bitselect(ushort4, ushort4, ushort4);\n" "short8 __ovld __cnfn bitselect(short8, short8, short8);\n" "ushort8 __ovld __cnfn bitselect(ushort8, ushort8, ushort8);\n" "short16 __ovld __cnfn bitselect(short16, short16, short16);\n" "ushort16 __ovld __cnfn bitselect(ushort16, ushort16, ushort16);\n" "int __ovld __cnfn bitselect(int, int, int);\n" "uint __ovld __cnfn bitselect(uint, uint, uint);\n" "int2 __ovld __cnfn bitselect(int2, int2, int2);\n" "uint2 __ovld __cnfn bitselect(uint2, uint2, uint2);\n" "int3 __ovld __cnfn bitselect(int3, int3, int3);\n" "uint3 __ovld __cnfn bitselect(uint3, uint3, uint3);\n" "int4 __ovld __cnfn bitselect(int4, int4, int4);\n" "uint4 __ovld __cnfn bitselect(uint4, uint4, uint4);\n" "int8 __ovld __cnfn bitselect(int8, int8, int8);\n" "uint8 __ovld __cnfn bitselect(uint8, uint8, uint8);\n" "int16 __ovld __cnfn bitselect(int16, int16, int16);\n" "uint16 __ovld __cnfn bitselect(uint16, uint16, uint16);\n" "long __ovld __cnfn bitselect(long, long, long);\n" "ulong __ovld __cnfn bitselect(ulong, ulong, ulong);\n" "long2 __ovld __cnfn bitselect(long2, long2, long2);\n" "ulong2 __ovld __cnfn bitselect(ulong2, ulong2, ulong2);\n" "long3 __ovld __cnfn bitselect(long3, long3, long3);\n" "ulong3 __ovld __cnfn bitselect(ulong3, ulong3, ulong3);\n" "long4 __ovld __cnfn bitselect(long4, long4, long4);\n" "ulong4 __ovld __cnfn bitselect(ulong4, ulong4, ulong4);\n" "long8 __ovld __cnfn bitselect(long8, long8, long8);\n" "ulong8 __ovld __cnfn bitselect(ulong8, ulong8, ulong8);\n" "long16 __ovld __cnfn bitselect(long16, long16, long16);\n" "ulong16 __ovld __cnfn bitselect(ulong16, ulong16, ulong16);\n" "float __ovld __cnfn bitselect(float, float, float);\n" "float2 __ovld __cnfn bitselect(float2, float2, float2);\n" "float3 __ovld __cnfn 
bitselect(float3, float3, float3);\n" "float4 __ovld __cnfn bitselect(float4, float4, float4);\n" "float8 __ovld __cnfn bitselect(float8, float8, float8);\n" "float16 __ovld __cnfn bitselect(float16, float16, float16);\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn bitselect(double, double, double);\n" "double2 __ovld __cnfn bitselect(double2, double2, double2);\n" "double3 __ovld __cnfn bitselect(double3, double3, double3);\n" "double4 __ovld __cnfn bitselect(double4, double4, double4);\n" "double8 __ovld __cnfn bitselect(double8, double8, double8);\n" "double16 __ovld __cnfn bitselect(double16, double16, double16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn bitselect(half, half, half);\n" "half2 __ovld __cnfn bitselect(half2, half2, half2);\n" "half3 __ovld __cnfn bitselect(half3, half3, half3);\n" "half4 __ovld __cnfn bitselect(half4, half4, half4);\n" "half8 __ovld __cnfn bitselect(half8, half8, half8);\n" "half16 __ovld __cnfn bitselect(half16, half16, half16);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * For each component of a vector type,\n" " * result[i] = if MSB of c[i] is set ? b[i] : a[i].\n" " * For a scalar type, result = c ? 
b : a.\n" " * b and a must have the same type.\n" " * c must have the same number of elements and bits as a.\n" " */\n" "char __ovld __cnfn select(char, char, char);\n" "uchar __ovld __cnfn select(uchar, uchar, char);\n" "char2 __ovld __cnfn select(char2, char2, char2);\n" "uchar2 __ovld __cnfn select(uchar2, uchar2, char2);\n" "char3 __ovld __cnfn select(char3, char3, char3);\n" "uchar3 __ovld __cnfn select(uchar3, uchar3, char3);\n" "char4 __ovld __cnfn select(char4, char4, char4);\n" "uchar4 __ovld __cnfn select(uchar4, uchar4, char4);\n" "char8 __ovld __cnfn select(char8, char8, char8);\n" "uchar8 __ovld __cnfn select(uchar8, uchar8, char8);\n" "char16 __ovld __cnfn select(char16, char16, char16);\n" "uchar16 __ovld __cnfn select(uchar16, uchar16, char16);\n" "\n" "short __ovld __cnfn select(short, short, short);\n" "ushort __ovld __cnfn select(ushort, ushort, short);\n" "short2 __ovld __cnfn select(short2, short2, short2);\n" "ushort2 __ovld __cnfn select(ushort2, ushort2, short2);\n" "short3 __ovld __cnfn select(short3, short3, short3);\n" "ushort3 __ovld __cnfn select(ushort3, ushort3, short3);\n" "short4 __ovld __cnfn select(short4, short4, short4);\n" "ushort4 __ovld __cnfn select(ushort4, ushort4, short4);\n" "short8 __ovld __cnfn select(short8, short8, short8);\n" "ushort8 __ovld __cnfn select(ushort8, ushort8, short8);\n" "short16 __ovld __cnfn select(short16, short16, short16);\n" "ushort16 __ovld __cnfn select(ushort16, ushort16, short16);\n" "\n" "int __ovld __cnfn select(int, int, int);\n" "uint __ovld __cnfn select(uint, uint, int);\n" "int2 __ovld __cnfn select(int2, int2, int2);\n" "uint2 __ovld __cnfn select(uint2, uint2, int2);\n" "int3 __ovld __cnfn select(int3, int3, int3);\n" "uint3 __ovld __cnfn select(uint3, uint3, int3);\n" "int4 __ovld __cnfn select(int4, int4, int4);\n" "uint4 __ovld __cnfn select(uint4, uint4, int4);\n" "int8 __ovld __cnfn select(int8, int8, int8);\n" "uint8 __ovld __cnfn select(uint8, uint8, int8);\n" "int16 __ovld 
__cnfn select(int16, int16, int16);\n" "uint16 __ovld __cnfn select(uint16, uint16, int16);\n" "float __ovld __cnfn select(float, float, int);\n" "float2 __ovld __cnfn select(float2, float2, int2);\n" "float3 __ovld __cnfn select(float3, float3, int3);\n" "float4 __ovld __cnfn select(float4, float4, int4);\n" "float8 __ovld __cnfn select(float8, float8, int8);\n" "float16 __ovld __cnfn select(float16, float16, int16);\n" "\n" "long __ovld __cnfn select(long, long, long);\n" "ulong __ovld __cnfn select(ulong, ulong, long);\n" "long2 __ovld __cnfn select(long2, long2, long2);\n" "ulong2 __ovld __cnfn select(ulong2, ulong2, long2);\n" "long3 __ovld __cnfn select(long3, long3, long3);\n" "ulong3 __ovld __cnfn select(ulong3, ulong3, long3);\n" "long4 __ovld __cnfn select(long4, long4, long4);\n" "ulong4 __ovld __cnfn select(ulong4, ulong4, long4);\n" "long8 __ovld __cnfn select(long8, long8, long8);\n" "ulong8 __ovld __cnfn select(ulong8, ulong8, long8);\n" "long16 __ovld __cnfn select(long16, long16, long16);\n" "ulong16 __ovld __cnfn select(ulong16, ulong16, long16);\n" "\n" "char __ovld __cnfn select(char, char, uchar);\n" "uchar __ovld __cnfn select(uchar, uchar, uchar);\n" "char2 __ovld __cnfn select(char2, char2, uchar2);\n" "uchar2 __ovld __cnfn select(uchar2, uchar2, uchar2);\n" "char3 __ovld __cnfn select(char3, char3, uchar3);\n" "uchar3 __ovld __cnfn select(uchar3, uchar3, uchar3);\n" "char4 __ovld __cnfn select(char4, char4, uchar4);\n" "uchar4 __ovld __cnfn select(uchar4, uchar4, uchar4);\n" "char8 __ovld __cnfn select(char8, char8, uchar8);\n" "uchar8 __ovld __cnfn select(uchar8, uchar8, uchar8);\n" "char16 __ovld __cnfn select(char16, char16, uchar16);\n" "uchar16 __ovld __cnfn select(uchar16, uchar16, uchar16);\n" "\n" "short __ovld __cnfn select(short, short, ushort);\n" "ushort __ovld __cnfn select(ushort, ushort, ushort);\n" "short2 __ovld __cnfn select(short2, short2, ushort2);\n" "ushort2 __ovld __cnfn select(ushort2, ushort2, ushort2);\n" "short3 
__ovld __cnfn select(short3, short3, ushort3);\n" "ushort3 __ovld __cnfn select(ushort3, ushort3, ushort3);\n" "short4 __ovld __cnfn select(short4, short4, ushort4);\n" "ushort4 __ovld __cnfn select(ushort4, ushort4, ushort4);\n" "short8 __ovld __cnfn select(short8, short8, ushort8);\n" "ushort8 __ovld __cnfn select(ushort8, ushort8, ushort8);\n" "short16 __ovld __cnfn select(short16, short16, ushort16);\n" "ushort16 __ovld __cnfn select(ushort16, ushort16, ushort16);\n" "\n" "int __ovld __cnfn select(int, int, uint);\n" "uint __ovld __cnfn select(uint, uint, uint);\n" "int2 __ovld __cnfn select(int2, int2, uint2);\n" "uint2 __ovld __cnfn select(uint2, uint2, uint2);\n" "int3 __ovld __cnfn select(int3, int3, uint3);\n" "uint3 __ovld __cnfn select(uint3, uint3, uint3);\n" "int4 __ovld __cnfn select(int4, int4, uint4);\n" "uint4 __ovld __cnfn select(uint4, uint4, uint4);\n" "int8 __ovld __cnfn select(int8, int8, uint8);\n" "uint8 __ovld __cnfn select(uint8, uint8, uint8);\n" "int16 __ovld __cnfn select(int16, int16, uint16);\n" "uint16 __ovld __cnfn select(uint16, uint16, uint16);\n" "float __ovld __cnfn select(float, float, uint);\n" "float2 __ovld __cnfn select(float2, float2, uint2);\n" "float3 __ovld __cnfn select(float3, float3, uint3);\n" "float4 __ovld __cnfn select(float4, float4, uint4);\n" "float8 __ovld __cnfn select(float8, float8, uint8);\n" "float16 __ovld __cnfn select(float16, float16, uint16);\n" "\n" "long __ovld __cnfn select(long, long, ulong);\n" "ulong __ovld __cnfn select(ulong, ulong, ulong);\n" "long2 __ovld __cnfn select(long2, long2, ulong2);\n" "ulong2 __ovld __cnfn select(ulong2, ulong2, ulong2);\n" "long3 __ovld __cnfn select(long3, long3, ulong3);\n" "ulong3 __ovld __cnfn select(ulong3, ulong3, ulong3);\n" "long4 __ovld __cnfn select(long4, long4, ulong4);\n" "ulong4 __ovld __cnfn select(ulong4, ulong4, ulong4);\n" "long8 __ovld __cnfn select(long8, long8, ulong8);\n" "ulong8 __ovld __cnfn select(ulong8, ulong8, ulong8);\n" "long16 
__ovld __cnfn select(long16, long16, ulong16);\n" "ulong16 __ovld __cnfn select(ulong16, ulong16, ulong16);\n" "\n" "#ifdef cl_khr_fp64\n" "double __ovld __cnfn select(double, double, long);\n" "double2 __ovld __cnfn select(double2, double2, long2);\n" "double3 __ovld __cnfn select(double3, double3, long3);\n" "double4 __ovld __cnfn select(double4, double4, long4);\n" "double8 __ovld __cnfn select(double8, double8, long8);\n" "double16 __ovld __cnfn select(double16, double16, long16);\n" "double __ovld __cnfn select(double, double, ulong);\n" "double2 __ovld __cnfn select(double2, double2, ulong2);\n" "double3 __ovld __cnfn select(double3, double3, ulong3);\n" "double4 __ovld __cnfn select(double4, double4, ulong4);\n" "double8 __ovld __cnfn select(double8, double8, ulong8);\n" "double16 __ovld __cnfn select(double16, double16, ulong16);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "half __ovld __cnfn select(half, half, short);\n" "half2 __ovld __cnfn select(half2, half2, short2);\n" "half3 __ovld __cnfn select(half3, half3, short3);\n" "half4 __ovld __cnfn select(half4, half4, short4);\n" "half8 __ovld __cnfn select(half8, half8, short8);\n" "half16 __ovld __cnfn select(half16, half16, short16);\n" "half __ovld __cnfn select(half, half, ushort);\n" "half2 __ovld __cnfn select(half2, half2, ushort2);\n" "half3 __ovld __cnfn select(half3, half3, ushort3);\n" "half4 __ovld __cnfn select(half4, half4, ushort4);\n" "half8 __ovld __cnfn select(half8, half8, ushort8);\n" "half16 __ovld __cnfn select(half16, half16, ushort16);\n" "#endif //cl_khr_fp16\n" "\n" "// OpenCL v1.1 s6.11.7, v1.2 s6.12.7, v2.0 s6.13.7 - Vector Data Load and Store Functions\n" "// OpenCL extensions v1.1 s9.6.6, v1.2 s9.5.6, v2.0 s9.4.6 - Vector Data Load and Store Functions for Half Type\n" "/**\n" " * Use generic type gentype to indicate the built-in data types\n" " * char, uchar, short, ushort, int, uint, long, ulong, float,\n" " * double or half.\n" " *\n" " * vloadn return sizeof 
(gentypen) bytes of data read from address (p + (offset * n)).\n" " *\n" " * vstoren write sizeof (gentypen) bytes given by data to address (p + (offset * n)).\n" " *\n" " * The address computed as (p + (offset * n)) must be\n" " * 8-bit aligned if gentype is char, uchar;\n" " * 16-bit aligned if gentype is short, ushort, half;\n" " * 32-bit aligned if gentype is int, uint, float;\n" " * 64-bit aligned if gentype is long, ulong, double.\n" " */\n" "\n" "char2 __ovld __purefn vload2(size_t, const __constant char *);\n" "uchar2 __ovld __purefn vload2(size_t, const __constant uchar *);\n" "short2 __ovld __purefn vload2(size_t, const __constant short *);\n" "ushort2 __ovld __purefn vload2(size_t, const __constant ushort *);\n" "int2 __ovld __purefn vload2(size_t, const __constant int *);\n" "uint2 __ovld __purefn vload2(size_t, const __constant uint *);\n" "long2 __ovld __purefn vload2(size_t, const __constant long *);\n" "ulong2 __ovld __purefn vload2(size_t, const __constant ulong *);\n" "float2 __ovld __purefn vload2(size_t, const __constant float *);\n" "char3 __ovld __purefn vload3(size_t, const __constant char *);\n" "uchar3 __ovld __purefn vload3(size_t, const __constant uchar *);\n" "short3 __ovld __purefn vload3(size_t, const __constant short *);\n" "ushort3 __ovld __purefn vload3(size_t, const __constant ushort *);\n" "int3 __ovld __purefn vload3(size_t, const __constant int *);\n" "uint3 __ovld __purefn vload3(size_t, const __constant uint *);\n" "long3 __ovld __purefn vload3(size_t, const __constant long *);\n" "ulong3 __ovld __purefn vload3(size_t, const __constant ulong *);\n" "float3 __ovld __purefn vload3(size_t, const __constant float *);\n" "char4 __ovld __purefn vload4(size_t, const __constant char *);\n" "uchar4 __ovld __purefn vload4(size_t, const __constant uchar *);\n" "short4 __ovld __purefn vload4(size_t, const __constant short *);\n" "ushort4 __ovld __purefn vload4(size_t, const __constant ushort *);\n" "int4 __ovld __purefn vload4(size_t, 
const __constant int *);\n" "uint4 __ovld __purefn vload4(size_t, const __constant uint *);\n" "long4 __ovld __purefn vload4(size_t, const __constant long *);\n" "ulong4 __ovld __purefn vload4(size_t, const __constant ulong *);\n" "float4 __ovld __purefn vload4(size_t, const __constant float *);\n" "char8 __ovld __purefn vload8(size_t, const __constant char *);\n" "uchar8 __ovld __purefn vload8(size_t, const __constant uchar *);\n" "short8 __ovld __purefn vload8(size_t, const __constant short *);\n" "ushort8 __ovld __purefn vload8(size_t, const __constant ushort *);\n" "int8 __ovld __purefn vload8(size_t, const __constant int *);\n" "uint8 __ovld __purefn vload8(size_t, const __constant uint *);\n" "long8 __ovld __purefn vload8(size_t, const __constant long *);\n" "ulong8 __ovld __purefn vload8(size_t, const __constant ulong *);\n" "float8 __ovld __purefn vload8(size_t, const __constant float *);\n" "char16 __ovld __purefn vload16(size_t, const __constant char *);\n" "uchar16 __ovld __purefn vload16(size_t, const __constant uchar *);\n" "short16 __ovld __purefn vload16(size_t, const __constant short *);\n" "ushort16 __ovld __purefn vload16(size_t, const __constant ushort *);\n" "int16 __ovld __purefn vload16(size_t, const __constant int *);\n" "uint16 __ovld __purefn vload16(size_t, const __constant uint *);\n" "long16 __ovld __purefn vload16(size_t, const __constant long *);\n" "ulong16 __ovld __purefn vload16(size_t, const __constant ulong *);\n" "float16 __ovld __purefn vload16(size_t, const __constant float *);\n" "#ifdef cl_khr_fp64\n" "double2 __ovld __purefn vload2(size_t, const __constant double *);\n" "double3 __ovld __purefn vload3(size_t, const __constant double *);\n" "double4 __ovld __purefn vload4(size_t, const __constant double *);\n" "double8 __ovld __purefn vload8(size_t, const __constant double *);\n" "double16 __ovld __purefn vload16(size_t, const __constant double *);\n" "#endif //cl_khr_fp64\n" "\n" "#ifdef cl_khr_fp16\n" "half2 __ovld __purefn 
vload2(size_t, const __constant half *);\n" "half3 __ovld __purefn vload3(size_t, const __constant half *);\n" "half4 __ovld __purefn vload4(size_t, const __constant half *);\n" "half8 __ovld __purefn vload8(size_t, const __constant half *);\n" "half16 __ovld __purefn vload16(size_t, const __constant half *);\n" "#endif //cl_khr_fp16\n" "\n" "#if defined(__opencl_c_generic_address_space)\n" "char2 __ovld __purefn vload2(size_t, const char *);\n" "uchar2 __ovld __purefn vload2(size_t, const uchar *);\n" "short2 __ovld __purefn vload2(size_t, const short *);\n" "ushort2 __ovld __purefn vload2(size_t, const ushort *);\n" "int2 __ovld __purefn vload2(size_t, const int *);\n" "uint2 __ovld __purefn vload2(size_t, const uint *);\n" "long2 __ovld __purefn vload2(size_t, const long *);\n" "ulong2 __ovld __purefn vload2(size_t, const ulong *);\n" "float2 __ovld __purefn vload2(size_t, const float *);\n" "char3 __ovld __purefn vload3(size_t, const char *);\n" "uchar3 __ovld __purefn vload3(size_t, const uchar *);\n" "short3 __ovld __purefn vload3(size_t, const short *);\n" "ushort3 __ovld __purefn vload3(size_t, const ushort *);\n" "int3 __ovld __purefn vload3(size_t, const int *);\n" "uint3 __ovld __purefn vload3(size_t, const uint *);\n" "long3 __ovld __purefn vload3(size_t, const long *);\n" "ulong3 __ovld __purefn vload3(size_t, const ulong *);\n" "float3 __ovld __purefn vload3(size_t, const float *);\n" "char4 __ovld __purefn vload4(size_t, const char *);\n" "uchar4 __ovld __purefn vload4(size_t, const uchar *);\n" "short4 __ovld __purefn vload4(size_t, const short *);\n" "ushort4 __ovld __purefn vload4(size_t, const ushort *);\n" "int4 __ovld __purefn vload4(size_t, const int *);\n" "uint4 __ovld __purefn vload4(size_t, const uint *);\n" "long4 __ovld __purefn vload4(size_t, const long *);\n" "ulong4 __ovld __purefn vload4(size_t, const ulong *);\n" "float4 __ovld __purefn vload4(size_t, const float *);\n" "char8 __ovld __purefn vload8(size_t, const char *);\n" "uchar8 
__ovld __purefn vload8(size_t, const uchar *);\n" "short8 __ovld __purefn vload8(size_t, const short *);\n" "ushort8 __ovld __purefn vload8(size_t, const ushort *);\n" "int8 __ovld __purefn vload8(size_t, const int *);\n" "uint8 __ovld __purefn vload8(size_t, const uint *);\n" "long8 __ovld __purefn vload8(size_t, const long *);\n" "ulong8 __ovld __purefn vload8(size_t, const ulong *);\n" "float8 __ovld __purefn vload8(size_t, const float *);\n" "char16 __ovld __purefn vload16(size_t, const char *);\n" "uchar16 __ovld __purefn vload16(size_t, const uchar *);\n" "short16 __ovld __purefn vload16(size_t, const short *);\n" "ushort16 __ovld __purefn vload16(size_t, const ushort *);\n" "int16 __ovld __purefn vload16(size_t, const int *);\n" "uint16 __ovld __purefn vload16(size_t, const uint *);\n" "long16 __ovld __purefn vload16(size_t, const long *);\n" "ulong16 __ovld __purefn vload16(size_t, const ulong *);\n" "float16 __ovld __purefn vload16(size_t, const float *);\n" "\n" "#ifdef cl_khr_fp64\n" "double2 __ovld __purefn vload2(size_t, const double *);\n" "double3 __ovld __purefn vload3(size_t, const double *);\n" "double4 __ovld __purefn vload4(size_t, const double *);\n" "double8 __ovld __purefn vload8(size_t, const double *);\n" "double16 __ovld __purefn vload16(size_t, const double *);\n" "#endif //cl_khr_fp64\n" "\n" "#ifdef cl_khr_fp16\n" "half2 __ovld __purefn vload2(size_t, const half *);\n" "half3 __ovld __purefn vload3(size_t, const half *);\n" "half4 __ovld __purefn vload4(size_t, const half *);\n" "half8 __ovld __purefn vload8(size_t, const half *);\n" "half16 __ovld __purefn vload16(size_t, const half *);\n" "#endif //cl_khr_fp16\n" "#endif //defined(__opencl_c_generic_address_space)\n" "\n" "#if defined(__opencl_c_named_address_space_builtins)\n" "char2 __ovld __purefn vload2(size_t, const __global char *);\n" "uchar2 __ovld __purefn vload2(size_t, const __global uchar *);\n" "short2 __ovld __purefn vload2(size_t, const __global short *);\n" "ushort2 
__ovld __purefn vload2(size_t, const __global ushort *);\n" "int2 __ovld __purefn vload2(size_t, const __global int *);\n" "uint2 __ovld __purefn vload2(size_t, const __global uint *);\n" "long2 __ovld __purefn vload2(size_t, const __global long *);\n" "ulong2 __ovld __purefn vload2(size_t, const __global ulong *);\n" "float2 __ovld __purefn vload2(size_t, const __global float *);\n" "char3 __ovld __purefn vload3(size_t, const __global char *);\n" "uchar3 __ovld __purefn vload3(size_t, const __global uchar *);\n" "short3 __ovld __purefn vload3(size_t, const __global short *);\n" "ushort3 __ovld __purefn vload3(size_t, const __global ushort *);\n" "int3 __ovld __purefn vload3(size_t, const __global int *);\n" "uint3 __ovld __purefn vload3(size_t, const __global uint *);\n" "long3 __ovld __purefn vload3(size_t, const __global long *);\n" "ulong3 __ovld __purefn vload3(size_t, const __global ulong *);\n" "float3 __ovld __purefn vload3(size_t, const __global float *);\n" "char4 __ovld __purefn vload4(size_t, const __global char *);\n" "uchar4 __ovld __purefn vload4(size_t, const __global uchar *);\n" "short4 __ovld __purefn vload4(size_t, const __global short *);\n" "ushort4 __ovld __purefn vload4(size_t, const __global ushort *);\n" "int4 __ovld __purefn vload4(size_t, const __global int *);\n" "uint4 __ovld __purefn vload4(size_t, const __global uint *);\n" "long4 __ovld __purefn vload4(size_t, const __global long *);\n" "ulong4 __ovld __purefn vload4(size_t, const __global ulong *);\n" "float4 __ovld __purefn vload4(size_t, const __global float *);\n" "char8 __ovld __purefn vload8(size_t, const __global char *);\n" "uchar8 __ovld __purefn vload8(size_t, const __global uchar *);\n" "short8 __ovld __purefn vload8(size_t, const __global short *);\n" "ushort8 __ovld __purefn vload8(size_t, const __global ushort *);\n" "int8 __ovld __purefn vload8(size_t, const __global int *);\n" "uint8 __ovld __purefn vload8(size_t, const __global uint *);\n" "long8 __ovld __purefn 
vload8(size_t, const __global long *);\n" "ulong8 __ovld __purefn vload8(size_t, const __global ulong *);\n" "float8 __ovld __purefn vload8(size_t, const __global float *);\n" "char16 __ovld __purefn vload16(size_t, const __global char *);\n" "uchar16 __ovld __purefn vload16(size_t, const __global uchar *);\n" "short16 __ovld __purefn vload16(size_t, const __global short *);\n" "ushort16 __ovld __purefn vload16(size_t, const __global ushort *);\n" "int16 __ovld __purefn vload16(size_t, const __global int *);\n" "uint16 __ovld __purefn vload16(size_t, const __global uint *);\n" "long16 __ovld __purefn vload16(size_t, const __global long *);\n" "ulong16 __ovld __purefn vload16(size_t, const __global ulong *);\n" "float16 __ovld __purefn vload16(size_t, const __global float *);\n" "char2 __ovld __purefn vload2(size_t, const __local char *);\n" "uchar2 __ovld __purefn vload2(size_t, const __local uchar *);\n" "short2 __ovld __purefn vload2(size_t, const __local short *);\n" "ushort2 __ovld __purefn vload2(size_t, const __local ushort *);\n" "int2 __ovld __purefn vload2(size_t, const __local int *);\n" "uint2 __ovld __purefn vload2(size_t, const __local uint *);\n" "long2 __ovld __purefn vload2(size_t, const __local long *);\n" "ulong2 __ovld __purefn vload2(size_t, const __local ulong *);\n" "float2 __ovld __purefn vload2(size_t, const __local float *);\n" "char3 __ovld __purefn vload3(size_t, const __local char *);\n" "uchar3 __ovld __purefn vload3(size_t, const __local uchar *);\n" "short3 __ovld __purefn vload3(size_t, const __local short *);\n" "ushort3 __ovld __purefn vload3(size_t, const __local ushort *);\n" "int3 __ovld __purefn vload3(size_t, const __local int *);\n" "uint3 __ovld __purefn vload3(size_t, const __local uint *);\n" "long3 __ovld __purefn vload3(size_t, const __local long *);\n" "ulong3 __ovld __purefn vload3(size_t, const __local ulong *);\n" "float3 __ovld __purefn vload3(size_t, const __local float *);\n" "char4 __ovld __purefn vload4(size_t, 
const __local char *);\n" "uchar4 __ovld __purefn vload4(size_t, const __local uchar *);\n" "short4 __ovld __purefn vload4(size_t, const __local short *);\n" "ushort4 __ovld __purefn vload4(size_t, const __local ushort *);\n" "int4 __ovld __purefn vload4(size_t, const __local int *);\n" "uint4 __ovld __purefn vload4(size_t, const __local uint *);\n" "long4 __ovld __purefn vload4(size_t, const __local long *);\n" "ulong4 __ovld __purefn vload4(size_t, const __local ulong *);\n" "float4 __ovld __purefn vload4(size_t, const __local float *);\n" "char8 __ovld __purefn vload8(size_t, const __local char *);\n" "uchar8 __ovld __purefn vload8(size_t, const __local uchar *);\n" "short8 __ovld __purefn vload8(size_t, const __local short *);\n" "ushort8 __ovld __purefn vload8(size_t, const __local ushort *);\n" "int8 __ovld __purefn vload8(size_t, const __local int *);\n" "uint8 __ovld __purefn vload8(size_t, const __local uint *);\n" "long8 __ovld __purefn vload8(size_t, const __local long *);\n" "ulong8 __ovld __purefn vload8(size_t, const __local ulong *);\n" "float8 __ovld __purefn vload8(size_t, const __local float *);\n" "char16 __ovld __purefn vload16(size_t, const __local char *);\n" "uchar16 __ovld __purefn vload16(size_t, const __local uchar *);\n" "short16 __ovld __purefn vload16(size_t, const __local short *);\n" "ushort16 __ovld __purefn vload16(size_t, const __local ushort *);\n" "int16 __ovld __purefn vload16(size_t, const __local int *);\n" "uint16 __ovld __purefn vload16(size_t, const __local uint *);\n" "long16 __ovld __purefn vload16(size_t, const __local long *);\n" "ulong16 __ovld __purefn vload16(size_t, const __local ulong *);\n" "float16 __ovld __purefn vload16(size_t, const __local float *);\n" "char2 __ovld __purefn vload2(size_t, const __private char *);\n" "uchar2 __ovld __purefn vload2(size_t, const __private uchar *);\n" "short2 __ovld __purefn vload2(size_t, const __private short *);\n" "ushort2 __ovld __purefn vload2(size_t, const __private 
ushort *);\n" "int2 __ovld __purefn vload2(size_t, const __private int *);\n" "uint2 __ovld __purefn vload2(size_t, const __private uint *);\n" "long2 __ovld __purefn vload2(size_t, const __private long *);\n" "ulong2 __ovld __purefn vload2(size_t, const __private ulong *);\n" "float2 __ovld __purefn vload2(size_t, const __private float *);\n" "char3 __ovld __purefn vload3(size_t, const __private char *);\n" "uchar3 __ovld __purefn vload3(size_t, const __private uchar *);\n" "short3 __ovld __purefn vload3(size_t, const __private short *);\n" "ushort3 __ovld __purefn vload3(size_t, const __private ushort *);\n" "int3 __ovld __purefn vload3(size_t, const __private int *);\n" "uint3 __ovld __purefn vload3(size_t, const __private uint *);\n" "long3 __ovld __purefn vload3(size_t, const __private long *);\n" "ulong3 __ovld __purefn vload3(size_t, const __private ulong *);\n" "float3 __ovld __purefn vload3(size_t, const __private float *);\n" "char4 __ovld __purefn vload4(size_t, const __private char *);\n" "uchar4 __ovld __purefn vload4(size_t, const __private uchar *);\n" "short4 __ovld __purefn vload4(size_t, const __private short *);\n" "ushort4 __ovld __purefn vload4(size_t, const __private ushort *);\n" "int4 __ovld __purefn vload4(size_t, const __private int *);\n" "uint4 __ovld __purefn vload4(size_t, const __private uint *);\n" "long4 __ovld __purefn vload4(size_t, const __private long *);\n" "ulong4 __ovld __purefn vload4(size_t, const __private ulong *);\n" "float4 __ovld __purefn vload4(size_t, const __private float *);\n" "char8 __ovld __purefn vload8(size_t, const __private char *);\n" "uchar8 __ovld __purefn vload8(size_t, const __private uchar *);\n" "short8 __ovld __purefn vload8(size_t, const __private short *);\n" "ushort8 __ovld __purefn vload8(size_t, const __private ushort *);\n" "int8 __ovld __purefn vload8(size_t, const __private int *);\n" "uint8 __ovld __purefn vload8(size_t, const __private uint *);\n" "long8 __ovld __purefn vload8(size_t, const 
__private long *);\n" "ulong8 __ovld __purefn vload8(size_t, const __private ulong *);\n" "float8 __ovld __purefn vload8(size_t, const __private float *);\n" "char16 __ovld __purefn vload16(size_t, const __private char *);\n" "uchar16 __ovld __purefn vload16(size_t, const __private uchar *);\n" "short16 __ovld __purefn vload16(size_t, const __private short *);\n" "ushort16 __ovld __purefn vload16(size_t, const __private ushort *);\n" "int16 __ovld __purefn vload16(size_t, const __private int *);\n" "uint16 __ovld __purefn vload16(size_t, const __private uint *);\n" "long16 __ovld __purefn vload16(size_t, const __private long *);\n" "ulong16 __ovld __purefn vload16(size_t, const __private ulong *);\n" "float16 __ovld __purefn vload16(size_t, const __private float *);\n" "\n" "#ifdef cl_khr_fp64\n" "double2 __ovld __purefn vload2(size_t, const __global double *);\n" "double3 __ovld __purefn vload3(size_t, const __global double *);\n" "double4 __ovld __purefn vload4(size_t, const __global double *);\n" "double8 __ovld __purefn vload8(size_t, const __global double *);\n" "double16 __ovld __purefn vload16(size_t, const __global double *);\n" "double2 __ovld __purefn vload2(size_t, const __local double *);\n" "double3 __ovld __purefn vload3(size_t, const __local double *);\n" "double4 __ovld __purefn vload4(size_t, const __local double *);\n" "double8 __ovld __purefn vload8(size_t, const __local double *);\n" "double16 __ovld __purefn vload16(size_t, const __local double *);\n" "double2 __ovld __purefn vload2(size_t, const __private double *);\n" "double3 __ovld __purefn vload3(size_t, const __private double *);\n" "double4 __ovld __purefn vload4(size_t, const __private double *);\n" "double8 __ovld __purefn vload8(size_t, const __private double *);\n" "double16 __ovld __purefn vload16(size_t, const __private double *);\n" "#endif //cl_khr_fp64\n" "\n" "#ifdef cl_khr_fp16\n" "half2 __ovld __purefn vload2(size_t, const __global half *);\n" "half3 __ovld __purefn 
vload3(size_t, const __global half *);\n" "half4 __ovld __purefn vload4(size_t, const __global half *);\n" "half8 __ovld __purefn vload8(size_t, const __global half *);\n" "half16 __ovld __purefn vload16(size_t, const __global half *);\n" "half2 __ovld __purefn vload2(size_t, const __local half *);\n" "half3 __ovld __purefn vload3(size_t, const __local half *);\n" "half4 __ovld __purefn vload4(size_t, const __local half *);\n" "half8 __ovld __purefn vload8(size_t, const __local half *);\n" "half16 __ovld __purefn vload16(size_t, const __local half *);\n" "half2 __ovld __purefn vload2(size_t, const __private half *);\n" "half3 __ovld __purefn vload3(size_t, const __private half *);\n" "half4 __ovld __purefn vload4(size_t, const __private half *);\n" "half8 __ovld __purefn vload8(size_t, const __private half *);\n" "half16 __ovld __purefn vload16(size_t, const __private half *);\n" "#endif //cl_khr_fp16\n" "#endif //defined(__opencl_c_named_address_space_builtins)\n" "\n" "#if defined(__opencl_c_generic_address_space)\n" "void __ovld vstore2(char2, size_t, char *);\n" "void __ovld vstore2(uchar2, size_t, uchar *);\n" "void __ovld vstore2(short2, size_t, short *);\n" "void __ovld vstore2(ushort2, size_t, ushort *);\n" "void __ovld vstore2(int2, size_t, int *);\n" "void __ovld vstore2(uint2, size_t, uint *);\n" "void __ovld vstore2(long2, size_t, long *);\n" "void __ovld vstore2(ulong2, size_t, ulong *);\n" "void __ovld vstore2(float2, size_t, float *);\n" "void __ovld vstore3(char3, size_t, char *);\n" "void __ovld vstore3(uchar3, size_t, uchar *);\n" "void __ovld vstore3(short3, size_t, short *);\n" "void __ovld vstore3(ushort3, size_t, ushort *);\n" "void __ovld vstore3(int3, size_t, int *);\n" "void __ovld vstore3(uint3, size_t, uint *);\n" "void __ovld vstore3(long3, size_t, long *);\n" "void __ovld vstore3(ulong3, size_t, ulong *);\n" "void __ovld vstore3(float3, size_t, float *);\n" "void __ovld vstore4(char4, size_t, char *);\n" "void __ovld vstore4(uchar4, 
size_t, uchar *);\n" "void __ovld vstore4(short4, size_t, short *);\n" "void __ovld vstore4(ushort4, size_t, ushort *);\n" "void __ovld vstore4(int4, size_t, int *);\n" "void __ovld vstore4(uint4, size_t, uint *);\n" "void __ovld vstore4(long4, size_t, long *);\n" "void __ovld vstore4(ulong4, size_t, ulong *);\n" "void __ovld vstore4(float4, size_t, float *);\n" "void __ovld vstore8(char8, size_t, char *);\n" "void __ovld vstore8(uchar8, size_t, uchar *);\n" "void __ovld vstore8(short8, size_t, short *);\n" "void __ovld vstore8(ushort8, size_t, ushort *);\n" "void __ovld vstore8(int8, size_t, int *);\n" "void __ovld vstore8(uint8, size_t, uint *);\n" "void __ovld vstore8(long8, size_t, long *);\n" "void __ovld vstore8(ulong8, size_t, ulong *);\n" "void __ovld vstore8(float8, size_t, float *);\n" "void __ovld vstore16(char16, size_t, char *);\n" "void __ovld vstore16(uchar16, size_t, uchar *);\n" "void __ovld vstore16(short16, size_t, short *);\n" "void __ovld vstore16(ushort16, size_t, ushort *);\n" "void __ovld vstore16(int16, size_t, int *);\n" "void __ovld vstore16(uint16, size_t, uint *);\n" "void __ovld vstore16(long16, size_t, long *);\n" "void __ovld vstore16(ulong16, size_t, ulong *);\n" "void __ovld vstore16(float16, size_t, float *);\n" "#ifdef cl_khr_fp64\n" "void __ovld vstore2(double2, size_t, double *);\n" "void __ovld vstore3(double3, size_t, double *);\n" "void __ovld vstore4(double4, size_t, double *);\n" "void __ovld vstore8(double8, size_t, double *);\n" "void __ovld vstore16(double16, size_t, double *);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "void __ovld vstore2(half2, size_t, half *);\n" "void __ovld vstore3(half3, size_t, half *);\n" "void __ovld vstore4(half4, size_t, half *);\n" "void __ovld vstore8(half8, size_t, half *);\n" "void __ovld vstore16(half16, size_t, half *);\n" "#endif //cl_khr_fp16\n" "#endif //defined(__opencl_c_generic_address_space)\n" "\n" "#if defined(__opencl_c_named_address_space_builtins)\n" "void __ovld 
vstore2(char2, size_t, __global char *);\n" "void __ovld vstore2(uchar2, size_t, __global uchar *);\n" "void __ovld vstore2(short2, size_t, __global short *);\n" "void __ovld vstore2(ushort2, size_t, __global ushort *);\n" "void __ovld vstore2(int2, size_t, __global int *);\n" "void __ovld vstore2(uint2, size_t, __global uint *);\n" "void __ovld vstore2(long2, size_t, __global long *);\n" "void __ovld vstore2(ulong2, size_t, __global ulong *);\n" "void __ovld vstore2(float2, size_t, __global float *);\n" "void __ovld vstore3(char3, size_t, __global char *);\n" "void __ovld vstore3(uchar3, size_t, __global uchar *);\n" "void __ovld vstore3(short3, size_t, __global short *);\n" "void __ovld vstore3(ushort3, size_t, __global ushort *);\n" "void __ovld vstore3(int3, size_t, __global int *);\n" "void __ovld vstore3(uint3, size_t, __global uint *);\n" "void __ovld vstore3(long3, size_t, __global long *);\n" "void __ovld vstore3(ulong3, size_t, __global ulong *);\n" "void __ovld vstore3(float3, size_t, __global float *);\n" "void __ovld vstore4(char4, size_t, __global char *);\n" "void __ovld vstore4(uchar4, size_t, __global uchar *);\n" "void __ovld vstore4(short4, size_t, __global short *);\n" "void __ovld vstore4(ushort4, size_t, __global ushort *);\n" "void __ovld vstore4(int4, size_t, __global int *);\n" "void __ovld vstore4(uint4, size_t, __global uint *);\n" "void __ovld vstore4(long4, size_t, __global long *);\n" "void __ovld vstore4(ulong4, size_t, __global ulong *);\n" "void __ovld vstore4(float4, size_t, __global float *);\n" "void __ovld vstore8(char8, size_t, __global char *);\n" "void __ovld vstore8(uchar8, size_t, __global uchar *);\n" "void __ovld vstore8(short8, size_t, __global short *);\n" "void __ovld vstore8(ushort8, size_t, __global ushort *);\n" "void __ovld vstore8(int8, size_t, __global int *);\n" "void __ovld vstore8(uint8, size_t, __global uint *);\n" "void __ovld vstore8(long8, size_t, __global long *);\n" "void __ovld vstore8(ulong8, size_t, 
__global ulong *);\n" "void __ovld vstore8(float8, size_t, __global float *);\n" "void __ovld vstore16(char16, size_t, __global char *);\n" "void __ovld vstore16(uchar16, size_t, __global uchar *);\n" "void __ovld vstore16(short16, size_t, __global short *);\n" "void __ovld vstore16(ushort16, size_t, __global ushort *);\n" "void __ovld vstore16(int16, size_t, __global int *);\n" "void __ovld vstore16(uint16, size_t, __global uint *);\n" "void __ovld vstore16(long16, size_t, __global long *);\n" "void __ovld vstore16(ulong16, size_t, __global ulong *);\n" "void __ovld vstore16(float16, size_t, __global float *);\n" "void __ovld vstore2(char2, size_t, __local char *);\n" "void __ovld vstore2(uchar2, size_t, __local uchar *);\n" "void __ovld vstore2(short2, size_t, __local short *);\n" "void __ovld vstore2(ushort2, size_t, __local ushort *);\n" "void __ovld vstore2(int2, size_t, __local int *);\n" "void __ovld vstore2(uint2, size_t, __local uint *);\n" "void __ovld vstore2(long2, size_t, __local long *);\n" "void __ovld vstore2(ulong2, size_t, __local ulong *);\n" "void __ovld vstore2(float2, size_t, __local float *);\n" "void __ovld vstore3(char3, size_t, __local char *);\n" "void __ovld vstore3(uchar3, size_t, __local uchar *);\n" "void __ovld vstore3(short3, size_t, __local short *);\n" "void __ovld vstore3(ushort3, size_t, __local ushort *);\n" "void __ovld vstore3(int3, size_t, __local int *);\n" "void __ovld vstore3(uint3, size_t, __local uint *);\n" "void __ovld vstore3(long3, size_t, __local long *);\n" "void __ovld vstore3(ulong3, size_t, __local ulong *);\n" "void __ovld vstore3(float3, size_t, __local float *);\n" "void __ovld vstore4(char4, size_t, __local char *);\n" "void __ovld vstore4(uchar4, size_t, __local uchar *);\n" "void __ovld vstore4(short4, size_t, __local short *);\n" "void __ovld vstore4(ushort4, size_t, __local ushort *);\n" "void __ovld vstore4(int4, size_t, __local int *);\n" "void __ovld vstore4(uint4, size_t, __local uint *);\n" "void 
__ovld vstore4(long4, size_t, __local long *);\n" "void __ovld vstore4(ulong4, size_t, __local ulong *);\n" "void __ovld vstore4(float4, size_t, __local float *);\n" "void __ovld vstore8(char8, size_t, __local char *);\n" "void __ovld vstore8(uchar8, size_t, __local uchar *);\n" "void __ovld vstore8(short8, size_t, __local short *);\n" "void __ovld vstore8(ushort8, size_t, __local ushort *);\n" "void __ovld vstore8(int8, size_t, __local int *);\n" "void __ovld vstore8(uint8, size_t, __local uint *);\n" "void __ovld vstore8(long8, size_t, __local long *);\n" "void __ovld vstore8(ulong8, size_t, __local ulong *);\n" "void __ovld vstore8(float8, size_t, __local float *);\n" "void __ovld vstore16(char16, size_t, __local char *);\n" "void __ovld vstore16(uchar16, size_t, __local uchar *);\n" "void __ovld vstore16(short16, size_t, __local short *);\n" "void __ovld vstore16(ushort16, size_t, __local ushort *);\n" "void __ovld vstore16(int16, size_t, __local int *);\n" "void __ovld vstore16(uint16, size_t, __local uint *);\n" "void __ovld vstore16(long16, size_t, __local long *);\n" "void __ovld vstore16(ulong16, size_t, __local ulong *);\n" "void __ovld vstore16(float16, size_t, __local float *);\n" "void __ovld vstore2(char2, size_t, __private char *);\n" "void __ovld vstore2(uchar2, size_t, __private uchar *);\n" "void __ovld vstore2(short2, size_t, __private short *);\n" "void __ovld vstore2(ushort2, size_t, __private ushort *);\n" "void __ovld vstore2(int2, size_t, __private int *);\n" "void __ovld vstore2(uint2, size_t, __private uint *);\n" "void __ovld vstore2(long2, size_t, __private long *);\n" "void __ovld vstore2(ulong2, size_t, __private ulong *);\n" "void __ovld vstore2(float2, size_t, __private float *);\n" "void __ovld vstore3(char3, size_t, __private char *);\n" "void __ovld vstore3(uchar3, size_t, __private uchar *);\n" "void __ovld vstore3(short3, size_t, __private short *);\n" "void __ovld vstore3(ushort3, size_t, __private ushort *);\n" "void __ovld 
vstore3(int3, size_t, __private int *);\n" "void __ovld vstore3(uint3, size_t, __private uint *);\n" "void __ovld vstore3(long3, size_t, __private long *);\n" "void __ovld vstore3(ulong3, size_t, __private ulong *);\n" "void __ovld vstore3(float3, size_t, __private float *);\n" "void __ovld vstore4(char4, size_t, __private char *);\n" "void __ovld vstore4(uchar4, size_t, __private uchar *);\n" "void __ovld vstore4(short4, size_t, __private short *);\n" "void __ovld vstore4(ushort4, size_t, __private ushort *);\n" "void __ovld vstore4(int4, size_t, __private int *);\n" "void __ovld vstore4(uint4, size_t, __private uint *);\n" "void __ovld vstore4(long4, size_t, __private long *);\n" "void __ovld vstore4(ulong4, size_t, __private ulong *);\n" "void __ovld vstore4(float4, size_t, __private float *);\n" "void __ovld vstore8(char8, size_t, __private char *);\n" "void __ovld vstore8(uchar8, size_t, __private uchar *);\n" "void __ovld vstore8(short8, size_t, __private short *);\n" "void __ovld vstore8(ushort8, size_t, __private ushort *);\n" "void __ovld vstore8(int8, size_t, __private int *);\n" "void __ovld vstore8(uint8, size_t, __private uint *);\n" "void __ovld vstore8(long8, size_t, __private long *);\n" "void __ovld vstore8(ulong8, size_t, __private ulong *);\n" "void __ovld vstore8(float8, size_t, __private float *);\n" "void __ovld vstore16(char16, size_t, __private char *);\n" "void __ovld vstore16(uchar16, size_t, __private uchar *);\n" "void __ovld vstore16(short16, size_t, __private short *);\n" "void __ovld vstore16(ushort16, size_t, __private ushort *);\n" "void __ovld vstore16(int16, size_t, __private int *);\n" "void __ovld vstore16(uint16, size_t, __private uint *);\n" "void __ovld vstore16(long16, size_t, __private long *);\n" "void __ovld vstore16(ulong16, size_t, __private ulong *);\n" "void __ovld vstore16(float16, size_t, __private float *);\n" "#ifdef cl_khr_fp64\n" "void __ovld vstore2(double2, size_t, __global double *);\n" "void __ovld 
vstore3(double3, size_t, __global double *);\n" "void __ovld vstore4(double4, size_t, __global double *);\n" "void __ovld vstore8(double8, size_t, __global double *);\n" "void __ovld vstore16(double16, size_t, __global double *);\n" "void __ovld vstore2(double2, size_t, __local double *);\n" "void __ovld vstore3(double3, size_t, __local double *);\n" "void __ovld vstore4(double4, size_t, __local double *);\n" "void __ovld vstore8(double8, size_t, __local double *);\n" "void __ovld vstore16(double16, size_t, __local double *);\n" "void __ovld vstore2(double2, size_t, __private double *);\n" "void __ovld vstore3(double3, size_t, __private double *);\n" "void __ovld vstore4(double4, size_t, __private double *);\n" "void __ovld vstore8(double8, size_t, __private double *);\n" "void __ovld vstore16(double16, size_t, __private double *);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "void __ovld vstore2(half2, size_t, __global half *);\n" "void __ovld vstore3(half3, size_t, __global half *);\n" "void __ovld vstore4(half4, size_t, __global half *);\n" "void __ovld vstore8(half8, size_t, __global half *);\n" "void __ovld vstore16(half16, size_t, __global half *);\n" "void __ovld vstore2(half2, size_t, __local half *);\n" "void __ovld vstore3(half3, size_t, __local half *);\n" "void __ovld vstore4(half4, size_t, __local half *);\n" "void __ovld vstore8(half8, size_t, __local half *);\n" "void __ovld vstore16(half16, size_t, __local half *);\n" "void __ovld vstore2(half2, size_t, __private half *);\n" "void __ovld vstore3(half3, size_t, __private half *);\n" "void __ovld vstore4(half4, size_t, __private half *);\n" "void __ovld vstore8(half8, size_t, __private half *);\n" "void __ovld vstore16(half16, size_t, __private half *);\n" "#endif //cl_khr_fp16\n" "#endif //defined(__opencl_c_named_address_space_builtins)\n" "\n" "/**\n" " * Read sizeof (half) bytes of data from address\n" " * (p + offset). The data read is interpreted as a\n" " * half value. 
The half value is converted to a\n" " * float value and the float value is returned.\n" " * The read address computed as (p + offset)\n" " * must be 16-bit aligned.\n" " */\n" "float __ovld __purefn vload_half(size_t, const __constant half *);\n" "#if defined(__opencl_c_generic_address_space)\n" "float __ovld __purefn vload_half(size_t, const half *);\n" "#endif //defined(__opencl_c_generic_address_space)\n" "\n" "#if defined(__opencl_c_named_address_space_builtins)\n" "float __ovld __purefn vload_half(size_t, const __global half *);\n" "float __ovld __purefn vload_half(size_t, const __local half *);\n" "float __ovld __purefn vload_half(size_t, const __private half *);\n" "#endif //defined(__opencl_c_named_address_space_builtins)\n" "\n" "/**\n" " * Read sizeof (halfn) bytes of data from address\n" " * (p + (offset * n)). The data read is interpreted\n" " * as a halfn value. The halfn value read is\n" " * converted to a floatn value and the floatn\n" " * value is returned. The read address computed\n" " * as (p + (offset * n)) must be 16-bit aligned.\n" " */\n" "float2 __ovld __purefn vload_half2(size_t, const __constant half *);\n" "float3 __ovld __purefn vload_half3(size_t, const __constant half *);\n" "float4 __ovld __purefn vload_half4(size_t, const __constant half *);\n" "float8 __ovld __purefn vload_half8(size_t, const __constant half *);\n" "float16 __ovld __purefn vload_half16(size_t, const __constant half *);\n" "#if defined(__opencl_c_generic_address_space)\n" "float2 __ovld __purefn vload_half2(size_t, const half *);\n" "float3 __ovld __purefn vload_half3(size_t, const half *);\n" "float4 __ovld __purefn vload_half4(size_t, const half *);\n" "float8 __ovld __purefn vload_half8(size_t, const half *);\n" "float16 __ovld __purefn vload_half16(size_t, const half *);\n" "#endif //defined(__opencl_c_generic_address_space)\n" "\n" "#if defined(__opencl_c_named_address_space_builtins)\n" "float2 __ovld __purefn vload_half2(size_t, const __global half *);\n" 
"float3 __ovld __purefn vload_half3(size_t, const __global half *);\n" "float4 __ovld __purefn vload_half4(size_t, const __global half *);\n" "float8 __ovld __purefn vload_half8(size_t, const __global half *);\n" "float16 __ovld __purefn vload_half16(size_t, const __global half *);\n" "float2 __ovld __purefn vload_half2(size_t, const __local half *);\n" "float3 __ovld __purefn vload_half3(size_t, const __local half *);\n" "float4 __ovld __purefn vload_half4(size_t, const __local half *);\n" "float8 __ovld __purefn vload_half8(size_t, const __local half *);\n" "float16 __ovld __purefn vload_half16(size_t, const __local half *);\n" "float2 __ovld __purefn vload_half2(size_t, const __private half *);\n" "float3 __ovld __purefn vload_half3(size_t, const __private half *);\n" "float4 __ovld __purefn vload_half4(size_t, const __private half *);\n" "float8 __ovld __purefn vload_half8(size_t, const __private half *);\n" "float16 __ovld __purefn vload_half16(size_t, const __private half *);\n" "#endif //defined(__opencl_c_named_address_space_builtins)\n" "\n" "/**\n" " * The float value given by data is first\n" " * converted to a half value using the appropriate\n" " * rounding mode. The half value is then written\n" " * to address computed as (p + offset). 
The\n" " * address computed as (p + offset) must be 16-\n" " * bit aligned.\n" " * vstore_half use the current rounding mode.\n" " * The default current rounding mode is round to\n" " * nearest even.\n" " */\n" "#if defined(__opencl_c_generic_address_space)\n" "void __ovld vstore_half(float, size_t, half *);\n" "void __ovld vstore_half_rte(float, size_t, half *);\n" "void __ovld vstore_half_rtz(float, size_t, half *);\n" "void __ovld vstore_half_rtp(float, size_t, half *);\n" "void __ovld vstore_half_rtn(float, size_t, half *);\n" "#ifdef cl_khr_fp64\n" "void __ovld vstore_half(double, size_t, half *);\n" "void __ovld vstore_half_rte(double, size_t, half *);\n" "void __ovld vstore_half_rtz(double, size_t, half *);\n" "void __ovld vstore_half_rtp(double, size_t, half *);\n" "void __ovld vstore_half_rtn(double, size_t, half *);\n" "#endif //cl_khr_fp64\n" "#endif //defined(__opencl_c_generic_address_space)\n" "\n" "#if defined(__opencl_c_named_address_space_builtins)\n" "void __ovld vstore_half(float, size_t, __global half *);\n" "void __ovld vstore_half_rte(float, size_t, __global half *);\n" "void __ovld vstore_half_rtz(float, size_t, __global half *);\n" "void __ovld vstore_half_rtp(float, size_t, __global half *);\n" "void __ovld vstore_half_rtn(float, size_t, __global half *);\n" "void __ovld vstore_half(float, size_t, __local half *);\n" "void __ovld vstore_half_rte(float, size_t, __local half *);\n" "void __ovld vstore_half_rtz(float, size_t, __local half *);\n" "void __ovld vstore_half_rtp(float, size_t, __local half *);\n" "void __ovld vstore_half_rtn(float, size_t, __local half *);\n" "void __ovld vstore_half(float, size_t, __private half *);\n" "void __ovld vstore_half_rte(float, size_t, __private half *);\n" "void __ovld vstore_half_rtz(float, size_t, __private half *);\n" "void __ovld vstore_half_rtp(float, size_t, __private half *);\n" "void __ovld vstore_half_rtn(float, size_t, __private half *);\n" "#ifdef cl_khr_fp64\n" "void __ovld 
vstore_half(double, size_t, __global half *);\n" "void __ovld vstore_half_rte(double, size_t, __global half *);\n" "void __ovld vstore_half_rtz(double, size_t, __global half *);\n" "void __ovld vstore_half_rtp(double, size_t, __global half *);\n" "void __ovld vstore_half_rtn(double, size_t, __global half *);\n" "void __ovld vstore_half(double, size_t, __local half *);\n" "void __ovld vstore_half_rte(double, size_t, __local half *);\n" "void __ovld vstore_half_rtz(double, size_t, __local half *);\n" "void __ovld vstore_half_rtp(double, size_t, __local half *);\n" "void __ovld vstore_half_rtn(double, size_t, __local half *);\n" "void __ovld vstore_half(double, size_t, __private half *);\n" "void __ovld vstore_half_rte(double, size_t, __private half *);\n" "void __ovld vstore_half_rtz(double, size_t, __private half *);\n" "void __ovld vstore_half_rtp(double, size_t, __private half *);\n" "void __ovld vstore_half_rtn(double, size_t, __private half *);\n" "#endif //cl_khr_fp64\n" "#endif //defined(__opencl_c_named_address_space_builtins)\n" "\n" "/**\n" " * The floatn value given by data is converted to\n" " * a halfn value using the appropriate rounding\n" " * mode. The halfn value is then written to\n" " * address computed as (p + (offset * n)). 
The\n" " * address computed as (p + (offset * n)) must be\n" " * 16-bit aligned.\n" " * vstore_halfn uses the current rounding mode.\n" " * The default current rounding mode is round to\n" " * nearest even.\n" " */\n" "#if defined(__opencl_c_generic_address_space)\n" "void __ovld vstore_half2(float2, size_t, half *);\n" "void __ovld vstore_half3(float3, size_t, half *);\n" "void __ovld vstore_half4(float4, size_t, half *);\n" "void __ovld vstore_half8(float8, size_t, half *);\n" "void __ovld vstore_half16(float16, size_t, half *);\n" "void __ovld vstore_half2_rte(float2, size_t, half *);\n" "void __ovld vstore_half3_rte(float3, size_t, half *);\n" "void __ovld vstore_half4_rte(float4, size_t, half *);\n" "void __ovld vstore_half8_rte(float8, size_t, half *);\n" "void __ovld vstore_half16_rte(float16, size_t, half *);\n" "void __ovld vstore_half2_rtz(float2, size_t, half *);\n" "void __ovld vstore_half3_rtz(float3, size_t, half *);\n" "void __ovld vstore_half4_rtz(float4, size_t, half *);\n" "void __ovld vstore_half8_rtz(float8, size_t, half *);\n" "void __ovld vstore_half16_rtz(float16, size_t, half *);\n" "void __ovld vstore_half2_rtp(float2, size_t, half *);\n" "void __ovld vstore_half3_rtp(float3, size_t, half *);\n" "void __ovld vstore_half4_rtp(float4, size_t, half *);\n" "void __ovld vstore_half8_rtp(float8, size_t, half *);\n" "void __ovld vstore_half16_rtp(float16, size_t, half *);\n" "void __ovld vstore_half2_rtn(float2, size_t, half *);\n" "void __ovld vstore_half3_rtn(float3, size_t, half *);\n" "void __ovld vstore_half4_rtn(float4, size_t, half *);\n" "void __ovld vstore_half8_rtn(float8, size_t, half *);\n" "void __ovld vstore_half16_rtn(float16, size_t, half *);\n" "#ifdef cl_khr_fp64\n" "void __ovld vstore_half2(double2, size_t, half *);\n" "void __ovld vstore_half3(double3, size_t, half *);\n" "void __ovld vstore_half4(double4, size_t, half *);\n" "void __ovld vstore_half8(double8, size_t, half *);\n" "void __ovld vstore_half16(double16, size_t, 
half *);\n" "void __ovld vstore_half2_rte(double2, size_t, half *);\n" "void __ovld vstore_half3_rte(double3, size_t, half *);\n" "void __ovld vstore_half4_rte(double4, size_t, half *);\n" "void __ovld vstore_half8_rte(double8, size_t, half *);\n" "void __ovld vstore_half16_rte(double16, size_t, half *);\n" "void __ovld vstore_half2_rtz(double2, size_t, half *);\n" "void __ovld vstore_half3_rtz(double3, size_t, half *);\n" "void __ovld vstore_half4_rtz(double4, size_t, half *);\n" "void __ovld vstore_half8_rtz(double8, size_t, half *);\n" "void __ovld vstore_half16_rtz(double16, size_t, half *);\n" "void __ovld vstore_half2_rtp(double2, size_t, half *);\n" "void __ovld vstore_half3_rtp(double3, size_t, half *);\n" "void __ovld vstore_half4_rtp(double4, size_t, half *);\n" "void __ovld vstore_half8_rtp(double8, size_t, half *);\n" "void __ovld vstore_half16_rtp(double16, size_t, half *);\n" "void __ovld vstore_half2_rtn(double2, size_t, half *);\n" "void __ovld vstore_half3_rtn(double3, size_t, half *);\n" "void __ovld vstore_half4_rtn(double4, size_t, half *);\n" "void __ovld vstore_half8_rtn(double8, size_t, half *);\n" "void __ovld vstore_half16_rtn(double16, size_t, half *);\n" "#endif //cl_khr_fp64\n" "#endif //defined(__opencl_c_generic_address_space)\n" "\n" "#if defined(__opencl_c_named_address_space_builtins)\n" "void __ovld vstore_half2(float2, size_t, __global half *);\n" "void __ovld vstore_half3(float3, size_t, __global half *);\n" "void __ovld vstore_half4(float4, size_t, __global half *);\n" "void __ovld vstore_half8(float8, size_t, __global half *);\n" "void __ovld vstore_half16(float16, size_t, __global half *);\n" "void __ovld vstore_half2_rte(float2, size_t, __global half *);\n" "void __ovld vstore_half3_rte(float3, size_t, __global half *);\n" "void __ovld vstore_half4_rte(float4, size_t, __global half *);\n" "void __ovld vstore_half8_rte(float8, size_t, __global half *);\n" "void __ovld vstore_half16_rte(float16, size_t, __global half *);\n" 
"void __ovld vstore_half2_rtz(float2, size_t, __global half *);\n" "void __ovld vstore_half3_rtz(float3, size_t, __global half *);\n" "void __ovld vstore_half4_rtz(float4, size_t, __global half *);\n" "void __ovld vstore_half8_rtz(float8, size_t, __global half *);\n" "void __ovld vstore_half16_rtz(float16, size_t, __global half *);\n" "void __ovld vstore_half2_rtp(float2, size_t, __global half *);\n" "void __ovld vstore_half3_rtp(float3, size_t, __global half *);\n" "void __ovld vstore_half4_rtp(float4, size_t, __global half *);\n" "void __ovld vstore_half8_rtp(float8, size_t, __global half *);\n" "void __ovld vstore_half16_rtp(float16, size_t, __global half *);\n" "void __ovld vstore_half2_rtn(float2, size_t, __global half *);\n" "void __ovld vstore_half3_rtn(float3, size_t, __global half *);\n" "void __ovld vstore_half4_rtn(float4, size_t, __global half *);\n" "void __ovld vstore_half8_rtn(float8, size_t, __global half *);\n" "void __ovld vstore_half16_rtn(float16, size_t, __global half *);\n" "void __ovld vstore_half2(float2, size_t, __local half *);\n" "void __ovld vstore_half3(float3, size_t, __local half *);\n" "void __ovld vstore_half4(float4, size_t, __local half *);\n" "void __ovld vstore_half8(float8, size_t, __local half *);\n" "void __ovld vstore_half16(float16, size_t, __local half *);\n" "void __ovld vstore_half2_rte(float2, size_t, __local half *);\n" "void __ovld vstore_half3_rte(float3, size_t, __local half *);\n" "void __ovld vstore_half4_rte(float4, size_t, __local half *);\n" "void __ovld vstore_half8_rte(float8, size_t, __local half *);\n" "void __ovld vstore_half16_rte(float16, size_t, __local half *);\n" "void __ovld vstore_half2_rtz(float2, size_t, __local half *);\n" "void __ovld vstore_half3_rtz(float3, size_t, __local half *);\n" "void __ovld vstore_half4_rtz(float4, size_t, __local half *);\n" "void __ovld vstore_half8_rtz(float8, size_t, __local half *);\n" "void __ovld vstore_half16_rtz(float16, size_t, __local half *);\n" "void __ovld 
vstore_half2_rtp(float2, size_t, __local half *);\n" "void __ovld vstore_half3_rtp(float3, size_t, __local half *);\n" "void __ovld vstore_half4_rtp(float4, size_t, __local half *);\n" "void __ovld vstore_half8_rtp(float8, size_t, __local half *);\n" "void __ovld vstore_half16_rtp(float16, size_t, __local half *);\n" "void __ovld vstore_half2_rtn(float2, size_t, __local half *);\n" "void __ovld vstore_half3_rtn(float3, size_t, __local half *);\n" "void __ovld vstore_half4_rtn(float4, size_t, __local half *);\n" "void __ovld vstore_half8_rtn(float8, size_t, __local half *);\n" "void __ovld vstore_half16_rtn(float16, size_t, __local half *);\n" "void __ovld vstore_half2(float2, size_t, __private half *);\n" "void __ovld vstore_half3(float3, size_t, __private half *);\n" "void __ovld vstore_half4(float4, size_t, __private half *);\n" "void __ovld vstore_half8(float8, size_t, __private half *);\n" "void __ovld vstore_half16(float16, size_t, __private half *);\n" "void __ovld vstore_half2_rte(float2, size_t, __private half *);\n" "void __ovld vstore_half3_rte(float3, size_t, __private half *);\n" "void __ovld vstore_half4_rte(float4, size_t, __private half *);\n" "void __ovld vstore_half8_rte(float8, size_t, __private half *);\n" "void __ovld vstore_half16_rte(float16, size_t, __private half *);\n" "void __ovld vstore_half2_rtz(float2, size_t, __private half *);\n" "void __ovld vstore_half3_rtz(float3, size_t, __private half *);\n" "void __ovld vstore_half4_rtz(float4, size_t, __private half *);\n" "void __ovld vstore_half8_rtz(float8, size_t, __private half *);\n" "void __ovld vstore_half16_rtz(float16, size_t, __private half *);\n" "void __ovld vstore_half2_rtp(float2, size_t, __private half *);\n" "void __ovld vstore_half3_rtp(float3, size_t, __private half *);\n" "void __ovld vstore_half4_rtp(float4, size_t, __private half *);\n" "void __ovld vstore_half8_rtp(float8, size_t, __private half *);\n" "void __ovld vstore_half16_rtp(float16, size_t, __private half *);\n" 
"void __ovld vstore_half2_rtn(float2, size_t, __private half *);\n" "void __ovld vstore_half3_rtn(float3, size_t, __private half *);\n" "void __ovld vstore_half4_rtn(float4, size_t, __private half *);\n" "void __ovld vstore_half8_rtn(float8, size_t, __private half *);\n" "void __ovld vstore_half16_rtn(float16, size_t, __private half *);\n" "#ifdef cl_khr_fp64\n" "void __ovld vstore_half2(double2, size_t, __global half *);\n" "void __ovld vstore_half3(double3, size_t, __global half *);\n" "void __ovld vstore_half4(double4, size_t, __global half *);\n" "void __ovld vstore_half8(double8, size_t, __global half *);\n" "void __ovld vstore_half16(double16, size_t, __global half *);\n" "void __ovld vstore_half2_rte(double2, size_t, __global half *);\n" "void __ovld vstore_half3_rte(double3, size_t, __global half *);\n" "void __ovld vstore_half4_rte(double4, size_t, __global half *);\n" "void __ovld vstore_half8_rte(double8, size_t, __global half *);\n" "void __ovld vstore_half16_rte(double16, size_t, __global half *);\n" "void __ovld vstore_half2_rtz(double2, size_t, __global half *);\n" "void __ovld vstore_half3_rtz(double3, size_t, __global half *);\n" "void __ovld vstore_half4_rtz(double4, size_t, __global half *);\n" "void __ovld vstore_half8_rtz(double8, size_t, __global half *);\n" "void __ovld vstore_half16_rtz(double16, size_t, __global half *);\n" "void __ovld vstore_half2_rtp(double2, size_t, __global half *);\n" "void __ovld vstore_half3_rtp(double3, size_t, __global half *);\n" "void __ovld vstore_half4_rtp(double4, size_t, __global half *);\n" "void __ovld vstore_half8_rtp(double8, size_t, __global half *);\n" "void __ovld vstore_half16_rtp(double16, size_t, __global half *);\n" "void __ovld vstore_half2_rtn(double2, size_t, __global half *);\n" "void __ovld vstore_half3_rtn(double3, size_t, __global half *);\n" "void __ovld vstore_half4_rtn(double4, size_t, __global half *);\n" "void __ovld vstore_half8_rtn(double8, size_t, __global half *);\n" "void __ovld 
vstore_half16_rtn(double16, size_t, __global half *);\n" "void __ovld vstore_half2(double2, size_t, __local half *);\n" "void __ovld vstore_half3(double3, size_t, __local half *);\n" "void __ovld vstore_half4(double4, size_t, __local half *);\n" "void __ovld vstore_half8(double8, size_t, __local half *);\n" "void __ovld vstore_half16(double16, size_t, __local half *);\n" "void __ovld vstore_half2_rte(double2, size_t, __local half *);\n" "void __ovld vstore_half3_rte(double3, size_t, __local half *);\n" "void __ovld vstore_half4_rte(double4, size_t, __local half *);\n" "void __ovld vstore_half8_rte(double8, size_t, __local half *);\n" "void __ovld vstore_half16_rte(double16, size_t, __local half *);\n" "void __ovld vstore_half2_rtz(double2, size_t, __local half *);\n" "void __ovld vstore_half3_rtz(double3, size_t, __local half *);\n" "void __ovld vstore_half4_rtz(double4, size_t, __local half *);\n" "void __ovld vstore_half8_rtz(double8, size_t, __local half *);\n" "void __ovld vstore_half16_rtz(double16, size_t, __local half *);\n" "void __ovld vstore_half2_rtp(double2, size_t, __local half *);\n" "void __ovld vstore_half3_rtp(double3, size_t, __local half *);\n" "void __ovld vstore_half4_rtp(double4, size_t, __local half *);\n" "void __ovld vstore_half8_rtp(double8, size_t, __local half *);\n" "void __ovld vstore_half16_rtp(double16, size_t, __local half *);\n" "void __ovld vstore_half2_rtn(double2, size_t, __local half *);\n" "void __ovld vstore_half3_rtn(double3, size_t, __local half *);\n" "void __ovld vstore_half4_rtn(double4, size_t, __local half *);\n" "void __ovld vstore_half8_rtn(double8, size_t, __local half *);\n" "void __ovld vstore_half16_rtn(double16, size_t, __local half *);\n" "void __ovld vstore_half2(double2, size_t, __private half *);\n" "void __ovld vstore_half3(double3, size_t, __private half *);\n" "void __ovld vstore_half4(double4, size_t, __private half *);\n" "void __ovld vstore_half8(double8, size_t, __private half *);\n" "void __ovld 
vstore_half16(double16, size_t, __private half *);\n" "void __ovld vstore_half2_rte(double2, size_t, __private half *);\n" "void __ovld vstore_half3_rte(double3, size_t, __private half *);\n" "void __ovld vstore_half4_rte(double4, size_t, __private half *);\n" "void __ovld vstore_half8_rte(double8, size_t, __private half *);\n" "void __ovld vstore_half16_rte(double16, size_t, __private half *);\n" "void __ovld vstore_half2_rtz(double2, size_t, __private half *);\n" "void __ovld vstore_half3_rtz(double3, size_t, __private half *);\n" "void __ovld vstore_half4_rtz(double4, size_t, __private half *);\n" "void __ovld vstore_half8_rtz(double8, size_t, __private half *);\n" "void __ovld vstore_half16_rtz(double16, size_t, __private half *);\n" "void __ovld vstore_half2_rtp(double2, size_t, __private half *);\n" "void __ovld vstore_half3_rtp(double3, size_t, __private half *);\n" "void __ovld vstore_half4_rtp(double4, size_t, __private half *);\n" "void __ovld vstore_half8_rtp(double8, size_t, __private half *);\n" "void __ovld vstore_half16_rtp(double16, size_t, __private half *);\n" "void __ovld vstore_half2_rtn(double2, size_t, __private half *);\n" "void __ovld vstore_half3_rtn(double3, size_t, __private half *);\n" "void __ovld vstore_half4_rtn(double4, size_t, __private half *);\n" "void __ovld vstore_half8_rtn(double8, size_t, __private half *);\n" "void __ovld vstore_half16_rtn(double16, size_t, __private half *);\n" "#endif //cl_khr_fp64\n" "#endif //defined(__opencl_c_named_address_space_builtins)\n" "\n" "/**\n" " * For n = 1, 2, 4, 8 and 16 read sizeof (halfn)\n" " * bytes of data from address (p + (offset * n)).\n" " * The data read is interpreted as a halfn value.\n" " * The halfn value read is converted to a floatn\n" " * value and the floatn value is returned.\n" " * The address computed as (p + (offset * n))\n" " * must be aligned to sizeof (halfn) bytes.\n" " * For n = 3, vloada_half3 reads a half3 from\n" " * address (p + (offset * 4)) and returns a 
float3.\n" " * The address computed as (p + (offset * 4))\n" " * must be aligned to sizeof (half) * 4 bytes.\n" " */\n" "float2 __ovld __purefn vloada_half2(size_t, const __constant half *);\n" "float3 __ovld __purefn vloada_half3(size_t, const __constant half *);\n" "float4 __ovld __purefn vloada_half4(size_t, const __constant half *);\n" "float8 __ovld __purefn vloada_half8(size_t, const __constant half *);\n" "float16 __ovld __purefn vloada_half16(size_t, const __constant half *);\n" "#if defined(__opencl_c_generic_address_space)\n" "float2 __ovld __purefn vloada_half2(size_t, const half *);\n" "float3 __ovld __purefn vloada_half3(size_t, const half *);\n" "float4 __ovld __purefn vloada_half4(size_t, const half *);\n" "float8 __ovld __purefn vloada_half8(size_t, const half *);\n" "float16 __ovld __purefn vloada_half16(size_t, const half *);\n" "#endif //defined(__opencl_c_generic_address_space)\n" "\n" "#if defined(__opencl_c_named_address_space_builtins)\n" "float2 __ovld __purefn vloada_half2(size_t, const __global half *);\n" "float3 __ovld __purefn vloada_half3(size_t, const __global half *);\n" "float4 __ovld __purefn vloada_half4(size_t, const __global half *);\n" "float8 __ovld __purefn vloada_half8(size_t, const __global half *);\n" "float16 __ovld __purefn vloada_half16(size_t, const __global half *);\n" "float2 __ovld __purefn vloada_half2(size_t, const __local half *);\n" "float3 __ovld __purefn vloada_half3(size_t, const __local half *);\n" "float4 __ovld __purefn vloada_half4(size_t, const __local half *);\n" "float8 __ovld __purefn vloada_half8(size_t, const __local half *);\n" "float16 __ovld __purefn vloada_half16(size_t, const __local half *);\n" "float2 __ovld __purefn vloada_half2(size_t, const __private half *);\n" "float3 __ovld __purefn vloada_half3(size_t, const __private half *);\n" "float4 __ovld __purefn vloada_half4(size_t, const __private half *);\n" "float8 __ovld __purefn vloada_half8(size_t, const __private half *);\n" "float16 
__ovld __purefn vloada_half16(size_t, const __private half *);\n" "#endif //defined(__opencl_c_named_address_space_builtins)\n" "\n" "/**\n" " * The floatn value given by data is converted to\n" " * a halfn value using the appropriate rounding\n" " * mode.\n" " * For n = 1, 2, 4, 8 and 16, the halfn value is\n" " * written to the address computed as (p + (offset\n" " * * n)). The address computed as (p + (offset *\n" " * n)) must be aligned to sizeof (halfn) bytes.\n" " * For n = 3, the half3 value is written to the\n" " * address computed as (p + (offset * 4)). The\n" " * address computed as (p + (offset * 4)) must be\n" " * aligned to sizeof (half) * 4 bytes.\n" " * vstorea_halfn uses the current rounding\n" " * mode. The default current rounding mode is\n" " * round to nearest even.\n" " */\n" "#if defined(__opencl_c_generic_address_space)\n" "void __ovld vstorea_half2(float2, size_t, half *);\n" "void __ovld vstorea_half3(float3, size_t, half *);\n" "void __ovld vstorea_half4(float4, size_t, half *);\n" "void __ovld vstorea_half8(float8, size_t, half *);\n" "void __ovld vstorea_half16(float16, size_t, half *);\n" "\n" "void __ovld vstorea_half2_rte(float2, size_t, half *);\n" "void __ovld vstorea_half3_rte(float3, size_t, half *);\n" "void __ovld vstorea_half4_rte(float4, size_t, half *);\n" "void __ovld vstorea_half8_rte(float8, size_t, half *);\n" "void __ovld vstorea_half16_rte(float16, size_t, half *);\n" "\n" "void __ovld vstorea_half2_rtz(float2, size_t, half *);\n" "void __ovld vstorea_half3_rtz(float3, size_t, half *);\n" "void __ovld vstorea_half4_rtz(float4, size_t, half *);\n" "void __ovld vstorea_half8_rtz(float8, size_t, half *);\n" "void __ovld vstorea_half16_rtz(float16, size_t, half *);\n" "\n" "void __ovld vstorea_half2_rtp(float2, size_t, half *);\n" "void __ovld vstorea_half3_rtp(float3, size_t, half *);\n" "void __ovld vstorea_half4_rtp(float4, size_t, half *);\n" "void __ovld vstorea_half8_rtp(float8, size_t, half *);\n" "void __ovld 
vstorea_half16_rtp(float16, size_t, half *);\n" "\n" "void __ovld vstorea_half2_rtn(float2, size_t, half *);\n" "void __ovld vstorea_half3_rtn(float3, size_t, half *);\n" "void __ovld vstorea_half4_rtn(float4, size_t, half *);\n" "void __ovld vstorea_half8_rtn(float8, size_t, half *);\n" "void __ovld vstorea_half16_rtn(float16, size_t, half *);\n" "\n" "#ifdef cl_khr_fp64\n" "void __ovld vstorea_half2(double2, size_t, half *);\n" "void __ovld vstorea_half3(double3, size_t, half *);\n" "void __ovld vstorea_half4(double4, size_t, half *);\n" "void __ovld vstorea_half8(double8, size_t, half *);\n" "void __ovld vstorea_half16(double16, size_t, half *);\n" "\n" "void __ovld vstorea_half2_rte(double2, size_t, half *);\n" "void __ovld vstorea_half3_rte(double3, size_t, half *);\n" "void __ovld vstorea_half4_rte(double4, size_t, half *);\n" "void __ovld vstorea_half8_rte(double8, size_t, half *);\n" "void __ovld vstorea_half16_rte(double16, size_t, half *);\n" "\n" "void __ovld vstorea_half2_rtz(double2, size_t, half *);\n" "void __ovld vstorea_half3_rtz(double3, size_t, half *);\n" "void __ovld vstorea_half4_rtz(double4, size_t, half *);\n" "void __ovld vstorea_half8_rtz(double8, size_t, half *);\n" "void __ovld vstorea_half16_rtz(double16, size_t, half *);\n" "\n" "void __ovld vstorea_half2_rtp(double2, size_t, half *);\n" "void __ovld vstorea_half3_rtp(double3, size_t, half *);\n" "void __ovld vstorea_half4_rtp(double4, size_t, half *);\n" "void __ovld vstorea_half8_rtp(double8, size_t, half *);\n" "void __ovld vstorea_half16_rtp(double16, size_t, half *);\n" "\n" "void __ovld vstorea_half2_rtn(double2, size_t, half *);\n" "void __ovld vstorea_half3_rtn(double3, size_t, half *);\n" "void __ovld vstorea_half4_rtn(double4, size_t, half *);\n" "void __ovld vstorea_half8_rtn(double8, size_t, half *);\n" "void __ovld vstorea_half16_rtn(double16, size_t, half *);\n" "#endif //cl_khr_fp64\n" "#endif //defined(__opencl_c_generic_address_space)\n" "\n" "#if 
defined(__opencl_c_named_address_space_builtins)\n" "void __ovld vstorea_half2(float2, size_t, __global half *);\n" "void __ovld vstorea_half3(float3, size_t, __global half *);\n" "void __ovld vstorea_half4(float4, size_t, __global half *);\n" "void __ovld vstorea_half8(float8, size_t, __global half *);\n" "void __ovld vstorea_half16(float16, size_t, __global half *);\n" "\n" "void __ovld vstorea_half2_rte(float2, size_t, __global half *);\n" "void __ovld vstorea_half3_rte(float3, size_t, __global half *);\n" "void __ovld vstorea_half4_rte(float4, size_t, __global half *);\n" "void __ovld vstorea_half8_rte(float8, size_t, __global half *);\n" "void __ovld vstorea_half16_rte(float16, size_t, __global half *);\n" "\n" "void __ovld vstorea_half2_rtz(float2, size_t, __global half *);\n" "void __ovld vstorea_half3_rtz(float3, size_t, __global half *);\n" "void __ovld vstorea_half4_rtz(float4, size_t, __global half *);\n" "void __ovld vstorea_half8_rtz(float8, size_t, __global half *);\n" "void __ovld vstorea_half16_rtz(float16, size_t, __global half *);\n" "\n" "void __ovld vstorea_half2_rtp(float2, size_t, __global half *);\n" "void __ovld vstorea_half3_rtp(float3, size_t, __global half *);\n" "void __ovld vstorea_half4_rtp(float4, size_t, __global half *);\n" "void __ovld vstorea_half8_rtp(float8, size_t, __global half *);\n" "void __ovld vstorea_half16_rtp(float16, size_t, __global half *);\n" "\n" "void __ovld vstorea_half2_rtn(float2, size_t, __global half *);\n" "void __ovld vstorea_half3_rtn(float3, size_t, __global half *);\n" "void __ovld vstorea_half4_rtn(float4, size_t, __global half *);\n" "void __ovld vstorea_half8_rtn(float8, size_t, __global half *);\n" "void __ovld vstorea_half16_rtn(float16, size_t, __global half *);\n" "\n" "void __ovld vstorea_half2(float2, size_t, __local half *);\n" "void __ovld vstorea_half3(float3, size_t, __local half *);\n" "void __ovld vstorea_half4(float4, size_t, __local half *);\n" "void __ovld vstorea_half8(float8, size_t, 
__local half *);\n" "void __ovld vstorea_half16(float16, size_t, __local half *);\n" "\n" "void __ovld vstorea_half2_rte(float2, size_t, __local half *);\n" "void __ovld vstorea_half3_rte(float3, size_t, __local half *);\n" "void __ovld vstorea_half4_rte(float4, size_t, __local half *);\n" "void __ovld vstorea_half8_rte(float8, size_t, __local half *);\n" "void __ovld vstorea_half16_rte(float16, size_t, __local half *);\n" "\n" "void __ovld vstorea_half2_rtz(float2, size_t, __local half *);\n" "void __ovld vstorea_half3_rtz(float3, size_t, __local half *);\n" "void __ovld vstorea_half4_rtz(float4, size_t, __local half *);\n" "void __ovld vstorea_half8_rtz(float8, size_t, __local half *);\n" "void __ovld vstorea_half16_rtz(float16, size_t, __local half *);\n" "\n" "void __ovld vstorea_half2_rtp(float2, size_t, __local half *);\n" "void __ovld vstorea_half3_rtp(float3, size_t, __local half *);\n" "void __ovld vstorea_half4_rtp(float4, size_t, __local half *);\n" "void __ovld vstorea_half8_rtp(float8, size_t, __local half *);\n" "void __ovld vstorea_half16_rtp(float16, size_t, __local half *);\n" "\n" "void __ovld vstorea_half2_rtn(float2, size_t, __local half *);\n" "void __ovld vstorea_half3_rtn(float3, size_t, __local half *);\n" "void __ovld vstorea_half4_rtn(float4, size_t, __local half *);\n" "void __ovld vstorea_half8_rtn(float8, size_t, __local half *);\n" "void __ovld vstorea_half16_rtn(float16, size_t, __local half *);\n" "\n" "void __ovld vstorea_half2(float2, size_t, __private half *);\n" "void __ovld vstorea_half3(float3, size_t, __private half *);\n" "void __ovld vstorea_half4(float4, size_t, __private half *);\n" "void __ovld vstorea_half8(float8, size_t, __private half *);\n" "void __ovld vstorea_half16(float16, size_t, __private half *);\n" "\n" "void __ovld vstorea_half2_rte(float2, size_t, __private half *);\n" "void __ovld vstorea_half3_rte(float3, size_t, __private half *);\n" "void __ovld vstorea_half4_rte(float4, size_t, __private half *);\n" 
"void __ovld vstorea_half8_rte(float8, size_t, __private half *);\n" "void __ovld vstorea_half16_rte(float16, size_t, __private half *);\n" "\n" "void __ovld vstorea_half2_rtz(float2, size_t, __private half *);\n" "void __ovld vstorea_half3_rtz(float3, size_t, __private half *);\n" "void __ovld vstorea_half4_rtz(float4, size_t, __private half *);\n" "void __ovld vstorea_half8_rtz(float8, size_t, __private half *);\n" "void __ovld vstorea_half16_rtz(float16, size_t, __private half *);\n" "\n" "void __ovld vstorea_half2_rtp(float2, size_t, __private half *);\n" "void __ovld vstorea_half3_rtp(float3, size_t, __private half *);\n" "void __ovld vstorea_half4_rtp(float4, size_t, __private half *);\n" "void __ovld vstorea_half8_rtp(float8, size_t, __private half *);\n" "void __ovld vstorea_half16_rtp(float16, size_t, __private half *);\n" "\n" "void __ovld vstorea_half2_rtn(float2, size_t, __private half *);\n" "void __ovld vstorea_half3_rtn(float3, size_t, __private half *);\n" "void __ovld vstorea_half4_rtn(float4, size_t, __private half *);\n" "void __ovld vstorea_half8_rtn(float8, size_t, __private half *);\n" "void __ovld vstorea_half16_rtn(float16, size_t, __private half *);\n" "\n" "#ifdef cl_khr_fp64\n" "void __ovld vstorea_half2(double2, size_t, __global half *);\n" "void __ovld vstorea_half3(double3, size_t, __global half *);\n" "void __ovld vstorea_half4(double4, size_t, __global half *);\n" "void __ovld vstorea_half8(double8, size_t, __global half *);\n" "void __ovld vstorea_half16(double16, size_t, __global half *);\n" "\n" "void __ovld vstorea_half2_rte(double2, size_t, __global half *);\n" "void __ovld vstorea_half3_rte(double3, size_t, __global half *);\n" "void __ovld vstorea_half4_rte(double4, size_t, __global half *);\n" "void __ovld vstorea_half8_rte(double8, size_t, __global half *);\n" "void __ovld vstorea_half16_rte(double16, size_t, __global half *);\n" "\n" "void __ovld vstorea_half2_rtz(double2, size_t, __global half *);\n" "void __ovld 
vstorea_half3_rtz(double3, size_t, __global half *);\n" "void __ovld vstorea_half4_rtz(double4, size_t, __global half *);\n" "void __ovld vstorea_half8_rtz(double8, size_t, __global half *);\n" "void __ovld vstorea_half16_rtz(double16, size_t, __global half *);\n" "\n" "void __ovld vstorea_half2_rtp(double2, size_t, __global half *);\n" "void __ovld vstorea_half3_rtp(double3, size_t, __global half *);\n" "void __ovld vstorea_half4_rtp(double4, size_t, __global half *);\n" "void __ovld vstorea_half8_rtp(double8, size_t, __global half *);\n" "void __ovld vstorea_half16_rtp(double16, size_t, __global half *);\n" "\n" "void __ovld vstorea_half2_rtn(double2, size_t, __global half *);\n" "void __ovld vstorea_half3_rtn(double3, size_t, __global half *);\n" "void __ovld vstorea_half4_rtn(double4, size_t, __global half *);\n" "void __ovld vstorea_half8_rtn(double8, size_t, __global half *);\n" "void __ovld vstorea_half16_rtn(double16, size_t, __global half *);\n" "\n" "void __ovld vstorea_half2(double2, size_t, __local half *);\n" "void __ovld vstorea_half3(double3, size_t, __local half *);\n" "void __ovld vstorea_half4(double4, size_t, __local half *);\n" "void __ovld vstorea_half8(double8, size_t, __local half *);\n" "void __ovld vstorea_half16(double16, size_t, __local half *);\n" "\n" "void __ovld vstorea_half2_rte(double2, size_t, __local half *);\n" "void __ovld vstorea_half3_rte(double3, size_t, __local half *);\n" "void __ovld vstorea_half4_rte(double4, size_t, __local half *);\n" "void __ovld vstorea_half8_rte(double8, size_t, __local half *);\n" "void __ovld vstorea_half16_rte(double16, size_t, __local half *);\n" "\n" "void __ovld vstorea_half2_rtz(double2, size_t, __local half *);\n" "void __ovld vstorea_half3_rtz(double3, size_t, __local half *);\n" "void __ovld vstorea_half4_rtz(double4, size_t, __local half *);\n" "void __ovld vstorea_half8_rtz(double8, size_t, __local half *);\n" "void __ovld vstorea_half16_rtz(double16, size_t, __local half *);\n" "\n" 
"void __ovld vstorea_half2_rtp(double2, size_t, __local half *);\n" "void __ovld vstorea_half3_rtp(double3, size_t, __local half *);\n" "void __ovld vstorea_half4_rtp(double4, size_t, __local half *);\n" "void __ovld vstorea_half8_rtp(double8, size_t, __local half *);\n" "void __ovld vstorea_half16_rtp(double16, size_t, __local half *);\n" "\n" "void __ovld vstorea_half2_rtn(double2, size_t, __local half *);\n" "void __ovld vstorea_half3_rtn(double3, size_t, __local half *);\n" "void __ovld vstorea_half4_rtn(double4, size_t, __local half *);\n" "void __ovld vstorea_half8_rtn(double8, size_t, __local half *);\n" "void __ovld vstorea_half16_rtn(double16, size_t, __local half *);\n" "\n" "void __ovld vstorea_half2(double2, size_t, __private half *);\n" "void __ovld vstorea_half3(double3, size_t, __private half *);\n" "void __ovld vstorea_half4(double4, size_t, __private half *);\n" "void __ovld vstorea_half8(double8, size_t, __private half *);\n" "void __ovld vstorea_half16(double16, size_t, __private half *);\n" "\n" "void __ovld vstorea_half2_rte(double2, size_t, __private half *);\n" "void __ovld vstorea_half3_rte(double3, size_t, __private half *);\n" "void __ovld vstorea_half4_rte(double4, size_t, __private half *);\n" "void __ovld vstorea_half8_rte(double8, size_t, __private half *);\n" "void __ovld vstorea_half16_rte(double16, size_t, __private half *);\n" "\n" "void __ovld vstorea_half2_rtz(double2, size_t, __private half *);\n" "void __ovld vstorea_half3_rtz(double3, size_t, __private half *);\n" "void __ovld vstorea_half4_rtz(double4, size_t, __private half *);\n" "void __ovld vstorea_half8_rtz(double8, size_t, __private half *);\n" "void __ovld vstorea_half16_rtz(double16, size_t, __private half *);\n" "\n" "void __ovld vstorea_half2_rtp(double2, size_t, __private half *);\n" "void __ovld vstorea_half3_rtp(double3, size_t, __private half *);\n" "void __ovld vstorea_half4_rtp(double4, size_t, __private half *);\n" "void __ovld vstorea_half8_rtp(double8, 
size_t, __private half *);\n" "void __ovld vstorea_half16_rtp(double16, size_t, __private half *);\n" "\n" "void __ovld vstorea_half2_rtn(double2, size_t, __private half *);\n" "void __ovld vstorea_half3_rtn(double3, size_t, __private half *);\n" "void __ovld vstorea_half4_rtn(double4, size_t, __private half *);\n" "void __ovld vstorea_half8_rtn(double8, size_t, __private half *);\n" "void __ovld vstorea_half16_rtn(double16, size_t, __private half *);\n" "#endif //cl_khr_fp64\n" "#endif //defined(__opencl_c_named_address_space_builtins)\n" "\n" "// OpenCL v1.1 s6.11.8, v1.2 s6.12.8, v2.0 s6.13.8 - Synchronization Functions\n" "\n" "/**\n" " * All work-items in a work-group executing the kernel\n" " * on a processor must execute this function before any\n" " * are allowed to continue execution beyond the barrier.\n" " * This function must be encountered by all work-items in\n" " * a work-group executing the kernel.\n" " * If barrier is inside a conditional statement, then all\n" " * work-items must enter the conditional if any work-item\n" " * enters the conditional statement and executes the\n" " * barrier.\n" " * If barrer is inside a loop, all work-items must execute\n" " * the barrier for each iteration of the loop before any are\n" " * allowed to continue execution beyond the barrier.\n" " * The barrier function also queues a memory fence\n" " * (reads and writes) to ensure correct ordering of\n" " * memory operations to local or global memory.\n" " * The flags argument specifies the memory address space\n" " * and can be set to a combination of the following literal\n" " * values.\n" " * CLK_LOCAL_MEM_FENCE - The barrier function\n" " * will either flush any variables stored in local memory\n" " * or queue a memory fence to ensure correct ordering of\n" " * memory operations to local memory.\n" " * CLK_GLOBAL_MEM_FENCE - The barrier function\n" " * will queue a memory fence to ensure correct ordering\n" " * of memory operations to global memory. 
This can be\n" " * useful when work-items, for example, write to buffer or\n" " * image objects and then want to read the updated data.\n" " */\n" "\n" "void __ovld __conv barrier(cl_mem_fence_flags);\n" "\n" "#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)\n" "void __ovld __conv work_group_barrier(cl_mem_fence_flags, memory_scope);\n" "void __ovld __conv work_group_barrier(cl_mem_fence_flags);\n" "#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)\n" "\n" "// OpenCL v1.1 s6.11.9, v1.2 s6.12.9 - Explicit Memory Fence Functions\n" "\n" "/**\n" " * Orders loads and stores of a work-item\n" " * executing a kernel. This means that loads\n" " * and stores preceding the mem_fence will\n" " * be committed to memory before any loads\n" " * and stores following the mem_fence.\n" " * The flags argument specifies the memory\n" " * address space and can be set to a\n" " * combination of the following literal\n" " * values:\n" " * CLK_LOCAL_MEM_FENCE\n" " * CLK_GLOBAL_MEM_FENCE.\n" " */\n" "void __ovld mem_fence(cl_mem_fence_flags);\n" "\n" "/**\n" " * Read memory barrier that orders only\n" " * loads.\n" " * The flags argument specifies the memory\n" " * address space and can be set to a\n" " * combination of the following literal\n" " * values:\n" " * CLK_LOCAL_MEM_FENCE\n" " * CLK_GLOBAL_MEM_FENCE.\n" " */\n" "void __ovld read_mem_fence(cl_mem_fence_flags);\n" "\n" "/**\n" " * Write memory barrier that orders only\n" " * stores.\n" " * The flags argument specifies the memory\n" " * address space and can be set to a\n" " * combination of the following literal\n" " * values:\n" " * CLK_LOCAL_MEM_FENCE\n" " * CLK_GLOBAL_MEM_FENCE.\n" " */\n" "void __ovld write_mem_fence(cl_mem_fence_flags);\n" "\n" "// OpenCL v2.0 s6.13.9 - Address Space Qualifier Functions\n" "\n" "#if defined(__opencl_c_generic_address_space)\n" "cl_mem_fence_flags __ovld get_fence(const void *ptr);\n" "cl_mem_fence_flags __ovld get_fence(void 
*ptr);\n" "\n" "/**\n" " * Builtin functions to_global, to_local, and to_private need to be declared as Clang builtin functions\n" " * and checked in Sema since they should be declared as\n" " * addr gentype* to_addr (gentype*);\n" " * where gentype is builtin type or user defined type.\n" " */\n" "\n" "#endif //defined(__opencl_c_generic_address_space)\n" "\n" "// OpenCL v1.1 s6.11.10, v1.2 s6.12.10, v2.0 s6.13.10 - Async Copies from Global to Local Memory, Local to Global Memory, and Prefetch\n" "\n" "/**\n" " * event_t async_work_group_copy (\n" " * __global gentype *dst,\n" " * const __local gentype *src,\n" " * size_t num_elements,\n" " * event_t event)\n" " * Perform an async copy of num_elements\n" " * gentype elements from src to dst. The async\n" " * copy is performed by all work-items in a workgroup\n" " * and this built-in function must therefore\n" " * be encountered by all work-items in a workgroup\n" " * executing the kernel with the same\n" " * argument values; otherwise the results are\n" " * undefined.\n" " * Returns an event object that can be used by\n" " * wait_group_events to wait for the async copy\n" " * to finish. 
The event argument can also be used\n" " * to associate the async_work_group_copy with\n" " * a previous async copy allowing an event to be\n" " * shared by multiple async copies; otherwise event\n" " * should be zero.\n" " * If event argument is non-zero, the event object\n" " * supplied in event argument will be returned.\n" " * This function does not perform any implicit\n" " * synchronization of source data such as using a\n" " * barrier before performing the copy.\n" " */\n" "event_t __ovld async_work_group_copy(__local char *, const __global char *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local uchar *, const __global uchar *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local short *, const __global short *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local ushort *, const __global ushort *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local int *, const __global int *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local uint *, const __global uint *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local long *, const __global long *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local ulong *, const __global ulong *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local float *, const __global float *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local char2 *, const __global char2 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local uchar2 *, const __global uchar2 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local short2 *, const __global short2 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local ushort2 *, const __global ushort2 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local int2 *, const __global int2 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local uint2 *, const __global uint2 *, size_t, event_t);\n" "event_t __ovld 
async_work_group_copy(__local long2 *, const __global long2 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local ulong2 *, const __global ulong2 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local float2 *, const __global float2 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local char3 *, const __global char3 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local uchar3 *, const __global uchar3 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local short3 *, const __global short3 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local ushort3 *, const __global ushort3 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local int3 *, const __global int3 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local uint3 *, const __global uint3 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local long3 *, const __global long3 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local ulong3 *, const __global ulong3 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local float3 *, const __global float3 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local char4 *, const __global char4 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local uchar4 *, const __global uchar4 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local short4 *, const __global short4 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local ushort4 *, const __global ushort4 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local int4 *, const __global int4 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local uint4 *, const __global uint4 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local long4 *, const __global long4 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local ulong4 *, const __global ulong4 *, size_t, 
event_t);\n" "event_t __ovld async_work_group_copy(__local float4 *, const __global float4 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local char8 *, const __global char8 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local uchar8 *, const __global uchar8 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local short8 *, const __global short8 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local ushort8 *, const __global ushort8 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local int8 *, const __global int8 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local uint8 *, const __global uint8 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local long8 *, const __global long8 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local ulong8 *, const __global ulong8 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local float8 *, const __global float8 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local char16 *, const __global char16 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local uchar16 *, const __global uchar16 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local short16 *, const __global short16 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local ushort16 *, const __global ushort16 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local int16 *, const __global int16 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local uint16 *, const __global uint16 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local long16 *, const __global long16 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local ulong16 *, const __global ulong16 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local float16 *, const __global float16 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global 
char *, const __local char *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global uchar *, const __local uchar *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global short *, const __local short *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global ushort *, const __local ushort *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global int *, const __local int *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global uint *, const __local uint *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global long *, const __local long *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global ulong *, const __local ulong *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global float *, const __local float *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global char2 *, const __local char2 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global uchar2 *, const __local uchar2 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global short2 *, const __local short2 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global ushort2 *, const __local ushort2 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global int2 *, const __local int2 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global uint2 *, const __local uint2 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global long2 *, const __local long2 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global ulong2 *, const __local ulong2 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global float2 *, const __local float2 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global char3 *, const __local char3 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global uchar3 *, const __local uchar3 *, size_t, event_t);\n" "event_t __ovld 
async_work_group_copy(__global short3 *, const __local short3 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global ushort3 *, const __local ushort3 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global int3 *, const __local int3 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global uint3 *, const __local uint3 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global long3 *, const __local long3 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global ulong3 *, const __local ulong3 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global float3 *, const __local float3 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global char4 *, const __local char4 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global uchar4 *, const __local uchar4 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global short4 *, const __local short4 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global ushort4 *, const __local ushort4 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global int4 *, const __local int4 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global uint4 *, const __local uint4 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global long4 *, const __local long4 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global ulong4 *, const __local ulong4 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global float4 *, const __local float4 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global char8 *, const __local char8 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global uchar8 *, const __local uchar8 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global short8 *, const __local short8 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global ushort8 *, const __local ushort8 *, size_t, 
event_t);\n" "event_t __ovld async_work_group_copy(__global int8 *, const __local int8 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global uint8 *, const __local uint8 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global long8 *, const __local long8 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global ulong8 *, const __local ulong8 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global float8 *, const __local float8 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global char16 *, const __local char16 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global uchar16 *, const __local uchar16 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global short16 *, const __local short16 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global ushort16 *, const __local ushort16 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global int16 *, const __local int16 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global uint16 *, const __local uint16 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global long16 *, const __local long16 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global ulong16 *, const __local ulong16 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global float16 *, const __local float16 *, size_t, event_t);\n" "#ifdef cl_khr_fp64\n" "event_t __ovld async_work_group_copy(__local double *, const __global double *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local double2 *, const __global double2 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local double3 *, const __global double3 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local double4 *, const __global double4 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local double8 *, const __global double8 *, size_t, event_t);\n" "event_t __ovld 
async_work_group_copy(__local double16 *, const __global double16 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global double *, const __local double *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global double2 *, const __local double2 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global double3 *, const __local double3 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global double4 *, const __local double4 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global double8 *, const __local double8 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global double16 *, const __local double16 *, size_t, event_t);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "event_t __ovld async_work_group_copy(__local half *, const __global half *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local half2 *, const __global half2 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local half3 *, const __global half3 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local half4 *, const __global half4 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local half8 *, const __global half8 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__local half16 *, const __global half16 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global half *, const __local half *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global half2 *, const __local half2 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global half3 *, const __local half3 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global half4 *, const __local half4 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global half8 *, const __local half8 *, size_t, event_t);\n" "event_t __ovld async_work_group_copy(__global half16 *, const __local half16 *, size_t, event_t);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" 
" * Perform an async gather of num_elements\n" " * gentype elements from src to dst. The\n" " * src_stride is the stride in elements for each\n" " * gentype element read from src. The dst_stride\n" " * is the stride in elements for each gentype\n" " * element written to dst. The async gather is\n" " * performed by all work-items in a work-group.\n" " * This built-in function must therefore be\n" " * encountered by all work-items in a work-group\n" " * executing the kernel with the same argument\n" " * values; otherwise the results are undefined.\n" " * Returns an event object that can be used by\n" " * wait_group_events to wait for the async copy\n" " * to finish. The event argument can also be used\n" " * to associate the\n" " * async_work_group_strided_copy with a\n" " * previous async copy allowing an event to be\n" " * shared by multiple async copies; otherwise event\n" " * should be zero.\n" " * If event argument is non-zero, the event object\n" " * supplied in event argument will be returned.\n" " * This function does not perform any implicit\n" " * synchronization of source data such as using a\n" " * barrier before performing the copy.\n" " */\n" "event_t __ovld async_work_group_strided_copy(__local char *, const __global char *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local uchar *, const __global uchar *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local short *, const __global short *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local ushort *, const __global ushort *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local int *, const __global int *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local uint *, const __global uint *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local long *, const __global long *, size_t, size_t, event_t);\n" "event_t __ovld 
async_work_group_strided_copy(__local ulong *, const __global ulong *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local float *, const __global float *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local char2 *, const __global char2 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local uchar2 *, const __global uchar2 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local short2 *, const __global short2 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local ushort2 *, const __global ushort2 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local int2 *, const __global int2 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local uint2 *, const __global uint2 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local long2 *, const __global long2 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local ulong2 *, const __global ulong2 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local float2 *, const __global float2 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local char3 *, const __global char3 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local uchar3 *, const __global uchar3 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local short3 *, const __global short3 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local ushort3 *, const __global ushort3 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local int3 *, const __global int3 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local uint3 *, const __global uint3 *, size_t, size_t, event_t);\n" "event_t __ovld 
async_work_group_strided_copy(__local long3 *, const __global long3 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local ulong3 *, const __global ulong3 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local float3 *, const __global float3 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local char4 *, const __global char4 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local uchar4 *, const __global uchar4 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local short4 *, const __global short4 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local ushort4 *, const __global ushort4 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local int4 *, const __global int4 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local uint4 *, const __global uint4 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local long4 *, const __global long4 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local ulong4 *, const __global ulong4 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local float4 *, const __global float4 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local char8 *, const __global char8 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local uchar8 *, const __global uchar8 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local short8 *, const __global short8 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local ushort8 *, const __global ushort8 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local int8 *, const __global int8 *, size_t, size_t, event_t);\n" "event_t __ovld 
async_work_group_strided_copy(__local uint8 *, const __global uint8 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local long8 *, const __global long8 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local ulong8 *, const __global ulong8 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local float8 *, const __global float8 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local char16 *, const __global char16 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local uchar16 *, const __global uchar16 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local short16 *, const __global short16 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local ushort16 *, const __global ushort16 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local int16 *, const __global int16 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local uint16 *, const __global uint16 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local long16 *, const __global long16 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local ulong16 *, const __global ulong16 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local float16 *, const __global float16 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global char *, const __local char *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global uchar *, const __local uchar *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global short *, const __local short *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global ushort *, const __local ushort *, size_t, size_t, event_t);\n" "event_t 
__ovld async_work_group_strided_copy(__global int *, const __local int *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global uint *, const __local uint *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global long *, const __local long *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global ulong *, const __local ulong *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global float *, const __local float *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global char2 *, const __local char2 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global uchar2 *, const __local uchar2 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global short2 *, const __local short2 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global ushort2 *, const __local ushort2 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global int2 *, const __local int2 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global uint2 *, const __local uint2 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global long2 *, const __local long2 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global ulong2 *, const __local ulong2 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global float2 *, const __local float2 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global char3 *, const __local char3 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global uchar3 *, const __local uchar3 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global short3 *, const __local short3 *, size_t, size_t, event_t);\n" "event_t __ovld 
async_work_group_strided_copy(__global ushort3 *, const __local ushort3 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global int3 *, const __local int3 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global uint3 *, const __local uint3 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global long3 *, const __local long3 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global ulong3 *, const __local ulong3 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global float3 *, const __local float3 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global char4 *, const __local char4 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global uchar4 *, const __local uchar4 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global short4 *, const __local short4 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global ushort4 *, const __local ushort4 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global int4 *, const __local int4 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global uint4 *, const __local uint4 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global long4 *, const __local long4 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global ulong4 *, const __local ulong4 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global float4 *, const __local float4 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global char8 *, const __local char8 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global uchar8 *, const __local uchar8 *, size_t, size_t, event_t);\n" "event_t __ovld 
async_work_group_strided_copy(__global short8 *, const __local short8 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global ushort8 *, const __local ushort8 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global int8 *, const __local int8 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global uint8 *, const __local uint8 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global long8 *, const __local long8 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global ulong8 *, const __local ulong8 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global float8 *, const __local float8 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global char16 *, const __local char16 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global uchar16 *, const __local uchar16 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global short16 *, const __local short16 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global ushort16 *, const __local ushort16 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global int16 *, const __local int16 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global uint16 *, const __local uint16 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global long16 *, const __local long16 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global ulong16 *, const __local ulong16 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global float16 *, const __local float16 *, size_t, size_t, event_t);\n" "#ifdef cl_khr_fp64\n" "event_t __ovld async_work_group_strided_copy(__local double *, const __global double *, size_t, 
size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local double2 *, const __global double2 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local double3 *, const __global double3 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local double4 *, const __global double4 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local double8 *, const __global double8 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local double16 *, const __global double16 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global double *, const __local double *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global double2 *, const __local double2 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global double3 *, const __local double3 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global double4 *, const __local double4 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global double8 *, const __local double8 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global double16 *, const __local double16 *, size_t, size_t, event_t);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "event_t __ovld async_work_group_strided_copy(__local half *, const __global half *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local half2 *, const __global half2 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local half3 *, const __global half3 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local half4 *, const __global half4 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__local half8 *, const __global half8 *, size_t, size_t, event_t);\n" "event_t __ovld 
async_work_group_strided_copy(__local half16 *, const __global half16 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global half *, const __local half *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global half2 *, const __local half2 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global half3 *, const __local half3 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global half4 *, const __local half4 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global half8 *, const __local half8 *, size_t, size_t, event_t);\n" "event_t __ovld async_work_group_strided_copy(__global half16 *, const __local half16 *, size_t, size_t, event_t);\n" "#endif //cl_khr_fp16\n" "\n" "/**\n" " * Wait for events that identify the\n" " * async_work_group_copy operations to\n" " * complete. The event objects specified in\n" " * event_list will be released after the wait is\n" " * performed.\n" " * This function must be encountered by all workitems\n" " * in a work-group executing the kernel with\n" " * the same num_events and event objects specified\n" " * in event_list; otherwise the results are undefined.\n" " */\n" "void __ovld wait_group_events(int, event_t *);\n" "\n" "/**\n" " * Prefetch num_elements * sizeof(gentype)\n" " * bytes into the global cache. 
The prefetch\n" " * instruction is applied to a work-item in a workgroup\n" " * and does not affect the functional\n" " * behavior of the kernel.\n" " */\n" "void __ovld prefetch(const __global char *, size_t);\n" "void __ovld prefetch(const __global uchar *, size_t);\n" "void __ovld prefetch(const __global short *, size_t);\n" "void __ovld prefetch(const __global ushort *, size_t);\n" "void __ovld prefetch(const __global int *, size_t);\n" "void __ovld prefetch(const __global uint *, size_t);\n" "void __ovld prefetch(const __global long *, size_t);\n" "void __ovld prefetch(const __global ulong *, size_t);\n" "void __ovld prefetch(const __global float *, size_t);\n" "void __ovld prefetch(const __global char2 *, size_t);\n" "void __ovld prefetch(const __global uchar2 *, size_t);\n" "void __ovld prefetch(const __global short2 *, size_t);\n" "void __ovld prefetch(const __global ushort2 *, size_t);\n" "void __ovld prefetch(const __global int2 *, size_t);\n" "void __ovld prefetch(const __global uint2 *, size_t);\n" "void __ovld prefetch(const __global long2 *, size_t);\n" "void __ovld prefetch(const __global ulong2 *, size_t);\n" "void __ovld prefetch(const __global float2 *, size_t);\n" "void __ovld prefetch(const __global char3 *, size_t);\n" "void __ovld prefetch(const __global uchar3 *, size_t);\n" "void __ovld prefetch(const __global short3 *, size_t);\n" "void __ovld prefetch(const __global ushort3 *, size_t);\n" "void __ovld prefetch(const __global int3 *, size_t);\n" "void __ovld prefetch(const __global uint3 *, size_t);\n" "void __ovld prefetch(const __global long3 *, size_t);\n" "void __ovld prefetch(const __global ulong3 *, size_t);\n" "void __ovld prefetch(const __global float3 *, size_t);\n" "void __ovld prefetch(const __global char4 *, size_t);\n" "void __ovld prefetch(const __global uchar4 *, size_t);\n" "void __ovld prefetch(const __global short4 *, size_t);\n" "void __ovld prefetch(const __global ushort4 *, size_t);\n" "void __ovld prefetch(const 
__global int4 *, size_t);\n" "void __ovld prefetch(const __global uint4 *, size_t);\n" "void __ovld prefetch(const __global long4 *, size_t);\n" "void __ovld prefetch(const __global ulong4 *, size_t);\n" "void __ovld prefetch(const __global float4 *, size_t);\n" "void __ovld prefetch(const __global char8 *, size_t);\n" "void __ovld prefetch(const __global uchar8 *, size_t);\n" "void __ovld prefetch(const __global short8 *, size_t);\n" "void __ovld prefetch(const __global ushort8 *, size_t);\n" "void __ovld prefetch(const __global int8 *, size_t);\n" "void __ovld prefetch(const __global uint8 *, size_t);\n" "void __ovld prefetch(const __global long8 *, size_t);\n" "void __ovld prefetch(const __global ulong8 *, size_t);\n" "void __ovld prefetch(const __global float8 *, size_t);\n" "void __ovld prefetch(const __global char16 *, size_t);\n" "void __ovld prefetch(const __global uchar16 *, size_t);\n" "void __ovld prefetch(const __global short16 *, size_t);\n" "void __ovld prefetch(const __global ushort16 *, size_t);\n" "void __ovld prefetch(const __global int16 *, size_t);\n" "void __ovld prefetch(const __global uint16 *, size_t);\n" "void __ovld prefetch(const __global long16 *, size_t);\n" "void __ovld prefetch(const __global ulong16 *, size_t);\n" "void __ovld prefetch(const __global float16 *, size_t);\n" "#ifdef cl_khr_fp64\n" "void __ovld prefetch(const __global double *, size_t);\n" "void __ovld prefetch(const __global double2 *, size_t);\n" "void __ovld prefetch(const __global double3 *, size_t);\n" "void __ovld prefetch(const __global double4 *, size_t);\n" "void __ovld prefetch(const __global double8 *, size_t);\n" "void __ovld prefetch(const __global double16 *, size_t);\n" "#endif //cl_khr_fp64\n" "#ifdef cl_khr_fp16\n" "void __ovld prefetch(const __global half *, size_t);\n" "void __ovld prefetch(const __global half2 *, size_t);\n" "void __ovld prefetch(const __global half3 *, size_t);\n" "void __ovld prefetch(const __global half4 *, size_t);\n" "void 
__ovld prefetch(const __global half8 *, size_t);\n" "void __ovld prefetch(const __global half16 *, size_t);\n" "#endif // cl_khr_fp16\n" "\n" "// OpenCL v1.1 s6.11.1, v1.2 s6.12.11 - Atomic Functions\n" "\n" "#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable\n" "#pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : enable\n" "#endif\n" "/**\n" " * Read the 32-bit value (referred to as old)\n" " * stored at location pointed by p. Compute\n" " * (old + val) and store result at location\n" " * pointed by p. The function returns old.\n" " */\n" "int __ovld atomic_add(volatile __global int *, int);\n" "uint __ovld atomic_add(volatile __global uint *, uint);\n" "int __ovld atomic_add(volatile __local int *, int);\n" "uint __ovld atomic_add(volatile __local uint *, uint);\n" "#ifdef __OPENCL_CPP_VERSION__\n" "int __ovld atomic_add(volatile int *, int);\n" "uint __ovld atomic_add(volatile uint *, uint);\n" "#endif\n" "\n" "#if defined(cl_khr_global_int32_base_atomics)\n" "int __ovld atom_add(volatile __global int *, int);\n" "uint __ovld atom_add(volatile __global uint *, uint);\n" "#endif\n" "#if defined(cl_khr_local_int32_base_atomics)\n" "int __ovld atom_add(volatile __local int *, int);\n" "uint __ovld atom_add(volatile __local uint *, uint);\n" "#endif\n" "\n" "#if defined(cl_khr_int64_base_atomics)\n" "long __ovld atom_add(volatile __global long *, long);\n" "ulong __ovld atom_add(volatile __global ulong *, ulong);\n" "long __ovld atom_add(volatile __local long *, long);\n" "ulong __ovld atom_add(volatile __local ulong *, ulong);\n" "#endif\n" "\n" "/**\n" " * Read the 32-bit value (referred to as old) stored at location pointed by p.\n" " * Compute (old - val) and store result at location pointed by p. 
The function\n" " * returns old.\n" " */\n" "int __ovld atomic_sub(volatile __global int *, int);\n" "uint __ovld atomic_sub(volatile __global uint *, uint);\n" "int __ovld atomic_sub(volatile __local int *, int);\n" "uint __ovld atomic_sub(volatile __local uint *, uint);\n" "#ifdef __OPENCL_CPP_VERSION__\n" "int __ovld atomic_sub(volatile int *, int);\n" "uint __ovld atomic_sub(volatile uint *, uint);\n" "#endif\n" "\n" "#if defined(cl_khr_global_int32_base_atomics)\n" "int __ovld atom_sub(volatile __global int *, int);\n" "uint __ovld atom_sub(volatile __global uint *, uint);\n" "#endif\n" "#if defined(cl_khr_local_int32_base_atomics)\n" "int __ovld atom_sub(volatile __local int *, int);\n" "uint __ovld atom_sub(volatile __local uint *, uint);\n" "#endif\n" "\n" "#if defined(cl_khr_int64_base_atomics)\n" "long __ovld atom_sub(volatile __global long *, long);\n" "ulong __ovld atom_sub(volatile __global ulong *, ulong);\n" "long __ovld atom_sub(volatile __local long *, long);\n" "ulong __ovld atom_sub(volatile __local ulong *, ulong);\n" "#endif\n" "\n" "/**\n" " * Swaps the old value stored at location p\n" " * with new value given by val. 
Returns old\n" " * value.\n" " */\n" "int __ovld atomic_xchg(volatile __global int *, int);\n" "uint __ovld atomic_xchg(volatile __global uint *, uint);\n" "int __ovld atomic_xchg(volatile __local int *, int);\n" "uint __ovld atomic_xchg(volatile __local uint *, uint);\n" "float __ovld atomic_xchg(volatile __global float *, float);\n" "float __ovld atomic_xchg(volatile __local float *, float);\n" "#ifdef __OPENCL_CPP_VERSION__\n" "int __ovld atomic_xchg(volatile int *, int);\n" "uint __ovld atomic_xchg(volatile uint *, uint);\n" "float __ovld atomic_xchg(volatile float *, float);\n" "#endif\n" "\n" "#if defined(cl_khr_global_int32_base_atomics)\n" "int __ovld atom_xchg(volatile __global int *, int);\n" "uint __ovld atom_xchg(volatile __global uint *, uint);\n" "#endif\n" "#if defined(cl_khr_local_int32_base_atomics)\n" "int __ovld atom_xchg(volatile __local int *, int);\n" "uint __ovld atom_xchg(volatile __local uint *, uint);\n" "#endif\n" "\n" "#if defined(cl_khr_int64_base_atomics)\n" "long __ovld atom_xchg(volatile __global long *, long);\n" "long __ovld atom_xchg(volatile __local long *, long);\n" "ulong __ovld atom_xchg(volatile __global ulong *, ulong);\n" "ulong __ovld atom_xchg(volatile __local ulong *, ulong);\n" "#endif\n" "\n" "/**\n" " * Read the 32-bit value (referred to as old)\n" " * stored at location pointed by p. Compute\n" " * (old + 1) and store result at location\n" " * pointed by p. 
The function returns old.\n" " */\n" "int __ovld atomic_inc(volatile __global int *);\n" "uint __ovld atomic_inc(volatile __global uint *);\n" "int __ovld atomic_inc(volatile __local int *);\n" "uint __ovld atomic_inc(volatile __local uint *);\n" "#ifdef __OPENCL_CPP_VERSION__\n" "int __ovld atomic_inc(volatile int *);\n" "uint __ovld atomic_inc(volatile uint *);\n" "#endif\n" "\n" "#if defined(cl_khr_global_int32_base_atomics)\n" "int __ovld atom_inc(volatile __global int *);\n" "uint __ovld atom_inc(volatile __global uint *);\n" "#endif\n" "#if defined(cl_khr_local_int32_base_atomics)\n" "int __ovld atom_inc(volatile __local int *);\n" "uint __ovld atom_inc(volatile __local uint *);\n" "#endif\n" "\n" "#if defined(cl_khr_int64_base_atomics)\n" "long __ovld atom_inc(volatile __global long *);\n" "ulong __ovld atom_inc(volatile __global ulong *);\n" "long __ovld atom_inc(volatile __local long *);\n" "ulong __ovld atom_inc(volatile __local ulong *);\n" "#endif\n" "\n" "/**\n" " * Read the 32-bit value (referred to as old)\n" " * stored at location pointed by p. Compute\n" " * (old - 1) and store result at location\n" " * pointed by p. 
The function returns old.\n" " */\n" "int __ovld atomic_dec(volatile __global int *);\n" "uint __ovld atomic_dec(volatile __global uint *);\n" "int __ovld atomic_dec(volatile __local int *);\n" "uint __ovld atomic_dec(volatile __local uint *);\n" "#ifdef __OPENCL_CPP_VERSION__\n" "int __ovld atomic_dec(volatile int *);\n" "uint __ovld atomic_dec(volatile uint *);\n" "#endif\n" "\n" "#if defined(cl_khr_global_int32_base_atomics)\n" "int __ovld atom_dec(volatile __global int *);\n" "uint __ovld atom_dec(volatile __global uint *);\n" "#endif\n" "#if defined(cl_khr_local_int32_base_atomics)\n" "int __ovld atom_dec(volatile __local int *);\n" "uint __ovld atom_dec(volatile __local uint *);\n" "#endif\n" "\n" "#if defined(cl_khr_int64_base_atomics)\n" "long __ovld atom_dec(volatile __global long *);\n" "ulong __ovld atom_dec(volatile __global ulong *);\n" "long __ovld atom_dec(volatile __local long *);\n" "ulong __ovld atom_dec(volatile __local ulong *);\n" "#endif\n" "\n" "/**\n" " * Read the 32-bit value (referred to as old)\n" " * stored at location pointed by p. Compute\n" " * (old == cmp) ? val : old and store result at\n" " * location pointed by p. 
The function\n" " * returns old.\n" " */\n" "int __ovld atomic_cmpxchg(volatile __global int *, int, int);\n" "uint __ovld atomic_cmpxchg(volatile __global uint *, uint, uint);\n" "int __ovld atomic_cmpxchg(volatile __local int *, int, int);\n" "uint __ovld atomic_cmpxchg(volatile __local uint *, uint, uint);\n" "#ifdef __OPENCL_CPP_VERSION__\n" "int __ovld atomic_cmpxchg(volatile int *, int, int);\n" "uint __ovld atomic_cmpxchg(volatile uint *, uint, uint);\n" "#endif\n" "\n" "#if defined(cl_khr_global_int32_base_atomics)\n" "int __ovld atom_cmpxchg(volatile __global int *, int, int);\n" "uint __ovld atom_cmpxchg(volatile __global uint *, uint, uint);\n" "#endif\n" "#if defined(cl_khr_local_int32_base_atomics)\n" "int __ovld atom_cmpxchg(volatile __local int *, int, int);\n" "uint __ovld atom_cmpxchg(volatile __local uint *, uint, uint);\n" "#endif\n" "\n" "#if defined(cl_khr_int64_base_atomics)\n" "long __ovld atom_cmpxchg(volatile __global long *, long, long);\n" "ulong __ovld atom_cmpxchg(volatile __global ulong *, ulong, ulong);\n" "long __ovld atom_cmpxchg(volatile __local long *, long, long);\n" "ulong __ovld atom_cmpxchg(volatile __local ulong *, ulong, ulong);\n" "#endif\n" "\n" "/**\n" " * Read the 32-bit value (referred to as old)\n" " * stored at location pointed by p. Compute\n" " * min(old, val) and store minimum value at\n" " * location pointed by p. 
The function\n" " * returns old.\n" " */\n" "int __ovld atomic_min(volatile __global int *, int);\n" "uint __ovld atomic_min(volatile __global uint *, uint);\n" "int __ovld atomic_min(volatile __local int *, int);\n" "uint __ovld atomic_min(volatile __local uint *, uint);\n" "#ifdef __OPENCL_CPP_VERSION__\n" "int __ovld atomic_min(volatile int *, int);\n" "uint __ovld atomic_min(volatile uint *, uint);\n" "#endif\n" "\n" "#if defined(cl_khr_global_int32_extended_atomics)\n" "int __ovld atom_min(volatile __global int *, int);\n" "uint __ovld atom_min(volatile __global uint *, uint);\n" "#endif\n" "#if defined(cl_khr_local_int32_extended_atomics)\n" "int __ovld atom_min(volatile __local int *, int);\n" "uint __ovld atom_min(volatile __local uint *, uint);\n" "#endif\n" "\n" "#if defined(cl_khr_int64_extended_atomics)\n" "long __ovld atom_min(volatile __global long *, long);\n" "ulong __ovld atom_min(volatile __global ulong *, ulong);\n" "long __ovld atom_min(volatile __local long *, long);\n" "ulong __ovld atom_min(volatile __local ulong *, ulong);\n" "#endif\n" "\n" "/**\n" " * Read the 32-bit value (referred to as old)\n" " * stored at location pointed by p. Compute\n" " * max(old, val) and store maximum value at\n" " * location pointed by p. 
The function\n" " * returns old.\n" " */\n" "int __ovld atomic_max(volatile __global int *, int);\n" "uint __ovld atomic_max(volatile __global uint *, uint);\n" "int __ovld atomic_max(volatile __local int *, int);\n" "uint __ovld atomic_max(volatile __local uint *, uint);\n" "#ifdef __OPENCL_CPP_VERSION__\n" "int __ovld atomic_max(volatile int *, int);\n" "uint __ovld atomic_max(volatile uint *, uint);\n" "#endif\n" "\n" "#if defined(cl_khr_global_int32_extended_atomics)\n" "int __ovld atom_max(volatile __global int *, int);\n" "uint __ovld atom_max(volatile __global uint *, uint);\n" "#endif\n" "#if defined(cl_khr_local_int32_extended_atomics)\n" "int __ovld atom_max(volatile __local int *, int);\n" "uint __ovld atom_max(volatile __local uint *, uint);\n" "#endif\n" "\n" "#if defined(cl_khr_int64_extended_atomics)\n" "long __ovld atom_max(volatile __global long *, long);\n" "ulong __ovld atom_max(volatile __global ulong *, ulong);\n" "long __ovld atom_max(volatile __local long *, long);\n" "ulong __ovld atom_max(volatile __local ulong *, ulong);\n" "#endif\n" "\n" "/**\n" " * Read the 32-bit value (referred to as old)\n" " * stored at location pointed by p. Compute\n" " * (old & val) and store result at location\n" " * pointed by p. 
The function returns old.\n" " */\n" "int __ovld atomic_and(volatile __global int *, int);\n" "uint __ovld atomic_and(volatile __global uint *, uint);\n" "int __ovld atomic_and(volatile __local int *, int);\n" "uint __ovld atomic_and(volatile __local uint *, uint);\n" "#ifdef __OPENCL_CPP_VERSION__\n" "int __ovld atomic_and(volatile int *, int);\n" "uint __ovld atomic_and(volatile uint *, uint);\n" "#endif\n" "\n" "#if defined(cl_khr_global_int32_extended_atomics)\n" "int __ovld atom_and(volatile __global int *, int);\n" "uint __ovld atom_and(volatile __global uint *, uint);\n" "#endif\n" "#if defined(cl_khr_local_int32_extended_atomics)\n" "int __ovld atom_and(volatile __local int *, int);\n" "uint __ovld atom_and(volatile __local uint *, uint);\n" "#endif\n" "\n" "#if defined(cl_khr_int64_extended_atomics)\n" "long __ovld atom_and(volatile __global long *, long);\n" "ulong __ovld atom_and(volatile __global ulong *, ulong);\n" "long __ovld atom_and(volatile __local long *, long);\n" "ulong __ovld atom_and(volatile __local ulong *, ulong);\n" "#endif\n" "\n" "/**\n" " * Read the 32-bit value (referred to as old)\n" " * stored at location pointed by p. Compute\n" " * (old | val) and store result at location\n" " * pointed by p. 
The function returns old.\n" " */\n" "int __ovld atomic_or(volatile __global int *, int);\n" "uint __ovld atomic_or(volatile __global uint *, uint);\n" "int __ovld atomic_or(volatile __local int *, int);\n" "uint __ovld atomic_or(volatile __local uint *, uint);\n" "#ifdef __OPENCL_CPP_VERSION__\n" "int __ovld atomic_or(volatile int *, int);\n" "uint __ovld atomic_or(volatile uint *, uint);\n" "#endif\n" "\n" "#if defined(cl_khr_global_int32_extended_atomics)\n" "int __ovld atom_or(volatile __global int *, int);\n" "uint __ovld atom_or(volatile __global uint *, uint);\n" "#endif\n" "#if defined(cl_khr_local_int32_extended_atomics)\n" "int __ovld atom_or(volatile __local int *, int);\n" "uint __ovld atom_or(volatile __local uint *, uint);\n" "#endif\n" "\n" "#if defined(cl_khr_int64_extended_atomics)\n" "long __ovld atom_or(volatile __global long *, long);\n" "ulong __ovld atom_or(volatile __global ulong *, ulong);\n" "long __ovld atom_or(volatile __local long *, long);\n" "ulong __ovld atom_or(volatile __local ulong *, ulong);\n" "#endif\n" "\n" "/**\n" " * Read the 32-bit value (referred to as old)\n" " * stored at location pointed by p. Compute\n" " * (old ^ val) and store result at location\n" " * pointed by p. 
The function returns old.\n" " */\n" "int __ovld atomic_xor(volatile __global int *, int);\n" "uint __ovld atomic_xor(volatile __global uint *, uint);\n" "int __ovld atomic_xor(volatile __local int *, int);\n" "uint __ovld atomic_xor(volatile __local uint *, uint);\n" "#ifdef __OPENCL_CPP_VERSION__\n" "int __ovld atomic_xor(volatile int *, int);\n" "uint __ovld atomic_xor(volatile uint *, uint);\n" "#endif\n" "\n" "#if defined(cl_khr_global_int32_extended_atomics)\n" "int __ovld atom_xor(volatile __global int *, int);\n" "uint __ovld atom_xor(volatile __global uint *, uint);\n" "#endif\n" "#if defined(cl_khr_local_int32_extended_atomics)\n" "int __ovld atom_xor(volatile __local int *, int);\n" "uint __ovld atom_xor(volatile __local uint *, uint);\n" "#endif\n" "\n" "#if defined(cl_khr_int64_extended_atomics)\n" "long __ovld atom_xor(volatile __global long *, long);\n" "ulong __ovld atom_xor(volatile __global ulong *, ulong);\n" "long __ovld atom_xor(volatile __local long *, long);\n" "ulong __ovld atom_xor(volatile __local ulong *, ulong);\n" "#endif\n" "\n" "#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : disable\n" "#pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : disable\n" "#endif\n" "\n" "// OpenCL v2.0 s6.13.11 - Atomics Functions\n" "\n" "#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)\n" "\n" "// double atomics support requires extensions cl_khr_int64_base_atomics and cl_khr_int64_extended_atomics\n" "#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable\n" "#pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : enable\n" "#endif\n" "\n" "// atomic_init()\n" "#if defined(__opencl_c_generic_address_space)\n" "void __ovld atomic_init(volatile atomic_int *, int);\n" "void __ovld atomic_init(volatile atomic_uint *, uint);\n" "void __ovld 
atomic_init(volatile atomic_float *, float);\n" "#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "void __ovld atomic_init(volatile atomic_long *, long);\n" "void __ovld atomic_init(volatile atomic_ulong *, ulong);\n" "#ifdef cl_khr_fp64\n" "void __ovld atomic_init(volatile atomic_double *, double);\n" "#endif //cl_khr_fp64\n" "#endif\n" "#endif //defined(__opencl_c_generic_address_space)\n" "#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)\n" "void __ovld atomic_init(volatile __global atomic_int *, int);\n" "void __ovld atomic_init(volatile __local atomic_int *, int);\n" "void __ovld atomic_init(volatile __global atomic_uint *, uint);\n" "void __ovld atomic_init(volatile __local atomic_uint *, uint);\n" "void __ovld atomic_init(volatile __global atomic_float *, float);\n" "void __ovld atomic_init(volatile __local atomic_float *, float);\n" "#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "void __ovld atomic_init(volatile __global atomic_long *, long);\n" "void __ovld atomic_init(volatile __local atomic_long *, long);\n" "void __ovld atomic_init(volatile __global atomic_ulong *, ulong);\n" "void __ovld atomic_init(volatile __local atomic_ulong *, ulong);\n" "#ifdef cl_khr_fp64\n" "void __ovld atomic_init(volatile __global atomic_double *, double);\n" "void __ovld atomic_init(volatile __local atomic_double *, double);\n" "#endif //cl_khr_fp64\n" "#endif\n" "#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)\n" "\n" "// atomic_work_item_fence()\n" "void __ovld atomic_work_item_fence(cl_mem_fence_flags, memory_order, memory_scope);\n" "\n" "// atomic_fetch()\n" "// OpenCL v2.0 s6.13.11.7.5:\n" "// add/sub: atomic type argument can be uintptr_t/intptr_t, value type argument can be ptrdiff_t.\n" "\n" "#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)\n" "#if 
defined(__opencl_c_generic_address_space)\n" "int __ovld atomic_fetch_add(volatile atomic_int *, int);\n" "uint __ovld atomic_fetch_add(volatile atomic_uint *, uint);\n" "int __ovld atomic_fetch_sub(volatile atomic_int *, int);\n" "uint __ovld atomic_fetch_sub(volatile atomic_uint *, uint);\n" "int __ovld atomic_fetch_or(volatile atomic_int *, int);\n" "uint __ovld atomic_fetch_or(volatile atomic_uint *, uint);\n" "int __ovld atomic_fetch_xor(volatile atomic_int *, int);\n" "uint __ovld atomic_fetch_xor(volatile atomic_uint *, uint);\n" "int __ovld atomic_fetch_and(volatile atomic_int *, int);\n" "uint __ovld atomic_fetch_and(volatile atomic_uint *, uint);\n" "int __ovld atomic_fetch_min(volatile atomic_int *, int);\n" "uint __ovld atomic_fetch_min(volatile atomic_uint *, uint);\n" "int __ovld atomic_fetch_max(volatile atomic_int *, int);\n" "uint __ovld atomic_fetch_max(volatile atomic_uint *, uint);\n" "#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "long __ovld atomic_fetch_add(volatile atomic_long *, long);\n" "ulong __ovld atomic_fetch_add(volatile atomic_ulong *, ulong);\n" "long __ovld atomic_fetch_sub(volatile atomic_long *, long);\n" "ulong __ovld atomic_fetch_sub(volatile atomic_ulong *, ulong);\n" "long __ovld atomic_fetch_or(volatile atomic_long *, long);\n" "ulong __ovld atomic_fetch_or(volatile atomic_ulong *, ulong);\n" "long __ovld atomic_fetch_xor(volatile atomic_long *, long);\n" "ulong __ovld atomic_fetch_xor(volatile atomic_ulong *, ulong);\n" "long __ovld atomic_fetch_and(volatile atomic_long *, long);\n" "ulong __ovld atomic_fetch_and(volatile atomic_ulong *, ulong);\n" "long __ovld atomic_fetch_min(volatile atomic_long *, long);\n" "ulong __ovld atomic_fetch_min(volatile atomic_ulong *, ulong);\n" "long __ovld atomic_fetch_max(volatile atomic_long *, long);\n" "ulong __ovld atomic_fetch_max(volatile atomic_ulong *, ulong);\n" "uintptr_t __ovld atomic_fetch_add(volatile atomic_uintptr_t *, ptrdiff_t);\n" 
"uintptr_t __ovld atomic_fetch_sub(volatile atomic_uintptr_t *, ptrdiff_t);\n" "#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#endif //defined(__opencl_c_generic_address_space)\n" "#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)\n" "int __ovld atomic_fetch_add(volatile __global atomic_int *, int);\n" "int __ovld atomic_fetch_add(volatile __local atomic_int *, int);\n" "uint __ovld atomic_fetch_add(volatile __global atomic_uint *, uint);\n" "uint __ovld atomic_fetch_add(volatile __local atomic_uint *, uint);\n" "int __ovld atomic_fetch_sub(volatile __global atomic_int *, int);\n" "int __ovld atomic_fetch_sub(volatile __local atomic_int *, int);\n" "uint __ovld atomic_fetch_sub(volatile __global atomic_uint *, uint);\n" "uint __ovld atomic_fetch_sub(volatile __local atomic_uint *, uint);\n" "int __ovld atomic_fetch_or(volatile __global atomic_int *, int);\n" "int __ovld atomic_fetch_or(volatile __local atomic_int *, int);\n" "uint __ovld atomic_fetch_or(volatile __global atomic_uint *, uint);\n" "uint __ovld atomic_fetch_or(volatile __local atomic_uint *, uint);\n" "int __ovld atomic_fetch_xor(volatile __global atomic_int *, int);\n" "int __ovld atomic_fetch_xor(volatile __local atomic_int *, int);\n" "uint __ovld atomic_fetch_xor(volatile __global atomic_uint *, uint);\n" "uint __ovld atomic_fetch_xor(volatile __local atomic_uint *, uint);\n" "int __ovld atomic_fetch_and(volatile __global atomic_int *, int);\n" "int __ovld atomic_fetch_and(volatile __local atomic_int *, int);\n" "uint __ovld atomic_fetch_and(volatile __global atomic_uint *, uint);\n" "uint __ovld atomic_fetch_and(volatile __local atomic_uint *, uint);\n" "int __ovld atomic_fetch_min(volatile __global atomic_int *, int);\n" "int __ovld atomic_fetch_min(volatile __local atomic_int *, int);\n" "uint __ovld atomic_fetch_min(volatile __global atomic_uint *, uint);\n" "uint __ovld atomic_fetch_min(volatile __local atomic_uint *, 
uint);\n" "int __ovld atomic_fetch_max(volatile __global atomic_int *, int);\n" "int __ovld atomic_fetch_max(volatile __local atomic_int *, int);\n" "uint __ovld atomic_fetch_max(volatile __global atomic_uint *, uint);\n" "uint __ovld atomic_fetch_max(volatile __local atomic_uint *, uint);\n" "#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "long __ovld atomic_fetch_add(volatile __global atomic_long *, long);\n" "long __ovld atomic_fetch_add(volatile __local atomic_long *, long);\n" "ulong __ovld atomic_fetch_add(volatile __global atomic_ulong *, ulong);\n" "ulong __ovld atomic_fetch_add(volatile __local atomic_ulong *, ulong);\n" "uintptr_t __ovld atomic_fetch_add(volatile __global atomic_uintptr_t *, ptrdiff_t);\n" "uintptr_t __ovld atomic_fetch_add(volatile __local atomic_uintptr_t *, ptrdiff_t);\n" "long __ovld atomic_fetch_sub(volatile __global atomic_long *, long);\n" "long __ovld atomic_fetch_sub(volatile __local atomic_long *, long);\n" "ulong __ovld atomic_fetch_sub(volatile __global atomic_ulong *, ulong);\n" "ulong __ovld atomic_fetch_sub(volatile __local atomic_ulong *, ulong);\n" "uintptr_t __ovld atomic_fetch_sub(volatile __global atomic_uintptr_t *, ptrdiff_t);\n" "uintptr_t __ovld atomic_fetch_sub(volatile __local atomic_uintptr_t *, ptrdiff_t);\n" "long __ovld atomic_fetch_or(volatile __global atomic_long *, long);\n" "long __ovld atomic_fetch_or(volatile __local atomic_long *, long);\n" "ulong __ovld atomic_fetch_or(volatile __global atomic_ulong *, ulong);\n" "ulong __ovld atomic_fetch_or(volatile __local atomic_ulong *, ulong);\n" "uintptr_t __ovld atomic_fetch_or(volatile __global atomic_uintptr_t *, intptr_t);\n" "uintptr_t __ovld atomic_fetch_or(volatile __local atomic_uintptr_t *, intptr_t);\n" "intptr_t __ovld atomic_fetch_or(volatile __global atomic_intptr_t *, uintptr_t);\n" "intptr_t __ovld atomic_fetch_or(volatile __local atomic_intptr_t *, uintptr_t);\n" "long __ovld atomic_fetch_xor(volatile 
__global atomic_long *, long);\n" "long __ovld atomic_fetch_xor(volatile __local atomic_long *, long);\n" "ulong __ovld atomic_fetch_xor(volatile __global atomic_ulong *, ulong);\n" "ulong __ovld atomic_fetch_xor(volatile __local atomic_ulong *, ulong);\n" "uintptr_t __ovld atomic_fetch_xor(volatile __global atomic_uintptr_t *, intptr_t);\n" "uintptr_t __ovld atomic_fetch_xor(volatile __local atomic_uintptr_t *, intptr_t);\n" "intptr_t __ovld atomic_fetch_xor(volatile __global atomic_intptr_t *, uintptr_t);\n" "intptr_t __ovld atomic_fetch_xor(volatile __local atomic_intptr_t *, uintptr_t);\n" "long __ovld atomic_fetch_and(volatile __global atomic_long *, long);\n" "long __ovld atomic_fetch_and(volatile __local atomic_long *, long);\n" "ulong __ovld atomic_fetch_and(volatile __global atomic_ulong *, ulong);\n" "ulong __ovld atomic_fetch_and(volatile __local atomic_ulong *, ulong);\n" "uintptr_t __ovld atomic_fetch_and(volatile __global atomic_uintptr_t *, intptr_t);\n" "uintptr_t __ovld atomic_fetch_and(volatile __local atomic_uintptr_t *, intptr_t);\n" "intptr_t __ovld atomic_fetch_and(volatile __global atomic_intptr_t *, uintptr_t);\n" "intptr_t __ovld atomic_fetch_and(volatile __local atomic_intptr_t *, uintptr_t);\n" "long __ovld atomic_fetch_min(volatile __global atomic_long *, long);\n" "long __ovld atomic_fetch_min(volatile __local atomic_long *, long);\n" "ulong __ovld atomic_fetch_min(volatile __global atomic_ulong *, ulong);\n" "ulong __ovld atomic_fetch_min(volatile __local atomic_ulong *, ulong);\n" "uintptr_t __ovld atomic_fetch_min(volatile __global atomic_uintptr_t *, intptr_t);\n" "uintptr_t __ovld atomic_fetch_min(volatile __local atomic_uintptr_t *, intptr_t);\n" "intptr_t __ovld atomic_fetch_min(volatile __global atomic_intptr_t *, uintptr_t);\n" "intptr_t __ovld atomic_fetch_min(volatile __local atomic_intptr_t *, uintptr_t);\n" "long __ovld atomic_fetch_max(volatile __global atomic_long *, long);\n" "long __ovld atomic_fetch_max(volatile 
__local atomic_long *, long);\n" "ulong __ovld atomic_fetch_max(volatile __global atomic_ulong *, ulong);\n" "ulong __ovld atomic_fetch_max(volatile __local atomic_ulong *, ulong);\n" "uintptr_t __ovld atomic_fetch_max(volatile __global atomic_uintptr_t *, uintptr_t);\n" "uintptr_t __ovld atomic_fetch_max(volatile __local atomic_uintptr_t *, uintptr_t);\n" "#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)\n" "#endif\n" "\n" "#if defined(__opencl_c_atomic_scope_device)\n" "#if defined(__opencl_c_generic_address_space)\n" "int __ovld atomic_fetch_add_explicit(volatile atomic_int *, int, memory_order);\n" "uint __ovld atomic_fetch_add_explicit(volatile atomic_uint *, uint, memory_order);\n" "int __ovld atomic_fetch_sub_explicit(volatile atomic_int *, int, memory_order);\n" "uint __ovld atomic_fetch_sub_explicit(volatile atomic_uint *, uint, memory_order);\n" "int __ovld atomic_fetch_or_explicit(volatile atomic_int *, int, memory_order);\n" "uint __ovld atomic_fetch_or_explicit(volatile atomic_uint *, uint, memory_order);\n" "int __ovld atomic_fetch_xor_explicit(volatile atomic_int *, int, memory_order);\n" "uint __ovld atomic_fetch_xor_explicit(volatile atomic_uint *, uint, memory_order);\n" "int __ovld atomic_fetch_and_explicit(volatile atomic_int *, int, memory_order);\n" "uint __ovld atomic_fetch_and_explicit(volatile atomic_uint *, uint, memory_order);\n" "int __ovld atomic_fetch_min_explicit(volatile atomic_int *, int, memory_order);\n" "uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *, uint, memory_order);\n" "int __ovld atomic_fetch_max_explicit(volatile atomic_int *, int, memory_order);\n" "uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *, uint, memory_order);\n" "#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "long __ovld atomic_fetch_add_explicit(volatile atomic_long *, 
long, memory_order);\n" "ulong __ovld atomic_fetch_add_explicit(volatile atomic_ulong *, ulong, memory_order);\n" "long __ovld atomic_fetch_sub_explicit(volatile atomic_long *, long, memory_order);\n" "ulong __ovld atomic_fetch_sub_explicit(volatile atomic_ulong *, ulong, memory_order);\n" "long __ovld atomic_fetch_or_explicit(volatile atomic_long *, long, memory_order);\n" "ulong __ovld atomic_fetch_or_explicit(volatile atomic_ulong *, ulong, memory_order);\n" "long __ovld atomic_fetch_xor_explicit(volatile atomic_long *, long, memory_order);\n" "ulong __ovld atomic_fetch_xor_explicit(volatile atomic_ulong *, ulong, memory_order);\n" "long __ovld atomic_fetch_and_explicit(volatile atomic_long *, long, memory_order);\n" "ulong __ovld atomic_fetch_and_explicit(volatile atomic_ulong *, ulong, memory_order);\n" "long __ovld atomic_fetch_min_explicit(volatile atomic_long *, long, memory_order);\n" "ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *, ulong, memory_order);\n" "long __ovld atomic_fetch_max_explicit(volatile atomic_long *, long, memory_order);\n" "ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *, ulong, memory_order);\n" "uintptr_t __ovld atomic_fetch_add_explicit(volatile atomic_uintptr_t *, ptrdiff_t, memory_order);\n" "uintptr_t __ovld atomic_fetch_sub_explicit(volatile atomic_uintptr_t *, ptrdiff_t, memory_order);\n" "#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#endif //defined(__opencl_c_generic_address_space)\n" "#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)\n" "int __ovld atomic_fetch_add_explicit(volatile __global atomic_int *, int, memory_order);\n" "int __ovld atomic_fetch_add_explicit(volatile __local atomic_int *, int, memory_order);\n" "uint __ovld atomic_fetch_add_explicit(volatile __global atomic_uint *, uint, memory_order);\n" "uint __ovld atomic_fetch_add_explicit(volatile __local atomic_uint *, uint, memory_order);\n" "int __ovld 
atomic_fetch_sub_explicit(volatile __global atomic_int *, int, memory_order);\n" "int __ovld atomic_fetch_sub_explicit(volatile __local atomic_int *, int, memory_order);\n" "uint __ovld atomic_fetch_sub_explicit(volatile __global atomic_uint *, uint, memory_order);\n" "uint __ovld atomic_fetch_sub_explicit(volatile __local atomic_uint *, uint, memory_order);\n" "int __ovld atomic_fetch_or_explicit(volatile __global atomic_int *, int, memory_order);\n" "int __ovld atomic_fetch_or_explicit(volatile __local atomic_int *, int, memory_order);\n" "uint __ovld atomic_fetch_or_explicit(volatile __global atomic_uint *, uint, memory_order);\n" "uint __ovld atomic_fetch_or_explicit(volatile __local atomic_uint *, uint, memory_order);\n" "int __ovld atomic_fetch_xor_explicit(volatile __global atomic_int *, int, memory_order);\n" "int __ovld atomic_fetch_xor_explicit(volatile __local atomic_int *, int, memory_order);\n" "uint __ovld atomic_fetch_xor_explicit(volatile __global atomic_uint *, uint, memory_order);\n" "uint __ovld atomic_fetch_xor_explicit(volatile __local atomic_uint *, uint, memory_order);\n" "int __ovld atomic_fetch_and_explicit(volatile __global atomic_int *, int, memory_order);\n" "int __ovld atomic_fetch_and_explicit(volatile __local atomic_int *, int, memory_order);\n" "uint __ovld atomic_fetch_and_explicit(volatile __global atomic_uint *, uint, memory_order);\n" "uint __ovld atomic_fetch_and_explicit(volatile __local atomic_uint *, uint, memory_order);\n" "int __ovld atomic_fetch_min_explicit(volatile __global atomic_int *, int, memory_order);\n" "int __ovld atomic_fetch_min_explicit(volatile __local atomic_int *, int, memory_order);\n" "uint __ovld atomic_fetch_min_explicit(volatile __global atomic_uint *, uint, memory_order);\n" "uint __ovld atomic_fetch_min_explicit(volatile __local atomic_uint *, uint, memory_order);\n" "int __ovld atomic_fetch_max_explicit(volatile __global atomic_int *, int, memory_order);\n" "int __ovld 
atomic_fetch_max_explicit(volatile __local atomic_int *, int, memory_order);\n" "uint __ovld atomic_fetch_max_explicit(volatile __global atomic_uint *, uint, memory_order);\n" "uint __ovld atomic_fetch_max_explicit(volatile __local atomic_uint *, uint, memory_order);\n" "#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "long __ovld atomic_fetch_add_explicit(volatile __global atomic_long *, long, memory_order);\n" "long __ovld atomic_fetch_add_explicit(volatile __local atomic_long *, long, memory_order);\n" "ulong __ovld atomic_fetch_add_explicit(volatile __global atomic_ulong *, ulong, memory_order);\n" "ulong __ovld atomic_fetch_add_explicit(volatile __local atomic_ulong *, ulong, memory_order);\n" "uintptr_t __ovld atomic_fetch_add_explicit(volatile __global atomic_uintptr_t *, ptrdiff_t, memory_order);\n" "uintptr_t __ovld atomic_fetch_add_explicit(volatile __local atomic_uintptr_t *, ptrdiff_t, memory_order);\n" "long __ovld atomic_fetch_sub_explicit(volatile __global atomic_long *, long, memory_order);\n" "long __ovld atomic_fetch_sub_explicit(volatile __local atomic_long *, long, memory_order);\n" "ulong __ovld atomic_fetch_sub_explicit(volatile __global atomic_ulong *, ulong, memory_order);\n" "ulong __ovld atomic_fetch_sub_explicit(volatile __local atomic_ulong *, ulong, memory_order);\n" "uintptr_t __ovld atomic_fetch_sub_explicit(volatile __global atomic_uintptr_t *, ptrdiff_t, memory_order);\n" "uintptr_t __ovld atomic_fetch_sub_explicit(volatile __local atomic_uintptr_t *, ptrdiff_t, memory_order);\n" "long __ovld atomic_fetch_or_explicit(volatile __global atomic_long *, long, memory_order);\n" "long __ovld atomic_fetch_or_explicit(volatile __local atomic_long *, long, memory_order);\n" "ulong __ovld atomic_fetch_or_explicit(volatile __global atomic_ulong *, ulong, memory_order);\n" "ulong __ovld atomic_fetch_or_explicit(volatile __local atomic_ulong *, ulong, memory_order);\n" "uintptr_t __ovld 
atomic_fetch_or_explicit(volatile __global atomic_uintptr_t *, intptr_t, memory_order);\n" "uintptr_t __ovld atomic_fetch_or_explicit(volatile __local atomic_uintptr_t *, intptr_t, memory_order);\n" "intptr_t __ovld atomic_fetch_or_explicit(volatile __global atomic_intptr_t *, uintptr_t, memory_order);\n" "intptr_t __ovld atomic_fetch_or_explicit(volatile __local atomic_intptr_t *, uintptr_t, memory_order);\n" "long __ovld atomic_fetch_xor_explicit(volatile __global atomic_long *, long, memory_order);\n" "long __ovld atomic_fetch_xor_explicit(volatile __local atomic_long *, long, memory_order);\n" "ulong __ovld atomic_fetch_xor_explicit(volatile __global atomic_ulong *, ulong, memory_order);\n" "ulong __ovld atomic_fetch_xor_explicit(volatile __local atomic_ulong *, ulong, memory_order);\n" "uintptr_t __ovld atomic_fetch_xor_explicit(volatile __global atomic_uintptr_t *, intptr_t, memory_order);\n" "uintptr_t __ovld atomic_fetch_xor_explicit(volatile __local atomic_uintptr_t *, intptr_t, memory_order);\n" "intptr_t __ovld atomic_fetch_xor_explicit(volatile __global atomic_intptr_t *, uintptr_t, memory_order);\n" "intptr_t __ovld atomic_fetch_xor_explicit(volatile __local atomic_intptr_t *, uintptr_t, memory_order);\n" "long __ovld atomic_fetch_and_explicit(volatile __global atomic_long *, long, memory_order);\n" "long __ovld atomic_fetch_and_explicit(volatile __local atomic_long *, long, memory_order);\n" "ulong __ovld atomic_fetch_and_explicit(volatile __global atomic_ulong *, ulong, memory_order);\n" "ulong __ovld atomic_fetch_and_explicit(volatile __local atomic_ulong *, ulong, memory_order);\n" "uintptr_t __ovld atomic_fetch_and_explicit(volatile __global atomic_uintptr_t *, intptr_t, memory_order);\n" "uintptr_t __ovld atomic_fetch_and_explicit(volatile __local atomic_uintptr_t *, intptr_t, memory_order);\n" "intptr_t __ovld atomic_fetch_and_explicit(volatile __global atomic_intptr_t *, uintptr_t, memory_order);\n" "intptr_t __ovld 
atomic_fetch_and_explicit(volatile __local atomic_intptr_t *, uintptr_t, memory_order);\n" "long __ovld atomic_fetch_min_explicit(volatile __global atomic_long *, long, memory_order);\n" "long __ovld atomic_fetch_min_explicit(volatile __local atomic_long *, long, memory_order);\n" "ulong __ovld atomic_fetch_min_explicit(volatile __global atomic_ulong *, ulong, memory_order);\n" "ulong __ovld atomic_fetch_min_explicit(volatile __local atomic_ulong *, ulong, memory_order);\n" "uintptr_t __ovld atomic_fetch_min_explicit(volatile __global atomic_uintptr_t *, intptr_t, memory_order);\n" "uintptr_t __ovld atomic_fetch_min_explicit(volatile __local atomic_uintptr_t *, intptr_t, memory_order);\n" "intptr_t __ovld atomic_fetch_min_explicit(volatile __global atomic_intptr_t *, uintptr_t, memory_order);\n" "intptr_t __ovld atomic_fetch_min_explicit(volatile __local atomic_intptr_t *, uintptr_t, memory_order);\n" "long __ovld atomic_fetch_max_explicit(volatile __global atomic_long *, long, memory_order);\n" "long __ovld atomic_fetch_max_explicit(volatile __local atomic_long *, long, memory_order);\n" "ulong __ovld atomic_fetch_max_explicit(volatile __global atomic_ulong *, ulong, memory_order);\n" "ulong __ovld atomic_fetch_max_explicit(volatile __local atomic_ulong *, ulong, memory_order);\n" "uintptr_t __ovld atomic_fetch_max_explicit(volatile __global atomic_uintptr_t *, uintptr_t, memory_order);\n" "uintptr_t __ovld atomic_fetch_max_explicit(volatile __local atomic_uintptr_t *, uintptr_t, memory_order);\n" "#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)\n" "#endif\n" "\n" "#if defined(__opencl_c_generic_address_space)\n" "int __ovld atomic_fetch_add_explicit(volatile atomic_int *, int, memory_order, memory_scope);\n" "uint __ovld atomic_fetch_add_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);\n" "int __ovld 
atomic_fetch_sub_explicit(volatile atomic_int *, int, memory_order, memory_scope);\n" "uint __ovld atomic_fetch_sub_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);\n" "int __ovld atomic_fetch_or_explicit(volatile atomic_int *, int, memory_order, memory_scope);\n" "uint __ovld atomic_fetch_or_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);\n" "int __ovld atomic_fetch_xor_explicit(volatile atomic_int *, int, memory_order, memory_scope);\n" "uint __ovld atomic_fetch_xor_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);\n" "int __ovld atomic_fetch_and_explicit(volatile atomic_int *, int, memory_order, memory_scope);\n" "uint __ovld atomic_fetch_and_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);\n" "int __ovld atomic_fetch_min_explicit(volatile atomic_int *, int, memory_order, memory_scope);\n" "uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);\n" "int __ovld atomic_fetch_max_explicit(volatile atomic_int *, int, memory_order, memory_scope);\n" "uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);\n" "#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "long __ovld atomic_fetch_add_explicit(volatile atomic_long *, long, memory_order, memory_scope);\n" "ulong __ovld atomic_fetch_add_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);\n" "long __ovld atomic_fetch_sub_explicit(volatile atomic_long *, long, memory_order, memory_scope);\n" "ulong __ovld atomic_fetch_sub_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);\n" "long __ovld atomic_fetch_or_explicit(volatile atomic_long *, long, memory_order, memory_scope);\n" "ulong __ovld atomic_fetch_or_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);\n" "long __ovld atomic_fetch_xor_explicit(volatile atomic_long *, long, memory_order, memory_scope);\n" "ulong __ovld 
atomic_fetch_xor_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);\n" "long __ovld atomic_fetch_and_explicit(volatile atomic_long *, long, memory_order, memory_scope);\n" "ulong __ovld atomic_fetch_and_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);\n" "long __ovld atomic_fetch_min_explicit(volatile atomic_long *, long, memory_order, memory_scope);\n" "ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);\n" "long __ovld atomic_fetch_max_explicit(volatile atomic_long *, long, memory_order, memory_scope);\n" "ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);\n" "uintptr_t __ovld atomic_fetch_add_explicit(volatile atomic_uintptr_t *, ptrdiff_t, memory_order, memory_scope);\n" "uintptr_t __ovld atomic_fetch_sub_explicit(volatile atomic_uintptr_t *, ptrdiff_t, memory_order, memory_scope);\n" "#endif\n" "#endif //defined(__opencl_c_generic_address_space)\n" "#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)\n" "int __ovld atomic_fetch_add_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);\n" "int __ovld atomic_fetch_add_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);\n" "uint __ovld atomic_fetch_add_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);\n" "uint __ovld atomic_fetch_add_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);\n" "int __ovld atomic_fetch_sub_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);\n" "int __ovld atomic_fetch_sub_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);\n" "uint __ovld atomic_fetch_sub_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);\n" "uint __ovld atomic_fetch_sub_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);\n" "int __ovld atomic_fetch_or_explicit(volatile 
__global atomic_int *, int, memory_order, memory_scope);\n" "int __ovld atomic_fetch_or_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);\n" "uint __ovld atomic_fetch_or_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);\n" "uint __ovld atomic_fetch_or_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);\n" "int __ovld atomic_fetch_xor_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);\n" "int __ovld atomic_fetch_xor_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);\n" "uint __ovld atomic_fetch_xor_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);\n" "uint __ovld atomic_fetch_xor_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);\n" "int __ovld atomic_fetch_and_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);\n" "int __ovld atomic_fetch_and_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);\n" "uint __ovld atomic_fetch_and_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);\n" "uint __ovld atomic_fetch_and_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);\n" "int __ovld atomic_fetch_min_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);\n" "int __ovld atomic_fetch_min_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);\n" "uint __ovld atomic_fetch_min_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);\n" "uint __ovld atomic_fetch_min_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);\n" "int __ovld atomic_fetch_max_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);\n" "int __ovld atomic_fetch_max_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);\n" "uint __ovld atomic_fetch_max_explicit(volatile __global atomic_uint *, uint, memory_order, 
memory_scope);\n" "uint __ovld atomic_fetch_max_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);\n" "#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "long __ovld atomic_fetch_add_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);\n" "long __ovld atomic_fetch_add_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);\n" "uintptr_t __ovld atomic_fetch_add_explicit(volatile __global atomic_uintptr_t *, ptrdiff_t, memory_order, memory_scope);\n" "uintptr_t __ovld atomic_fetch_add_explicit(volatile __local atomic_uintptr_t *, ptrdiff_t, memory_order, memory_scope);\n" "ulong __ovld atomic_fetch_add_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);\n" "ulong __ovld atomic_fetch_add_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);\n" "long __ovld atomic_fetch_sub_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);\n" "long __ovld atomic_fetch_sub_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);\n" "ulong __ovld atomic_fetch_sub_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);\n" "ulong __ovld atomic_fetch_sub_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);\n" "uintptr_t __ovld atomic_fetch_sub_explicit(volatile __global atomic_uintptr_t *, ptrdiff_t, memory_order, memory_scope);\n" "uintptr_t __ovld atomic_fetch_sub_explicit(volatile __local atomic_uintptr_t *, ptrdiff_t, memory_order, memory_scope);\n" "long __ovld atomic_fetch_or_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);\n" "long __ovld atomic_fetch_or_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);\n" "ulong __ovld atomic_fetch_or_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);\n" "ulong __ovld atomic_fetch_or_explicit(volatile __local atomic_ulong *, 
ulong, memory_order, memory_scope);\n" "uintptr_t __ovld atomic_fetch_or_explicit(volatile __global atomic_uintptr_t *, intptr_t, memory_order, memory_scope);\n" "uintptr_t __ovld atomic_fetch_or_explicit(volatile __local atomic_uintptr_t *, intptr_t, memory_order, memory_scope);\n" "intptr_t __ovld atomic_fetch_or_explicit(volatile __global atomic_intptr_t *, uintptr_t, memory_order, memory_scope);\n" "intptr_t __ovld atomic_fetch_or_explicit(volatile __local atomic_intptr_t *, uintptr_t, memory_order, memory_scope);\n" "long __ovld atomic_fetch_xor_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);\n" "long __ovld atomic_fetch_xor_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);\n" "ulong __ovld atomic_fetch_xor_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);\n" "ulong __ovld atomic_fetch_xor_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);\n" "uintptr_t __ovld atomic_fetch_xor_explicit(volatile __global atomic_uintptr_t *, intptr_t, memory_order, memory_scope);\n" "uintptr_t __ovld atomic_fetch_xor_explicit(volatile __local atomic_uintptr_t *, intptr_t, memory_order, memory_scope);\n" "intptr_t __ovld atomic_fetch_xor_explicit(volatile __global atomic_intptr_t *, uintptr_t, memory_order, memory_scope);\n" "intptr_t __ovld atomic_fetch_xor_explicit(volatile __local atomic_intptr_t *, uintptr_t, memory_order, memory_scope);\n" "long __ovld atomic_fetch_and_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);\n" "long __ovld atomic_fetch_and_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);\n" "ulong __ovld atomic_fetch_and_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);\n" "ulong __ovld atomic_fetch_and_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);\n" "uintptr_t __ovld atomic_fetch_and_explicit(volatile __global atomic_uintptr_t *, intptr_t, 
memory_order, memory_scope);\n" "uintptr_t __ovld atomic_fetch_and_explicit(volatile __local atomic_uintptr_t *, intptr_t, memory_order, memory_scope);\n" "intptr_t __ovld atomic_fetch_and_explicit(volatile __global atomic_intptr_t *, uintptr_t, memory_order, memory_scope);\n" "intptr_t __ovld atomic_fetch_and_explicit(volatile __local atomic_intptr_t *, uintptr_t, memory_order, memory_scope);\n" "long __ovld atomic_fetch_min_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);\n" "long __ovld atomic_fetch_min_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);\n" "ulong __ovld atomic_fetch_min_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);\n" "ulong __ovld atomic_fetch_min_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);\n" "uintptr_t __ovld atomic_fetch_min_explicit(volatile __global atomic_uintptr_t *, intptr_t, memory_order, memory_scope);\n" "uintptr_t __ovld atomic_fetch_min_explicit(volatile __local atomic_uintptr_t *, intptr_t, memory_order, memory_scope);\n" "intptr_t __ovld atomic_fetch_min_explicit(volatile __global atomic_intptr_t *, uintptr_t, memory_order, memory_scope);\n" "intptr_t __ovld atomic_fetch_min_explicit(volatile __local atomic_intptr_t *, uintptr_t, memory_order, memory_scope);\n" "long __ovld atomic_fetch_max_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);\n" "long __ovld atomic_fetch_max_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);\n" "ulong __ovld atomic_fetch_max_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);\n" "ulong __ovld atomic_fetch_max_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);\n" "uintptr_t __ovld atomic_fetch_max_explicit(volatile __global atomic_uintptr_t *, uintptr_t, memory_order, memory_scope);\n" "uintptr_t __ovld atomic_fetch_max_explicit(volatile __local atomic_uintptr_t *, uintptr_t, 
memory_order, memory_scope);\n" "#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)\n" "\n" "// The functionality added by cl_ext_float_atomics extension\n" "#if defined(cl_ext_float_atomics)\n" "\n" "#if defined(__opencl_c_ext_fp16_global_atomic_load_store)\n" "void __ovld atomic_store(volatile __global atomic_half *, half);\n" "void __ovld atomic_store_explicit(volatile __global atomic_half *,\n" " half, memory_order);\n" "void __ovld atomic_store_explicit(volatile __global atomic_half *,\n" " half, memory_order, memory_scope);\n" "half __ovld atomic_load(volatile __global atomic_half *);\n" "half __ovld atomic_load_explicit(volatile __global atomic_half *,\n" " memory_order);\n" "half __ovld atomic_load_explicit(volatile __global atomic_half *,\n" " memory_order, memory_scope);\n" "half __ovld atomic_exchange(volatile __global atomic_half *, half);\n" "half __ovld atomic_exchange_explicit(volatile __global atomic_half *,\n" " half, memory_order);\n" "half __ovld atomic_exchange_explicit(volatile __global atomic_half *,\n" " half, memory_order, memory_scope);\n" "#endif // defined(__opencl_c_ext_fp16_global_atomic_load_store)\n" "\n" "#if defined(__opencl_c_ext_fp16_local_atomic_load_store)\n" "void __ovld atomic_store(volatile __local atomic_half *, half);\n" "void __ovld atomic_store_explicit(volatile __local atomic_half *,\n" " half, memory_order);\n" "void __ovld atomic_store_explicit(volatile __local atomic_half *,\n" " half, memory_order, memory_scope);\n" "half __ovld atomic_load(volatile __local atomic_half *);\n" "half __ovld atomic_load_explicit(volatile __local atomic_half *,\n" " memory_order);\n" "half __ovld atomic_load_explicit(volatile __local atomic_half *,\n" " memory_order, memory_scope);\n" "half __ovld atomic_exchange(volatile __local atomic_half *, half);\n" "half __ovld atomic_exchange_explicit(volatile __local 
atomic_half *,\n" " half, memory_order);\n" "half __ovld atomic_exchange_explicit(volatile __local atomic_half *,\n" " half, memory_order, memory_scope);\n" "#endif // defined(__opencl_c_ext_fp16_local_atomic_load_store)\n" "\n" "#if defined(__opencl_c_ext_fp16_global_atomic_load_store) && \\\n" " defined(__opencl_c_ext_fp16_local_atomic_load_store)\n" "void __ovld atomic_store(volatile atomic_half *, half);\n" "void __ovld atomic_store_explicit(volatile atomic_half *, half,\n" " memory_order);\n" "void __ovld atomic_store_explicit(volatile atomic_half *, half,\n" " memory_order, memory_scope);\n" "half __ovld atomic_load(volatile atomic_half *);\n" "half __ovld atomic_load_explicit(volatile atomic_half *,\n" " memory_order);\n" "half __ovld atomic_load_explicit(volatile atomic_half *,\n" " memory_order, memory_scope);\n" "half __ovld atomic_exchange(volatile atomic_half *, half);\n" "half __ovld atomic_exchange_explicit(volatile atomic_half *, half,\n" " memory_order);\n" "half __ovld atomic_exchange_explicit(volatile atomic_half *, half,\n" " memory_order, memory_scope);\n" "#endif // defined(__opencl_c_ext_fp16_global_atomic_load_store) &&\n" " // defined(__opencl_c_ext_fp16_local_atomic_load_store)\n" "\n" "#if defined(__opencl_c_ext_fp16_global_atomic_min_max)\n" "half __ovld atomic_fetch_min(volatile __global atomic_half *, half);\n" "half __ovld atomic_fetch_max(volatile __global atomic_half *, half);\n" "half __ovld atomic_fetch_min_explicit(volatile __global atomic_half *,\n" " half, memory_order);\n" "half __ovld atomic_fetch_max_explicit(volatile __global atomic_half *,\n" " half, memory_order);\n" "half __ovld atomic_fetch_min_explicit(volatile __global atomic_half *,\n" " half, memory_order, memory_scope);\n" "half __ovld atomic_fetch_max_explicit(volatile __global atomic_half *,\n" " half, memory_order, memory_scope);\n" "#endif // defined(__opencl_c_ext_fp16_global_atomic_min_max)\n" "\n" "#if defined(__opencl_c_ext_fp16_local_atomic_min_max)\n" 
"half __ovld atomic_fetch_min(volatile __local atomic_half *, half);\n" "half __ovld atomic_fetch_max(volatile __local atomic_half *, half);\n" "half __ovld atomic_fetch_min_explicit(volatile __local atomic_half *,\n" " half, memory_order);\n" "half __ovld atomic_fetch_max_explicit(volatile __local atomic_half *,\n" " half, memory_order);\n" "half __ovld atomic_fetch_min_explicit(volatile __local atomic_half *,\n" " half, memory_order, memory_scope);\n" "half __ovld atomic_fetch_max_explicit(volatile __local atomic_half *,\n" " half, memory_order, memory_scope);\n" "#endif // defined(__opencl_c_ext_fp16_local_atomic_min_max)\n" "\n" "#if defined(__opencl_c_ext_fp16_global_atomic_min_max) && \\\n" " defined(__opencl_c_ext_fp16_local_atomic_min_max)\n" "half __ovld atomic_fetch_min(volatile atomic_half *, half);\n" "half __ovld atomic_fetch_max(volatile atomic_half *, half);\n" "half __ovld atomic_fetch_min_explicit(volatile atomic_half *,\n" " half, memory_order);\n" "half __ovld atomic_fetch_max_explicit(volatile atomic_half *,\n" " half, memory_order);\n" "half __ovld atomic_fetch_min_explicit(volatile atomic_half *,\n" " half, memory_order, memory_scope);\n" "half __ovld atomic_fetch_max_explicit(volatile atomic_half *,\n" " half, memory_order, memory_scope);\n" "#endif // defined(__opencl_c_ext_fp16_global_atomic_min_max) && \\\n" " defined(__opencl_c_ext_fp16_local_atomic_min_max)\n" "\n" "#if defined(__opencl_c_ext_fp32_global_atomic_min_max)\n" "float __ovld atomic_fetch_min(volatile __global atomic_float *, float);\n" "float __ovld atomic_fetch_max(volatile __global atomic_float *, float);\n" "float __ovld atomic_fetch_min_explicit(volatile __global atomic_float *,\n" " float, memory_order);\n" "float __ovld atomic_fetch_max_explicit(volatile __global atomic_float *,\n" " float, memory_order);\n" "float __ovld atomic_fetch_min_explicit(volatile __global atomic_float *,\n" " float, memory_order, memory_scope);\n" "float __ovld 
atomic_fetch_max_explicit(volatile __global atomic_float *,\n" " float, memory_order, memory_scope);\n" "#endif // defined(__opencl_c_ext_fp32_global_atomic_min_max)\n" "\n" "#if defined(__opencl_c_ext_fp32_local_atomic_min_max)\n" "float __ovld atomic_fetch_min(volatile __local atomic_float *, float);\n" "float __ovld atomic_fetch_max(volatile __local atomic_float *, float);\n" "float __ovld atomic_fetch_min_explicit(volatile __local atomic_float *,\n" " float, memory_order);\n" "float __ovld atomic_fetch_max_explicit(volatile __local atomic_float *,\n" " float, memory_order);\n" "float __ovld atomic_fetch_min_explicit(volatile __local atomic_float *,\n" " float, memory_order, memory_scope);\n" "float __ovld atomic_fetch_max_explicit(volatile __local atomic_float *,\n" " float, memory_order, memory_scope);\n" "#endif // defined(__opencl_c_ext_fp32_local_atomic_min_max)\n" "\n" "#if defined(__opencl_c_ext_fp32_global_atomic_min_max) && \\\n" " defined(__opencl_c_ext_fp32_local_atomic_min_max)\n" "float __ovld atomic_fetch_min(volatile atomic_float *, float);\n" "float __ovld atomic_fetch_max(volatile atomic_float *, float);\n" "float __ovld atomic_fetch_min_explicit(volatile atomic_float *,\n" " float, memory_order);\n" "float __ovld atomic_fetch_max_explicit(volatile atomic_float *,\n" " float, memory_order);\n" "float __ovld atomic_fetch_min_explicit(volatile atomic_float *,\n" " float, memory_order, memory_scope);\n" "float __ovld atomic_fetch_max_explicit(volatile atomic_float *,\n" " float, memory_order, memory_scope);\n" "#endif // defined(__opencl_c_ext_fp32_global_atomic_min_max) && \\\n" " defined(__opencl_c_ext_fp32_local_atomic_min_max)\n" "\n" "#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#if defined(__opencl_c_ext_fp64_global_atomic_min_max)\n" "double __ovld atomic_fetch_min(volatile __global atomic_double *, double);\n" "double __ovld atomic_fetch_max(volatile __global atomic_double *, double);\n" "double 
__ovld atomic_fetch_min_explicit(volatile __global atomic_double *,\n" " double, memory_order);\n" "double __ovld atomic_fetch_max_explicit(volatile __global atomic_double *,\n" " double, memory_order);\n" "double __ovld atomic_fetch_min_explicit(volatile __global atomic_double *,\n" " double, memory_order, memory_scope);\n" "double __ovld atomic_fetch_max_explicit(volatile __global atomic_double *,\n" " double, memory_order, memory_scope);\n" "#endif // defined(__opencl_c_ext_fp64_global_atomic_min_max)\n" "\n" "#if defined(__opencl_c_ext_fp64_local_atomic_min_max)\n" "double __ovld atomic_fetch_min(volatile __local atomic_double *, double);\n" "double __ovld atomic_fetch_max(volatile __local atomic_double *, double);\n" "double __ovld atomic_fetch_min_explicit(volatile __local atomic_double *,\n" " double, memory_order);\n" "double __ovld atomic_fetch_max_explicit(volatile __local atomic_double *,\n" " double, memory_order);\n" "double __ovld atomic_fetch_min_explicit(volatile __local atomic_double *,\n" " double, memory_order, memory_scope);\n" "double __ovld atomic_fetch_max_explicit(volatile __local atomic_double *,\n" " double, memory_order, memory_scope);\n" "#endif // defined(__opencl_c_ext_fp64_local_atomic_min_max)\n" "\n" "#if defined(__opencl_c_ext_fp64_global_atomic_min_max) && \\\n" " defined(__opencl_c_ext_fp64_local_atomic_min_max)\n" "double __ovld atomic_fetch_min(volatile atomic_double *, double);\n" "double __ovld atomic_fetch_max(volatile atomic_double *, double);\n" "double __ovld atomic_fetch_min_explicit(volatile atomic_double *,\n" " double, memory_order);\n" "double __ovld atomic_fetch_max_explicit(volatile atomic_double *,\n" " double, memory_order);\n" "double __ovld atomic_fetch_min_explicit(volatile atomic_double *,\n" " double, memory_order, memory_scope);\n" "double __ovld atomic_fetch_max_explicit(volatile atomic_double *,\n" " double, memory_order, memory_scope);\n" "#endif // defined(__opencl_c_ext_fp64_global_atomic_min_max) && 
\\\n" " defined(__opencl_c_ext_fp64_local_atomic_min_max)\n" "#endif // defined(cl_khr_int64_base_atomics) && \\\n" " defined(cl_khr_int64_extended_atomics)\n" "\n" "#if defined(__opencl_c_ext_fp16_global_atomic_add)\n" "half __ovld atomic_fetch_add(volatile __global atomic_half *, half);\n" "half __ovld atomic_fetch_sub(volatile __global atomic_half *, half);\n" "half __ovld atomic_fetch_add_explicit(volatile __global atomic_half *,\n" " half, memory_order);\n" "half __ovld atomic_fetch_sub_explicit(volatile __global atomic_half *,\n" " half, memory_order);\n" "half __ovld atomic_fetch_add_explicit(volatile __global atomic_half *,\n" " half, memory_order, memory_scope);\n" "half __ovld atomic_fetch_sub_explicit(volatile __global atomic_half *,\n" " half, memory_order, memory_scope);\n" "#endif // defined(__opencl_c_ext_fp16_global_atomic_add)\n" "\n" "#if defined(__opencl_c_ext_fp16_local_atomic_add)\n" "half __ovld atomic_fetch_add(volatile __local atomic_half *, half);\n" "half __ovld atomic_fetch_sub(volatile __local atomic_half *, half);\n" "half __ovld atomic_fetch_add_explicit(volatile __local atomic_half *,\n" " half, memory_order);\n" "half __ovld atomic_fetch_sub_explicit(volatile __local atomic_half *,\n" " half, memory_order);\n" "half __ovld atomic_fetch_add_explicit(volatile __local atomic_half *,\n" " half, memory_order, memory_scope);\n" "half __ovld atomic_fetch_sub_explicit(volatile __local atomic_half *,\n" " half, memory_order, memory_scope);\n" "#endif // defined(__opencl_c_ext_fp16_local_atomic_add)\n" "\n" "#if defined(__opencl_c_ext_fp16_global_atomic_add) && \\\n" " defined(__opencl_c_ext_fp16_local_atomic_add)\n" "half __ovld atomic_fetch_add(volatile atomic_half *, half);\n" "half __ovld atomic_fetch_sub(volatile atomic_half *, half);\n" "half __ovld atomic_fetch_add_explicit(volatile atomic_half *,\n" " half, memory_order);\n" "half __ovld atomic_fetch_sub_explicit(volatile atomic_half *,\n" " half, memory_order);\n" "half __ovld 
atomic_fetch_add_explicit(volatile atomic_half *,\n" " half, memory_order, memory_scope);\n" "half __ovld atomic_fetch_sub_explicit(volatile atomic_half *,\n" " half, memory_order, memory_scope);\n" "#endif // defined(__opencl_c_ext_fp16_global_atomic_add) && \\\n" " defined(__opencl_c_ext_fp16_local_atomic_add)\n" "\n" "#if defined(__opencl_c_ext_fp32_global_atomic_add)\n" "float __ovld atomic_fetch_add(volatile __global atomic_float *, float);\n" "float __ovld atomic_fetch_sub(volatile __global atomic_float *, float);\n" "float __ovld atomic_fetch_add_explicit(volatile __global atomic_float *,\n" " float, memory_order);\n" "float __ovld atomic_fetch_sub_explicit(volatile __global atomic_float *,\n" " float, memory_order);\n" "float __ovld atomic_fetch_add_explicit(volatile __global atomic_float *,\n" " float, memory_order, memory_scope);\n" "float __ovld atomic_fetch_sub_explicit(volatile __global atomic_float *,\n" " float, memory_order, memory_scope);\n" "#endif // defined(__opencl_c_ext_fp32_global_atomic_add)\n" "\n" "#if defined(__opencl_c_ext_fp32_local_atomic_add)\n" "float __ovld atomic_fetch_add(volatile __local atomic_float *, float);\n" "float __ovld atomic_fetch_sub(volatile __local atomic_float *, float);\n" "float __ovld atomic_fetch_add_explicit(volatile __local atomic_float *,\n" " float, memory_order);\n" "float __ovld atomic_fetch_sub_explicit(volatile __local atomic_float *,\n" " float, memory_order);\n" "float __ovld atomic_fetch_add_explicit(volatile __local atomic_float *,\n" " float, memory_order, memory_scope);\n" "float __ovld atomic_fetch_sub_explicit(volatile __local atomic_float *,\n" " float, memory_order, memory_scope);\n" "#endif // defined(__opencl_c_ext_fp32_local_atomic_add)\n" "\n" "#if defined(__opencl_c_ext_fp32_global_atomic_add) && \\\n" " defined(__opencl_c_ext_fp32_local_atomic_add)\n" "float __ovld atomic_fetch_add(volatile atomic_float *, float);\n" "float __ovld atomic_fetch_sub(volatile atomic_float *, float);\n" 
"float __ovld atomic_fetch_add_explicit(volatile atomic_float *,\n" " float, memory_order);\n" "float __ovld atomic_fetch_sub_explicit(volatile atomic_float *,\n" " float, memory_order);\n" "float __ovld atomic_fetch_add_explicit(volatile atomic_float *,\n" " float, memory_order, memory_scope);\n" "float __ovld atomic_fetch_sub_explicit(volatile atomic_float *,\n" " float, memory_order, memory_scope);\n" "#endif // defined(__opencl_c_ext_fp32_global_atomic_add) && \\\n" " defined(__opencl_c_ext_fp32_local_atomic_add)\n" "\n" "#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#if defined(__opencl_c_ext_fp64_global_atomic_add)\n" "double __ovld atomic_fetch_add(volatile __global atomic_double *, double);\n" "double __ovld atomic_fetch_sub(volatile __global atomic_double *, double);\n" "double __ovld atomic_fetch_add_explicit(volatile __global atomic_double *,\n" " double, memory_order);\n" "double __ovld atomic_fetch_sub_explicit(volatile __global atomic_double *,\n" " double, memory_order);\n" "double __ovld atomic_fetch_add_explicit(volatile __global atomic_double *,\n" " double, memory_order, memory_scope);\n" "double __ovld atomic_fetch_sub_explicit(volatile __global atomic_double *,\n" " double, memory_order, memory_scope);\n" "#endif // defined(__opencl_c_ext_fp64_global_atomic_add)\n" "\n" "#if defined(__opencl_c_ext_fp64_local_atomic_add)\n" "double __ovld atomic_fetch_add(volatile __local atomic_double *, double);\n" "double __ovld atomic_fetch_sub(volatile __local atomic_double *, double);\n" "double __ovld atomic_fetch_add_explicit(volatile __local atomic_double *,\n" " double, memory_order);\n" "double __ovld atomic_fetch_sub_explicit(volatile __local atomic_double *,\n" " double, memory_order);\n" "double __ovld atomic_fetch_add_explicit(volatile __local atomic_double *,\n" " double, memory_order, memory_scope);\n" "double __ovld atomic_fetch_sub_explicit(volatile __local atomic_double *,\n" " double, memory_order, 
memory_scope);\n" "#endif // defined(__opencl_c_ext_fp64_local_atomic_add)\n" "\n" "#if defined(__opencl_c_ext_fp64_global_atomic_add) && \\\n" " defined(__opencl_c_ext_fp64_local_atomic_add)\n" "double __ovld atomic_fetch_add(volatile atomic_double *, double);\n" "double __ovld atomic_fetch_sub(volatile atomic_double *, double);\n" "double __ovld atomic_fetch_add_explicit(volatile atomic_double *,\n" " double, memory_order);\n" "double __ovld atomic_fetch_sub_explicit(volatile atomic_double *,\n" " double, memory_order);\n" "double __ovld atomic_fetch_add_explicit(volatile atomic_double *,\n" " double, memory_order, memory_scope);\n" "double __ovld atomic_fetch_sub_explicit(volatile atomic_double *,\n" " double, memory_order, memory_scope);\n" "#endif // defined(__opencl_c_ext_fp64_global_atomic_add) && \\\n" " defined(__opencl_c_ext_fp64_local_atomic_add)\n" "#endif // defined(cl_khr_int64_base_atomics) && \\\n" " defined(cl_khr_int64_extended_atomics)\n" "\n" "#endif // cl_ext_float_atomics\n" "\n" "// atomic_store()\n" "\n" "#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)\n" "#if defined(__opencl_c_generic_address_space)\n" "void __ovld atomic_store(volatile atomic_int *, int);\n" "void __ovld atomic_store(volatile atomic_uint *, uint);\n" "void __ovld atomic_store(volatile atomic_float *, float);\n" "\n" "#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#ifdef cl_khr_fp64\n" "void __ovld atomic_store(volatile atomic_double *, double);\n" "#endif //cl_khr_fp64\n" "void __ovld atomic_store(volatile atomic_long *, long);\n" "void __ovld atomic_store(volatile atomic_ulong *, ulong);\n" "#endif\n" "#endif //defined(__opencl_c_generic_address_space)\n" "#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)\n" "void __ovld atomic_store(volatile __global atomic_int *, int);\n" "void __ovld atomic_store(volatile __local atomic_int *, int);\n" "void __ovld 
atomic_store(volatile __global atomic_uint *, uint);\n" "void __ovld atomic_store(volatile __local atomic_uint *, uint);\n" "void __ovld atomic_store(volatile __global atomic_float *, float);\n" "void __ovld atomic_store(volatile __local atomic_float *, float);\n" "#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#ifdef cl_khr_fp64\n" "void __ovld atomic_store(volatile __global atomic_double *, double);\n" "void __ovld atomic_store(volatile __local atomic_double *, double);\n" "#endif //cl_khr_fp64\n" "void __ovld atomic_store(volatile __global atomic_long *, long);\n" "void __ovld atomic_store(volatile __local atomic_long *, long);\n" "void __ovld atomic_store(volatile __global atomic_ulong *, ulong);\n" "void __ovld atomic_store(volatile __local atomic_ulong *, ulong);\n" "#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)\n" "#endif\n" "\n" "#if defined(__opencl_c_atomic_scope_device)\n" "#if defined(__opencl_c_generic_address_space)\n" "void __ovld atomic_store_explicit(volatile atomic_int *, int, memory_order);\n" "void __ovld atomic_store_explicit(volatile atomic_uint *, uint, memory_order);\n" "void __ovld atomic_store_explicit(volatile atomic_float *, float, memory_order);\n" "#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#ifdef cl_khr_fp64\n" "void __ovld atomic_store_explicit(volatile atomic_double *, double, memory_order);\n" "#endif //cl_khr_fp64\n" "void __ovld atomic_store_explicit(volatile atomic_long *, long, memory_order);\n" "void __ovld atomic_store_explicit(volatile atomic_ulong *, ulong, memory_order);\n" "#endif\n" "#endif //defined(__opencl_c_generic_address_space)\n" "#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)\n" "void __ovld atomic_store_explicit(volatile __global atomic_int *, int, memory_order);\n" "void 
__ovld atomic_store_explicit(volatile __local atomic_int *, int, memory_order);\n" "void __ovld atomic_store_explicit(volatile __global atomic_uint *, uint, memory_order);\n" "void __ovld atomic_store_explicit(volatile __local atomic_uint *, uint, memory_order);\n" "void __ovld atomic_store_explicit(volatile __global atomic_float *, float, memory_order);\n" "void __ovld atomic_store_explicit(volatile __local atomic_float *, float, memory_order);\n" "#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#ifdef cl_khr_fp64\n" "void __ovld atomic_store_explicit(volatile __global atomic_double *, double, memory_order);\n" "void __ovld atomic_store_explicit(volatile __local atomic_double *, double, memory_order);\n" "#endif\n" "void __ovld atomic_store_explicit(volatile __global atomic_long *, long, memory_order);\n" "void __ovld atomic_store_explicit(volatile __local atomic_long *, long, memory_order);\n" "void __ovld atomic_store_explicit(volatile __global atomic_ulong *, ulong, memory_order);\n" "void __ovld atomic_store_explicit(volatile __local atomic_ulong *, ulong, memory_order);\n" "#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)\n" "#endif\n" "\n" "#if defined(__opencl_c_generic_address_space)\n" "void __ovld atomic_store_explicit(volatile atomic_int *, int, memory_order, memory_scope);\n" "void __ovld atomic_store_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);\n" "void __ovld atomic_store_explicit(volatile atomic_float *, float, memory_order, memory_scope);\n" "#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#ifdef cl_khr_fp64\n" "void __ovld atomic_store_explicit(volatile atomic_double *, double, memory_order, memory_scope);\n" "#endif //cl_khr_fp64\n" "void __ovld atomic_store_explicit(volatile atomic_long *, long, memory_order, 
memory_scope);\n" "void __ovld atomic_store_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);\n" "#endif\n" "#endif //defined(__opencl_c_generic_address_space)\n" "#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)\n" "void __ovld atomic_store_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);\n" "void __ovld atomic_store_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);\n" "void __ovld atomic_store_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);\n" "void __ovld atomic_store_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);\n" "void __ovld atomic_store_explicit(volatile __global atomic_float *, float, memory_order, memory_scope);\n" "void __ovld atomic_store_explicit(volatile __local atomic_float *, float, memory_order, memory_scope);\n" "#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#ifdef cl_khr_fp64\n" "void __ovld atomic_store_explicit(volatile __global atomic_double *, double, memory_order, memory_scope);\n" "void __ovld atomic_store_explicit(volatile __local atomic_double *, double, memory_order, memory_scope);\n" "#endif //cl_khr_fp64\n" "void __ovld atomic_store_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);\n" "void __ovld atomic_store_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);\n" "void __ovld atomic_store_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);\n" "void __ovld atomic_store_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);\n" "#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)\n" "\n" "// atomic_load()\n" "#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)\n" "#if 
defined(__opencl_c_generic_address_space)\n" "int __ovld atomic_load(volatile atomic_int *);\n" "uint __ovld atomic_load(volatile atomic_uint *);\n" "float __ovld atomic_load(volatile atomic_float *);\n" "#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#ifdef cl_khr_fp64\n" "double __ovld atomic_load(volatile atomic_double *);\n" "#endif //cl_khr_fp64\n" "long __ovld atomic_load(volatile atomic_long *);\n" "ulong __ovld atomic_load(volatile atomic_ulong *);\n" "#endif\n" "#endif //defined(__opencl_c_generic_address_space)\n" "#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)\n" "int __ovld atomic_load(volatile __global atomic_int *);\n" "int __ovld atomic_load(volatile __local atomic_int *);\n" "uint __ovld atomic_load(volatile __global atomic_uint *);\n" "uint __ovld atomic_load(volatile __local atomic_uint *);\n" "float __ovld atomic_load(volatile __global atomic_float *);\n" "float __ovld atomic_load(volatile __local atomic_float *);\n" "#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#ifdef cl_khr_fp64\n" "double __ovld atomic_load(volatile __global atomic_double *);\n" "double __ovld atomic_load(volatile __local atomic_double *);\n" "#endif //cl_khr_fp64\n" "long __ovld atomic_load(volatile __global atomic_long *);\n" "long __ovld atomic_load(volatile __local atomic_long *);\n" "ulong __ovld atomic_load(volatile __global atomic_ulong *);\n" "ulong __ovld atomic_load(volatile __local atomic_ulong *);\n" "#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)\n" "#endif\n" "\n" "#if defined(__opencl_c_atomic_scope_device)\n" "#if defined(__opencl_c_generic_address_space)\n" "int __ovld atomic_load_explicit(volatile atomic_int *, memory_order);\n" "uint __ovld atomic_load_explicit(volatile atomic_uint *, memory_order);\n" "float __ovld 
atomic_load_explicit(volatile atomic_float *, memory_order);\n" "#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#ifdef cl_khr_fp64\n" "double __ovld atomic_load_explicit(volatile atomic_double *, memory_order);\n" "#endif //cl_khr_fp64\n" "long __ovld atomic_load_explicit(volatile atomic_long *, memory_order);\n" "ulong __ovld atomic_load_explicit(volatile atomic_ulong *, memory_order);\n" "#endif\n" "#endif //defined(__opencl_c_generic_address_space)\n" "#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)\n" "int __ovld atomic_load_explicit(volatile __global atomic_int *, memory_order);\n" "int __ovld atomic_load_explicit(volatile __local atomic_int *, memory_order);\n" "uint __ovld atomic_load_explicit(volatile __global atomic_uint *, memory_order);\n" "uint __ovld atomic_load_explicit(volatile __local atomic_uint *, memory_order);\n" "float __ovld atomic_load_explicit(volatile __global atomic_float *, memory_order);\n" "float __ovld atomic_load_explicit(volatile __local atomic_float *, memory_order);\n" "#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#ifdef cl_khr_fp64\n" "double __ovld atomic_load_explicit(volatile __global atomic_double *, memory_order);\n" "double __ovld atomic_load_explicit(volatile __local atomic_double *, memory_order);\n" "#endif //cl_khr_fp64\n" "long __ovld atomic_load_explicit(volatile __global atomic_long *, memory_order);\n" "long __ovld atomic_load_explicit(volatile __local atomic_long *, memory_order);\n" "ulong __ovld atomic_load_explicit(volatile __global atomic_ulong *, memory_order);\n" "ulong __ovld atomic_load_explicit(volatile __local atomic_ulong *, memory_order);\n" "#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)\n" "#endif\n" "\n" "#if defined(__opencl_c_generic_address_space)\n" "int __ovld 
atomic_load_explicit(volatile atomic_int *, memory_order, memory_scope);\n" "uint __ovld atomic_load_explicit(volatile atomic_uint *, memory_order, memory_scope);\n" "float __ovld atomic_load_explicit(volatile atomic_float *, memory_order, memory_scope);\n" "#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#ifdef cl_khr_fp64\n" "double __ovld atomic_load_explicit(volatile atomic_double *, memory_order, memory_scope);\n" "#endif //cl_khr_fp64\n" "long __ovld atomic_load_explicit(volatile atomic_long *, memory_order, memory_scope);\n" "ulong __ovld atomic_load_explicit(volatile atomic_ulong *, memory_order, memory_scope);\n" "#endif\n" "#endif //defined(__opencl_c_generic_address_space)\n" "#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)\n" "int __ovld atomic_load_explicit(volatile __global atomic_int *, memory_order, memory_scope);\n" "int __ovld atomic_load_explicit(volatile __local atomic_int *, memory_order, memory_scope);\n" "uint __ovld atomic_load_explicit(volatile __global atomic_uint *, memory_order, memory_scope);\n" "uint __ovld atomic_load_explicit(volatile __local atomic_uint *, memory_order, memory_scope);\n" "float __ovld atomic_load_explicit(volatile __global atomic_float *, memory_order, memory_scope);\n" "float __ovld atomic_load_explicit(volatile __local atomic_float *, memory_order, memory_scope);\n" "#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#ifdef cl_khr_fp64\n" "double __ovld atomic_load_explicit(volatile __global atomic_double *, memory_order, memory_scope);\n" "double __ovld atomic_load_explicit(volatile __local atomic_double *, memory_order, memory_scope);\n" "#endif\n" "long __ovld atomic_load_explicit(volatile __global atomic_long *, memory_order, memory_scope);\n" "long __ovld atomic_load_explicit(volatile __local atomic_long *, memory_order, memory_scope);\n" "ulong __ovld atomic_load_explicit(volatile __global atomic_ulong *, 
memory_order, memory_scope);\n" "ulong __ovld atomic_load_explicit(volatile __local atomic_ulong *, memory_order, memory_scope);\n" "#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)\n" "\n" "// atomic_exchange()\n" "\n" "#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)\n" "#if defined(__opencl_c_generic_address_space)\n" "int __ovld atomic_exchange(volatile atomic_int *, int);\n" "uint __ovld atomic_exchange(volatile atomic_uint *, uint);\n" "float __ovld atomic_exchange(volatile atomic_float *, float);\n" "#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#ifdef cl_khr_fp64\n" "double __ovld atomic_exchange(volatile atomic_double *, double);\n" "#endif //cl_khr_fp64\n" "long __ovld atomic_exchange(volatile atomic_long *, long);\n" "ulong __ovld atomic_exchange(volatile atomic_ulong *, ulong);\n" "#endif\n" "#endif //defined(__opencl_c_generic_address_space)\n" "#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)\n" "int __ovld atomic_exchange(volatile __global atomic_int *, int);\n" "int __ovld atomic_exchange(volatile __local atomic_int *, int);\n" "uint __ovld atomic_exchange(volatile __global atomic_uint *, uint);\n" "uint __ovld atomic_exchange(volatile __local atomic_uint *, uint);\n" "float __ovld atomic_exchange(volatile __global atomic_float *, float);\n" "float __ovld atomic_exchange(volatile __local atomic_float *, float);\n" "#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#ifdef cl_khr_fp64\n" "double __ovld atomic_exchange(volatile __global atomic_double *, double);\n" "double __ovld atomic_exchange(volatile __local atomic_double *, double);\n" "#endif //cl_khr_fp64\n" "long __ovld atomic_exchange(volatile __global atomic_long *, long);\n" "long __ovld atomic_exchange(volatile __local 
atomic_long *, long);\n" "ulong __ovld atomic_exchange(volatile __global atomic_ulong *, ulong);\n" "ulong __ovld atomic_exchange(volatile __local atomic_ulong *, ulong);\n" "#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)\n" "#endif\n" "\n" "#if defined(__opencl_c_atomic_scope_device)\n" "#if defined(__opencl_c_generic_address_space)\n" "int __ovld atomic_exchange_explicit(volatile atomic_int *, int, memory_order);\n" "uint __ovld atomic_exchange_explicit(volatile atomic_uint *, uint, memory_order);\n" "float __ovld atomic_exchange_explicit(volatile atomic_float *, float, memory_order);\n" "#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#ifdef cl_khr_fp64\n" "double __ovld atomic_exchange_explicit(volatile atomic_double *, double, memory_order);\n" "#endif //cl_khr_fp64\n" "long __ovld atomic_exchange_explicit(volatile atomic_long *, long, memory_order);\n" "ulong __ovld atomic_exchange_explicit(volatile atomic_ulong *, ulong, memory_order);\n" "#endif\n" "#endif //defined(__opencl_c_generic_address_space)\n" "#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)\n" "int __ovld atomic_exchange_explicit(volatile __global atomic_int *, int, memory_order);\n" "int __ovld atomic_exchange_explicit(volatile __local atomic_int *, int, memory_order);\n" "uint __ovld atomic_exchange_explicit(volatile __global atomic_uint *, uint, memory_order);\n" "uint __ovld atomic_exchange_explicit(volatile __local atomic_uint *, uint, memory_order);\n" "float __ovld atomic_exchange_explicit(volatile __global atomic_float *, float, memory_order);\n" "float __ovld atomic_exchange_explicit(volatile __local atomic_float *, float, memory_order);\n" "#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#ifdef cl_khr_fp64\n" "double __ovld 
atomic_exchange_explicit(volatile __global atomic_double *, double, memory_order);\n" "double __ovld atomic_exchange_explicit(volatile __local atomic_double *, double, memory_order);\n" "#endif //cl_khr_fp64\n" "long __ovld atomic_exchange_explicit(volatile __global atomic_long *, long, memory_order);\n" "long __ovld atomic_exchange_explicit(volatile __local atomic_long *, long, memory_order);\n" "ulong __ovld atomic_exchange_explicit(volatile __global atomic_ulong *, ulong, memory_order);\n" "ulong __ovld atomic_exchange_explicit(volatile __local atomic_ulong *, ulong, memory_order);\n" "#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)\n" "#endif\n" "\n" "#if defined(__opencl_c_generic_address_space)\n" "int __ovld atomic_exchange_explicit(volatile atomic_int *, int, memory_order, memory_scope);\n" "uint __ovld atomic_exchange_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);\n" "float __ovld atomic_exchange_explicit(volatile atomic_float *, float, memory_order, memory_scope);\n" "#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#ifdef cl_khr_fp64\n" "double __ovld atomic_exchange_explicit(volatile atomic_double *, double, memory_order, memory_scope);\n" "#endif //cl_khr_fp64\n" "long __ovld atomic_exchange_explicit(volatile atomic_long *, long, memory_order, memory_scope);\n" "ulong __ovld atomic_exchange_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);\n" "#endif\n" "#endif //defined(__opencl_c_generic_address_space)\n" "#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)\n" "int __ovld atomic_exchange_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);\n" "int __ovld atomic_exchange_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);\n" "uint __ovld atomic_exchange_explicit(volatile 
__global atomic_uint *, uint, memory_order, memory_scope);\n" "uint __ovld atomic_exchange_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);\n" "float __ovld atomic_exchange_explicit(volatile __global atomic_float *, float, memory_order, memory_scope);\n" "float __ovld atomic_exchange_explicit(volatile __local atomic_float *, float, memory_order, memory_scope);\n" "#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#ifdef cl_khr_fp64\n" "double __ovld atomic_exchange_explicit(volatile __global atomic_double *, double, memory_order, memory_scope);\n" "double __ovld atomic_exchange_explicit(volatile __local atomic_double *, double, memory_order, memory_scope);\n" "#endif //cl_khr_fp64\n" "long __ovld atomic_exchange_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);\n" "long __ovld atomic_exchange_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);\n" "ulong __ovld atomic_exchange_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);\n" "ulong __ovld atomic_exchange_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);\n" "#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)\n" "\n" "// atomic_compare_exchange_strong() and atomic_compare_exchange_weak()\n" "#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)\n" "#if defined(__opencl_c_generic_address_space)\n" "bool __ovld atomic_compare_exchange_strong(volatile atomic_int *, int *, int);\n" "bool __ovld atomic_compare_exchange_strong(volatile atomic_uint *, uint *, uint);\n" "bool __ovld atomic_compare_exchange_weak(volatile atomic_int *, int *, int);\n" "bool __ovld atomic_compare_exchange_weak(volatile atomic_uint *, uint *, uint);\n" "bool __ovld atomic_compare_exchange_strong(volatile atomic_float 
*, float *, float);\n" "bool __ovld atomic_compare_exchange_weak(volatile atomic_float *, float *, float);\n" "#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#ifdef cl_khr_fp64\n" "bool __ovld atomic_compare_exchange_strong(volatile atomic_double *, double *, double);\n" "bool __ovld atomic_compare_exchange_weak(volatile atomic_double *, double *, double);\n" "#endif //cl_khr_fp64\n" "bool __ovld atomic_compare_exchange_strong(volatile atomic_long *, long *, long);\n" "bool __ovld atomic_compare_exchange_weak(volatile atomic_long *, long *, long);\n" "bool __ovld atomic_compare_exchange_strong(volatile atomic_ulong *, ulong *, ulong);\n" "bool __ovld atomic_compare_exchange_weak(volatile atomic_ulong *, ulong *, ulong);\n" "#endif\n" "#endif //defined(__opencl_c_generic_address_space)\n" "#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)\n" "bool __ovld atomic_compare_exchange_strong(volatile __global atomic_int *, __global int *, int);\n" "bool __ovld atomic_compare_exchange_strong(volatile __global atomic_int *, __local int *, int);\n" "bool __ovld atomic_compare_exchange_strong(volatile __global atomic_int *, __private int *, int);\n" "bool __ovld atomic_compare_exchange_strong(volatile __local atomic_int *, __global int *, int);\n" "bool __ovld atomic_compare_exchange_strong(volatile __local atomic_int *, __local int *, int);\n" "bool __ovld atomic_compare_exchange_strong(volatile __local atomic_int *, __private int *, int);\n" "bool __ovld atomic_compare_exchange_strong(volatile __global atomic_uint *, __global uint *, uint);\n" "bool __ovld atomic_compare_exchange_strong(volatile __global atomic_uint *, __local uint *, uint);\n" "bool __ovld atomic_compare_exchange_strong(volatile __global atomic_uint *, __private uint *, uint);\n" "bool __ovld atomic_compare_exchange_strong(volatile __local atomic_uint *, __global uint *, uint);\n" "bool __ovld atomic_compare_exchange_strong(volatile 
__local atomic_uint *, __local uint *, uint);\n" "bool __ovld atomic_compare_exchange_strong(volatile __local atomic_uint *, __private uint *, uint);\n" "bool __ovld atomic_compare_exchange_strong(volatile __global atomic_float *, __global float *, float);\n" "bool __ovld atomic_compare_exchange_strong(volatile __global atomic_float *, __local float *, float);\n" "bool __ovld atomic_compare_exchange_strong(volatile __global atomic_float *, __private float *, float);\n" "bool __ovld atomic_compare_exchange_strong(volatile __local atomic_float *, __global float *, float);\n" "bool __ovld atomic_compare_exchange_strong(volatile __local atomic_float *, __local float *, float);\n" "bool __ovld atomic_compare_exchange_strong(volatile __local atomic_float *, __private float *, float);\n" "bool __ovld atomic_compare_exchange_weak(volatile __global atomic_int *, __global int *, int);\n" "bool __ovld atomic_compare_exchange_weak(volatile __global atomic_int *, __local int *, int);\n" "bool __ovld atomic_compare_exchange_weak(volatile __global atomic_int *, __private int *, int);\n" "bool __ovld atomic_compare_exchange_weak(volatile __local atomic_int *, __global int *, int);\n" "bool __ovld atomic_compare_exchange_weak(volatile __local atomic_int *, __local int *, int);\n" "bool __ovld atomic_compare_exchange_weak(volatile __local atomic_int *, __private int *, int);\n" "bool __ovld atomic_compare_exchange_weak(volatile __global atomic_uint *, __global uint *, uint);\n" "bool __ovld atomic_compare_exchange_weak(volatile __global atomic_uint *, __local uint *, uint);\n" "bool __ovld atomic_compare_exchange_weak(volatile __global atomic_uint *, __private uint *, uint);\n" "bool __ovld atomic_compare_exchange_weak(volatile __local atomic_uint *, __global uint *, uint);\n" "bool __ovld atomic_compare_exchange_weak(volatile __local atomic_uint *, __local uint *, uint);\n" "bool __ovld atomic_compare_exchange_weak(volatile __local atomic_uint *, __private uint *, uint);\n" "bool 
__ovld atomic_compare_exchange_weak(volatile __global atomic_float *, __global float *, float);\n" "bool __ovld atomic_compare_exchange_weak(volatile __global atomic_float *, __local float *, float);\n" "bool __ovld atomic_compare_exchange_weak(volatile __global atomic_float *, __private float *, float);\n" "bool __ovld atomic_compare_exchange_weak(volatile __local atomic_float *, __global float *, float);\n" "bool __ovld atomic_compare_exchange_weak(volatile __local atomic_float *, __local float *, float);\n" "bool __ovld atomic_compare_exchange_weak(volatile __local atomic_float *, __private float *, float);\n" "#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#ifdef cl_khr_fp64\n" "bool __ovld atomic_compare_exchange_strong(volatile __global atomic_double *, __global double *, double);\n" "bool __ovld atomic_compare_exchange_strong(volatile __global atomic_double *, __local double *, double);\n" "bool __ovld atomic_compare_exchange_strong(volatile __global atomic_double *, __private double *, double);\n" "bool __ovld atomic_compare_exchange_strong(volatile __local atomic_double *, __global double *, double);\n" "bool __ovld atomic_compare_exchange_strong(volatile __local atomic_double *, __local double *, double);\n" "bool __ovld atomic_compare_exchange_strong(volatile __local atomic_double *, __private double *, double);\n" "bool __ovld atomic_compare_exchange_weak(volatile __global atomic_double *, __global double *, double);\n" "bool __ovld atomic_compare_exchange_weak(volatile __global atomic_double *, __local double *, double);\n" "bool __ovld atomic_compare_exchange_weak(volatile __global atomic_double *, __private double *, double);\n" "bool __ovld atomic_compare_exchange_weak(volatile __local atomic_double *, __global double *, double);\n" "bool __ovld atomic_compare_exchange_weak(volatile __local atomic_double *, __local double *, double);\n" "bool __ovld atomic_compare_exchange_weak(volatile __local atomic_double *, 
__private double *, double);\n" "#endif //cl_khr_fp64\n" "bool __ovld atomic_compare_exchange_strong(volatile __global atomic_long *, __global long *, long);\n" "bool __ovld atomic_compare_exchange_strong(volatile __global atomic_long *, __local long *, long);\n" "bool __ovld atomic_compare_exchange_strong(volatile __global atomic_long *, __private long *, long);\n" "bool __ovld atomic_compare_exchange_strong(volatile __local atomic_long *, __global long *, long);\n" "bool __ovld atomic_compare_exchange_strong(volatile __local atomic_long *, __local long *, long);\n" "bool __ovld atomic_compare_exchange_strong(volatile __local atomic_long *, __private long *, long);\n" "bool __ovld atomic_compare_exchange_strong(volatile __global atomic_ulong *, __global ulong *, ulong);\n" "bool __ovld atomic_compare_exchange_strong(volatile __global atomic_ulong *, __local ulong *, ulong);\n" "bool __ovld atomic_compare_exchange_strong(volatile __global atomic_ulong *, __private ulong *, ulong);\n" "bool __ovld atomic_compare_exchange_strong(volatile __local atomic_ulong *, __global ulong *, ulong);\n" "bool __ovld atomic_compare_exchange_strong(volatile __local atomic_ulong *, __local ulong *, ulong);\n" "bool __ovld atomic_compare_exchange_strong(volatile __local atomic_ulong *, __private ulong *, ulong);\n" "bool __ovld atomic_compare_exchange_weak(volatile __global atomic_long *, __global long *, long);\n" "bool __ovld atomic_compare_exchange_weak(volatile __global atomic_long *, __local long *, long);\n" "bool __ovld atomic_compare_exchange_weak(volatile __global atomic_long *, __private long *, long);\n" "bool __ovld atomic_compare_exchange_weak(volatile __local atomic_long *, __global long *, long);\n" "bool __ovld atomic_compare_exchange_weak(volatile __local atomic_long *, __local long *, long);\n" "bool __ovld atomic_compare_exchange_weak(volatile __local atomic_long *, __private long *, long);\n" "bool __ovld atomic_compare_exchange_weak(volatile __global atomic_ulong 
*, __global ulong *, ulong);\n" "bool __ovld atomic_compare_exchange_weak(volatile __global atomic_ulong *, __local ulong *, ulong);\n" "bool __ovld atomic_compare_exchange_weak(volatile __global atomic_ulong *, __private ulong *, ulong);\n" "bool __ovld atomic_compare_exchange_weak(volatile __local atomic_ulong *, __global ulong *, ulong);\n" "bool __ovld atomic_compare_exchange_weak(volatile __local atomic_ulong *, __local ulong *, ulong);\n" "bool __ovld atomic_compare_exchange_weak(volatile __local atomic_ulong *, __private ulong *, ulong);\n" "#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)\n" "#endif\n" "\n" "#if defined(__opencl_c_atomic_scope_device)\n" "#if defined(__opencl_c_generic_address_space)\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_int *, int *, int, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_uint *, uint *, uint, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_int *, int *, int, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_uint *, uint *, uint, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_float *, float *, float, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_float *, float *, float, memory_order, memory_order);\n" "#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#ifdef cl_khr_fp64\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_double *, double *, double, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_double *, double *, double, memory_order, memory_order);\n" "#endif //cl_khr_fp64\n" "bool __ovld 
atomic_compare_exchange_strong_explicit(volatile atomic_long *, long *, long, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_long *, long *, long, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_ulong *, ulong *, ulong, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_ulong *, ulong *, ulong, memory_order, memory_order);\n" "#endif\n" "#endif //defined(__opencl_c_generic_address_space)\n" "#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *, __global int *, int, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *, __local int *, int, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *, __private int *, int, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *, __global int *, int, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *, __local int *, int, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *, __private int *, int, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *, __global uint *, uint, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *, __local uint *, uint, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *, __private uint *, uint, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *, __global uint *, uint, 
memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *, __local uint *, uint, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *, __private uint *, uint, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *, __global float *, float, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *, __local float *, float, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *, __private float *, float, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *, __global float *, float, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *, __local float *, float, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *, __private float *, float, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *, __global int *, int, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *, __local int *, int, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *, __private int *, int, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *, __global int *, int, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *, __local int *, int, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *, __private int *, int, memory_order, memory_order);\n" "bool __ovld 
atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *, __global uint *, uint, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *, __local uint *, uint, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *, __private uint *, uint, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *, __global uint *, uint, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *, __local uint *, uint, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *, __private uint *, uint, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *, __global float *, float, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *, __local float *, float, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *, __private float *, float, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *, __global float *, float, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *, __local float *, float, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *, __private float *, float, memory_order, memory_order);\n" "#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#ifdef cl_khr_fp64\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *, __global double *, double, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __global 
atomic_double *, __local double *, double, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *, __private double *, double, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *, __global double *, double, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *, __local double *, double, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *, __private double *, double, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *, __global double *, double, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *, __local double *, double, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *, __private double *, double, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *, __global double *, double, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *, __local double *, double, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *, __private double *, double, memory_order, memory_order);\n" "#endif //cl_khr_fp64\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *, __global long *, long, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *, __local long *, long, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *, __private long *, long, memory_order, memory_order);\n" "bool __ovld 
atomic_compare_exchange_strong_explicit(volatile __local atomic_long *, __global long *, long, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *, __local long *, long, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *, __private long *, long, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *, __global ulong *, ulong, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *, __local ulong *, ulong, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *, __private ulong *, ulong, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *, __global ulong *, ulong, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *, __local ulong *, ulong, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *, __private ulong *, ulong, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *, __global long *, long, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *, __local long *, long, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *, __private long *, long, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *, __global long *, long, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *, __local long *, long, memory_order, memory_order);\n" "bool __ovld 
atomic_compare_exchange_weak_explicit(volatile __local atomic_long *, __private long *, long, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *, __global ulong *, ulong, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *, __local ulong *, ulong, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *, __private ulong *, ulong, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *, __global ulong *, ulong, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *, __local ulong *, ulong, memory_order, memory_order);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *, __private ulong *, ulong, memory_order, memory_order);\n" "#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)\n" "#endif //defined(__opencl_c_atomic_scope_device)\n" "\n" "#if defined(__opencl_c_generic_address_space)\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_int *, int *, int, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_uint *, uint *, uint, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_int *, int *, int, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_uint *, uint *, uint, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_float *, float *, float, memory_order, memory_order, memory_scope);\n" "bool __ovld 
atomic_compare_exchange_weak_explicit(volatile atomic_float *, float *, float, memory_order, memory_order, memory_scope);\n" "#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#ifdef cl_khr_fp64\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_double *, double *, double, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_double *, double *, double, memory_order, memory_order, memory_scope);\n" "#endif //cl_khr_fp64\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_long *, long *, long, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_long *, long *, long, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_ulong *, ulong *, ulong, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_ulong *, ulong *, ulong, memory_order, memory_order, memory_scope);\n" "#endif\n" "#endif //defined(__opencl_c_generic_address_space)\n" "#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *, __global int *, int, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *, __local int *, int, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *, __private int *, int, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *, __global int *, int, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *, __local int *, int, memory_order, memory_order, memory_scope);\n" 
"bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *, __private int *, int, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *, __global uint *, uint, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *, __local uint *, uint, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *, __private uint *, uint, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *, __global uint *, uint, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *, __local uint *, uint, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *, __private uint *, uint, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *, __global float *, float, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *, __local float *, float, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *, __private float *, float, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *, __global float *, float, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *, __local float *, float, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *, __private float *, float, memory_order, 
memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *, __global int *, int, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *, __local int *, int, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *, __private int *, int, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *, __global int *, int, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *, __local int *, int, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *, __private int *, int, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *, __global uint *, uint, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *, __local uint *, uint, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *, __private uint *, uint, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *, __global uint *, uint, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *, __local uint *, uint, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *, __private uint *, uint, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *, __global float *, float, memory_order, memory_order, memory_scope);\n" 
"bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *, __local float *, float, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *, __private float *, float, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *, __global float *, float, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *, __local float *, float, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *, __private float *, float, memory_order, memory_order, memory_scope);\n" "#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#ifdef cl_khr_fp64\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *, __global double *, double, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *, __local double *, double, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *, __private double *, double, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *, __global double *, double, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *, __local double *, double, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *, __private double *, double, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *, __global double *, double, memory_order, memory_order, 
memory_scope);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *, __local double *, double, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *, __private double *, double, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *, __global double *, double, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *, __local double *, double, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *, __private double *, double, memory_order, memory_order, memory_scope);\n" "#endif //cl_khr_fp64\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *, __global long *, long, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *, __local long *, long, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *, __private long *, long, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *, __global long *, long, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *, __local long *, long, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *, __private long *, long, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *, __global ulong *, ulong, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __global 
atomic_ulong *, __local ulong *, ulong, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *, __private ulong *, ulong, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *, __global ulong *, ulong, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *, __local ulong *, ulong, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *, __private ulong *, ulong, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *, __global long *, long, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *, __local long *, long, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *, __private long *, long, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *, __global long *, long, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *, __local long *, long, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *, __private long *, long, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *, __global ulong *, ulong, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *, __local ulong *, ulong, memory_order, memory_order, memory_scope);\n" "bool __ovld 
atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *, __private ulong *, ulong, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *, __global ulong *, ulong, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *, __local ulong *, ulong, memory_order, memory_order, memory_scope);\n" "bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *, __private ulong *, ulong, memory_order, memory_order, memory_scope);\n" "#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)\n" "#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)\n" "\n" "// atomic_flag_test_and_set() and atomic_flag_clear()\n" "#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)\n" "#if defined(__opencl_c_generic_address_space)\n" "bool __ovld atomic_flag_test_and_set(volatile atomic_flag *);\n" "void __ovld atomic_flag_clear(volatile atomic_flag *);\n" "#endif //defined(__opencl_c_generic_address_space)\n" "#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)\n" "bool __ovld atomic_flag_test_and_set(volatile __global atomic_flag *);\n" "bool __ovld atomic_flag_test_and_set(volatile __local atomic_flag *);\n" "void __ovld atomic_flag_clear(volatile __global atomic_flag *);\n" "void __ovld atomic_flag_clear(volatile __local atomic_flag *);\n" "#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)\n" "#endif\n" "\n" "#if defined(__opencl_c_atomic_scope_device)\n" "#if defined(__opencl_c_generic_address_space)\n" "bool __ovld atomic_flag_test_and_set_explicit(volatile atomic_flag *, memory_order);\n" "void __ovld atomic_flag_clear_explicit(volatile atomic_flag *, memory_order);\n" "#endif //defined(__opencl_c_generic_address_space)\n" "#if 
(__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)\n" "bool __ovld atomic_flag_test_and_set_explicit(volatile __global atomic_flag *, memory_order);\n" "bool __ovld atomic_flag_test_and_set_explicit(volatile __local atomic_flag *, memory_order);\n" "void __ovld atomic_flag_clear_explicit(volatile __global atomic_flag *, memory_order);\n" "void __ovld atomic_flag_clear_explicit(volatile __local atomic_flag *, memory_order);\n" "#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)\n" "#endif\n" "\n" "#if defined(__opencl_c_generic_address_space)\n" "bool __ovld atomic_flag_test_and_set_explicit(volatile atomic_flag *, memory_order, memory_scope);\n" "void __ovld atomic_flag_clear_explicit(volatile atomic_flag *, memory_order, memory_scope);\n" "#endif //defined(__opencl_c_generic_address_space)\n" "#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)\n" "bool __ovld atomic_flag_test_and_set_explicit(volatile __global atomic_flag *, memory_order, memory_scope);\n" "bool __ovld atomic_flag_test_and_set_explicit(volatile __local atomic_flag *, memory_order, memory_scope);\n" "void __ovld atomic_flag_clear_explicit(volatile __global atomic_flag *, memory_order, memory_scope);\n" "void __ovld atomic_flag_clear_explicit(volatile __local atomic_flag *, memory_order, memory_scope);\n" "#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)\n" "#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)\n" "\n" "// OpenCL v1.1 s6.11.12, v1.2 s6.12.12, v2.0 s6.13.12 - Miscellaneous Vector Functions\n" "\n" "/**\n" " * The shuffle and shuffle2 built-in functions construct\n" " * a permutation of elements from one or two input\n" " * vectors respectively that are of the same type,\n" " * returning a vector with the same element type as the\n" " * input and length that is the same as the shuffle mask.\n" " * The size of each element in 
the mask must match the\n" " * size of each element in the result. For shuffle, only\n" " * the ilogb(2m-1) least significant bits of each mask\n" " * element are considered. For shuffle2, only the\n" " * ilogb(2m-1)+1 least significant bits of each mask\n" " * element are considered. Other bits in the mask shall\n" " * be ignored.\n" " * The elements of the input vectors are numbered from\n" " * left to right across one or both of the vectors. For this\n" " * purpose, the number of elements in a vector is given\n" " * by vec_step(gentypem). The shuffle mask operand\n" " * specifies, for each element of the result vector, which\n" " * element of the one or two input vectors the result\n" " * element gets.\n" " * Examples:\n" " * uint4 mask = (uint4)(3, 2,\n" " * 1, 0);\n" " * float4 a;\n" " * float4 r = shuffle(a, mask);\n" " * // r.s0123 = a.wzyx\n" " * uint8 mask = (uint8)(0, 1, 2, 3,\n" " * 4, 5, 6, 7);\n" " * float4 a, b;\n" " * float8 r = shuffle2(a, b, mask);\n" " * // r.s0123 = a.xyzw\n" " * // r.s4567 = b.xyzw\n" " * uint4 mask;\n" " * float8 a;\n" " * float4 b;\n" " * b = shuffle(a, mask);\n" " * Examples that are not valid are:\n" " * uint8 mask;\n" " * short16 a;\n" " * short8 b;\n" " * b = shuffle(a, mask); <- not valid\n" " */\n" "char2 __ovld __cnfn shuffle(char2, uchar2);\n" "char2 __ovld __cnfn shuffle(char4, uchar2);\n" "char2 __ovld __cnfn shuffle(char8, uchar2);\n" "char2 __ovld __cnfn shuffle(char16, uchar2);\n" "\n" "uchar2 __ovld __cnfn shuffle(uchar2, uchar2);\n" "uchar2 __ovld __cnfn shuffle(uchar4, uchar2);\n" "uchar2 __ovld __cnfn shuffle(uchar8, uchar2);\n" "uchar2 __ovld __cnfn shuffle(uchar16, uchar2);\n" "\n" "short2 __ovld __cnfn shuffle(short2, ushort2);\n" "short2 __ovld __cnfn shuffle(short4, ushort2);\n" "short2 __ovld __cnfn shuffle(short8, ushort2);\n" "short2 __ovld __cnfn shuffle(short16, ushort2);\n" "\n" "ushort2 __ovld __cnfn shuffle(ushort2, ushort2);\n" "ushort2 __ovld __cnfn shuffle(ushort4, ushort2);\n" "ushort2 __ovld 
__cnfn shuffle(ushort8, ushort2);\n" "ushort2 __ovld __cnfn shuffle(ushort16, ushort2);\n" "\n" "int2 __ovld __cnfn shuffle(int2, uint2);\n" "int2 __ovld __cnfn shuffle(int4, uint2);\n" "int2 __ovld __cnfn shuffle(int8, uint2);\n" "int2 __ovld __cnfn shuffle(int16, uint2);\n" "\n" "uint2 __ovld __cnfn shuffle(uint2, uint2);\n" "uint2 __ovld __cnfn shuffle(uint4, uint2);\n" "uint2 __ovld __cnfn shuffle(uint8, uint2);\n" "uint2 __ovld __cnfn shuffle(uint16, uint2);\n" "\n" "long2 __ovld __cnfn shuffle(long2, ulong2);\n" "long2 __ovld __cnfn shuffle(long4, ulong2);\n" "long2 __ovld __cnfn shuffle(long8, ulong2);\n" "long2 __ovld __cnfn shuffle(long16, ulong2);\n" "\n" "ulong2 __ovld __cnfn shuffle(ulong2, ulong2);\n" "ulong2 __ovld __cnfn shuffle(ulong4, ulong2);\n" "ulong2 __ovld __cnfn shuffle(ulong8, ulong2);\n" "ulong2 __ovld __cnfn shuffle(ulong16, ulong2);\n" "\n" "float2 __ovld __cnfn shuffle(float2, uint2);\n" "float2 __ovld __cnfn shuffle(float4, uint2);\n" "float2 __ovld __cnfn shuffle(float8, uint2);\n" "float2 __ovld __cnfn shuffle(float16, uint2);\n" "\n" "char4 __ovld __cnfn shuffle(char2, uchar4);\n" "char4 __ovld __cnfn shuffle(char4, uchar4);\n" "char4 __ovld __cnfn shuffle(char8, uchar4);\n" "char4 __ovld __cnfn shuffle(char16, uchar4);\n" "\n" "uchar4 __ovld __cnfn shuffle(uchar2, uchar4);\n" "uchar4 __ovld __cnfn shuffle(uchar4, uchar4);\n" "uchar4 __ovld __cnfn shuffle(uchar8, uchar4);\n" "uchar4 __ovld __cnfn shuffle(uchar16, uchar4);\n" "\n" "short4 __ovld __cnfn shuffle(short2, ushort4);\n" "short4 __ovld __cnfn shuffle(short4, ushort4);\n" "short4 __ovld __cnfn shuffle(short8, ushort4);\n" "short4 __ovld __cnfn shuffle(short16, ushort4);\n" "\n" "ushort4 __ovld __cnfn shuffle(ushort2, ushort4);\n" "ushort4 __ovld __cnfn shuffle(ushort4, ushort4);\n" "ushort4 __ovld __cnfn shuffle(ushort8, ushort4);\n" "ushort4 __ovld __cnfn shuffle(ushort16, ushort4);\n" "\n" "int4 __ovld __cnfn shuffle(int2, uint4);\n" "int4 __ovld __cnfn shuffle(int4, 
uint4);\n" "int4 __ovld __cnfn shuffle(int8, uint4);\n" "int4 __ovld __cnfn shuffle(int16, uint4);\n" "\n" "uint4 __ovld __cnfn shuffle(uint2, uint4);\n" "uint4 __ovld __cnfn shuffle(uint4, uint4);\n" "uint4 __ovld __cnfn shuffle(uint8, uint4);\n" "uint4 __ovld __cnfn shuffle(uint16, uint4);\n" "\n" "long4 __ovld __cnfn shuffle(long2, ulong4);\n" "long4 __ovld __cnfn shuffle(long4, ulong4);\n" "long4 __ovld __cnfn shuffle(long8, ulong4);\n" "long4 __ovld __cnfn shuffle(long16, ulong4);\n" "\n" "ulong4 __ovld __cnfn shuffle(ulong2, ulong4);\n" "ulong4 __ovld __cnfn shuffle(ulong4, ulong4);\n" "ulong4 __ovld __cnfn shuffle(ulong8, ulong4);\n" "ulong4 __ovld __cnfn shuffle(ulong16, ulong4);\n" "\n" "float4 __ovld __cnfn shuffle(float2, uint4);\n" "float4 __ovld __cnfn shuffle(float4, uint4);\n" "float4 __ovld __cnfn shuffle(float8, uint4);\n" "float4 __ovld __cnfn shuffle(float16, uint4);\n" "\n" "char8 __ovld __cnfn shuffle(char2, uchar8);\n" "char8 __ovld __cnfn shuffle(char4, uchar8);\n" "char8 __ovld __cnfn shuffle(char8, uchar8);\n" "char8 __ovld __cnfn shuffle(char16, uchar8);\n" "\n" "uchar8 __ovld __cnfn shuffle(uchar2, uchar8);\n" "uchar8 __ovld __cnfn shuffle(uchar4, uchar8);\n" "uchar8 __ovld __cnfn shuffle(uchar8, uchar8);\n" "uchar8 __ovld __cnfn shuffle(uchar16, uchar8);\n" "\n" "short8 __ovld __cnfn shuffle(short2, ushort8);\n" "short8 __ovld __cnfn shuffle(short4, ushort8);\n" "short8 __ovld __cnfn shuffle(short8, ushort8);\n" "short8 __ovld __cnfn shuffle(short16, ushort8);\n" "\n" "ushort8 __ovld __cnfn shuffle(ushort2, ushort8);\n" "ushort8 __ovld __cnfn shuffle(ushort4, ushort8);\n" "ushort8 __ovld __cnfn shuffle(ushort8, ushort8);\n" "ushort8 __ovld __cnfn shuffle(ushort16, ushort8);\n" "\n" "int8 __ovld __cnfn shuffle(int2, uint8);\n" "int8 __ovld __cnfn shuffle(int4, uint8);\n" "int8 __ovld __cnfn shuffle(int8, uint8);\n" "int8 __ovld __cnfn shuffle(int16, uint8);\n" "\n" "uint8 __ovld __cnfn shuffle(uint2, uint8);\n" "uint8 __ovld __cnfn 
shuffle(uint4, uint8);\n" "uint8 __ovld __cnfn shuffle(uint8, uint8);\n" "uint8 __ovld __cnfn shuffle(uint16, uint8);\n" "\n" "long8 __ovld __cnfn shuffle(long2, ulong8);\n" "long8 __ovld __cnfn shuffle(long4, ulong8);\n" "long8 __ovld __cnfn shuffle(long8, ulong8);\n" "long8 __ovld __cnfn shuffle(long16, ulong8);\n" "\n" "ulong8 __ovld __cnfn shuffle(ulong2, ulong8);\n" "ulong8 __ovld __cnfn shuffle(ulong4, ulong8);\n" "ulong8 __ovld __cnfn shuffle(ulong8, ulong8);\n" "ulong8 __ovld __cnfn shuffle(ulong16, ulong8);\n" "\n" "float8 __ovld __cnfn shuffle(float2, uint8);\n" "float8 __ovld __cnfn shuffle(float4, uint8);\n" "float8 __ovld __cnfn shuffle(float8, uint8);\n" "float8 __ovld __cnfn shuffle(float16, uint8);\n" "\n" "char16 __ovld __cnfn shuffle(char2, uchar16);\n" "char16 __ovld __cnfn shuffle(char4, uchar16);\n" "char16 __ovld __cnfn shuffle(char8, uchar16);\n" "char16 __ovld __cnfn shuffle(char16, uchar16);\n" "\n" "uchar16 __ovld __cnfn shuffle(uchar2, uchar16);\n" "uchar16 __ovld __cnfn shuffle(uchar4, uchar16);\n" "uchar16 __ovld __cnfn shuffle(uchar8, uchar16);\n" "uchar16 __ovld __cnfn shuffle(uchar16, uchar16);\n" "\n" "short16 __ovld __cnfn shuffle(short2, ushort16);\n" "short16 __ovld __cnfn shuffle(short4, ushort16);\n" "short16 __ovld __cnfn shuffle(short8, ushort16);\n" "short16 __ovld __cnfn shuffle(short16, ushort16);\n" "\n" "ushort16 __ovld __cnfn shuffle(ushort2, ushort16);\n" "ushort16 __ovld __cnfn shuffle(ushort4, ushort16);\n" "ushort16 __ovld __cnfn shuffle(ushort8, ushort16);\n" "ushort16 __ovld __cnfn shuffle(ushort16, ushort16);\n" "\n" "int16 __ovld __cnfn shuffle(int2, uint16);\n" "int16 __ovld __cnfn shuffle(int4, uint16);\n" "int16 __ovld __cnfn shuffle(int8, uint16);\n" "int16 __ovld __cnfn shuffle(int16, uint16);\n" "\n" "uint16 __ovld __cnfn shuffle(uint2, uint16);\n" "uint16 __ovld __cnfn shuffle(uint4, uint16);\n" "uint16 __ovld __cnfn shuffle(uint8, uint16);\n" "uint16 __ovld __cnfn shuffle(uint16, uint16);\n" "\n" "long16 
__ovld __cnfn shuffle(long2, ulong16);\n" "long16 __ovld __cnfn shuffle(long4, ulong16);\n" "long16 __ovld __cnfn shuffle(long8, ulong16);\n" "long16 __ovld __cnfn shuffle(long16, ulong16);\n" "\n" "ulong16 __ovld __cnfn shuffle(ulong2, ulong16);\n" "ulong16 __ovld __cnfn shuffle(ulong4, ulong16);\n" "ulong16 __ovld __cnfn shuffle(ulong8, ulong16);\n" "ulong16 __ovld __cnfn shuffle(ulong16, ulong16);\n" "\n" "float16 __ovld __cnfn shuffle(float2, uint16);\n" "float16 __ovld __cnfn shuffle(float4, uint16);\n" "float16 __ovld __cnfn shuffle(float8, uint16);\n" "float16 __ovld __cnfn shuffle(float16, uint16);\n" "\n" "#ifdef cl_khr_fp64\n" "double2 __ovld __cnfn shuffle(double2, ulong2);\n" "double2 __ovld __cnfn shuffle(double4, ulong2);\n" "double2 __ovld __cnfn shuffle(double8, ulong2);\n" "double2 __ovld __cnfn shuffle(double16, ulong2);\n" "\n" "double4 __ovld __cnfn shuffle(double2, ulong4);\n" "double4 __ovld __cnfn shuffle(double4, ulong4);\n" "double4 __ovld __cnfn shuffle(double8, ulong4);\n" "double4 __ovld __cnfn shuffle(double16, ulong4);\n" "\n" "double8 __ovld __cnfn shuffle(double2, ulong8);\n" "double8 __ovld __cnfn shuffle(double4, ulong8);\n" "double8 __ovld __cnfn shuffle(double8, ulong8);\n" "double8 __ovld __cnfn shuffle(double16, ulong8);\n" "\n" "double16 __ovld __cnfn shuffle(double2, ulong16);\n" "double16 __ovld __cnfn shuffle(double4, ulong16);\n" "double16 __ovld __cnfn shuffle(double8, ulong16);\n" "double16 __ovld __cnfn shuffle(double16, ulong16);\n" "#endif //cl_khr_fp64\n" "\n" "#ifdef cl_khr_fp16\n" "half2 __ovld __cnfn shuffle(half2, ushort2);\n" "half2 __ovld __cnfn shuffle(half4, ushort2);\n" "half2 __ovld __cnfn shuffle(half8, ushort2);\n" "half2 __ovld __cnfn shuffle(half16, ushort2);\n" "\n" "half4 __ovld __cnfn shuffle(half2, ushort4);\n" "half4 __ovld __cnfn shuffle(half4, ushort4);\n" "half4 __ovld __cnfn shuffle(half8, ushort4);\n" "half4 __ovld __cnfn shuffle(half16, ushort4);\n" "\n" "half8 __ovld __cnfn shuffle(half2, 
ushort8);\n" "half8 __ovld __cnfn shuffle(half4, ushort8);\n" "half8 __ovld __cnfn shuffle(half8, ushort8);\n" "half8 __ovld __cnfn shuffle(half16, ushort8);\n" "\n" "half16 __ovld __cnfn shuffle(half2, ushort16);\n" "half16 __ovld __cnfn shuffle(half4, ushort16);\n" "half16 __ovld __cnfn shuffle(half8, ushort16);\n" "half16 __ovld __cnfn shuffle(half16, ushort16);\n" "#endif //cl_khr_fp16\n" "\n" "char2 __ovld __cnfn shuffle2(char2, char2, uchar2);\n" "char2 __ovld __cnfn shuffle2(char4, char4, uchar2);\n" "char2 __ovld __cnfn shuffle2(char8, char8, uchar2);\n" "char2 __ovld __cnfn shuffle2(char16, char16, uchar2);\n" "\n" "uchar2 __ovld __cnfn shuffle2(uchar2, uchar2, uchar2);\n" "uchar2 __ovld __cnfn shuffle2(uchar4, uchar4, uchar2);\n" "uchar2 __ovld __cnfn shuffle2(uchar8, uchar8, uchar2);\n" "uchar2 __ovld __cnfn shuffle2(uchar16, uchar16, uchar2);\n" "\n" "short2 __ovld __cnfn shuffle2(short2, short2, ushort2);\n" "short2 __ovld __cnfn shuffle2(short4, short4, ushort2);\n" "short2 __ovld __cnfn shuffle2(short8, short8, ushort2);\n" "short2 __ovld __cnfn shuffle2(short16, short16, ushort2);\n" "\n" "ushort2 __ovld __cnfn shuffle2(ushort2, ushort2, ushort2);\n" "ushort2 __ovld __cnfn shuffle2(ushort4, ushort4, ushort2);\n" "ushort2 __ovld __cnfn shuffle2(ushort8, ushort8, ushort2);\n" "ushort2 __ovld __cnfn shuffle2(ushort16, ushort16, ushort2);\n" "\n" "int2 __ovld __cnfn shuffle2(int2, int2, uint2);\n" "int2 __ovld __cnfn shuffle2(int4, int4, uint2);\n" "int2 __ovld __cnfn shuffle2(int8, int8, uint2);\n" "int2 __ovld __cnfn shuffle2(int16, int16, uint2);\n" "\n" "uint2 __ovld __cnfn shuffle2(uint2, uint2, uint2);\n" "uint2 __ovld __cnfn shuffle2(uint4, uint4, uint2);\n" "uint2 __ovld __cnfn shuffle2(uint8, uint8, uint2);\n" "uint2 __ovld __cnfn shuffle2(uint16, uint16, uint2);\n" "\n" "long2 __ovld __cnfn shuffle2(long2, long2, ulong2);\n" "long2 __ovld __cnfn shuffle2(long4, long4, ulong2);\n" "long2 __ovld __cnfn shuffle2(long8, long8, ulong2);\n" "long2 
__ovld __cnfn shuffle2(long16, long16, ulong2);\n" "\n" "ulong2 __ovld __cnfn shuffle2(ulong2, ulong2, ulong2);\n" "ulong2 __ovld __cnfn shuffle2(ulong4, ulong4, ulong2);\n" "ulong2 __ovld __cnfn shuffle2(ulong8, ulong8, ulong2);\n" "ulong2 __ovld __cnfn shuffle2(ulong16, ulong16, ulong2);\n" "\n" "float2 __ovld __cnfn shuffle2(float2, float2, uint2);\n" "float2 __ovld __cnfn shuffle2(float4, float4, uint2);\n" "float2 __ovld __cnfn shuffle2(float8, float8, uint2);\n" "float2 __ovld __cnfn shuffle2(float16, float16, uint2);\n" "\n" "char4 __ovld __cnfn shuffle2(char2, char2, uchar4);\n" "char4 __ovld __cnfn shuffle2(char4, char4, uchar4);\n" "char4 __ovld __cnfn shuffle2(char8, char8, uchar4);\n" "char4 __ovld __cnfn shuffle2(char16, char16, uchar4);\n" "\n" "uchar4 __ovld __cnfn shuffle2(uchar2, uchar2, uchar4);\n" "uchar4 __ovld __cnfn shuffle2(uchar4, uchar4, uchar4);\n" "uchar4 __ovld __cnfn shuffle2(uchar8, uchar8, uchar4);\n" "uchar4 __ovld __cnfn shuffle2(uchar16, uchar16, uchar4);\n" "\n" "short4 __ovld __cnfn shuffle2(short2, short2, ushort4);\n" "short4 __ovld __cnfn shuffle2(short4, short4, ushort4);\n" "short4 __ovld __cnfn shuffle2(short8, short8, ushort4);\n" "short4 __ovld __cnfn shuffle2(short16, short16, ushort4);\n" "\n" "ushort4 __ovld __cnfn shuffle2(ushort2, ushort2, ushort4);\n" "ushort4 __ovld __cnfn shuffle2(ushort4, ushort4, ushort4);\n" "ushort4 __ovld __cnfn shuffle2(ushort8, ushort8, ushort4);\n" "ushort4 __ovld __cnfn shuffle2(ushort16, ushort16, ushort4);\n" "\n" "int4 __ovld __cnfn shuffle2(int2, int2, uint4);\n" "int4 __ovld __cnfn shuffle2(int4, int4, uint4);\n" "int4 __ovld __cnfn shuffle2(int8, int8, uint4);\n" "int4 __ovld __cnfn shuffle2(int16, int16, uint4);\n" "\n" "uint4 __ovld __cnfn shuffle2(uint2, uint2, uint4);\n" "uint4 __ovld __cnfn shuffle2(uint4, uint4, uint4);\n" "uint4 __ovld __cnfn shuffle2(uint8, uint8, uint4);\n" "uint4 __ovld __cnfn shuffle2(uint16, uint16, uint4);\n" "\n" "long4 __ovld __cnfn shuffle2(long2, 
long2, ulong4);\n" "long4 __ovld __cnfn shuffle2(long4, long4, ulong4);\n" "long4 __ovld __cnfn shuffle2(long8, long8, ulong4);\n" "long4 __ovld __cnfn shuffle2(long16, long16, ulong4);\n" "\n" "ulong4 __ovld __cnfn shuffle2(ulong2, ulong2, ulong4);\n" "ulong4 __ovld __cnfn shuffle2(ulong4, ulong4, ulong4);\n" "ulong4 __ovld __cnfn shuffle2(ulong8, ulong8, ulong4);\n" "ulong4 __ovld __cnfn shuffle2(ulong16, ulong16, ulong4);\n" "\n" "float4 __ovld __cnfn shuffle2(float2, float2, uint4);\n" "float4 __ovld __cnfn shuffle2(float4, float4, uint4);\n" "float4 __ovld __cnfn shuffle2(float8, float8, uint4);\n" "float4 __ovld __cnfn shuffle2(float16, float16, uint4);\n" "\n" "char8 __ovld __cnfn shuffle2(char2, char2, uchar8);\n" "char8 __ovld __cnfn shuffle2(char4, char4, uchar8);\n" "char8 __ovld __cnfn shuffle2(char8, char8, uchar8);\n" "char8 __ovld __cnfn shuffle2(char16, char16, uchar8);\n" "\n" "uchar8 __ovld __cnfn shuffle2(uchar2, uchar2, uchar8);\n" "uchar8 __ovld __cnfn shuffle2(uchar4, uchar4, uchar8);\n" "uchar8 __ovld __cnfn shuffle2(uchar8, uchar8, uchar8);\n" "uchar8 __ovld __cnfn shuffle2(uchar16, uchar16, uchar8);\n" "\n" "short8 __ovld __cnfn shuffle2(short2, short2, ushort8);\n" "short8 __ovld __cnfn shuffle2(short4, short4, ushort8);\n" "short8 __ovld __cnfn shuffle2(short8, short8, ushort8);\n" "short8 __ovld __cnfn shuffle2(short16, short16, ushort8);\n" "\n" "ushort8 __ovld __cnfn shuffle2(ushort2, ushort2, ushort8);\n" "ushort8 __ovld __cnfn shuffle2(ushort4, ushort4, ushort8);\n" "ushort8 __ovld __cnfn shuffle2(ushort8, ushort8, ushort8);\n" "ushort8 __ovld __cnfn shuffle2(ushort16, ushort16, ushort8);\n" "\n" "int8 __ovld __cnfn shuffle2(int2, int2, uint8);\n" "int8 __ovld __cnfn shuffle2(int4, int4, uint8);\n" "int8 __ovld __cnfn shuffle2(int8, int8, uint8);\n" "int8 __ovld __cnfn shuffle2(int16, int16, uint8);\n" "\n" "uint8 __ovld __cnfn shuffle2(uint2, uint2, uint8);\n" "uint8 __ovld __cnfn shuffle2(uint4, uint4, uint8);\n" "uint8 __ovld 
__cnfn shuffle2(uint8, uint8, uint8);\n" "uint8 __ovld __cnfn shuffle2(uint16, uint16, uint8);\n" "\n" "long8 __ovld __cnfn shuffle2(long2, long2, ulong8);\n" "long8 __ovld __cnfn shuffle2(long4, long4, ulong8);\n" "long8 __ovld __cnfn shuffle2(long8, long8, ulong8);\n" "long8 __ovld __cnfn shuffle2(long16, long16, ulong8);\n" "\n" "ulong8 __ovld __cnfn shuffle2(ulong2, ulong2, ulong8);\n" "ulong8 __ovld __cnfn shuffle2(ulong4, ulong4, ulong8);\n" "ulong8 __ovld __cnfn shuffle2(ulong8, ulong8, ulong8);\n" "ulong8 __ovld __cnfn shuffle2(ulong16, ulong16, ulong8);\n" "\n" "float8 __ovld __cnfn shuffle2(float2, float2, uint8);\n" "float8 __ovld __cnfn shuffle2(float4, float4, uint8);\n" "float8 __ovld __cnfn shuffle2(float8, float8, uint8);\n" "float8 __ovld __cnfn shuffle2(float16, float16, uint8);\n" "\n" "char16 __ovld __cnfn shuffle2(char2, char2, uchar16);\n" "char16 __ovld __cnfn shuffle2(char4, char4, uchar16);\n" "char16 __ovld __cnfn shuffle2(char8, char8, uchar16);\n" "char16 __ovld __cnfn shuffle2(char16, char16, uchar16);\n" "\n" "uchar16 __ovld __cnfn shuffle2(uchar2, uchar2, uchar16);\n" "uchar16 __ovld __cnfn shuffle2(uchar4, uchar4, uchar16);\n" "uchar16 __ovld __cnfn shuffle2(uchar8, uchar8, uchar16);\n" "uchar16 __ovld __cnfn shuffle2(uchar16, uchar16, uchar16);\n" "\n" "short16 __ovld __cnfn shuffle2(short2, short2, ushort16);\n" "short16 __ovld __cnfn shuffle2(short4, short4, ushort16);\n" "short16 __ovld __cnfn shuffle2(short8, short8, ushort16);\n" "short16 __ovld __cnfn shuffle2(short16, short16, ushort16);\n" "\n" "ushort16 __ovld __cnfn shuffle2(ushort2, ushort2, ushort16);\n" "ushort16 __ovld __cnfn shuffle2(ushort4, ushort4, ushort16);\n" "ushort16 __ovld __cnfn shuffle2(ushort8, ushort8, ushort16);\n" "ushort16 __ovld __cnfn shuffle2(ushort16, ushort16, ushort16);\n" "\n" "int16 __ovld __cnfn shuffle2(int2, int2, uint16);\n" "int16 __ovld __cnfn shuffle2(int4, int4, uint16);\n" "int16 __ovld __cnfn shuffle2(int8, int8, uint16);\n" "int16 
__ovld __cnfn shuffle2(int16, int16, uint16);\n" "\n" "uint16 __ovld __cnfn shuffle2(uint2, uint2, uint16);\n" "uint16 __ovld __cnfn shuffle2(uint4, uint4, uint16);\n" "uint16 __ovld __cnfn shuffle2(uint8, uint8, uint16);\n" "uint16 __ovld __cnfn shuffle2(uint16, uint16, uint16);\n" "\n" "long16 __ovld __cnfn shuffle2(long2, long2, ulong16);\n" "long16 __ovld __cnfn shuffle2(long4, long4, ulong16);\n" "long16 __ovld __cnfn shuffle2(long8, long8, ulong16);\n" "long16 __ovld __cnfn shuffle2(long16, long16, ulong16);\n" "\n" "ulong16 __ovld __cnfn shuffle2(ulong2, ulong2, ulong16);\n" "ulong16 __ovld __cnfn shuffle2(ulong4, ulong4, ulong16);\n" "ulong16 __ovld __cnfn shuffle2(ulong8, ulong8, ulong16);\n" "ulong16 __ovld __cnfn shuffle2(ulong16, ulong16, ulong16);\n" "\n" "float16 __ovld __cnfn shuffle2(float2, float2, uint16);\n" "float16 __ovld __cnfn shuffle2(float4, float4, uint16);\n" "float16 __ovld __cnfn shuffle2(float8, float8, uint16);\n" "float16 __ovld __cnfn shuffle2(float16, float16, uint16);\n" "\n" "#ifdef cl_khr_fp64\n" "double2 __ovld __cnfn shuffle2(double2, double2, ulong2);\n" "double2 __ovld __cnfn shuffle2(double4, double4, ulong2);\n" "double2 __ovld __cnfn shuffle2(double8, double8, ulong2);\n" "double2 __ovld __cnfn shuffle2(double16, double16, ulong2);\n" "\n" "double4 __ovld __cnfn shuffle2(double2, double2, ulong4);\n" "double4 __ovld __cnfn shuffle2(double4, double4, ulong4);\n" "double4 __ovld __cnfn shuffle2(double8, double8, ulong4);\n" "double4 __ovld __cnfn shuffle2(double16, double16, ulong4);\n" "\n" "double8 __ovld __cnfn shuffle2(double2, double2, ulong8);\n" "double8 __ovld __cnfn shuffle2(double4, double4, ulong8);\n" "double8 __ovld __cnfn shuffle2(double8, double8, ulong8);\n" "double8 __ovld __cnfn shuffle2(double16, double16, ulong8);\n" "\n" "double16 __ovld __cnfn shuffle2(double2, double2, ulong16);\n" "double16 __ovld __cnfn shuffle2(double4, double4, ulong16);\n" "double16 __ovld __cnfn shuffle2(double8, double8, 
ulong16);\n" "double16 __ovld __cnfn shuffle2(double16, double16, ulong16);\n" "#endif //cl_khr_fp64\n" "\n" "#ifdef cl_khr_fp16\n" "half2 __ovld __cnfn shuffle2(half2, half2, ushort2);\n" "half2 __ovld __cnfn shuffle2(half4, half4, ushort2);\n" "half2 __ovld __cnfn shuffle2(half8, half8, ushort2);\n" "half2 __ovld __cnfn shuffle2(half16, half16, ushort2);\n" "\n" "half4 __ovld __cnfn shuffle2(half2, half2, ushort4);\n" "half4 __ovld __cnfn shuffle2(half4, half4, ushort4);\n" "half4 __ovld __cnfn shuffle2(half8, half8, ushort4);\n" "half4 __ovld __cnfn shuffle2(half16, half16, ushort4);\n" "\n" "half8 __ovld __cnfn shuffle2(half2, half2, ushort8);\n" "half8 __ovld __cnfn shuffle2(half4, half4, ushort8);\n" "half8 __ovld __cnfn shuffle2(half8, half8, ushort8);\n" "half8 __ovld __cnfn shuffle2(half16, half16, ushort8);\n" "\n" "half16 __ovld __cnfn shuffle2(half2, half2, ushort16);\n" "half16 __ovld __cnfn shuffle2(half4, half4, ushort16);\n" "half16 __ovld __cnfn shuffle2(half8, half8, ushort16);\n" "half16 __ovld __cnfn shuffle2(half16, half16, ushort16);\n" "#endif //cl_khr_fp16\n" "\n" "// OpenCL v1.1 s6.11.3, v1.2 s6.12.14, v2.0 s6.13.14 - Image Read and Write Functions\n" "\n" "#ifdef cl_khr_gl_msaa_sharing\n" "#pragma OPENCL EXTENSION cl_khr_gl_msaa_sharing : enable\n" "#endif //cl_khr_gl_msaa_sharing\n" "\n" "/**\n" " * Use the coordinate (coord.xy) to do an element lookup in\n" " * the 2D image object specified by image.\n" " *\n" " * Use the coordinate (coord.x, coord.y, coord.z) to do\n" " * an element lookup in the 3D image object specified\n" " * by image. 
coord.w is ignored.\n" " *\n" " * Use the coordinate (coord.z) to index into the\n" " * 2D image array object specified by image_array\n" " * and (coord.x, coord.y) to do an element lookup in\n" " * the 2D image object specified by image.\n" " *\n" " * Use the coordinate (x) to do an element lookup in\n" " * the 1D image object specified by image.\n" " *\n" " * Use the coordinate (coord.y) to index into the\n" " * 1D image array object specified by image_array\n" " * and (coord.x) to do an element lookup in\n" " * the 1D image object specified by image.\n" " *\n" " * Use the coordinate (cood.xy) and sample to do an\n" " * element lookup in the 2D multi-sample image specified\n" " * by image.\n" " *\n" " * Use coord.xy and sample to do an element\n" " * lookup in the 2D multi-sample image layer\n" " * identified by index coord.z in the 2D multi-sample\n" " * image array specified by image.\n" " *\n" " * For mipmap images, use the mip-level specified by\n" " * the Level-of-Detail (lod) or use gradients for LOD\n" " * computation.\n" " *\n" " * read_imagef returns floating-point values in the\n" " * range [0.0 ... 1.0] for image objects created with\n" " * image_channel_data_type set to one of the predefined\n" " * packed formats or CL_UNORM_INT8, or\n" " * CL_UNORM_INT16.\n" " *\n" " * read_imagef returns floating-point values in the\n" " * range [-1.0 ... 1.0] for image objects created with\n" " * image_channel_data_type set to CL_SNORM_INT8,\n" " * or CL_SNORM_INT16.\n" " *\n" " * read_imagef returns floating-point values for image\n" " * objects created with image_channel_data_type set to\n" " * CL_HALF_FLOAT or CL_FLOAT.\n" " *\n" " * read_imagei and read_imageui return\n" " * unnormalized signed integer and unsigned integer\n" " * values respectively. 
Each channel will be stored in a\n" " * 32-bit integer.\n" " *\n" " * read_imagei can only be used with image objects\n" " * created with image_channel_data_type set to one of\n" " * the following values:\n" " * CL_SIGNED_INT8,\n" " * CL_SIGNED_INT16 and\n" " * CL_SIGNED_INT32.\n" " * If the image_channel_data_type is not one of the\n" " * above values, the values returned by read_imagei\n" " * are undefined.\n" " *\n" " * read_imageui can only be used with image objects\n" " * created with image_channel_data_type set to one of\n" " * the following values:\n" " * CL_UNSIGNED_INT8,\n" " * CL_UNSIGNED_INT16 and\n" " * CL_UNSIGNED_INT32.\n" " * If the image_channel_data_type is not one of the\n" " * above values, the values returned by read_imageui\n" " * are undefined.\n" " *\n" " * The read_image{i|ui} calls support a nearest filter\n" " * only. The filter_mode specified in sampler\n" " * must be set to CLK_FILTER_NEAREST; otherwise\n" " * the values returned are undefined.\n" "\n" " * The read_image{f|i|ui} calls that take\n" " * integer coordinates must use a sampler with\n" " * normalized coordinates set to\n" " * CLK_NORMALIZED_COORDS_FALSE and\n" " * addressing mode set to\n" " * CLK_ADDRESS_CLAMP_TO_EDGE,\n" " * CLK_ADDRESS_CLAMP or CLK_ADDRESS_NONE;\n" " * otherwise the values returned are undefined.\n" " *\n" " * Values returned by read_imagef for image objects\n" " * with image_channel_data_type values not specified\n" " * in the description above are undefined.\n" " */\n" "\n" "float4 __ovld __purefn read_imagef(read_only image2d_t, sampler_t, int2);\n" "float4 __ovld __purefn read_imagef(read_only image2d_t, sampler_t, float2);\n" "\n" "int4 __ovld __purefn read_imagei(read_only image2d_t, sampler_t, int2);\n" "int4 __ovld __purefn read_imagei(read_only image2d_t, sampler_t, float2);\n" "uint4 __ovld __purefn read_imageui(read_only image2d_t, sampler_t, int2);\n" "uint4 __ovld __purefn read_imageui(read_only image2d_t, sampler_t, float2);\n" "\n" "float4 
__ovld __purefn read_imagef(read_only image3d_t, sampler_t, int4);\n" "float4 __ovld __purefn read_imagef(read_only image3d_t, sampler_t, float4);\n" "\n" "int4 __ovld __purefn read_imagei(read_only image3d_t, sampler_t, int4);\n" "int4 __ovld __purefn read_imagei(read_only image3d_t, sampler_t, float4);\n" "uint4 __ovld __purefn read_imageui(read_only image3d_t, sampler_t, int4);\n" "uint4 __ovld __purefn read_imageui(read_only image3d_t, sampler_t, float4);\n" "\n" "#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)\n" "float4 __ovld __purefn read_imagef(read_only image2d_array_t, sampler_t, int4);\n" "float4 __ovld __purefn read_imagef(read_only image2d_array_t, sampler_t, float4);\n" "\n" "int4 __ovld __purefn read_imagei(read_only image2d_array_t, sampler_t, int4);\n" "int4 __ovld __purefn read_imagei(read_only image2d_array_t, sampler_t, float4);\n" "uint4 __ovld __purefn read_imageui(read_only image2d_array_t, sampler_t, int4);\n" "uint4 __ovld __purefn read_imageui(read_only image2d_array_t, sampler_t, float4);\n" "#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)\n" "\n" "float4 __ovld __purefn read_imagef(read_only image1d_t, sampler_t, int);\n" "float4 __ovld __purefn read_imagef(read_only image1d_t, sampler_t, float);\n" "\n" "int4 __ovld __purefn read_imagei(read_only image1d_t, sampler_t, int);\n" "int4 __ovld __purefn read_imagei(read_only image1d_t, sampler_t, float);\n" "uint4 __ovld __purefn read_imageui(read_only image1d_t, sampler_t, int);\n" "uint4 __ovld __purefn read_imageui(read_only image1d_t, sampler_t, float);\n" "\n" "#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)\n" "float4 __ovld __purefn read_imagef(read_only image1d_array_t, sampler_t, int2);\n" "float4 __ovld __purefn read_imagef(read_only image1d_array_t, sampler_t, float2);\n" "\n" "int4 __ovld __purefn read_imagei(read_only image1d_array_t, sampler_t, int2);\n" "int4 __ovld __purefn 
read_imagei(read_only image1d_array_t, sampler_t, float2);\n" "uint4 __ovld __purefn read_imageui(read_only image1d_array_t, sampler_t, int2);\n" "uint4 __ovld __purefn read_imageui(read_only image1d_array_t, sampler_t, float2);\n" "#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)\n" "\n" "#ifdef cl_khr_depth_images\n" "float __ovld __purefn read_imagef(read_only image2d_depth_t, sampler_t, float2);\n" "float __ovld __purefn read_imagef(read_only image2d_depth_t, sampler_t, int2);\n" "\n" "float __ovld __purefn read_imagef(read_only image2d_array_depth_t, sampler_t, float4);\n" "float __ovld __purefn read_imagef(read_only image2d_array_depth_t, sampler_t, int4);\n" "#endif //cl_khr_depth_images\n" "\n" "#if defined(cl_khr_gl_msaa_sharing)\n" "float4 __ovld __purefn read_imagef(read_only image2d_msaa_t, int2, int);\n" "int4 __ovld __purefn read_imagei(read_only image2d_msaa_t, int2, int);\n" "uint4 __ovld __purefn read_imageui(read_only image2d_msaa_t, int2, int);\n" "\n" "float __ovld __purefn read_imagef(read_only image2d_msaa_depth_t, int2, int);\n" "\n" "float4 __ovld __purefn read_imagef(read_only image2d_array_msaa_t, int4, int);\n" "int4 __ovld __purefn read_imagei(read_only image2d_array_msaa_t, int4, int);\n" "uint4 __ovld __purefn read_imageui(read_only image2d_array_msaa_t, int4, int);\n" "\n" "float __ovld __purefn read_imagef(read_only image2d_array_msaa_depth_t, int4, int);\n" "#endif //cl_khr_gl_msaa_sharing\n" "\n" "// OpenCL Extension v2.0 s9.18 - Mipmaps\n" "#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)\n" "#ifdef cl_khr_mipmap_image\n" "\n" "float4 __ovld __purefn read_imagef(read_only image1d_t, sampler_t, float, float);\n" "int4 __ovld __purefn read_imagei(read_only image1d_t, sampler_t, float, float);\n" "uint4 __ovld __purefn read_imageui(read_only image1d_t, sampler_t, float, float);\n" "\n" "float4 __ovld __purefn read_imagef(read_only image1d_array_t, sampler_t, float2, 
float);\n" "int4 __ovld __purefn read_imagei(read_only image1d_array_t, sampler_t, float2, float);\n" "uint4 __ovld __purefn read_imageui(read_only image1d_array_t, sampler_t, float2, float);\n" "\n" "float4 __ovld __purefn read_imagef(read_only image2d_t, sampler_t, float2, float);\n" "int4 __ovld __purefn read_imagei(read_only image2d_t, sampler_t, float2, float);\n" "uint4 __ovld __purefn read_imageui(read_only image2d_t, sampler_t, float2, float);\n" "\n" "#ifdef cl_khr_depth_images\n" "float __ovld __purefn read_imagef(read_only image2d_depth_t, sampler_t, float2, float);\n" "#endif // cl_khr_depth_images\n" "\n" "float4 __ovld __purefn read_imagef(read_only image2d_array_t, sampler_t, float4, float);\n" "int4 __ovld __purefn read_imagei(read_only image2d_array_t, sampler_t, float4, float);\n" "uint4 __ovld __purefn read_imageui(read_only image2d_array_t, sampler_t, float4, float);\n" "\n" "#ifdef cl_khr_depth_images\n" "float __ovld __purefn read_imagef(read_only image2d_array_depth_t, sampler_t, float4, float);\n" "#endif // cl_khr_depth_images\n" "\n" "float4 __ovld __purefn read_imagef(read_only image3d_t, sampler_t, float4, float);\n" "int4 __ovld __purefn read_imagei(read_only image3d_t, sampler_t, float4, float);\n" "uint4 __ovld __purefn read_imageui(read_only image3d_t, sampler_t, float4, float);\n" "\n" "float4 __ovld __purefn read_imagef(read_only image1d_t, sampler_t, float, float, float);\n" "int4 __ovld __purefn read_imagei(read_only image1d_t, sampler_t, float, float, float);\n" "uint4 __ovld __purefn read_imageui(read_only image1d_t, sampler_t, float, float, float);\n" "\n" "float4 __ovld __purefn read_imagef(read_only image1d_array_t, sampler_t, float2, float, float);\n" "int4 __ovld __purefn read_imagei(read_only image1d_array_t, sampler_t, float2, float, float);\n" "uint4 __ovld __purefn read_imageui(read_only image1d_array_t, sampler_t, float2, float, float);\n" "\n" "float4 __ovld __purefn read_imagef(read_only image2d_t, sampler_t, 
float2, float2, float2);\n" "int4 __ovld __purefn read_imagei(read_only image2d_t, sampler_t, float2, float2, float2);\n" "uint4 __ovld __purefn read_imageui(read_only image2d_t, sampler_t, float2, float2, float2);\n" "\n" "#ifdef cl_khr_depth_images\n" "float __ovld __purefn read_imagef(read_only image2d_depth_t, sampler_t, float2, float2, float2);\n" "#endif // cl_khr_depth_images\n" "\n" "float4 __ovld __purefn read_imagef(read_only image2d_array_t, sampler_t, float4, float2, float2);\n" "int4 __ovld __purefn read_imagei(read_only image2d_array_t, sampler_t, float4, float2, float2);\n" "uint4 __ovld __purefn read_imageui(read_only image2d_array_t, sampler_t, float4, float2, float2);\n" "\n" "#ifdef cl_khr_depth_images\n" "float __ovld __purefn read_imagef(read_only image2d_array_depth_t, sampler_t, float4, float2, float2);\n" "#endif // cl_khr_depth_images\n" "\n" "float4 __ovld __purefn read_imagef(read_only image3d_t, sampler_t, float4, float4, float4);\n" "int4 __ovld __purefn read_imagei(read_only image3d_t, sampler_t, float4, float4, float4);\n" "uint4 __ovld __purefn read_imageui(read_only image3d_t, sampler_t, float4, float4, float4);\n" "\n" "#endif //cl_khr_mipmap_image\n" "#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)\n" "\n" "#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)\n" "\n" "/**\n" "* Sampler-less Image Access\n" "*/\n" "\n" "float4 __ovld __purefn read_imagef(read_only image1d_t, int);\n" "int4 __ovld __purefn read_imagei(read_only image1d_t, int);\n" "uint4 __ovld __purefn read_imageui(read_only image1d_t, int);\n" "\n" "float4 __ovld __purefn read_imagef(read_only image1d_buffer_t, int);\n" "int4 __ovld __purefn read_imagei(read_only image1d_buffer_t, int);\n" "uint4 __ovld __purefn read_imageui(read_only image1d_buffer_t, int);\n" "\n" "float4 __ovld __purefn read_imagef(read_only image1d_array_t, int2);\n" "int4 __ovld __purefn read_imagei(read_only image1d_array_t, 
int2);\n" "uint4 __ovld __purefn read_imageui(read_only image1d_array_t, int2);\n" "\n" "float4 __ovld __purefn read_imagef(read_only image2d_t, int2);\n" "int4 __ovld __purefn read_imagei(read_only image2d_t, int2);\n" "uint4 __ovld __purefn read_imageui(read_only image2d_t, int2);\n" "\n" "float4 __ovld __purefn read_imagef(read_only image2d_array_t, int4);\n" "int4 __ovld __purefn read_imagei(read_only image2d_array_t, int4);\n" "uint4 __ovld __purefn read_imageui(read_only image2d_array_t, int4);\n" "\n" "#ifdef cl_khr_depth_images\n" "float __ovld __purefn read_imagef(read_only image2d_depth_t, int2);\n" "float __ovld __purefn read_imagef(read_only image2d_array_depth_t, int4);\n" "#endif //cl_khr_depth_images\n" "\n" "float4 __ovld __purefn read_imagef(read_only image3d_t, int4);\n" "int4 __ovld __purefn read_imagei(read_only image3d_t, int4);\n" "uint4 __ovld __purefn read_imageui(read_only image3d_t, int4);\n" "\n" "#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)\n" "\n" "// Image read functions returning half4 type\n" "#ifdef cl_khr_fp16\n" "half4 __ovld __purefn read_imageh(read_only image1d_t, sampler_t, int);\n" "half4 __ovld __purefn read_imageh(read_only image1d_t, sampler_t, float);\n" "half4 __ovld __purefn read_imageh(read_only image2d_t, sampler_t, int2);\n" "half4 __ovld __purefn read_imageh(read_only image2d_t, sampler_t, float2);\n" "half4 __ovld __purefn read_imageh(read_only image3d_t, sampler_t, int4);\n" "half4 __ovld __purefn read_imageh(read_only image3d_t, sampler_t, float4);\n" "#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)\n" "half4 __ovld __purefn read_imageh(read_only image1d_array_t, sampler_t, int2);\n" "half4 __ovld __purefn read_imageh(read_only image1d_array_t, sampler_t, float2);\n" "half4 __ovld __purefn read_imageh(read_only image2d_array_t, sampler_t, int4);\n" "half4 __ovld __purefn read_imageh(read_only image2d_array_t, sampler_t, float4);\n" "/**\n" " 
* Sampler-less Image Access\n" " */\n" "half4 __ovld __purefn read_imageh(read_only image1d_t, int);\n" "half4 __ovld __purefn read_imageh(read_only image2d_t, int2);\n" "half4 __ovld __purefn read_imageh(read_only image3d_t, int4);\n" "half4 __ovld __purefn read_imageh(read_only image1d_array_t, int2);\n" "half4 __ovld __purefn read_imageh(read_only image2d_array_t, int4);\n" "half4 __ovld __purefn read_imageh(read_only image1d_buffer_t, int);\n" "#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)\n" "#endif //cl_khr_fp16\n" "\n" "// Image read functions for read_write images\n" "#if defined(__opencl_c_read_write_images)\n" "float4 __ovld __purefn read_imagef(read_write image1d_t, int);\n" "int4 __ovld __purefn read_imagei(read_write image1d_t, int);\n" "uint4 __ovld __purefn read_imageui(read_write image1d_t, int);\n" "\n" "float4 __ovld __purefn read_imagef(read_write image1d_buffer_t, int);\n" "int4 __ovld __purefn read_imagei(read_write image1d_buffer_t, int);\n" "uint4 __ovld __purefn read_imageui(read_write image1d_buffer_t, int);\n" "\n" "float4 __ovld __purefn read_imagef(read_write image1d_array_t, int2);\n" "int4 __ovld __purefn read_imagei(read_write image1d_array_t, int2);\n" "uint4 __ovld __purefn read_imageui(read_write image1d_array_t, int2);\n" "\n" "float4 __ovld __purefn read_imagef(read_write image2d_t, int2);\n" "int4 __ovld __purefn read_imagei(read_write image2d_t, int2);\n" "uint4 __ovld __purefn read_imageui(read_write image2d_t, int2);\n" "\n" "float4 __ovld __purefn read_imagef(read_write image2d_array_t, int4);\n" "int4 __ovld __purefn read_imagei(read_write image2d_array_t, int4);\n" "uint4 __ovld __purefn read_imageui(read_write image2d_array_t, int4);\n" "\n" "#ifdef cl_khr_3d_image_writes\n" "float4 __ovld __purefn read_imagef(read_write image3d_t, int4);\n" "int4 __ovld __purefn read_imagei(read_write image3d_t, int4);\n" "uint4 __ovld __purefn read_imageui(read_write image3d_t, int4);\n" "#endif 
// cl_khr_3d_image_writes\n" "\n" "#ifdef cl_khr_depth_images\n" "float __ovld __purefn read_imagef(read_write image2d_depth_t, int2);\n" "float __ovld __purefn read_imagef(read_write image2d_array_depth_t, int4);\n" "#endif //cl_khr_depth_images\n" "\n" "#if cl_khr_gl_msaa_sharing\n" "float4 __ovld __purefn read_imagef(read_write image2d_msaa_t, int2, int);\n" "int4 __ovld __purefn read_imagei(read_write image2d_msaa_t, int2, int);\n" "uint4 __ovld __purefn read_imageui(read_write image2d_msaa_t, int2, int);\n" "\n" "float4 __ovld __purefn read_imagef(read_write image2d_array_msaa_t, int4, int);\n" "int4 __ovld __purefn read_imagei(read_write image2d_array_msaa_t, int4, int);\n" "uint4 __ovld __purefn read_imageui(read_write image2d_array_msaa_t, int4, int);\n" "\n" "float __ovld __purefn read_imagef(read_write image2d_msaa_depth_t, int2, int);\n" "float __ovld __purefn read_imagef(read_write image2d_array_msaa_depth_t, int4, int);\n" "#endif //cl_khr_gl_msaa_sharing\n" "\n" "#ifdef cl_khr_mipmap_image\n" "float4 __ovld __purefn read_imagef(read_write image1d_t, sampler_t, float, float);\n" "int4 __ovld __purefn read_imagei(read_write image1d_t, sampler_t, float, float);\n" "uint4 __ovld __purefn read_imageui(read_write image1d_t, sampler_t, float, float);\n" "\n" "float4 __ovld __purefn read_imagef(read_write image1d_array_t, sampler_t, float2, float);\n" "int4 __ovld __purefn read_imagei(read_write image1d_array_t, sampler_t, float2, float);\n" "uint4 __ovld __purefn read_imageui(read_write image1d_array_t, sampler_t, float2, float);\n" "\n" "float4 __ovld __purefn read_imagef(read_write image2d_t, sampler_t, float2, float);\n" "int4 __ovld __purefn read_imagei(read_write image2d_t, sampler_t, float2, float);\n" "uint4 __ovld __purefn read_imageui(read_write image2d_t, sampler_t, float2, float);\n" "\n" "float __ovld __purefn read_imagef(read_write image2d_depth_t, sampler_t, float2, float);\n" "\n" "float4 __ovld __purefn read_imagef(read_write image2d_array_t, 
sampler_t, float4, float);\n" "int4 __ovld __purefn read_imagei(read_write image2d_array_t, sampler_t, float4, float);\n" "uint4 __ovld __purefn read_imageui(read_write image2d_array_t, sampler_t, float4, float);\n" "\n" "float __ovld __purefn read_imagef(read_write image2d_array_depth_t, sampler_t, float4, float);\n" "\n" "#ifdef cl_khr_3d_image_writes\n" "float4 __ovld __purefn read_imagef(read_write image3d_t, sampler_t, float4, float);\n" "int4 __ovld __purefn read_imagei(read_write image3d_t, sampler_t, float4, float);\n" "uint4 __ovld __purefn read_imageui(read_write image3d_t, sampler_t, float4, float);\n" "#endif // cl_khr_3d_image_writes\n" "\n" "float4 __ovld __purefn read_imagef(read_write image1d_t, sampler_t, float, float, float);\n" "int4 __ovld __purefn read_imagei(read_write image1d_t, sampler_t, float, float, float);\n" "uint4 __ovld __purefn read_imageui(read_write image1d_t, sampler_t, float, float, float);\n" "\n" "float4 __ovld __purefn read_imagef(read_write image1d_array_t, sampler_t, float2, float, float);\n" "int4 __ovld __purefn read_imagei(read_write image1d_array_t, sampler_t, float2, float, float);\n" "uint4 __ovld __purefn read_imageui(read_write image1d_array_t, sampler_t, float2, float, float);\n" "\n" "float4 __ovld __purefn read_imagef(read_write image2d_t, sampler_t, float2, float2, float2);\n" "int4 __ovld __purefn read_imagei(read_write image2d_t, sampler_t, float2, float2, float2);\n" "uint4 __ovld __purefn read_imageui(read_write image2d_t, sampler_t, float2, float2, float2);\n" "\n" "float __ovld __purefn read_imagef(read_write image2d_depth_t, sampler_t, float2, float2, float2);\n" "\n" "float4 __ovld __purefn read_imagef(read_write image2d_array_t, sampler_t, float4, float2, float2);\n" "int4 __ovld __purefn read_imagei(read_write image2d_array_t, sampler_t, float4, float2, float2);\n" "uint4 __ovld __purefn read_imageui(read_write image2d_array_t, sampler_t, float4, float2, float2);\n" "\n" "float __ovld __purefn 
read_imagef(read_write image2d_array_depth_t, sampler_t, float4, float2, float2);\n" "\n" "#ifdef cl_khr_3d_image_writes\n" "float4 __ovld __purefn read_imagef(read_write image3d_t, sampler_t, float4, float4, float4);\n" "int4 __ovld __purefn read_imagei(read_write image3d_t, sampler_t, float4, float4, float4);\n" "uint4 __ovld __purefn read_imageui(read_write image3d_t, sampler_t, float4, float4, float4);\n" "#endif // cl_khr_3d_image_writes\n" "\n" "#endif //cl_khr_mipmap_image\n" "\n" "// Image read functions returning half4 type\n" "#ifdef cl_khr_fp16\n" "half4 __ovld __purefn read_imageh(read_write image1d_t, int);\n" "half4 __ovld __purefn read_imageh(read_write image2d_t, int2);\n" "#ifdef cl_khr_3d_image_writes\n" "half4 __ovld __purefn read_imageh(read_write image3d_t, int4);\n" "#endif // cl_khr_3d_image_writes\n" "half4 __ovld __purefn read_imageh(read_write image1d_array_t, int2);\n" "half4 __ovld __purefn read_imageh(read_write image2d_array_t, int4);\n" "half4 __ovld __purefn read_imageh(read_write image1d_buffer_t, int);\n" "#endif //cl_khr_fp16\n" "#endif //defined(__opencl_c_read_write_images)\n" "\n" "/**\n" " * Write color value to location specified by coordinate\n" " * (coord.x, coord.y) in the 2D image object specified by image.\n" " * (coord.x, coord.y) are considered to be unnormalized coordinates\n" " * and must be in the range 0 ... image width - 1, and 0\n" " * ... image height - 1.\n" "\n" " * Write color value to location specified by coordinate\n" " * (coord.x, coord.y) in the 2D image object specified by index\n" " * (coord.z) of the 2D image array object image_array.\n" " * (coord.x, coord.y) are considered to be unnormalized\n" " * coordinates and must be in the range 0 ... image width\n" " * - 1.\n" " *\n" " * Write color value to location specified by coordinate\n" " * (coord) in the 1D image (buffer) object specified by image.\n" " * coord is considered to be unnormalized coordinates\n" " * and must be in the range 0 ... 
image width - 1.\n" " *\n" " * Write color value to location specified by coordinate\n" " * (coord.x) in the 1D image object specified by index\n" " * (coord.y) of the 1D image array object image_array.\n" " * x is considered to be unnormalized coordinates\n" " * and must be in the range 0 ... image width - 1.\n" " *\n" " * Write color value to location specified by coordinate\n" " * (coord.x, coord.y, coord.z) in the 3D image object specified by image.\n" " * coord.x & coord.y are considered to be unnormalized coordinates\n" " * and must be in the range 0 ... image width - 1, and 0\n" " * ... image height - 1.\n" " *\n" " * For mipmap images, use mip-level specified by lod.\n" " *\n" " * Appropriate data format conversion to the specified\n" " * image format is done before writing the color value.\n" " *\n" " * write_imagef can only be used with image objects\n" " * created with image_channel_data_type set to one of\n" " * the pre-defined packed formats or set to\n" " * CL_SNORM_INT8, CL_UNORM_INT8,\n" " * CL_SNORM_INT16, CL_UNORM_INT16,\n" " * CL_HALF_FLOAT or CL_FLOAT. Appropriate data\n" " * format conversion will be done to convert channel\n" " * data from a floating-point value to actual data format\n" " * in which the channels are stored.\n" " *\n" " * write_imagei can only be used with image objects\n" " * created with image_channel_data_type set to one of\n" " * the following values:\n" " * CL_SIGNED_INT8,\n" " * CL_SIGNED_INT16 and\n" " * CL_SIGNED_INT32.\n" " *\n" " * write_imageui can only be used with image objects\n" " * created with image_channel_data_type set to one of\n" " * the following values:\n" " * CL_UNSIGNED_INT8,\n" " * CL_UNSIGNED_INT16 and\n" " * CL_UNSIGNED_INT32.\n" " *\n" " * The behavior of write_imagef, write_imagei and\n" " * write_imageui for image objects created with\n" " * image_channel_data_type values not specified in\n" " * the description above or with (x, y) coordinate\n" " * values that are not in the range (0 ... 
image width -1,\n" " * 0 ... image height - 1), respectively, is undefined.\n" " */\n" "void __ovld write_imagef(write_only image2d_t, int2, float4);\n" "void __ovld write_imagei(write_only image2d_t, int2, int4);\n" "void __ovld write_imageui(write_only image2d_t, int2, uint4);\n" "\n" "void __ovld write_imagef(write_only image2d_array_t, int4, float4);\n" "void __ovld write_imagei(write_only image2d_array_t, int4, int4);\n" "void __ovld write_imageui(write_only image2d_array_t, int4, uint4);\n" "\n" "void __ovld write_imagef(write_only image1d_t, int, float4);\n" "void __ovld write_imagei(write_only image1d_t, int, int4);\n" "void __ovld write_imageui(write_only image1d_t, int, uint4);\n" "\n" "void __ovld write_imagef(write_only image1d_buffer_t, int, float4);\n" "void __ovld write_imagei(write_only image1d_buffer_t, int, int4);\n" "void __ovld write_imageui(write_only image1d_buffer_t, int, uint4);\n" "\n" "void __ovld write_imagef(write_only image1d_array_t, int2, float4);\n" "void __ovld write_imagei(write_only image1d_array_t, int2, int4);\n" "void __ovld write_imageui(write_only image1d_array_t, int2, uint4);\n" "\n" "#ifdef cl_khr_3d_image_writes\n" "void __ovld write_imagef(write_only image3d_t, int4, float4);\n" "void __ovld write_imagei(write_only image3d_t, int4, int4);\n" "void __ovld write_imageui(write_only image3d_t, int4, uint4);\n" "#endif\n" "\n" "#ifdef cl_khr_depth_images\n" "void __ovld write_imagef(write_only image2d_depth_t, int2, float);\n" "void __ovld write_imagef(write_only image2d_array_depth_t, int4, float);\n" "#endif //cl_khr_depth_images\n" "\n" "// OpenCL Extension v2.0 s9.18 - Mipmaps\n" "#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)\n" "#if defined(cl_khr_mipmap_image_writes)\n" "void __ovld write_imagef(write_only image1d_t, int, int, float4);\n" "void __ovld write_imagei(write_only image1d_t, int, int, int4);\n" "void __ovld write_imageui(write_only image1d_t, int, int, uint4);\n" "\n" "void 
__ovld write_imagef(write_only image1d_array_t, int2, int, float4);\n" "void __ovld write_imagei(write_only image1d_array_t, int2, int, int4);\n" "void __ovld write_imageui(write_only image1d_array_t, int2, int, uint4);\n" "\n" "void __ovld write_imagef(write_only image2d_t, int2, int, float4);\n" "void __ovld write_imagei(write_only image2d_t, int2, int, int4);\n" "void __ovld write_imageui(write_only image2d_t, int2, int, uint4);\n" "\n" "void __ovld write_imagef(write_only image2d_array_t, int4, int, float4);\n" "void __ovld write_imagei(write_only image2d_array_t, int4, int, int4);\n" "void __ovld write_imageui(write_only image2d_array_t, int4, int, uint4);\n" "\n" "void __ovld write_imagef(write_only image2d_depth_t, int2, int, float);\n" "void __ovld write_imagef(write_only image2d_array_depth_t, int4, int, float);\n" "\n" "#ifdef cl_khr_3d_image_writes\n" "void __ovld write_imagef(write_only image3d_t, int4, int, float4);\n" "void __ovld write_imagei(write_only image3d_t, int4, int, int4);\n" "void __ovld write_imageui(write_only image3d_t, int4, int, uint4);\n" "#endif //cl_khr_3d_image_writes\n" "\n" "#endif //defined(cl_khr_mipmap_image_writes)\n" "#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)\n" "\n" "// Image write functions for half4 type\n" "#ifdef cl_khr_fp16\n" "void __ovld write_imageh(write_only image1d_t, int, half4);\n" "void __ovld write_imageh(write_only image2d_t, int2, half4);\n" "#ifdef cl_khr_3d_image_writes\n" "void __ovld write_imageh(write_only image3d_t, int4, half4);\n" "#endif\n" "void __ovld write_imageh(write_only image1d_array_t, int2, half4);\n" "void __ovld write_imageh(write_only image2d_array_t, int4, half4);\n" "void __ovld write_imageh(write_only image1d_buffer_t, int, half4);\n" "#endif //cl_khr_fp16\n" "\n" "// Image write functions for read_write images\n" "#if defined(__opencl_c_read_write_images)\n" "void __ovld write_imagef(read_write image2d_t, int2, float4);\n" "void __ovld 
write_imagei(read_write image2d_t, int2, int4);\n" "void __ovld write_imageui(read_write image2d_t, int2, uint4);\n" "\n" "void __ovld write_imagef(read_write image2d_array_t, int4, float4);\n" "void __ovld write_imagei(read_write image2d_array_t, int4, int4);\n" "void __ovld write_imageui(read_write image2d_array_t, int4, uint4);\n" "\n" "void __ovld write_imagef(read_write image1d_t, int, float4);\n" "void __ovld write_imagei(read_write image1d_t, int, int4);\n" "void __ovld write_imageui(read_write image1d_t, int, uint4);\n" "\n" "void __ovld write_imagef(read_write image1d_buffer_t, int, float4);\n" "void __ovld write_imagei(read_write image1d_buffer_t, int, int4);\n" "void __ovld write_imageui(read_write image1d_buffer_t, int, uint4);\n" "\n" "void __ovld write_imagef(read_write image1d_array_t, int2, float4);\n" "void __ovld write_imagei(read_write image1d_array_t, int2, int4);\n" "void __ovld write_imageui(read_write image1d_array_t, int2, uint4);\n" "\n" "#ifdef cl_khr_3d_image_writes\n" "void __ovld write_imagef(read_write image3d_t, int4, float4);\n" "void __ovld write_imagei(read_write image3d_t, int4, int4);\n" "void __ovld write_imageui(read_write image3d_t, int4, uint4);\n" "#endif\n" "\n" "#ifdef cl_khr_depth_images\n" "void __ovld write_imagef(read_write image2d_depth_t, int2, float);\n" "void __ovld write_imagef(read_write image2d_array_depth_t, int4, float);\n" "#endif //cl_khr_depth_images\n" "\n" "#if defined(cl_khr_mipmap_image_writes)\n" "void __ovld write_imagef(read_write image1d_t, int, int, float4);\n" "void __ovld write_imagei(read_write image1d_t, int, int, int4);\n" "void __ovld write_imageui(read_write image1d_t, int, int, uint4);\n" "\n" "void __ovld write_imagef(read_write image1d_array_t, int2, int, float4);\n" "void __ovld write_imagei(read_write image1d_array_t, int2, int, int4);\n" "void __ovld write_imageui(read_write image1d_array_t, int2, int, uint4);\n" "\n" "void __ovld write_imagef(read_write image2d_t, int2, int, 
float4);\n" "void __ovld write_imagei(read_write image2d_t, int2, int, int4);\n" "void __ovld write_imageui(read_write image2d_t, int2, int, uint4);\n" "\n" "void __ovld write_imagef(read_write image2d_array_t, int4, int, float4);\n" "void __ovld write_imagei(read_write image2d_array_t, int4, int, int4);\n" "void __ovld write_imageui(read_write image2d_array_t, int4, int, uint4);\n" "\n" "void __ovld write_imagef(read_write image2d_depth_t, int2, int, float);\n" "void __ovld write_imagef(read_write image2d_array_depth_t, int4, int, float);\n" "\n" "#ifdef cl_khr_3d_image_writes\n" "void __ovld write_imagef(read_write image3d_t, int4, int, float4);\n" "void __ovld write_imagei(read_write image3d_t, int4, int, int4);\n" "void __ovld write_imageui(read_write image3d_t, int4, int, uint4);\n" "#endif //cl_khr_3d_image_writes\n" "\n" "#endif //cl_khr_mipmap_image_writes\n" "\n" "// Image write functions for half4 type\n" "#ifdef cl_khr_fp16\n" "void __ovld write_imageh(read_write image1d_t, int, half4);\n" "void __ovld write_imageh(read_write image2d_t, int2, half4);\n" "#ifdef cl_khr_3d_image_writes\n" "void __ovld write_imageh(read_write image3d_t, int4, half4);\n" "#endif\n" "void __ovld write_imageh(read_write image1d_array_t, int2, half4);\n" "void __ovld write_imageh(read_write image2d_array_t, int4, half4);\n" "void __ovld write_imageh(read_write image1d_buffer_t, int, half4);\n" "#endif //cl_khr_fp16\n" "#endif //defined(__opencl_c_read_write_images)\n" "\n" "// Note: In OpenCL v1.0/1.1/1.2, image argument of image query builtin functions does not have\n" "// access qualifier, which by default assume read_only access qualifier. 
Image query builtin\n" "// functions with write_only image argument should also be declared.\n" "\n" "/**\n" " * Return the image width in pixels.\n" " *\n" " */\n" "int __ovld __cnfn get_image_width(read_only image1d_t);\n" "int __ovld __cnfn get_image_width(read_only image1d_buffer_t);\n" "int __ovld __cnfn get_image_width(read_only image2d_t);\n" "int __ovld __cnfn get_image_width(read_only image3d_t);\n" "int __ovld __cnfn get_image_width(read_only image1d_array_t);\n" "int __ovld __cnfn get_image_width(read_only image2d_array_t);\n" "#ifdef cl_khr_depth_images\n" "int __ovld __cnfn get_image_width(read_only image2d_depth_t);\n" "int __ovld __cnfn get_image_width(read_only image2d_array_depth_t);\n" "#endif //cl_khr_depth_images\n" "#if defined(cl_khr_gl_msaa_sharing)\n" "int __ovld __cnfn get_image_width(read_only image2d_msaa_t);\n" "int __ovld __cnfn get_image_width(read_only image2d_msaa_depth_t);\n" "int __ovld __cnfn get_image_width(read_only image2d_array_msaa_t);\n" "int __ovld __cnfn get_image_width(read_only image2d_array_msaa_depth_t);\n" "#endif //cl_khr_gl_msaa_sharing\n" "\n" "int __ovld __cnfn get_image_width(write_only image1d_t);\n" "int __ovld __cnfn get_image_width(write_only image1d_buffer_t);\n" "int __ovld __cnfn get_image_width(write_only image2d_t);\n" "#ifdef cl_khr_3d_image_writes\n" "int __ovld __cnfn get_image_width(write_only image3d_t);\n" "#endif\n" "int __ovld __cnfn get_image_width(write_only image1d_array_t);\n" "int __ovld __cnfn get_image_width(write_only image2d_array_t);\n" "#ifdef cl_khr_depth_images\n" "int __ovld __cnfn get_image_width(write_only image2d_depth_t);\n" "int __ovld __cnfn get_image_width(write_only image2d_array_depth_t);\n" "#endif //cl_khr_depth_images\n" "#if defined(cl_khr_gl_msaa_sharing)\n" "int __ovld __cnfn get_image_width(write_only image2d_msaa_t);\n" "int __ovld __cnfn get_image_width(write_only image2d_msaa_depth_t);\n" "int __ovld __cnfn get_image_width(write_only image2d_array_msaa_t);\n" "int 
__ovld __cnfn get_image_width(write_only image2d_array_msaa_depth_t);\n" "#endif //cl_khr_gl_msaa_sharing\n" "\n" "#if defined(__opencl_c_read_write_images)\n" "int __ovld __cnfn get_image_width(read_write image1d_t);\n" "int __ovld __cnfn get_image_width(read_write image1d_buffer_t);\n" "int __ovld __cnfn get_image_width(read_write image2d_t);\n" "#ifdef cl_khr_3d_image_writes\n" "int __ovld __cnfn get_image_width(read_write image3d_t);\n" "#endif // cl_khr_3d_image_writes\n" "int __ovld __cnfn get_image_width(read_write image1d_array_t);\n" "int __ovld __cnfn get_image_width(read_write image2d_array_t);\n" "#ifdef cl_khr_depth_images\n" "int __ovld __cnfn get_image_width(read_write image2d_depth_t);\n" "int __ovld __cnfn get_image_width(read_write image2d_array_depth_t);\n" "#endif //cl_khr_depth_images\n" "#if defined(cl_khr_gl_msaa_sharing)\n" "int __ovld __cnfn get_image_width(read_write image2d_msaa_t);\n" "int __ovld __cnfn get_image_width(read_write image2d_msaa_depth_t);\n" "int __ovld __cnfn get_image_width(read_write image2d_array_msaa_t);\n" "int __ovld __cnfn get_image_width(read_write image2d_array_msaa_depth_t);\n" "#endif //cl_khr_gl_msaa_sharing\n" "#endif //defined(__opencl_c_read_write_images)\n" "\n" "/**\n" " * Return the image height in pixels.\n" " */\n" "int __ovld __cnfn get_image_height(read_only image2d_t);\n" "int __ovld __cnfn get_image_height(read_only image3d_t);\n" "int __ovld __cnfn get_image_height(read_only image2d_array_t);\n" "#ifdef cl_khr_depth_images\n" "int __ovld __cnfn get_image_height(read_only image2d_depth_t);\n" "int __ovld __cnfn get_image_height(read_only image2d_array_depth_t);\n" "#endif //cl_khr_depth_images\n" "#if defined(cl_khr_gl_msaa_sharing)\n" "int __ovld __cnfn get_image_height(read_only image2d_msaa_t);\n" "int __ovld __cnfn get_image_height(read_only image2d_msaa_depth_t);\n" "int __ovld __cnfn get_image_height(read_only image2d_array_msaa_t);\n" "int __ovld __cnfn get_image_height(read_only 
image2d_array_msaa_depth_t);\n" "#endif //cl_khr_gl_msaa_sharing\n" "\n" "int __ovld __cnfn get_image_height(write_only image2d_t);\n" "#ifdef cl_khr_3d_image_writes\n" "int __ovld __cnfn get_image_height(write_only image3d_t);\n" "#endif\n" "int __ovld __cnfn get_image_height(write_only image2d_array_t);\n" "#ifdef cl_khr_depth_images\n" "int __ovld __cnfn get_image_height(write_only image2d_depth_t);\n" "int __ovld __cnfn get_image_height(write_only image2d_array_depth_t);\n" "#endif //cl_khr_depth_images\n" "#if defined(cl_khr_gl_msaa_sharing)\n" "int __ovld __cnfn get_image_height(write_only image2d_msaa_t);\n" "int __ovld __cnfn get_image_height(write_only image2d_msaa_depth_t);\n" "int __ovld __cnfn get_image_height(write_only image2d_array_msaa_t);\n" "int __ovld __cnfn get_image_height(write_only image2d_array_msaa_depth_t);\n" "#endif //cl_khr_gl_msaa_sharing\n" "\n" "#if defined(__opencl_c_read_write_images)\n" "int __ovld __cnfn get_image_height(read_write image2d_t);\n" "#ifdef cl_khr_3d_image_writes\n" "int __ovld __cnfn get_image_height(read_write image3d_t);\n" "#endif // cl_khr_3d_image_writes\n" "int __ovld __cnfn get_image_height(read_write image2d_array_t);\n" "#ifdef cl_khr_depth_images\n" "int __ovld __cnfn get_image_height(read_write image2d_depth_t);\n" "int __ovld __cnfn get_image_height(read_write image2d_array_depth_t);\n" "#endif //cl_khr_depth_images\n" "#if defined(cl_khr_gl_msaa_sharing)\n" "int __ovld __cnfn get_image_height(read_write image2d_msaa_t);\n" "int __ovld __cnfn get_image_height(read_write image2d_msaa_depth_t);\n" "int __ovld __cnfn get_image_height(read_write image2d_array_msaa_t);\n" "int __ovld __cnfn get_image_height(read_write image2d_array_msaa_depth_t);\n" "#endif //cl_khr_gl_msaa_sharing\n" "#endif //defined(__opencl_c_read_write_images)\n" "\n" "/**\n" " * Return the image depth in pixels.\n" " */\n" "int __ovld __cnfn get_image_depth(read_only image3d_t);\n" "\n" "#ifdef cl_khr_3d_image_writes\n" "int __ovld 
__cnfn get_image_depth(write_only image3d_t);\n" "\n" "#if defined(__opencl_c_read_write_images)\n" "int __ovld __cnfn get_image_depth(read_write image3d_t);\n" "#endif //defined(__opencl_c_read_write_images)\n" "#endif // cl_khr_3d_image_writes\n" "\n" "// OpenCL Extension v2.0 s9.18 - Mipmaps\n" "#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)\n" "#ifdef cl_khr_mipmap_image\n" "/**\n" " * Return the image miplevels.\n" " */\n" "\n" "int __ovld get_image_num_mip_levels(read_only image1d_t);\n" "int __ovld get_image_num_mip_levels(read_only image2d_t);\n" "int __ovld get_image_num_mip_levels(read_only image3d_t);\n" "\n" "int __ovld get_image_num_mip_levels(write_only image1d_t);\n" "int __ovld get_image_num_mip_levels(write_only image2d_t);\n" "#ifdef cl_khr_3d_image_writes\n" "int __ovld get_image_num_mip_levels(write_only image3d_t);\n" "#endif\n" "\n" "#if defined(__opencl_c_read_write_images)\n" "int __ovld get_image_num_mip_levels(read_write image1d_t);\n" "int __ovld get_image_num_mip_levels(read_write image2d_t);\n" "#ifdef cl_khr_3d_image_writes\n" "int __ovld get_image_num_mip_levels(read_write image3d_t);\n" "#endif // cl_khr_3d_image_writes\n" "#endif //defined(__opencl_c_read_write_images)\n" "\n" "int __ovld get_image_num_mip_levels(read_only image1d_array_t);\n" "int __ovld get_image_num_mip_levels(read_only image2d_array_t);\n" "#ifdef cl_khr_depth_images\n" "int __ovld get_image_num_mip_levels(read_only image2d_array_depth_t);\n" "int __ovld get_image_num_mip_levels(read_only image2d_depth_t);\n" "#endif // cl_khr_depth_images\n" "\n" "int __ovld get_image_num_mip_levels(write_only image1d_array_t);\n" "int __ovld get_image_num_mip_levels(write_only image2d_array_t);\n" "#ifdef cl_khr_depth_images\n" "int __ovld get_image_num_mip_levels(write_only image2d_array_depth_t);\n" "int __ovld get_image_num_mip_levels(write_only image2d_depth_t);\n" "#endif // cl_khr_depth_images\n" "\n" "#if 
defined(__opencl_c_read_write_images)\n" "int __ovld get_image_num_mip_levels(read_write image1d_array_t);\n" "int __ovld get_image_num_mip_levels(read_write image2d_array_t);\n" "#ifdef cl_khr_depth_images\n" "int __ovld get_image_num_mip_levels(read_write image2d_array_depth_t);\n" "int __ovld get_image_num_mip_levels(read_write image2d_depth_t);\n" "#endif // cl_khr_depth_images\n" "#endif //defined(__opencl_c_read_write_images)\n" "\n" "#endif //cl_khr_mipmap_image\n" "#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)\n" "\n" "/**\n" " * Return the channel data type. Valid values are:\n" " * CLK_SNORM_INT8\n" " * CLK_SNORM_INT16\n" " * CLK_UNORM_INT8\n" " * CLK_UNORM_INT16\n" " * CLK_UNORM_SHORT_565\n" " * CLK_UNORM_SHORT_555\n" " * CLK_UNORM_SHORT_101010\n" " * CLK_SIGNED_INT8\n" " * CLK_SIGNED_INT16\n" " * CLK_SIGNED_INT32\n" " * CLK_UNSIGNED_INT8\n" " * CLK_UNSIGNED_INT16\n" " * CLK_UNSIGNED_INT32\n" " * CLK_HALF_FLOAT\n" " * CLK_FLOAT\n" " */\n" "\n" "int __ovld __cnfn get_image_channel_data_type(read_only image1d_t);\n" "int __ovld __cnfn get_image_channel_data_type(read_only image1d_buffer_t);\n" "int __ovld __cnfn get_image_channel_data_type(read_only image2d_t);\n" "int __ovld __cnfn get_image_channel_data_type(read_only image3d_t);\n" "int __ovld __cnfn get_image_channel_data_type(read_only image1d_array_t);\n" "int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_t);\n" "#ifdef cl_khr_depth_images\n" "int __ovld __cnfn get_image_channel_data_type(read_only image2d_depth_t);\n" "int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_depth_t);\n" "#endif //cl_khr_depth_images\n" "#if defined(cl_khr_gl_msaa_sharing)\n" "int __ovld __cnfn get_image_channel_data_type(read_only image2d_msaa_t);\n" "int __ovld __cnfn get_image_channel_data_type(read_only image2d_msaa_depth_t);\n" "int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_msaa_t);\n" "int __ovld __cnfn 
get_image_channel_data_type(read_only image2d_array_msaa_depth_t);\n" "#endif //cl_khr_gl_msaa_sharing\n" "\n" "int __ovld __cnfn get_image_channel_data_type(write_only image1d_t);\n" "int __ovld __cnfn get_image_channel_data_type(write_only image1d_buffer_t);\n" "int __ovld __cnfn get_image_channel_data_type(write_only image2d_t);\n" "#ifdef cl_khr_3d_image_writes\n" "int __ovld __cnfn get_image_channel_data_type(write_only image3d_t);\n" "#endif\n" "int __ovld __cnfn get_image_channel_data_type(write_only image1d_array_t);\n" "int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_t);\n" "#ifdef cl_khr_depth_images\n" "int __ovld __cnfn get_image_channel_data_type(write_only image2d_depth_t);\n" "int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_depth_t);\n" "#endif //cl_khr_depth_images\n" "#if defined(cl_khr_gl_msaa_sharing)\n" "int __ovld __cnfn get_image_channel_data_type(write_only image2d_msaa_t);\n" "int __ovld __cnfn get_image_channel_data_type(write_only image2d_msaa_depth_t);\n" "int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_msaa_t);\n" "int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_msaa_depth_t);\n" "#endif //cl_khr_gl_msaa_sharing\n" "\n" "#if defined(__opencl_c_read_write_images)\n" "int __ovld __cnfn get_image_channel_data_type(read_write image1d_t);\n" "int __ovld __cnfn get_image_channel_data_type(read_write image1d_buffer_t);\n" "int __ovld __cnfn get_image_channel_data_type(read_write image2d_t);\n" "#ifdef cl_khr_3d_image_writes\n" "int __ovld __cnfn get_image_channel_data_type(read_write image3d_t);\n" "#endif // cl_khr_3d_image_writes\n" "int __ovld __cnfn get_image_channel_data_type(read_write image1d_array_t);\n" "int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_t);\n" "#ifdef cl_khr_depth_images\n" "int __ovld __cnfn get_image_channel_data_type(read_write image2d_depth_t);\n" "int __ovld __cnfn get_image_channel_data_type(read_write 
image2d_array_depth_t);\n" "#endif //cl_khr_depth_images\n" "#if defined(cl_khr_gl_msaa_sharing)\n" "int __ovld __cnfn get_image_channel_data_type(read_write image2d_msaa_t);\n" "int __ovld __cnfn get_image_channel_data_type(read_write image2d_msaa_depth_t);\n" "int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_t);\n" "int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_depth_t);\n" "#endif //cl_khr_gl_msaa_sharing\n" "#endif //defined(__opencl_c_read_write_images)\n" "\n" "/**\n" " * Return the image channel order. Valid values are:\n" " * CLK_A\n" " * CLK_R\n" " * CLK_Rx\n" " * CLK_RG\n" " * CLK_RGx\n" " * CLK_RA\n" " * CLK_RGB\n" " * CLK_RGBx\n" " * CLK_RGBA\n" " * CLK_ARGB\n" " * CLK_BGRA\n" " * CLK_INTENSITY\n" " * CLK_LUMINANCE\n" " */\n" "\n" "int __ovld __cnfn get_image_channel_order(read_only image1d_t);\n" "int __ovld __cnfn get_image_channel_order(read_only image1d_buffer_t);\n" "int __ovld __cnfn get_image_channel_order(read_only image2d_t);\n" "int __ovld __cnfn get_image_channel_order(read_only image3d_t);\n" "int __ovld __cnfn get_image_channel_order(read_only image1d_array_t);\n" "int __ovld __cnfn get_image_channel_order(read_only image2d_array_t);\n" "#ifdef cl_khr_depth_images\n" "int __ovld __cnfn get_image_channel_order(read_only image2d_depth_t);\n" "int __ovld __cnfn get_image_channel_order(read_only image2d_array_depth_t);\n" "#endif //cl_khr_depth_images\n" "#if defined(cl_khr_gl_msaa_sharing)\n" "int __ovld __cnfn get_image_channel_order(read_only image2d_msaa_t);\n" "int __ovld __cnfn get_image_channel_order(read_only image2d_msaa_depth_t);\n" "int __ovld __cnfn get_image_channel_order(read_only image2d_array_msaa_t);\n" "int __ovld __cnfn get_image_channel_order(read_only image2d_array_msaa_depth_t);\n" "#endif //cl_khr_gl_msaa_sharing\n" "\n" "int __ovld __cnfn get_image_channel_order(write_only image1d_t);\n" "int __ovld __cnfn get_image_channel_order(write_only image1d_buffer_t);\n" 
"int __ovld __cnfn get_image_channel_order(write_only image2d_t);\n" "#ifdef cl_khr_3d_image_writes\n" "int __ovld __cnfn get_image_channel_order(write_only image3d_t);\n" "#endif\n" "int __ovld __cnfn get_image_channel_order(write_only image1d_array_t);\n" "int __ovld __cnfn get_image_channel_order(write_only image2d_array_t);\n" "#ifdef cl_khr_depth_images\n" "int __ovld __cnfn get_image_channel_order(write_only image2d_depth_t);\n" "int __ovld __cnfn get_image_channel_order(write_only image2d_array_depth_t);\n" "#endif //cl_khr_depth_images\n" "#if defined(cl_khr_gl_msaa_sharing)\n" "int __ovld __cnfn get_image_channel_order(write_only image2d_msaa_t);\n" "int __ovld __cnfn get_image_channel_order(write_only image2d_msaa_depth_t);\n" "int __ovld __cnfn get_image_channel_order(write_only image2d_array_msaa_t);\n" "int __ovld __cnfn get_image_channel_order(write_only image2d_array_msaa_depth_t);\n" "#endif //cl_khr_gl_msaa_sharing\n" "\n" "#if defined(__opencl_c_read_write_images)\n" "int __ovld __cnfn get_image_channel_order(read_write image1d_t);\n" "int __ovld __cnfn get_image_channel_order(read_write image1d_buffer_t);\n" "int __ovld __cnfn get_image_channel_order(read_write image2d_t);\n" "#ifdef cl_khr_3d_image_writes\n" "int __ovld __cnfn get_image_channel_order(read_write image3d_t);\n" "#endif // cl_khr_3d_image_writes\n" "int __ovld __cnfn get_image_channel_order(read_write image1d_array_t);\n" "int __ovld __cnfn get_image_channel_order(read_write image2d_array_t);\n" "#ifdef cl_khr_depth_images\n" "int __ovld __cnfn get_image_channel_order(read_write image2d_depth_t);\n" "int __ovld __cnfn get_image_channel_order(read_write image2d_array_depth_t);\n" "#endif //cl_khr_depth_images\n" "#if defined(cl_khr_gl_msaa_sharing)\n" "int __ovld __cnfn get_image_channel_order(read_write image2d_msaa_t);\n" "int __ovld __cnfn get_image_channel_order(read_write image2d_msaa_depth_t);\n" "int __ovld __cnfn get_image_channel_order(read_write image2d_array_msaa_t);\n" 
"int __ovld __cnfn get_image_channel_order(read_write image2d_array_msaa_depth_t);\n" "#endif //cl_khr_gl_msaa_sharing\n" "#endif //defined(__opencl_c_read_write_images)\n" "\n" "/**\n" " * Return the 2D image width and height as an int2\n" " * type. The width is returned in the x component, and\n" " * the height in the y component.\n" " */\n" "int2 __ovld __cnfn get_image_dim(read_only image2d_t);\n" "int2 __ovld __cnfn get_image_dim(read_only image2d_array_t);\n" "#ifdef cl_khr_depth_images\n" "int2 __ovld __cnfn get_image_dim(read_only image2d_array_depth_t);\n" "int2 __ovld __cnfn get_image_dim(read_only image2d_depth_t);\n" "#endif //cl_khr_depth_images\n" "#if defined(cl_khr_gl_msaa_sharing)\n" "int2 __ovld __cnfn get_image_dim(read_only image2d_msaa_t);\n" "int2 __ovld __cnfn get_image_dim(read_only image2d_msaa_depth_t);\n" "int2 __ovld __cnfn get_image_dim(read_only image2d_array_msaa_t);\n" "int2 __ovld __cnfn get_image_dim(read_only image2d_array_msaa_depth_t);\n" "#endif //cl_khr_gl_msaa_sharing\n" "\n" "int2 __ovld __cnfn get_image_dim(write_only image2d_t);\n" "int2 __ovld __cnfn get_image_dim(write_only image2d_array_t);\n" "#ifdef cl_khr_depth_images\n" "int2 __ovld __cnfn get_image_dim(write_only image2d_array_depth_t);\n" "int2 __ovld __cnfn get_image_dim(write_only image2d_depth_t);\n" "#endif //cl_khr_depth_images\n" "#if defined(cl_khr_gl_msaa_sharing)\n" "int2 __ovld __cnfn get_image_dim(write_only image2d_msaa_t);\n" "int2 __ovld __cnfn get_image_dim(write_only image2d_msaa_depth_t);\n" "int2 __ovld __cnfn get_image_dim(write_only image2d_array_msaa_t);\n" "int2 __ovld __cnfn get_image_dim(write_only image2d_array_msaa_depth_t);\n" "#endif //cl_khr_gl_msaa_sharing\n" "\n" "#if defined(__opencl_c_read_write_images)\n" "int2 __ovld __cnfn get_image_dim(read_write image2d_t);\n" "int2 __ovld __cnfn get_image_dim(read_write image2d_array_t);\n" "#ifdef cl_khr_depth_images\n" "int2 __ovld __cnfn get_image_dim(read_write image2d_array_depth_t);\n" 
"int2 __ovld __cnfn get_image_dim(read_write image2d_depth_t);\n" "#endif //cl_khr_depth_images\n" "#if defined(cl_khr_gl_msaa_sharing)\n" "int2 __ovld __cnfn get_image_dim(read_write image2d_msaa_t);\n" "int2 __ovld __cnfn get_image_dim(read_write image2d_msaa_depth_t);\n" "int2 __ovld __cnfn get_image_dim(read_write image2d_array_msaa_t);\n" "int2 __ovld __cnfn get_image_dim(read_write image2d_array_msaa_depth_t);\n" "#endif //cl_khr_gl_msaa_sharing\n" "#endif //defined(__opencl_c_read_write_images)\n" "\n" "/**\n" " * Return the 3D image width, height, and depth as an\n" " * int4 type. The width is returned in the x\n" " * component, height in the y component, depth in the z\n" " * component and the w component is 0.\n" " */\n" "int4 __ovld __cnfn get_image_dim(read_only image3d_t);\n" "#ifdef cl_khr_3d_image_writes\n" "int4 __ovld __cnfn get_image_dim(write_only image3d_t);\n" "#if defined(__opencl_c_read_write_images)\n" "int4 __ovld __cnfn get_image_dim(read_write image3d_t);\n" "#endif //defined(__opencl_c_read_write_images)\n" "#endif // cl_khr_3d_image_writes\n" "\n" "/**\n" " * Return the image array size.\n" " */\n" "\n" "size_t __ovld __cnfn get_image_array_size(read_only image1d_array_t);\n" "size_t __ovld __cnfn get_image_array_size(read_only image2d_array_t);\n" "#ifdef cl_khr_depth_images\n" "size_t __ovld __cnfn get_image_array_size(read_only image2d_array_depth_t);\n" "#endif //cl_khr_depth_images\n" "#if defined(cl_khr_gl_msaa_sharing)\n" "size_t __ovld __cnfn get_image_array_size(read_only image2d_array_msaa_t);\n" "size_t __ovld __cnfn get_image_array_size(read_only image2d_array_msaa_depth_t);\n" "#endif //cl_khr_gl_msaa_sharing\n" "\n" "size_t __ovld __cnfn get_image_array_size(write_only image1d_array_t);\n" "size_t __ovld __cnfn get_image_array_size(write_only image2d_array_t);\n" "#ifdef cl_khr_depth_images\n" "size_t __ovld __cnfn get_image_array_size(write_only image2d_array_depth_t);\n" "#endif //cl_khr_depth_images\n" "#if 
defined(cl_khr_gl_msaa_sharing)\n" "size_t __ovld __cnfn get_image_array_size(write_only image2d_array_msaa_t);\n" "size_t __ovld __cnfn get_image_array_size(write_only image2d_array_msaa_depth_t);\n" "#endif //cl_khr_gl_msaa_sharing\n" "\n" "#if defined(__opencl_c_read_write_images)\n" "size_t __ovld __cnfn get_image_array_size(read_write image1d_array_t);\n" "size_t __ovld __cnfn get_image_array_size(read_write image2d_array_t);\n" "#ifdef cl_khr_depth_images\n" "size_t __ovld __cnfn get_image_array_size(read_write image2d_array_depth_t);\n" "#endif //cl_khr_depth_images\n" "#if defined(cl_khr_gl_msaa_sharing)\n" "size_t __ovld __cnfn get_image_array_size(read_write image2d_array_msaa_t);\n" "size_t __ovld __cnfn get_image_array_size(read_write image2d_array_msaa_depth_t);\n" "#endif //cl_khr_gl_msaa_sharing\n" "#endif //defined(__opencl_c_read_write_images)\n" "\n" "/**\n" "* Return the number of samples associated with image\n" "*/\n" "#if defined(cl_khr_gl_msaa_sharing)\n" "int __ovld __cnfn get_image_num_samples(read_only image2d_msaa_t);\n" "int __ovld __cnfn get_image_num_samples(read_only image2d_msaa_depth_t);\n" "int __ovld __cnfn get_image_num_samples(read_only image2d_array_msaa_t);\n" "int __ovld __cnfn get_image_num_samples(read_only image2d_array_msaa_depth_t);\n" "\n" "int __ovld __cnfn get_image_num_samples(write_only image2d_msaa_t);\n" "int __ovld __cnfn get_image_num_samples(write_only image2d_msaa_depth_t);\n" "int __ovld __cnfn get_image_num_samples(write_only image2d_array_msaa_t);\n" "int __ovld __cnfn get_image_num_samples(write_only image2d_array_msaa_depth_t);\n" "\n" "#if defined(__opencl_c_read_write_images)\n" "int __ovld __cnfn get_image_num_samples(read_write image2d_msaa_t);\n" "int __ovld __cnfn get_image_num_samples(read_write image2d_msaa_depth_t);\n" "int __ovld __cnfn get_image_num_samples(read_write image2d_array_msaa_t);\n" "int __ovld __cnfn get_image_num_samples(read_write image2d_array_msaa_depth_t);\n" "#endif 
//defined(__opencl_c_read_write_images)\n" "#endif\n" "\n" "// OpenCL v2.0 s6.13.15 - Work-group Functions\n" "\n" "#if defined(__opencl_c_work_group_collective_functions)\n" "int __ovld __conv work_group_all(int predicate);\n" "int __ovld __conv work_group_any(int predicate);\n" "\n" "#ifdef cl_khr_fp16\n" "half __ovld __conv work_group_broadcast(half, size_t local_id);\n" "half __ovld __conv work_group_broadcast(half, size_t, size_t);\n" "half __ovld __conv work_group_broadcast(half, size_t, size_t, size_t);\n" "#endif\n" "int __ovld __conv work_group_broadcast(int, size_t local_id);\n" "int __ovld __conv work_group_broadcast(int, size_t, size_t);\n" "int __ovld __conv work_group_broadcast(int, size_t, size_t, size_t);\n" "uint __ovld __conv work_group_broadcast(uint, size_t local_id);\n" "uint __ovld __conv work_group_broadcast(uint, size_t, size_t);\n" "uint __ovld __conv work_group_broadcast(uint, size_t, size_t, size_t);\n" "long __ovld __conv work_group_broadcast(long, size_t local_id);\n" "long __ovld __conv work_group_broadcast(long, size_t, size_t);\n" "long __ovld __conv work_group_broadcast(long, size_t, size_t, size_t);\n" "ulong __ovld __conv work_group_broadcast(ulong, size_t local_id);\n" "ulong __ovld __conv work_group_broadcast(ulong, size_t, size_t);\n" "ulong __ovld __conv work_group_broadcast(ulong, size_t, size_t, size_t);\n" "float __ovld __conv work_group_broadcast(float, size_t local_id);\n" "float __ovld __conv work_group_broadcast(float, size_t, size_t);\n" "float __ovld __conv work_group_broadcast(float, size_t, size_t, size_t);\n" "#ifdef cl_khr_fp64\n" "double __ovld __conv work_group_broadcast(double, size_t local_id);\n" "double __ovld __conv work_group_broadcast(double, size_t, size_t);\n" "double __ovld __conv work_group_broadcast(double, size_t, size_t, size_t);\n" "#endif //cl_khr_fp64\n" "\n" "#ifdef cl_khr_fp16\n" "half __ovld __conv work_group_reduce_add(half);\n" "half __ovld __conv work_group_reduce_min(half);\n" "half 
__ovld __conv work_group_reduce_max(half);\n" "half __ovld __conv work_group_scan_exclusive_add(half);\n" "half __ovld __conv work_group_scan_exclusive_min(half);\n" "half __ovld __conv work_group_scan_exclusive_max(half);\n" "half __ovld __conv work_group_scan_inclusive_add(half);\n" "half __ovld __conv work_group_scan_inclusive_min(half);\n" "half __ovld __conv work_group_scan_inclusive_max(half);\n" "#endif\n" "int __ovld __conv work_group_reduce_add(int);\n" "int __ovld __conv work_group_reduce_min(int);\n" "int __ovld __conv work_group_reduce_max(int);\n" "int __ovld __conv work_group_scan_exclusive_add(int);\n" "int __ovld __conv work_group_scan_exclusive_min(int);\n" "int __ovld __conv work_group_scan_exclusive_max(int);\n" "int __ovld __conv work_group_scan_inclusive_add(int);\n" "int __ovld __conv work_group_scan_inclusive_min(int);\n" "int __ovld __conv work_group_scan_inclusive_max(int);\n" "uint __ovld __conv work_group_reduce_add(uint);\n" "uint __ovld __conv work_group_reduce_min(uint);\n" "uint __ovld __conv work_group_reduce_max(uint);\n" "uint __ovld __conv work_group_scan_exclusive_add(uint);\n" "uint __ovld __conv work_group_scan_exclusive_min(uint);\n" "uint __ovld __conv work_group_scan_exclusive_max(uint);\n" "uint __ovld __conv work_group_scan_inclusive_add(uint);\n" "uint __ovld __conv work_group_scan_inclusive_min(uint);\n" "uint __ovld __conv work_group_scan_inclusive_max(uint);\n" "long __ovld __conv work_group_reduce_add(long);\n" "long __ovld __conv work_group_reduce_min(long);\n" "long __ovld __conv work_group_reduce_max(long);\n" "long __ovld __conv work_group_scan_exclusive_add(long);\n" "long __ovld __conv work_group_scan_exclusive_min(long);\n" "long __ovld __conv work_group_scan_exclusive_max(long);\n" "long __ovld __conv work_group_scan_inclusive_add(long);\n" "long __ovld __conv work_group_scan_inclusive_min(long);\n" "long __ovld __conv work_group_scan_inclusive_max(long);\n" "ulong __ovld __conv 
work_group_reduce_add(ulong);\n" "ulong __ovld __conv work_group_reduce_min(ulong);\n" "ulong __ovld __conv work_group_reduce_max(ulong);\n" "ulong __ovld __conv work_group_scan_exclusive_add(ulong);\n" "ulong __ovld __conv work_group_scan_exclusive_min(ulong);\n" "ulong __ovld __conv work_group_scan_exclusive_max(ulong);\n" "ulong __ovld __conv work_group_scan_inclusive_add(ulong);\n" "ulong __ovld __conv work_group_scan_inclusive_min(ulong);\n" "ulong __ovld __conv work_group_scan_inclusive_max(ulong);\n" "float __ovld __conv work_group_reduce_add(float);\n" "float __ovld __conv work_group_reduce_min(float);\n" "float __ovld __conv work_group_reduce_max(float);\n" "float __ovld __conv work_group_scan_exclusive_add(float);\n" "float __ovld __conv work_group_scan_exclusive_min(float);\n" "float __ovld __conv work_group_scan_exclusive_max(float);\n" "float __ovld __conv work_group_scan_inclusive_add(float);\n" "float __ovld __conv work_group_scan_inclusive_min(float);\n" "float __ovld __conv work_group_scan_inclusive_max(float);\n" "#ifdef cl_khr_fp64\n" "double __ovld __conv work_group_reduce_add(double);\n" "double __ovld __conv work_group_reduce_min(double);\n" "double __ovld __conv work_group_reduce_max(double);\n" "double __ovld __conv work_group_scan_exclusive_add(double);\n" "double __ovld __conv work_group_scan_exclusive_min(double);\n" "double __ovld __conv work_group_scan_exclusive_max(double);\n" "double __ovld __conv work_group_scan_inclusive_add(double);\n" "double __ovld __conv work_group_scan_inclusive_min(double);\n" "double __ovld __conv work_group_scan_inclusive_max(double);\n" "#endif //cl_khr_fp64\n" "\n" "#endif //defined(__opencl_c_work_group_collective_functions)\n" "\n" "// OpenCL v2.0 s6.13.16 - Pipe Functions\n" "#if defined(__opencl_c_pipes)\n" "bool __ovld is_valid_reserve_id(reserve_id_t reserve_id);\n" "#endif //defined(__opencl_c_pipes)\n" "\n" "\n" "// OpenCL v2.0 s6.13.17 - Enqueue Kernels\n" "#if defined(__OPENCL_CPP_VERSION__) || 
(__OPENCL_C_VERSION__ >= CL_VERSION_2_0)\n" "\n" "#ifdef __opencl_c_device_enqueue\n" "ndrange_t __ovld ndrange_1D(size_t);\n" "ndrange_t __ovld ndrange_1D(size_t, size_t);\n" "ndrange_t __ovld ndrange_1D(size_t, size_t, size_t);\n" "\n" "ndrange_t __ovld ndrange_2D(const size_t[2]);\n" "ndrange_t __ovld ndrange_2D(const size_t[2], const size_t[2]);\n" "ndrange_t __ovld ndrange_2D(const size_t[2], const size_t[2], const size_t[2]);\n" "\n" "ndrange_t __ovld ndrange_3D(const size_t[3]);\n" "ndrange_t __ovld ndrange_3D(const size_t[3], const size_t[3]);\n" "ndrange_t __ovld ndrange_3D(const size_t[3], const size_t[3], const size_t[3]);\n" "\n" "int __ovld enqueue_marker(queue_t, uint, const clk_event_t*, clk_event_t*);\n" "\n" "void __ovld retain_event(clk_event_t);\n" "\n" "void __ovld release_event(clk_event_t);\n" "\n" "clk_event_t __ovld create_user_event(void);\n" "\n" "void __ovld set_user_event_status(clk_event_t e, int state);\n" "\n" "bool __ovld is_valid_event (clk_event_t event);\n" "\n" "void __ovld capture_event_profiling_info(clk_event_t, clk_profiling_info, __global void*);\n" "\n" "queue_t __ovld get_default_queue(void);\n" "#endif //__opencl_c_device_enqueue\n" "#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)\n" "\n" "// OpenCL Extension v2.0 s9.17 - Sub-groups\n" "\n" "#if defined(__opencl_subgroup_builtins)\n" "// Shared Sub Group Functions\n" "uint __ovld get_sub_group_size(void);\n" "uint __ovld get_max_sub_group_size(void);\n" "uint __ovld get_num_sub_groups(void);\n" "#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)\n" "uint __ovld get_enqueued_num_sub_groups(void);\n" "#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)\n" "uint __ovld get_sub_group_id(void);\n" "uint __ovld get_sub_group_local_id(void);\n" "\n" "void __ovld __conv sub_group_barrier(cl_mem_fence_flags);\n" "#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= 
CL_VERSION_2_0)\n" "void __ovld __conv sub_group_barrier(cl_mem_fence_flags, memory_scope);\n" "#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)\n" "\n" "int __ovld __conv sub_group_all(int predicate);\n" "int __ovld __conv sub_group_any(int predicate);\n" "\n" "int __ovld __conv sub_group_broadcast(int , uint sub_group_local_id);\n" "uint __ovld __conv sub_group_broadcast(uint , uint sub_group_local_id);\n" "long __ovld __conv sub_group_broadcast(long , uint sub_group_local_id);\n" "ulong __ovld __conv sub_group_broadcast(ulong, uint sub_group_local_id);\n" "float __ovld __conv sub_group_broadcast(float, uint sub_group_local_id);\n" "\n" "int __ovld __conv sub_group_reduce_add(int );\n" "uint __ovld __conv sub_group_reduce_add(uint );\n" "long __ovld __conv sub_group_reduce_add(long );\n" "ulong __ovld __conv sub_group_reduce_add(ulong);\n" "float __ovld __conv sub_group_reduce_add(float);\n" "int __ovld __conv sub_group_reduce_min(int );\n" "uint __ovld __conv sub_group_reduce_min(uint );\n" "long __ovld __conv sub_group_reduce_min(long );\n" "ulong __ovld __conv sub_group_reduce_min(ulong);\n" "float __ovld __conv sub_group_reduce_min(float);\n" "int __ovld __conv sub_group_reduce_max(int );\n" "uint __ovld __conv sub_group_reduce_max(uint );\n" "long __ovld __conv sub_group_reduce_max(long );\n" "ulong __ovld __conv sub_group_reduce_max(ulong);\n" "float __ovld __conv sub_group_reduce_max(float);\n" "\n" "int __ovld __conv sub_group_scan_exclusive_add(int );\n" "uint __ovld __conv sub_group_scan_exclusive_add(uint );\n" "long __ovld __conv sub_group_scan_exclusive_add(long );\n" "ulong __ovld __conv sub_group_scan_exclusive_add(ulong);\n" "float __ovld __conv sub_group_scan_exclusive_add(float);\n" "int __ovld __conv sub_group_scan_exclusive_min(int );\n" "uint __ovld __conv sub_group_scan_exclusive_min(uint );\n" "long __ovld __conv sub_group_scan_exclusive_min(long );\n" "ulong __ovld __conv 
sub_group_scan_exclusive_min(ulong);\n" "float __ovld __conv sub_group_scan_exclusive_min(float);\n" "int __ovld __conv sub_group_scan_exclusive_max(int );\n" "uint __ovld __conv sub_group_scan_exclusive_max(uint );\n" "long __ovld __conv sub_group_scan_exclusive_max(long );\n" "ulong __ovld __conv sub_group_scan_exclusive_max(ulong);\n" "float __ovld __conv sub_group_scan_exclusive_max(float);\n" "\n" "int __ovld __conv sub_group_scan_inclusive_add(int );\n" "uint __ovld __conv sub_group_scan_inclusive_add(uint );\n" "long __ovld __conv sub_group_scan_inclusive_add(long );\n" "ulong __ovld __conv sub_group_scan_inclusive_add(ulong);\n" "float __ovld __conv sub_group_scan_inclusive_add(float);\n" "int __ovld __conv sub_group_scan_inclusive_min(int );\n" "uint __ovld __conv sub_group_scan_inclusive_min(uint );\n" "long __ovld __conv sub_group_scan_inclusive_min(long );\n" "ulong __ovld __conv sub_group_scan_inclusive_min(ulong);\n" "float __ovld __conv sub_group_scan_inclusive_min(float);\n" "int __ovld __conv sub_group_scan_inclusive_max(int );\n" "uint __ovld __conv sub_group_scan_inclusive_max(uint );\n" "long __ovld __conv sub_group_scan_inclusive_max(long );\n" "ulong __ovld __conv sub_group_scan_inclusive_max(ulong);\n" "float __ovld __conv sub_group_scan_inclusive_max(float);\n" "\n" "#ifdef cl_khr_fp16\n" "half __ovld __conv sub_group_broadcast(half, uint sub_group_local_id);\n" "half __ovld __conv sub_group_reduce_add(half);\n" "half __ovld __conv sub_group_reduce_min(half);\n" "half __ovld __conv sub_group_reduce_max(half);\n" "half __ovld __conv sub_group_scan_exclusive_add(half);\n" "half __ovld __conv sub_group_scan_exclusive_min(half);\n" "half __ovld __conv sub_group_scan_exclusive_max(half);\n" "half __ovld __conv sub_group_scan_inclusive_add(half);\n" "half __ovld __conv sub_group_scan_inclusive_min(half);\n" "half __ovld __conv sub_group_scan_inclusive_max(half);\n" "#endif //cl_khr_fp16\n" "\n" "#ifdef cl_khr_fp64\n" "double __ovld __conv 
sub_group_broadcast(double, uint sub_group_local_id);\n" "double __ovld __conv sub_group_reduce_add(double);\n" "double __ovld __conv sub_group_reduce_min(double);\n" "double __ovld __conv sub_group_reduce_max(double);\n" "double __ovld __conv sub_group_scan_exclusive_add(double);\n" "double __ovld __conv sub_group_scan_exclusive_min(double);\n" "double __ovld __conv sub_group_scan_exclusive_max(double);\n" "double __ovld __conv sub_group_scan_inclusive_add(double);\n" "double __ovld __conv sub_group_scan_inclusive_min(double);\n" "double __ovld __conv sub_group_scan_inclusive_max(double);\n" "#endif //cl_khr_fp64\n" "\n" "#endif // __opencl_subgroup_builtins\n" "\n" "#if defined(cl_khr_subgroup_extended_types)\n" "char __ovld __conv sub_group_broadcast( char value, uint index );\n" "char2 __ovld __conv sub_group_broadcast( char2 value, uint index );\n" "char3 __ovld __conv sub_group_broadcast( char3 value, uint index );\n" "char4 __ovld __conv sub_group_broadcast( char4 value, uint index );\n" "char8 __ovld __conv sub_group_broadcast( char8 value, uint index );\n" "char16 __ovld __conv sub_group_broadcast( char16 value, uint index );\n" "\n" "uchar __ovld __conv sub_group_broadcast( uchar value, uint index );\n" "uchar2 __ovld __conv sub_group_broadcast( uchar2 value, uint index );\n" "uchar3 __ovld __conv sub_group_broadcast( uchar3 value, uint index );\n" "uchar4 __ovld __conv sub_group_broadcast( uchar4 value, uint index );\n" "uchar8 __ovld __conv sub_group_broadcast( uchar8 value, uint index );\n" "uchar16 __ovld __conv sub_group_broadcast( uchar16 value, uint index );\n" "\n" "short __ovld __conv sub_group_broadcast( short value, uint index );\n" "short2 __ovld __conv sub_group_broadcast( short2 value, uint index );\n" "short3 __ovld __conv sub_group_broadcast( short3 value, uint index );\n" "short4 __ovld __conv sub_group_broadcast( short4 value, uint index );\n" "short8 __ovld __conv sub_group_broadcast( short8 value, uint index );\n" "short16 __ovld 
__conv sub_group_broadcast( short16 value, uint index );\n" "\n" "ushort __ovld __conv sub_group_broadcast( ushort value, uint index );\n" "ushort2 __ovld __conv sub_group_broadcast( ushort2 value, uint index );\n" "ushort3 __ovld __conv sub_group_broadcast( ushort3 value, uint index );\n" "ushort4 __ovld __conv sub_group_broadcast( ushort4 value, uint index );\n" "ushort8 __ovld __conv sub_group_broadcast( ushort8 value, uint index );\n" "ushort16 __ovld __conv sub_group_broadcast( ushort16 value, uint index );\n" "\n" "// scalar int broadcast is part of cl_khr_subgroups\n" "int2 __ovld __conv sub_group_broadcast( int2 value, uint index );\n" "int3 __ovld __conv sub_group_broadcast( int3 value, uint index );\n" "int4 __ovld __conv sub_group_broadcast( int4 value, uint index );\n" "int8 __ovld __conv sub_group_broadcast( int8 value, uint index );\n" "int16 __ovld __conv sub_group_broadcast( int16 value, uint index );\n" "\n" "// scalar uint broadcast is part of cl_khr_subgroups\n" "uint2 __ovld __conv sub_group_broadcast( uint2 value, uint index );\n" "uint3 __ovld __conv sub_group_broadcast( uint3 value, uint index );\n" "uint4 __ovld __conv sub_group_broadcast( uint4 value, uint index );\n" "uint8 __ovld __conv sub_group_broadcast( uint8 value, uint index );\n" "uint16 __ovld __conv sub_group_broadcast( uint16 value, uint index );\n" "\n" "// scalar long broadcast is part of cl_khr_subgroups\n" "long2 __ovld __conv sub_group_broadcast( long2 value, uint index );\n" "long3 __ovld __conv sub_group_broadcast( long3 value, uint index );\n" "long4 __ovld __conv sub_group_broadcast( long4 value, uint index );\n" "long8 __ovld __conv sub_group_broadcast( long8 value, uint index );\n" "long16 __ovld __conv sub_group_broadcast( long16 value, uint index );\n" "\n" "// scalar ulong broadcast is part of cl_khr_subgroups\n" "ulong2 __ovld __conv sub_group_broadcast( ulong2 value, uint index );\n" "ulong3 __ovld __conv sub_group_broadcast( ulong3 value, uint index );\n" 
"ulong4 __ovld __conv sub_group_broadcast( ulong4 value, uint index );\n" "ulong8 __ovld __conv sub_group_broadcast( ulong8 value, uint index );\n" "ulong16 __ovld __conv sub_group_broadcast( ulong16 value, uint index );\n" "\n" "// scalar float broadcast is part of cl_khr_subgroups\n" "float2 __ovld __conv sub_group_broadcast( float2 value, uint index );\n" "float3 __ovld __conv sub_group_broadcast( float3 value, uint index );\n" "float4 __ovld __conv sub_group_broadcast( float4 value, uint index );\n" "float8 __ovld __conv sub_group_broadcast( float8 value, uint index );\n" "float16 __ovld __conv sub_group_broadcast( float16 value, uint index );\n" "\n" "char __ovld __conv sub_group_reduce_add( char value );\n" "uchar __ovld __conv sub_group_reduce_add( uchar value );\n" "short __ovld __conv sub_group_reduce_add( short value );\n" "ushort __ovld __conv sub_group_reduce_add( ushort value );\n" "\n" "char __ovld __conv sub_group_reduce_min( char value );\n" "uchar __ovld __conv sub_group_reduce_min( uchar value );\n" "short __ovld __conv sub_group_reduce_min( short value );\n" "ushort __ovld __conv sub_group_reduce_min( ushort value );\n" "\n" "char __ovld __conv sub_group_reduce_max( char value );\n" "uchar __ovld __conv sub_group_reduce_max( uchar value );\n" "short __ovld __conv sub_group_reduce_max( short value );\n" "ushort __ovld __conv sub_group_reduce_max( ushort value );\n" "\n" "char __ovld __conv sub_group_scan_inclusive_add( char value );\n" "uchar __ovld __conv sub_group_scan_inclusive_add( uchar value );\n" "short __ovld __conv sub_group_scan_inclusive_add( short value );\n" "ushort __ovld __conv sub_group_scan_inclusive_add( ushort value );\n" "\n" "char __ovld __conv sub_group_scan_inclusive_min( char value );\n" "uchar __ovld __conv sub_group_scan_inclusive_min( uchar value );\n" "short __ovld __conv sub_group_scan_inclusive_min( short value );\n" "ushort __ovld __conv sub_group_scan_inclusive_min( ushort value );\n" "\n" "char __ovld __conv 
sub_group_scan_inclusive_max( char value );\n" "uchar __ovld __conv sub_group_scan_inclusive_max( uchar value );\n" "short __ovld __conv sub_group_scan_inclusive_max( short value );\n" "ushort __ovld __conv sub_group_scan_inclusive_max( ushort value );\n" "\n" "char __ovld __conv sub_group_scan_exclusive_add( char value );\n" "uchar __ovld __conv sub_group_scan_exclusive_add( uchar value );\n" "short __ovld __conv sub_group_scan_exclusive_add( short value );\n" "ushort __ovld __conv sub_group_scan_exclusive_add( ushort value );\n" "\n" "char __ovld __conv sub_group_scan_exclusive_min( char value );\n" "uchar __ovld __conv sub_group_scan_exclusive_min( uchar value );\n" "short __ovld __conv sub_group_scan_exclusive_min( short value );\n" "ushort __ovld __conv sub_group_scan_exclusive_min( ushort value );\n" "\n" "char __ovld __conv sub_group_scan_exclusive_max( char value );\n" "uchar __ovld __conv sub_group_scan_exclusive_max( uchar value );\n" "short __ovld __conv sub_group_scan_exclusive_max( short value );\n" "ushort __ovld __conv sub_group_scan_exclusive_max( ushort value );\n" "\n" "#if defined(cl_khr_fp16)\n" "// scalar half broadcast is part of cl_khr_subgroups\n" "half2 __ovld __conv sub_group_broadcast( half2 value, uint index );\n" "half3 __ovld __conv sub_group_broadcast( half3 value, uint index );\n" "half4 __ovld __conv sub_group_broadcast( half4 value, uint index );\n" "half8 __ovld __conv sub_group_broadcast( half8 value, uint index );\n" "half16 __ovld __conv sub_group_broadcast( half16 value, uint index );\n" "#endif // cl_khr_fp16\n" "\n" "#if defined(cl_khr_fp64)\n" "// scalar double broadcast is part of cl_khr_subgroups\n" "double2 __ovld __conv sub_group_broadcast( double2 value, uint index );\n" "double3 __ovld __conv sub_group_broadcast( double3 value, uint index );\n" "double4 __ovld __conv sub_group_broadcast( double4 value, uint index );\n" "double8 __ovld __conv sub_group_broadcast( double8 value, uint index );\n" "double16 __ovld __conv 
sub_group_broadcast( double16 value, uint index );\n" "#endif // cl_khr_fp64\n" "\n" "#endif // cl_khr_subgroup_extended_types\n" "\n" "#if defined(cl_khr_subgroup_non_uniform_vote)\n" "int __ovld sub_group_elect(void);\n" "int __ovld sub_group_non_uniform_all( int predicate );\n" "int __ovld sub_group_non_uniform_any( int predicate );\n" "\n" "int __ovld sub_group_non_uniform_all_equal( char value );\n" "int __ovld sub_group_non_uniform_all_equal( uchar value );\n" "int __ovld sub_group_non_uniform_all_equal( short value );\n" "int __ovld sub_group_non_uniform_all_equal( ushort value );\n" "int __ovld sub_group_non_uniform_all_equal( int value );\n" "int __ovld sub_group_non_uniform_all_equal( uint value );\n" "int __ovld sub_group_non_uniform_all_equal( long value );\n" "int __ovld sub_group_non_uniform_all_equal( ulong value );\n" "int __ovld sub_group_non_uniform_all_equal( float value );\n" "\n" "#if defined(cl_khr_fp16)\n" "int __ovld sub_group_non_uniform_all_equal( half value );\n" "#endif // cl_khr_fp16\n" "\n" "#if defined(cl_khr_fp64)\n" "int __ovld sub_group_non_uniform_all_equal( double value );\n" "#endif // cl_khr_fp64\n" "\n" "#endif // cl_khr_subgroup_non_uniform_vote\n" "\n" "#if defined(cl_khr_subgroup_ballot)\n" "char __ovld sub_group_non_uniform_broadcast( char value, uint index );\n" "char2 __ovld sub_group_non_uniform_broadcast( char2 value, uint index );\n" "char3 __ovld sub_group_non_uniform_broadcast( char3 value, uint index );\n" "char4 __ovld sub_group_non_uniform_broadcast( char4 value, uint index );\n" "char8 __ovld sub_group_non_uniform_broadcast( char8 value, uint index );\n" "char16 __ovld sub_group_non_uniform_broadcast( char16 value, uint index );\n" "\n" "uchar __ovld sub_group_non_uniform_broadcast( uchar value, uint index );\n" "uchar2 __ovld sub_group_non_uniform_broadcast( uchar2 value, uint index );\n" "uchar3 __ovld sub_group_non_uniform_broadcast( uchar3 value, uint index );\n" "uchar4 __ovld 
sub_group_non_uniform_broadcast( uchar4 value, uint index );\n" "uchar8 __ovld sub_group_non_uniform_broadcast( uchar8 value, uint index );\n" "uchar16 __ovld sub_group_non_uniform_broadcast( uchar16 value, uint index );\n" "\n" "short __ovld sub_group_non_uniform_broadcast( short value, uint index );\n" "short2 __ovld sub_group_non_uniform_broadcast( short2 value, uint index );\n" "short3 __ovld sub_group_non_uniform_broadcast( short3 value, uint index );\n" "short4 __ovld sub_group_non_uniform_broadcast( short4 value, uint index );\n" "short8 __ovld sub_group_non_uniform_broadcast( short8 value, uint index );\n" "short16 __ovld sub_group_non_uniform_broadcast( short16 value, uint index );\n" "\n" "ushort __ovld sub_group_non_uniform_broadcast( ushort value, uint index );\n" "ushort2 __ovld sub_group_non_uniform_broadcast( ushort2 value, uint index );\n" "ushort3 __ovld sub_group_non_uniform_broadcast( ushort3 value, uint index );\n" "ushort4 __ovld sub_group_non_uniform_broadcast( ushort4 value, uint index );\n" "ushort8 __ovld sub_group_non_uniform_broadcast( ushort8 value, uint index );\n" "ushort16 __ovld sub_group_non_uniform_broadcast( ushort16 value, uint index );\n" "\n" "int __ovld sub_group_non_uniform_broadcast( int value, uint index );\n" "int2 __ovld sub_group_non_uniform_broadcast( int2 value, uint index );\n" "int3 __ovld sub_group_non_uniform_broadcast( int3 value, uint index );\n" "int4 __ovld sub_group_non_uniform_broadcast( int4 value, uint index );\n" "int8 __ovld sub_group_non_uniform_broadcast( int8 value, uint index );\n" "int16 __ovld sub_group_non_uniform_broadcast( int16 value, uint index );\n" "\n" "uint __ovld sub_group_non_uniform_broadcast( uint value, uint index );\n" "uint2 __ovld sub_group_non_uniform_broadcast( uint2 value, uint index );\n" "uint3 __ovld sub_group_non_uniform_broadcast( uint3 value, uint index );\n" "uint4 __ovld sub_group_non_uniform_broadcast( uint4 value, uint index );\n" "uint8 __ovld 
sub_group_non_uniform_broadcast( uint8 value, uint index );\n" "uint16 __ovld sub_group_non_uniform_broadcast( uint16 value, uint index );\n" "\n" "long __ovld sub_group_non_uniform_broadcast( long value, uint index );\n" "long2 __ovld sub_group_non_uniform_broadcast( long2 value, uint index );\n" "long3 __ovld sub_group_non_uniform_broadcast( long3 value, uint index );\n" "long4 __ovld sub_group_non_uniform_broadcast( long4 value, uint index );\n" "long8 __ovld sub_group_non_uniform_broadcast( long8 value, uint index );\n" "long16 __ovld sub_group_non_uniform_broadcast( long16 value, uint index );\n" "\n" "ulong __ovld sub_group_non_uniform_broadcast( ulong value, uint index );\n" "ulong2 __ovld sub_group_non_uniform_broadcast( ulong2 value, uint index );\n" "ulong3 __ovld sub_group_non_uniform_broadcast( ulong3 value, uint index );\n" "ulong4 __ovld sub_group_non_uniform_broadcast( ulong4 value, uint index );\n" "ulong8 __ovld sub_group_non_uniform_broadcast( ulong8 value, uint index );\n" "ulong16 __ovld sub_group_non_uniform_broadcast( ulong16 value, uint index );\n" "\n" "float __ovld sub_group_non_uniform_broadcast( float value, uint index );\n" "float2 __ovld sub_group_non_uniform_broadcast( float2 value, uint index );\n" "float3 __ovld sub_group_non_uniform_broadcast( float3 value, uint index );\n" "float4 __ovld sub_group_non_uniform_broadcast( float4 value, uint index );\n" "float8 __ovld sub_group_non_uniform_broadcast( float8 value, uint index );\n" "float16 __ovld sub_group_non_uniform_broadcast( float16 value, uint index );\n" "\n" "char __ovld sub_group_broadcast_first( char value );\n" "uchar __ovld sub_group_broadcast_first( uchar value );\n" "short __ovld sub_group_broadcast_first( short value );\n" "ushort __ovld sub_group_broadcast_first( ushort value );\n" "int __ovld sub_group_broadcast_first( int value );\n" "uint __ovld sub_group_broadcast_first( uint value );\n" "long __ovld sub_group_broadcast_first( long value );\n" "ulong __ovld 
sub_group_broadcast_first( ulong value );\n" "float __ovld sub_group_broadcast_first( float value );\n" "\n" "uint4 __ovld sub_group_ballot( int predicate );\n" "int __ovld __cnfn sub_group_inverse_ballot( uint4 value );\n" "int __ovld __cnfn sub_group_ballot_bit_extract( uint4 value, uint index );\n" "uint __ovld __cnfn sub_group_ballot_bit_count( uint4 value );\n" "\n" "uint __ovld sub_group_ballot_inclusive_scan( uint4 value );\n" "uint __ovld sub_group_ballot_exclusive_scan( uint4 value );\n" "uint __ovld sub_group_ballot_find_lsb( uint4 value );\n" "uint __ovld sub_group_ballot_find_msb( uint4 value );\n" "\n" "uint4 __ovld __cnfn get_sub_group_eq_mask(void);\n" "uint4 __ovld __cnfn get_sub_group_ge_mask(void);\n" "uint4 __ovld __cnfn get_sub_group_gt_mask(void);\n" "uint4 __ovld __cnfn get_sub_group_le_mask(void);\n" "uint4 __ovld __cnfn get_sub_group_lt_mask(void);\n" "\n" "#if defined(cl_khr_fp16)\n" "half __ovld sub_group_non_uniform_broadcast( half value, uint index );\n" "half2 __ovld sub_group_non_uniform_broadcast( half2 value, uint index );\n" "half3 __ovld sub_group_non_uniform_broadcast( half3 value, uint index );\n" "half4 __ovld sub_group_non_uniform_broadcast( half4 value, uint index );\n" "half8 __ovld sub_group_non_uniform_broadcast( half8 value, uint index );\n" "half16 __ovld sub_group_non_uniform_broadcast( half16 value, uint index );\n" "\n" "half __ovld sub_group_broadcast_first( half value );\n" "#endif // cl_khr_fp16\n" "\n" "#if defined(cl_khr_fp64)\n" "double __ovld sub_group_non_uniform_broadcast( double value, uint index );\n" "double2 __ovld sub_group_non_uniform_broadcast( double2 value, uint index );\n" "double3 __ovld sub_group_non_uniform_broadcast( double3 value, uint index );\n" "double4 __ovld sub_group_non_uniform_broadcast( double4 value, uint index );\n" "double8 __ovld sub_group_non_uniform_broadcast( double8 value, uint index );\n" "double16 __ovld sub_group_non_uniform_broadcast( double16 value, uint index );\n" "\n" 
"double __ovld sub_group_broadcast_first( double value );\n" "#endif // cl_khr_fp64\n" "\n" "#endif // cl_khr_subgroup_ballot\n" "\n" "#if defined(cl_khr_subgroup_non_uniform_arithmetic)\n" "char __ovld sub_group_non_uniform_reduce_add( char value );\n" "uchar __ovld sub_group_non_uniform_reduce_add( uchar value );\n" "short __ovld sub_group_non_uniform_reduce_add( short value );\n" "ushort __ovld sub_group_non_uniform_reduce_add( ushort value );\n" "int __ovld sub_group_non_uniform_reduce_add( int value );\n" "uint __ovld sub_group_non_uniform_reduce_add( uint value );\n" "long __ovld sub_group_non_uniform_reduce_add( long value );\n" "ulong __ovld sub_group_non_uniform_reduce_add( ulong value );\n" "float __ovld sub_group_non_uniform_reduce_add( float value );\n" "\n" "char __ovld sub_group_non_uniform_reduce_mul( char value );\n" "uchar __ovld sub_group_non_uniform_reduce_mul( uchar value );\n" "short __ovld sub_group_non_uniform_reduce_mul( short value );\n" "ushort __ovld sub_group_non_uniform_reduce_mul( ushort value );\n" "int __ovld sub_group_non_uniform_reduce_mul( int value );\n" "uint __ovld sub_group_non_uniform_reduce_mul( uint value );\n" "long __ovld sub_group_non_uniform_reduce_mul( long value );\n" "ulong __ovld sub_group_non_uniform_reduce_mul( ulong value );\n" "float __ovld sub_group_non_uniform_reduce_mul( float value );\n" "\n" "char __ovld sub_group_non_uniform_reduce_min( char value );\n" "uchar __ovld sub_group_non_uniform_reduce_min( uchar value );\n" "short __ovld sub_group_non_uniform_reduce_min( short value );\n" "ushort __ovld sub_group_non_uniform_reduce_min( ushort value );\n" "int __ovld sub_group_non_uniform_reduce_min( int value );\n" "uint __ovld sub_group_non_uniform_reduce_min( uint value );\n" "long __ovld sub_group_non_uniform_reduce_min( long value );\n" "ulong __ovld sub_group_non_uniform_reduce_min( ulong value );\n" "float __ovld sub_group_non_uniform_reduce_min( float value );\n" "\n" "char __ovld 
sub_group_non_uniform_reduce_max( char value );\n" "uchar __ovld sub_group_non_uniform_reduce_max( uchar value );\n" "short __ovld sub_group_non_uniform_reduce_max( short value );\n" "ushort __ovld sub_group_non_uniform_reduce_max( ushort value );\n" "int __ovld sub_group_non_uniform_reduce_max( int value );\n" "uint __ovld sub_group_non_uniform_reduce_max( uint value );\n" "long __ovld sub_group_non_uniform_reduce_max( long value );\n" "ulong __ovld sub_group_non_uniform_reduce_max( ulong value );\n" "float __ovld sub_group_non_uniform_reduce_max( float value );\n" "\n" "char __ovld sub_group_non_uniform_scan_inclusive_add( char value );\n" "uchar __ovld sub_group_non_uniform_scan_inclusive_add( uchar value );\n" "short __ovld sub_group_non_uniform_scan_inclusive_add( short value );\n" "ushort __ovld sub_group_non_uniform_scan_inclusive_add( ushort value );\n" "int __ovld sub_group_non_uniform_scan_inclusive_add( int value );\n" "uint __ovld sub_group_non_uniform_scan_inclusive_add( uint value );\n" "long __ovld sub_group_non_uniform_scan_inclusive_add( long value );\n" "ulong __ovld sub_group_non_uniform_scan_inclusive_add( ulong value );\n" "float __ovld sub_group_non_uniform_scan_inclusive_add( float value );\n" "\n" "char __ovld sub_group_non_uniform_scan_inclusive_mul( char value );\n" "uchar __ovld sub_group_non_uniform_scan_inclusive_mul( uchar value );\n" "short __ovld sub_group_non_uniform_scan_inclusive_mul( short value );\n" "ushort __ovld sub_group_non_uniform_scan_inclusive_mul( ushort value );\n" "int __ovld sub_group_non_uniform_scan_inclusive_mul( int value );\n" "uint __ovld sub_group_non_uniform_scan_inclusive_mul( uint value );\n" "long __ovld sub_group_non_uniform_scan_inclusive_mul( long value );\n" "ulong __ovld sub_group_non_uniform_scan_inclusive_mul( ulong value );\n" "float __ovld sub_group_non_uniform_scan_inclusive_mul( float value );\n" "\n" "char __ovld sub_group_non_uniform_scan_inclusive_min( char value );\n" "uchar __ovld 
sub_group_non_uniform_scan_inclusive_min( uchar value );\n" "short __ovld sub_group_non_uniform_scan_inclusive_min( short value );\n" "ushort __ovld sub_group_non_uniform_scan_inclusive_min( ushort value );\n" "int __ovld sub_group_non_uniform_scan_inclusive_min( int value );\n" "uint __ovld sub_group_non_uniform_scan_inclusive_min( uint value );\n" "long __ovld sub_group_non_uniform_scan_inclusive_min( long value );\n" "ulong __ovld sub_group_non_uniform_scan_inclusive_min( ulong value );\n" "float __ovld sub_group_non_uniform_scan_inclusive_min( float value );\n" "\n" "char __ovld sub_group_non_uniform_scan_inclusive_max( char value );\n" "uchar __ovld sub_group_non_uniform_scan_inclusive_max( uchar value );\n" "short __ovld sub_group_non_uniform_scan_inclusive_max( short value );\n" "ushort __ovld sub_group_non_uniform_scan_inclusive_max( ushort value );\n" "int __ovld sub_group_non_uniform_scan_inclusive_max( int value );\n" "uint __ovld sub_group_non_uniform_scan_inclusive_max( uint value );\n" "long __ovld sub_group_non_uniform_scan_inclusive_max( long value );\n" "ulong __ovld sub_group_non_uniform_scan_inclusive_max( ulong value );\n" "float __ovld sub_group_non_uniform_scan_inclusive_max( float value );\n" "\n" "char __ovld sub_group_non_uniform_scan_exclusive_add( char value );\n" "uchar __ovld sub_group_non_uniform_scan_exclusive_add( uchar value );\n" "short __ovld sub_group_non_uniform_scan_exclusive_add( short value );\n" "ushort __ovld sub_group_non_uniform_scan_exclusive_add( ushort value );\n" "int __ovld sub_group_non_uniform_scan_exclusive_add( int value );\n" "uint __ovld sub_group_non_uniform_scan_exclusive_add( uint value );\n" "long __ovld sub_group_non_uniform_scan_exclusive_add( long value );\n" "ulong __ovld sub_group_non_uniform_scan_exclusive_add( ulong value );\n" "float __ovld sub_group_non_uniform_scan_exclusive_add( float value );\n" "\n" "char __ovld sub_group_non_uniform_scan_exclusive_mul( char value );\n" "uchar __ovld 
sub_group_non_uniform_scan_exclusive_mul( uchar value );\n" "short __ovld sub_group_non_uniform_scan_exclusive_mul( short value );\n" "ushort __ovld sub_group_non_uniform_scan_exclusive_mul( ushort value );\n" "int __ovld sub_group_non_uniform_scan_exclusive_mul( int value );\n" "uint __ovld sub_group_non_uniform_scan_exclusive_mul( uint value );\n" "long __ovld sub_group_non_uniform_scan_exclusive_mul( long value );\n" "ulong __ovld sub_group_non_uniform_scan_exclusive_mul( ulong value );\n" "float __ovld sub_group_non_uniform_scan_exclusive_mul( float value );\n" "\n" "char __ovld sub_group_non_uniform_scan_exclusive_min( char value );\n" "uchar __ovld sub_group_non_uniform_scan_exclusive_min( uchar value );\n" "short __ovld sub_group_non_uniform_scan_exclusive_min( short value );\n" "ushort __ovld sub_group_non_uniform_scan_exclusive_min( ushort value );\n" "int __ovld sub_group_non_uniform_scan_exclusive_min( int value );\n" "uint __ovld sub_group_non_uniform_scan_exclusive_min( uint value );\n" "long __ovld sub_group_non_uniform_scan_exclusive_min( long value );\n" "ulong __ovld sub_group_non_uniform_scan_exclusive_min( ulong value );\n" "float __ovld sub_group_non_uniform_scan_exclusive_min( float value );\n" "\n" "char __ovld sub_group_non_uniform_scan_exclusive_max( char value );\n" "uchar __ovld sub_group_non_uniform_scan_exclusive_max( uchar value );\n" "short __ovld sub_group_non_uniform_scan_exclusive_max( short value );\n" "ushort __ovld sub_group_non_uniform_scan_exclusive_max( ushort value );\n" "int __ovld sub_group_non_uniform_scan_exclusive_max( int value );\n" "uint __ovld sub_group_non_uniform_scan_exclusive_max( uint value );\n" "long __ovld sub_group_non_uniform_scan_exclusive_max( long value );\n" "ulong __ovld sub_group_non_uniform_scan_exclusive_max( ulong value );\n" "float __ovld sub_group_non_uniform_scan_exclusive_max( float value );\n" "\n" "char __ovld sub_group_non_uniform_reduce_and( char value );\n" "uchar __ovld 
sub_group_non_uniform_reduce_and( uchar value );\n" "short __ovld sub_group_non_uniform_reduce_and( short value );\n" "ushort __ovld sub_group_non_uniform_reduce_and( ushort value );\n" "int __ovld sub_group_non_uniform_reduce_and( int value );\n" "uint __ovld sub_group_non_uniform_reduce_and( uint value );\n" "long __ovld sub_group_non_uniform_reduce_and( long value );\n" "ulong __ovld sub_group_non_uniform_reduce_and( ulong value );\n" "\n" "char __ovld sub_group_non_uniform_reduce_or( char value );\n" "uchar __ovld sub_group_non_uniform_reduce_or( uchar value );\n" "short __ovld sub_group_non_uniform_reduce_or( short value );\n" "ushort __ovld sub_group_non_uniform_reduce_or( ushort value );\n" "int __ovld sub_group_non_uniform_reduce_or( int value );\n" "uint __ovld sub_group_non_uniform_reduce_or( uint value );\n" "long __ovld sub_group_non_uniform_reduce_or( long value );\n" "ulong __ovld sub_group_non_uniform_reduce_or( ulong value );\n" "\n" "char __ovld sub_group_non_uniform_reduce_xor( char value );\n" "uchar __ovld sub_group_non_uniform_reduce_xor( uchar value );\n" "short __ovld sub_group_non_uniform_reduce_xor( short value );\n" "ushort __ovld sub_group_non_uniform_reduce_xor( ushort value );\n" "int __ovld sub_group_non_uniform_reduce_xor( int value );\n" "uint __ovld sub_group_non_uniform_reduce_xor( uint value );\n" "long __ovld sub_group_non_uniform_reduce_xor( long value );\n" "ulong __ovld sub_group_non_uniform_reduce_xor( ulong value );\n" "\n" "char __ovld sub_group_non_uniform_scan_inclusive_and( char value );\n" "uchar __ovld sub_group_non_uniform_scan_inclusive_and( uchar value );\n" "short __ovld sub_group_non_uniform_scan_inclusive_and( short value );\n" "ushort __ovld sub_group_non_uniform_scan_inclusive_and( ushort value );\n" "int __ovld sub_group_non_uniform_scan_inclusive_and( int value );\n" "uint __ovld sub_group_non_uniform_scan_inclusive_and( uint value );\n" "long __ovld sub_group_non_uniform_scan_inclusive_and( long value );\n" 
"ulong __ovld sub_group_non_uniform_scan_inclusive_and( ulong value );\n" "\n" "char __ovld sub_group_non_uniform_scan_inclusive_or( char value );\n" "uchar __ovld sub_group_non_uniform_scan_inclusive_or( uchar value );\n" "short __ovld sub_group_non_uniform_scan_inclusive_or( short value );\n" "ushort __ovld sub_group_non_uniform_scan_inclusive_or( ushort value );\n" "int __ovld sub_group_non_uniform_scan_inclusive_or( int value );\n" "uint __ovld sub_group_non_uniform_scan_inclusive_or( uint value );\n" "long __ovld sub_group_non_uniform_scan_inclusive_or( long value );\n" "ulong __ovld sub_group_non_uniform_scan_inclusive_or( ulong value );\n" "\n" "char __ovld sub_group_non_uniform_scan_inclusive_xor( char value );\n" "uchar __ovld sub_group_non_uniform_scan_inclusive_xor( uchar value );\n" "short __ovld sub_group_non_uniform_scan_inclusive_xor( short value );\n" "ushort __ovld sub_group_non_uniform_scan_inclusive_xor( ushort value );\n" "int __ovld sub_group_non_uniform_scan_inclusive_xor( int value );\n" "uint __ovld sub_group_non_uniform_scan_inclusive_xor( uint value );\n" "long __ovld sub_group_non_uniform_scan_inclusive_xor( long value );\n" "ulong __ovld sub_group_non_uniform_scan_inclusive_xor( ulong value );\n" "\n" "char __ovld sub_group_non_uniform_scan_exclusive_and( char value );\n" "uchar __ovld sub_group_non_uniform_scan_exclusive_and( uchar value );\n" "short __ovld sub_group_non_uniform_scan_exclusive_and( short value );\n" "ushort __ovld sub_group_non_uniform_scan_exclusive_and( ushort value );\n" "int __ovld sub_group_non_uniform_scan_exclusive_and( int value );\n" "uint __ovld sub_group_non_uniform_scan_exclusive_and( uint value );\n" "long __ovld sub_group_non_uniform_scan_exclusive_and( long value );\n" "ulong __ovld sub_group_non_uniform_scan_exclusive_and( ulong value );\n" "\n" "char __ovld sub_group_non_uniform_scan_exclusive_or( char value );\n" "uchar __ovld sub_group_non_uniform_scan_exclusive_or( uchar value );\n" "short __ovld 
sub_group_non_uniform_scan_exclusive_or( short value );\n" "ushort __ovld sub_group_non_uniform_scan_exclusive_or( ushort value );\n" "int __ovld sub_group_non_uniform_scan_exclusive_or( int value );\n" "uint __ovld sub_group_non_uniform_scan_exclusive_or( uint value );\n" "long __ovld sub_group_non_uniform_scan_exclusive_or( long value );\n" "ulong __ovld sub_group_non_uniform_scan_exclusive_or( ulong value );\n" "\n" "char __ovld sub_group_non_uniform_scan_exclusive_xor( char value );\n" "uchar __ovld sub_group_non_uniform_scan_exclusive_xor( uchar value );\n" "short __ovld sub_group_non_uniform_scan_exclusive_xor( short value );\n" "ushort __ovld sub_group_non_uniform_scan_exclusive_xor( ushort value );\n" "int __ovld sub_group_non_uniform_scan_exclusive_xor( int value );\n" "uint __ovld sub_group_non_uniform_scan_exclusive_xor( uint value );\n" "long __ovld sub_group_non_uniform_scan_exclusive_xor( long value );\n" "ulong __ovld sub_group_non_uniform_scan_exclusive_xor( ulong value );\n" "\n" "int __ovld sub_group_non_uniform_reduce_logical_and( int predicate );\n" "int __ovld sub_group_non_uniform_reduce_logical_or( int predicate );\n" "int __ovld sub_group_non_uniform_reduce_logical_xor( int predicate );\n" "\n" "int __ovld sub_group_non_uniform_scan_inclusive_logical_and( int predicate );\n" "int __ovld sub_group_non_uniform_scan_inclusive_logical_or( int predicate );\n" "int __ovld sub_group_non_uniform_scan_inclusive_logical_xor( int predicate );\n" "\n" "int __ovld sub_group_non_uniform_scan_exclusive_logical_and( int predicate );\n" "int __ovld sub_group_non_uniform_scan_exclusive_logical_or( int predicate );\n" "int __ovld sub_group_non_uniform_scan_exclusive_logical_xor( int predicate );\n" "\n" "#if defined(cl_khr_fp16)\n" "half __ovld sub_group_non_uniform_reduce_add( half value );\n" "half __ovld sub_group_non_uniform_reduce_mul( half value );\n" "half __ovld sub_group_non_uniform_reduce_min( half value );\n" "half __ovld 
sub_group_non_uniform_reduce_max( half value );\n" "half __ovld sub_group_non_uniform_scan_inclusive_add( half value );\n" "half __ovld sub_group_non_uniform_scan_inclusive_mul( half value );\n" "half __ovld sub_group_non_uniform_scan_inclusive_min( half value );\n" "half __ovld sub_group_non_uniform_scan_inclusive_max( half value );\n" "half __ovld sub_group_non_uniform_scan_exclusive_add( half value );\n" "half __ovld sub_group_non_uniform_scan_exclusive_mul( half value );\n" "half __ovld sub_group_non_uniform_scan_exclusive_min( half value );\n" "half __ovld sub_group_non_uniform_scan_exclusive_max( half value );\n" "#endif // cl_khr_fp16\n" "\n" "#if defined(cl_khr_fp64)\n" "double __ovld sub_group_non_uniform_reduce_add( double value );\n" "double __ovld sub_group_non_uniform_reduce_mul( double value );\n" "double __ovld sub_group_non_uniform_reduce_min( double value );\n" "double __ovld sub_group_non_uniform_reduce_max( double value );\n" "double __ovld sub_group_non_uniform_scan_inclusive_add( double value );\n" "double __ovld sub_group_non_uniform_scan_inclusive_mul( double value );\n" "double __ovld sub_group_non_uniform_scan_inclusive_min( double value );\n" "double __ovld sub_group_non_uniform_scan_inclusive_max( double value );\n" "double __ovld sub_group_non_uniform_scan_exclusive_add( double value );\n" "double __ovld sub_group_non_uniform_scan_exclusive_mul( double value );\n" "double __ovld sub_group_non_uniform_scan_exclusive_min( double value );\n" "double __ovld sub_group_non_uniform_scan_exclusive_max( double value );\n" "#endif // cl_khr_fp64\n" "\n" "#endif // cl_khr_subgroup_non_uniform_arithmetic\n" "\n" "#if defined(cl_khr_subgroup_shuffle)\n" "char __ovld sub_group_shuffle( char value, uint index );\n" "uchar __ovld sub_group_shuffle( uchar value, uint index );\n" "short __ovld sub_group_shuffle( short value, uint index );\n" "ushort __ovld sub_group_shuffle( ushort value, uint index );\n" "int __ovld sub_group_shuffle( int value, uint 
index );\n" "uint __ovld sub_group_shuffle( uint value, uint index );\n" "long __ovld sub_group_shuffle( long value, uint index );\n" "ulong __ovld sub_group_shuffle( ulong value, uint index );\n" "float __ovld sub_group_shuffle( float value, uint index );\n" "\n" "char __ovld sub_group_shuffle_xor( char value, uint mask );\n" "uchar __ovld sub_group_shuffle_xor( uchar value, uint mask );\n" "short __ovld sub_group_shuffle_xor( short value, uint mask );\n" "ushort __ovld sub_group_shuffle_xor( ushort value, uint mask );\n" "int __ovld sub_group_shuffle_xor( int value, uint mask );\n" "uint __ovld sub_group_shuffle_xor( uint value, uint mask );\n" "long __ovld sub_group_shuffle_xor( long value, uint mask );\n" "ulong __ovld sub_group_shuffle_xor( ulong value, uint mask );\n" "float __ovld sub_group_shuffle_xor( float value, uint mask );\n" "\n" "#if defined(cl_khr_fp16)\n" "half __ovld sub_group_shuffle( half value, uint index );\n" "half __ovld sub_group_shuffle_xor( half value, uint mask );\n" "#endif // cl_khr_fp16\n" "\n" "#if defined(cl_khr_fp64)\n" "double __ovld sub_group_shuffle( double value, uint index );\n" "double __ovld sub_group_shuffle_xor( double value, uint mask );\n" "#endif // cl_khr_fp64\n" "\n" "#endif // cl_khr_subgroup_shuffle\n" "\n" "#if defined(cl_khr_subgroup_shuffle_relative)\n" "char __ovld sub_group_shuffle_up( char value, uint delta );\n" "uchar __ovld sub_group_shuffle_up( uchar value, uint delta );\n" "short __ovld sub_group_shuffle_up( short value, uint delta );\n" "ushort __ovld sub_group_shuffle_up( ushort value, uint delta );\n" "int __ovld sub_group_shuffle_up( int value, uint delta );\n" "uint __ovld sub_group_shuffle_up( uint value, uint delta );\n" "long __ovld sub_group_shuffle_up( long value, uint delta );\n" "ulong __ovld sub_group_shuffle_up( ulong value, uint delta );\n" "float __ovld sub_group_shuffle_up( float value, uint delta );\n" "\n" "char __ovld sub_group_shuffle_down( char value, uint delta );\n" "uchar __ovld 
sub_group_shuffle_down( uchar value, uint delta );\n" "short __ovld sub_group_shuffle_down( short value, uint delta );\n" "ushort __ovld sub_group_shuffle_down( ushort value, uint delta );\n" "int __ovld sub_group_shuffle_down( int value, uint delta );\n" "uint __ovld sub_group_shuffle_down( uint value, uint delta );\n" "long __ovld sub_group_shuffle_down( long value, uint delta );\n" "ulong __ovld sub_group_shuffle_down( ulong value, uint delta );\n" "float __ovld sub_group_shuffle_down( float value, uint delta );\n" "\n" "#if defined(cl_khr_fp16)\n" "half __ovld sub_group_shuffle_up( half value, uint delta );\n" "half __ovld sub_group_shuffle_down( half value, uint delta );\n" "#endif // cl_khr_fp16\n" "\n" "#if defined(cl_khr_fp64)\n" "double __ovld sub_group_shuffle_up( double value, uint delta );\n" "double __ovld sub_group_shuffle_down( double value, uint delta );\n" "#endif // cl_khr_fp64\n" "\n" "#endif // cl_khr_subgroup_shuffle_relative\n" "\n" "#if defined(cl_khr_subgroup_clustered_reduce)\n" "char __ovld sub_group_clustered_reduce_add( char value, uint clustersize );\n" "uchar __ovld sub_group_clustered_reduce_add( uchar value, uint clustersize );\n" "short __ovld sub_group_clustered_reduce_add( short value, uint clustersize );\n" "ushort __ovld sub_group_clustered_reduce_add( ushort value, uint clustersize );\n" "int __ovld sub_group_clustered_reduce_add( int value, uint clustersize );\n" "uint __ovld sub_group_clustered_reduce_add( uint value, uint clustersize );\n" "long __ovld sub_group_clustered_reduce_add( long value, uint clustersize );\n" "ulong __ovld sub_group_clustered_reduce_add( ulong value, uint clustersize );\n" "float __ovld sub_group_clustered_reduce_add( float value, uint clustersize );\n" "\n" "char __ovld sub_group_clustered_reduce_mul( char value, uint clustersize );\n" "uchar __ovld sub_group_clustered_reduce_mul( uchar value, uint clustersize );\n" "short __ovld sub_group_clustered_reduce_mul( short value, uint clustersize );\n" 
"ushort __ovld sub_group_clustered_reduce_mul( ushort value, uint clustersize );\n" "int __ovld sub_group_clustered_reduce_mul( int value, uint clustersize );\n" "uint __ovld sub_group_clustered_reduce_mul( uint value, uint clustersize );\n" "long __ovld sub_group_clustered_reduce_mul( long value, uint clustersize );\n" "ulong __ovld sub_group_clustered_reduce_mul( ulong value, uint clustersize );\n" "float __ovld sub_group_clustered_reduce_mul( float value, uint clustersize );\n" "\n" "char __ovld sub_group_clustered_reduce_min( char value, uint clustersize );\n" "uchar __ovld sub_group_clustered_reduce_min( uchar value, uint clustersize );\n" "short __ovld sub_group_clustered_reduce_min( short value, uint clustersize );\n" "ushort __ovld sub_group_clustered_reduce_min( ushort value, uint clustersize );\n" "int __ovld sub_group_clustered_reduce_min( int value, uint clustersize );\n" "uint __ovld sub_group_clustered_reduce_min( uint value, uint clustersize );\n" "long __ovld sub_group_clustered_reduce_min( long value, uint clustersize );\n" "ulong __ovld sub_group_clustered_reduce_min( ulong value, uint clustersize );\n" "float __ovld sub_group_clustered_reduce_min( float value, uint clustersize );\n" "\n" "char __ovld sub_group_clustered_reduce_max( char value, uint clustersize );\n" "uchar __ovld sub_group_clustered_reduce_max( uchar value, uint clustersize );\n" "short __ovld sub_group_clustered_reduce_max( short value, uint clustersize );\n" "ushort __ovld sub_group_clustered_reduce_max( ushort value, uint clustersize );\n" "int __ovld sub_group_clustered_reduce_max( int value, uint clustersize );\n" "uint __ovld sub_group_clustered_reduce_max( uint value, uint clustersize );\n" "long __ovld sub_group_clustered_reduce_max( long value, uint clustersize );\n" "ulong __ovld sub_group_clustered_reduce_max( ulong value, uint clustersize );\n" "float __ovld sub_group_clustered_reduce_max( float value, uint clustersize );\n" "\n" "char __ovld 
sub_group_clustered_reduce_and( char value, uint clustersize );\n" "uchar __ovld sub_group_clustered_reduce_and( uchar value, uint clustersize );\n" "short __ovld sub_group_clustered_reduce_and( short value, uint clustersize );\n" "ushort __ovld sub_group_clustered_reduce_and( ushort value, uint clustersize );\n" "int __ovld sub_group_clustered_reduce_and( int value, uint clustersize );\n" "uint __ovld sub_group_clustered_reduce_and( uint value, uint clustersize );\n" "long __ovld sub_group_clustered_reduce_and( long value, uint clustersize );\n" "ulong __ovld sub_group_clustered_reduce_and( ulong value, uint clustersize );\n" "\n" "char __ovld sub_group_clustered_reduce_or( char value, uint clustersize );\n" "uchar __ovld sub_group_clustered_reduce_or( uchar value, uint clustersize );\n" "short __ovld sub_group_clustered_reduce_or( short value, uint clustersize );\n" "ushort __ovld sub_group_clustered_reduce_or( ushort value, uint clustersize );\n" "int __ovld sub_group_clustered_reduce_or( int value, uint clustersize );\n" "uint __ovld sub_group_clustered_reduce_or( uint value, uint clustersize );\n" "long __ovld sub_group_clustered_reduce_or( long value, uint clustersize );\n" "ulong __ovld sub_group_clustered_reduce_or( ulong value, uint clustersize );\n" "\n" "char __ovld sub_group_clustered_reduce_xor( char value, uint clustersize );\n" "uchar __ovld sub_group_clustered_reduce_xor( uchar value, uint clustersize );\n" "short __ovld sub_group_clustered_reduce_xor( short value, uint clustersize );\n" "ushort __ovld sub_group_clustered_reduce_xor( ushort value, uint clustersize );\n" "int __ovld sub_group_clustered_reduce_xor( int value, uint clustersize );\n" "uint __ovld sub_group_clustered_reduce_xor( uint value, uint clustersize );\n" "long __ovld sub_group_clustered_reduce_xor( long value, uint clustersize );\n" "ulong __ovld sub_group_clustered_reduce_xor( ulong value, uint clustersize );\n" "\n" "int __ovld sub_group_clustered_reduce_logical_and( int 
predicate, uint clustersize );\n" "int __ovld sub_group_clustered_reduce_logical_or( int predicate, uint clustersize );\n" "int __ovld sub_group_clustered_reduce_logical_xor( int predicate, uint clustersize );\n" "\n" "#if defined(cl_khr_fp16)\n" "half __ovld sub_group_clustered_reduce_add( half value, uint clustersize );\n" "half __ovld sub_group_clustered_reduce_mul( half value, uint clustersize );\n" "half __ovld sub_group_clustered_reduce_min( half value, uint clustersize );\n" "half __ovld sub_group_clustered_reduce_max( half value, uint clustersize );\n" "#endif // cl_khr_fp16\n" "\n" "#if defined(cl_khr_fp64)\n" "double __ovld sub_group_clustered_reduce_add( double value, uint clustersize );\n" "double __ovld sub_group_clustered_reduce_mul( double value, uint clustersize );\n" "double __ovld sub_group_clustered_reduce_min( double value, uint clustersize );\n" "double __ovld sub_group_clustered_reduce_max( double value, uint clustersize );\n" "#endif // cl_khr_fp64\n" "\n" "#endif // cl_khr_subgroup_clustered_reduce\n" "\n" "#if defined(cl_khr_extended_bit_ops)\n" "char __ovld __cnfn bitfield_insert(char, char, uint, uint);\n" "uchar __ovld __cnfn bitfield_insert(uchar, uchar, uint, uint);\n" "short __ovld __cnfn bitfield_insert(short, short, uint, uint);\n" "ushort __ovld __cnfn bitfield_insert(ushort, ushort, uint, uint);\n" "int __ovld __cnfn bitfield_insert(int, int, uint, uint);\n" "uint __ovld __cnfn bitfield_insert(uint, uint, uint, uint);\n" "long __ovld __cnfn bitfield_insert(long, long, uint, uint);\n" "ulong __ovld __cnfn bitfield_insert(ulong, ulong, uint, uint);\n" "char2 __ovld __cnfn bitfield_insert(char2, char2, uint, uint);\n" "uchar2 __ovld __cnfn bitfield_insert(uchar2, uchar2, uint, uint);\n" "short2 __ovld __cnfn bitfield_insert(short2, short2, uint, uint);\n" "ushort2 __ovld __cnfn bitfield_insert(ushort2, ushort2, uint, uint);\n" "int2 __ovld __cnfn bitfield_insert(int2, int2, uint, uint);\n" "uint2 __ovld __cnfn bitfield_insert(uint2, 
uint2, uint, uint);\n" "long2 __ovld __cnfn bitfield_insert(long2, long2, uint, uint);\n" "ulong2 __ovld __cnfn bitfield_insert(ulong2, ulong2, uint, uint);\n" "char3 __ovld __cnfn bitfield_insert(char3, char3, uint, uint);\n" "uchar3 __ovld __cnfn bitfield_insert(uchar3, uchar3, uint, uint);\n" "short3 __ovld __cnfn bitfield_insert(short3, short3, uint, uint);\n" "ushort3 __ovld __cnfn bitfield_insert(ushort3, ushort3, uint, uint);\n" "int3 __ovld __cnfn bitfield_insert(int3, int3, uint, uint);\n" "uint3 __ovld __cnfn bitfield_insert(uint3, uint3, uint, uint);\n" "long3 __ovld __cnfn bitfield_insert(long3, long3, uint, uint);\n" "ulong3 __ovld __cnfn bitfield_insert(ulong3, ulong3, uint, uint);\n" "char4 __ovld __cnfn bitfield_insert(char4, char4, uint, uint);\n" "uchar4 __ovld __cnfn bitfield_insert(uchar4, uchar4, uint, uint);\n" "short4 __ovld __cnfn bitfield_insert(short4, short4, uint, uint);\n" "ushort4 __ovld __cnfn bitfield_insert(ushort4, ushort4, uint, uint);\n" "int4 __ovld __cnfn bitfield_insert(int4, int4, uint, uint);\n" "uint4 __ovld __cnfn bitfield_insert(uint4, uint4, uint, uint);\n" "long4 __ovld __cnfn bitfield_insert(long4, long4, uint, uint);\n" "ulong4 __ovld __cnfn bitfield_insert(ulong4, ulong4, uint, uint);\n" "char8 __ovld __cnfn bitfield_insert(char8, char8, uint, uint);\n" "uchar8 __ovld __cnfn bitfield_insert(uchar8, uchar8, uint, uint);\n" "short8 __ovld __cnfn bitfield_insert(short8, short8, uint, uint);\n" "ushort8 __ovld __cnfn bitfield_insert(ushort8, ushort8, uint, uint);\n" "int8 __ovld __cnfn bitfield_insert(int8, int8, uint, uint);\n" "uint8 __ovld __cnfn bitfield_insert(uint8, uint8, uint, uint);\n" "long8 __ovld __cnfn bitfield_insert(long8, long8, uint, uint);\n" "ulong8 __ovld __cnfn bitfield_insert(ulong8, ulong8, uint, uint);\n" "char16 __ovld __cnfn bitfield_insert(char16, char16, uint, uint);\n" "uchar16 __ovld __cnfn bitfield_insert(uchar16, uchar16, uint, uint);\n" "short16 __ovld __cnfn bitfield_insert(short16, 
short16, uint, uint);\n" "ushort16 __ovld __cnfn bitfield_insert(ushort16, ushort16, uint, uint);\n" "int16 __ovld __cnfn bitfield_insert(int16, int16, uint, uint);\n" "uint16 __ovld __cnfn bitfield_insert(uint16, uint16, uint, uint);\n" "long16 __ovld __cnfn bitfield_insert(long16, long16, uint, uint);\n" "ulong16 __ovld __cnfn bitfield_insert(ulong16, ulong16, uint, uint);\n" "\n" "char __ovld __cnfn bitfield_extract_signed(char, uint, uint);\n" "short __ovld __cnfn bitfield_extract_signed(short, uint, uint);\n" "int __ovld __cnfn bitfield_extract_signed(int, uint, uint);\n" "long __ovld __cnfn bitfield_extract_signed(long, uint, uint);\n" "char2 __ovld __cnfn bitfield_extract_signed(char2, uint, uint);\n" "short2 __ovld __cnfn bitfield_extract_signed(short2, uint, uint);\n" "int2 __ovld __cnfn bitfield_extract_signed(int2, uint, uint);\n" "long2 __ovld __cnfn bitfield_extract_signed(long2, uint, uint);\n" "char3 __ovld __cnfn bitfield_extract_signed(char3, uint, uint);\n" "short3 __ovld __cnfn bitfield_extract_signed(short3, uint, uint);\n" "int3 __ovld __cnfn bitfield_extract_signed(int3, uint, uint);\n" "long3 __ovld __cnfn bitfield_extract_signed(long3, uint, uint);\n" "char4 __ovld __cnfn bitfield_extract_signed(char4, uint, uint);\n" "short4 __ovld __cnfn bitfield_extract_signed(short4, uint, uint);\n" "int4 __ovld __cnfn bitfield_extract_signed(int4, uint, uint);\n" "long4 __ovld __cnfn bitfield_extract_signed(long4, uint, uint);\n" "char8 __ovld __cnfn bitfield_extract_signed(char8, uint, uint);\n" "short8 __ovld __cnfn bitfield_extract_signed(short8, uint, uint);\n" "int8 __ovld __cnfn bitfield_extract_signed(int8, uint, uint);\n" "long8 __ovld __cnfn bitfield_extract_signed(long8, uint, uint);\n" "char16 __ovld __cnfn bitfield_extract_signed(char16, uint, uint);\n" "short16 __ovld __cnfn bitfield_extract_signed(short16, uint, uint);\n" "int16 __ovld __cnfn bitfield_extract_signed(int16, uint, uint);\n" "long16 __ovld __cnfn 
bitfield_extract_signed(long16, uint, uint);\n" "\n" "char __ovld __cnfn bitfield_extract_signed(uchar, uint, uint);\n" "short __ovld __cnfn bitfield_extract_signed(ushort, uint, uint);\n" "int __ovld __cnfn bitfield_extract_signed(uint, uint, uint);\n" "long __ovld __cnfn bitfield_extract_signed(ulong, uint, uint);\n" "char2 __ovld __cnfn bitfield_extract_signed(uchar2, uint, uint);\n" "short2 __ovld __cnfn bitfield_extract_signed(ushort2, uint, uint);\n" "int2 __ovld __cnfn bitfield_extract_signed(uint2, uint, uint);\n" "long2 __ovld __cnfn bitfield_extract_signed(ulong2, uint, uint);\n" "char3 __ovld __cnfn bitfield_extract_signed(uchar3, uint, uint);\n" "short3 __ovld __cnfn bitfield_extract_signed(ushort3, uint, uint);\n" "int3 __ovld __cnfn bitfield_extract_signed(uint3, uint, uint);\n" "long3 __ovld __cnfn bitfield_extract_signed(ulong3, uint, uint);\n" "char4 __ovld __cnfn bitfield_extract_signed(uchar4, uint, uint);\n" "short4 __ovld __cnfn bitfield_extract_signed(ushort4, uint, uint);\n" "int4 __ovld __cnfn bitfield_extract_signed(uint4, uint, uint);\n" "long4 __ovld __cnfn bitfield_extract_signed(ulong4, uint, uint);\n" "char8 __ovld __cnfn bitfield_extract_signed(uchar8, uint, uint);\n" "short8 __ovld __cnfn bitfield_extract_signed(ushort8, uint, uint);\n" "int8 __ovld __cnfn bitfield_extract_signed(uint8, uint, uint);\n" "long8 __ovld __cnfn bitfield_extract_signed(ulong8, uint, uint);\n" "char16 __ovld __cnfn bitfield_extract_signed(uchar16, uint, uint);\n" "short16 __ovld __cnfn bitfield_extract_signed(ushort16, uint, uint);\n" "int16 __ovld __cnfn bitfield_extract_signed(uint16, uint, uint);\n" "long16 __ovld __cnfn bitfield_extract_signed(ulong16, uint, uint);\n" "\n" "uchar __ovld __cnfn bitfield_extract_unsigned(char, uint, uint);\n" "ushort __ovld __cnfn bitfield_extract_unsigned(short, uint, uint);\n" "uint __ovld __cnfn bitfield_extract_unsigned(int, uint, uint);\n" "ulong __ovld __cnfn bitfield_extract_unsigned(long, uint, uint);\n" "uchar2 
__ovld __cnfn bitfield_extract_unsigned(char2, uint, uint);\n" "ushort2 __ovld __cnfn bitfield_extract_unsigned(short2, uint, uint);\n" "uint2 __ovld __cnfn bitfield_extract_unsigned(int2, uint, uint);\n" "ulong2 __ovld __cnfn bitfield_extract_unsigned(long2, uint, uint);\n" "uchar3 __ovld __cnfn bitfield_extract_unsigned(char3, uint, uint);\n" "ushort3 __ovld __cnfn bitfield_extract_unsigned(short3, uint, uint);\n" "uint3 __ovld __cnfn bitfield_extract_unsigned(int3, uint, uint);\n" "ulong3 __ovld __cnfn bitfield_extract_unsigned(long3, uint, uint);\n" "uchar4 __ovld __cnfn bitfield_extract_unsigned(char4, uint, uint);\n" "ushort4 __ovld __cnfn bitfield_extract_unsigned(short4, uint, uint);\n" "uint4 __ovld __cnfn bitfield_extract_unsigned(int4, uint, uint);\n" "ulong4 __ovld __cnfn bitfield_extract_unsigned(long4, uint, uint);\n" "uchar8 __ovld __cnfn bitfield_extract_unsigned(char8, uint, uint);\n" "ushort8 __ovld __cnfn bitfield_extract_unsigned(short8, uint, uint);\n" "uint8 __ovld __cnfn bitfield_extract_unsigned(int8, uint, uint);\n" "ulong8 __ovld __cnfn bitfield_extract_unsigned(long8, uint, uint);\n" "uchar16 __ovld __cnfn bitfield_extract_unsigned(char16, uint, uint);\n" "ushort16 __ovld __cnfn bitfield_extract_unsigned(short16, uint, uint);\n" "uint16 __ovld __cnfn bitfield_extract_unsigned(int16, uint, uint);\n" "ulong16 __ovld __cnfn bitfield_extract_unsigned(long16, uint, uint);\n" "\n" "uchar __ovld __cnfn bitfield_extract_unsigned(uchar, uint, uint);\n" "ushort __ovld __cnfn bitfield_extract_unsigned(ushort, uint, uint);\n" "uint __ovld __cnfn bitfield_extract_unsigned(uint, uint, uint);\n" "ulong __ovld __cnfn bitfield_extract_unsigned(ulong, uint, uint);\n" "uchar2 __ovld __cnfn bitfield_extract_unsigned(uchar2, uint, uint);\n" "ushort2 __ovld __cnfn bitfield_extract_unsigned(ushort2, uint, uint);\n" "uint2 __ovld __cnfn bitfield_extract_unsigned(uint2, uint, uint);\n" "ulong2 __ovld __cnfn bitfield_extract_unsigned(ulong2, uint, uint);\n" 
"uchar3 __ovld __cnfn bitfield_extract_unsigned(uchar3, uint, uint);\n" "ushort3 __ovld __cnfn bitfield_extract_unsigned(ushort3, uint, uint);\n" "uint3 __ovld __cnfn bitfield_extract_unsigned(uint3, uint, uint);\n" "ulong3 __ovld __cnfn bitfield_extract_unsigned(ulong3, uint, uint);\n" "uchar4 __ovld __cnfn bitfield_extract_unsigned(uchar4, uint, uint);\n" "ushort4 __ovld __cnfn bitfield_extract_unsigned(ushort4, uint, uint);\n" "uint4 __ovld __cnfn bitfield_extract_unsigned(uint4, uint, uint);\n" "ulong4 __ovld __cnfn bitfield_extract_unsigned(ulong4, uint, uint);\n" "uchar8 __ovld __cnfn bitfield_extract_unsigned(uchar8, uint, uint);\n" "ushort8 __ovld __cnfn bitfield_extract_unsigned(ushort8, uint, uint);\n" "uint8 __ovld __cnfn bitfield_extract_unsigned(uint8, uint, uint);\n" "ulong8 __ovld __cnfn bitfield_extract_unsigned(ulong8, uint, uint);\n" "uchar16 __ovld __cnfn bitfield_extract_unsigned(uchar16, uint, uint);\n" "ushort16 __ovld __cnfn bitfield_extract_unsigned(ushort16, uint, uint);\n" "uint16 __ovld __cnfn bitfield_extract_unsigned(uint16, uint, uint);\n" "ulong16 __ovld __cnfn bitfield_extract_unsigned(ulong16, uint, uint);\n" "\n" "char __ovld __cnfn bit_reverse(char);\n" "uchar __ovld __cnfn bit_reverse(uchar);\n" "short __ovld __cnfn bit_reverse(short);\n" "ushort __ovld __cnfn bit_reverse(ushort);\n" "int __ovld __cnfn bit_reverse(int);\n" "uint __ovld __cnfn bit_reverse(uint);\n" "long __ovld __cnfn bit_reverse(long);\n" "ulong __ovld __cnfn bit_reverse(ulong);\n" "char2 __ovld __cnfn bit_reverse(char2);\n" "uchar2 __ovld __cnfn bit_reverse(uchar2);\n" "short2 __ovld __cnfn bit_reverse(short2);\n" "ushort2 __ovld __cnfn bit_reverse(ushort2);\n" "int2 __ovld __cnfn bit_reverse(int2);\n" "uint2 __ovld __cnfn bit_reverse(uint2);\n" "long2 __ovld __cnfn bit_reverse(long2);\n" "ulong2 __ovld __cnfn bit_reverse(ulong2);\n" "char3 __ovld __cnfn bit_reverse(char3);\n" "uchar3 __ovld __cnfn bit_reverse(uchar3);\n" "short3 __ovld __cnfn 
bit_reverse(short3);\n" "ushort3 __ovld __cnfn bit_reverse(ushort3);\n" "int3 __ovld __cnfn bit_reverse(int3);\n" "uint3 __ovld __cnfn bit_reverse(uint3);\n" "long3 __ovld __cnfn bit_reverse(long3);\n" "ulong3 __ovld __cnfn bit_reverse(ulong3);\n" "char4 __ovld __cnfn bit_reverse(char4);\n" "uchar4 __ovld __cnfn bit_reverse(uchar4);\n" "short4 __ovld __cnfn bit_reverse(short4);\n" "ushort4 __ovld __cnfn bit_reverse(ushort4);\n" "int4 __ovld __cnfn bit_reverse(int4);\n" "uint4 __ovld __cnfn bit_reverse(uint4);\n" "long4 __ovld __cnfn bit_reverse(long4);\n" "ulong4 __ovld __cnfn bit_reverse(ulong4);\n" "char8 __ovld __cnfn bit_reverse(char8);\n" "uchar8 __ovld __cnfn bit_reverse(uchar8);\n" "short8 __ovld __cnfn bit_reverse(short8);\n" "ushort8 __ovld __cnfn bit_reverse(ushort8);\n" "int8 __ovld __cnfn bit_reverse(int8);\n" "uint8 __ovld __cnfn bit_reverse(uint8);\n" "long8 __ovld __cnfn bit_reverse(long8);\n" "ulong8 __ovld __cnfn bit_reverse(ulong8);\n" "char16 __ovld __cnfn bit_reverse(char16);\n" "uchar16 __ovld __cnfn bit_reverse(uchar16);\n" "short16 __ovld __cnfn bit_reverse(short16);\n" "ushort16 __ovld __cnfn bit_reverse(ushort16);\n" "int16 __ovld __cnfn bit_reverse(int16);\n" "uint16 __ovld __cnfn bit_reverse(uint16);\n" "long16 __ovld __cnfn bit_reverse(long16);\n" "ulong16 __ovld __cnfn bit_reverse(ulong16);\n" "#endif // cl_khr_extended_bit_ops\n" "\n" "#if defined(__opencl_c_integer_dot_product_input_4x8bit)\n" "uint __ovld __cnfn dot(uchar4, uchar4);\n" "int __ovld __cnfn dot(char4, char4);\n" "int __ovld __cnfn dot(uchar4, char4);\n" "int __ovld __cnfn dot(char4, uchar4);\n" "\n" "uint __ovld __cnfn dot_acc_sat(uchar4, uchar4, uint);\n" "int __ovld __cnfn dot_acc_sat(char4, char4, int);\n" "int __ovld __cnfn dot_acc_sat(uchar4, char4, int);\n" "int __ovld __cnfn dot_acc_sat(char4, uchar4, int);\n" "#endif // __opencl_c_integer_dot_product_input_4x8bit\n" "\n" "#if defined(__opencl_c_integer_dot_product_input_4x8bit_packed)\n" "uint __ovld __cnfn 
dot_4x8packed_uu_uint(uint, uint);\n" "int __ovld __cnfn dot_4x8packed_ss_int(uint, uint);\n" "int __ovld __cnfn dot_4x8packed_us_int(uint, uint);\n" "int __ovld __cnfn dot_4x8packed_su_int(uint, uint);\n" "\n" "uint __ovld __cnfn dot_acc_sat_4x8packed_uu_uint(uint, uint, uint);\n" "int __ovld __cnfn dot_acc_sat_4x8packed_ss_int(uint, uint, int);\n" "int __ovld __cnfn dot_acc_sat_4x8packed_us_int(uint, uint, int);\n" "int __ovld __cnfn dot_acc_sat_4x8packed_su_int(uint, uint, int);\n" "#endif // __opencl_c_integer_dot_product_input_4x8bit_packed\n" "\n" "#if defined(cl_khr_subgroup_rotate)\n" "char __ovld __conv sub_group_rotate(char, int);\n" "uchar __ovld __conv sub_group_rotate(uchar, int);\n" "short __ovld __conv sub_group_rotate(short, int);\n" "ushort __ovld __conv sub_group_rotate(ushort, int);\n" "int __ovld __conv sub_group_rotate(int, int);\n" "uint __ovld __conv sub_group_rotate(uint, int);\n" "long __ovld __conv sub_group_rotate(long, int);\n" "ulong __ovld __conv sub_group_rotate(ulong, int);\n" "float __ovld __conv sub_group_rotate(float, int);\n" "#if defined(cl_khr_fp64)\n" "double __ovld __conv sub_group_rotate(double, int);\n" "#endif // cl_khr_fp64\n" "#if defined(cl_khr_fp16)\n" "half __ovld __conv sub_group_rotate(half, int);\n" "#endif // cl_khr_fp16\n" "\n" "char __ovld __conv sub_group_clustered_rotate(char, int, uint);\n" "uchar __ovld __conv sub_group_clustered_rotate(uchar, int, uint);\n" "short __ovld __conv sub_group_clustered_rotate(short, int, uint);\n" "ushort __ovld __conv sub_group_clustered_rotate(ushort, int, uint);\n" "int __ovld __conv sub_group_clustered_rotate(int, int, uint);\n" "uint __ovld __conv sub_group_clustered_rotate(uint, int, uint);\n" "long __ovld __conv sub_group_clustered_rotate(long, int, uint);\n" "ulong __ovld __conv sub_group_clustered_rotate(ulong, int, uint);\n" "float __ovld __conv sub_group_clustered_rotate(float, int, uint);\n" "#if defined(cl_khr_fp64)\n" "double __ovld __conv 
sub_group_clustered_rotate(double, int, uint);\n" "#endif // cl_khr_fp64\n" "#if defined(cl_khr_fp16)\n" "half __ovld __conv sub_group_clustered_rotate(half, int, uint);\n" "#endif // cl_khr_fp16\n" "#endif // cl_khr_subgroup_rotate\n" "\n" "#if defined(cl_intel_subgroups)\n" "// Intel-Specific Sub Group Functions\n" "float __ovld __conv intel_sub_group_shuffle( float , uint );\n" "float2 __ovld __conv intel_sub_group_shuffle( float2, uint );\n" "float3 __ovld __conv intel_sub_group_shuffle( float3, uint );\n" "float4 __ovld __conv intel_sub_group_shuffle( float4, uint );\n" "float8 __ovld __conv intel_sub_group_shuffle( float8, uint );\n" "float16 __ovld __conv intel_sub_group_shuffle( float16, uint );\n" "\n" "int __ovld __conv intel_sub_group_shuffle( int , uint );\n" "int2 __ovld __conv intel_sub_group_shuffle( int2, uint );\n" "int3 __ovld __conv intel_sub_group_shuffle( int3, uint );\n" "int4 __ovld __conv intel_sub_group_shuffle( int4, uint );\n" "int8 __ovld __conv intel_sub_group_shuffle( int8, uint );\n" "int16 __ovld __conv intel_sub_group_shuffle( int16, uint );\n" "\n" "uint __ovld __conv intel_sub_group_shuffle( uint , uint );\n" "uint2 __ovld __conv intel_sub_group_shuffle( uint2, uint );\n" "uint3 __ovld __conv intel_sub_group_shuffle( uint3, uint );\n" "uint4 __ovld __conv intel_sub_group_shuffle( uint4, uint );\n" "uint8 __ovld __conv intel_sub_group_shuffle( uint8, uint );\n" "uint16 __ovld __conv intel_sub_group_shuffle( uint16, uint );\n" "\n" "long __ovld __conv intel_sub_group_shuffle( long, uint );\n" "ulong __ovld __conv intel_sub_group_shuffle( ulong, uint );\n" "\n" "float __ovld __conv intel_sub_group_shuffle_down( float cur, float next, uint );\n" "float2 __ovld __conv intel_sub_group_shuffle_down( float2 cur, float2 next, uint );\n" "float3 __ovld __conv intel_sub_group_shuffle_down( float3 cur, float3 next, uint );\n" "float4 __ovld __conv intel_sub_group_shuffle_down( float4 cur, float4 next, uint );\n" "float8 __ovld __conv 
intel_sub_group_shuffle_down( float8 cur, float8 next, uint );\n" "float16 __ovld __conv intel_sub_group_shuffle_down( float16 cur, float16 next, uint );\n" "\n" "int __ovld __conv intel_sub_group_shuffle_down( int cur, int next, uint );\n" "int2 __ovld __conv intel_sub_group_shuffle_down( int2 cur, int2 next, uint );\n" "int3 __ovld __conv intel_sub_group_shuffle_down( int3 cur, int3 next, uint );\n" "int4 __ovld __conv intel_sub_group_shuffle_down( int4 cur, int4 next, uint );\n" "int8 __ovld __conv intel_sub_group_shuffle_down( int8 cur, int8 next, uint );\n" "int16 __ovld __conv intel_sub_group_shuffle_down( int16 cur, int16 next, uint );\n" "\n" "uint __ovld __conv intel_sub_group_shuffle_down( uint cur, uint next, uint );\n" "uint2 __ovld __conv intel_sub_group_shuffle_down( uint2 cur, uint2 next, uint );\n" "uint3 __ovld __conv intel_sub_group_shuffle_down( uint3 cur, uint3 next, uint );\n" "uint4 __ovld __conv intel_sub_group_shuffle_down( uint4 cur, uint4 next, uint );\n" "uint8 __ovld __conv intel_sub_group_shuffle_down( uint8 cur, uint8 next, uint );\n" "uint16 __ovld __conv intel_sub_group_shuffle_down( uint16 cur, uint16 next, uint );\n" "\n" "long __ovld __conv intel_sub_group_shuffle_down( long prev, long cur, uint );\n" "ulong __ovld __conv intel_sub_group_shuffle_down( ulong prev, ulong cur, uint );\n" "\n" "float __ovld __conv intel_sub_group_shuffle_up( float prev, float cur, uint );\n" "float2 __ovld __conv intel_sub_group_shuffle_up( float2 prev, float2 cur, uint );\n" "float3 __ovld __conv intel_sub_group_shuffle_up( float3 prev, float3 cur, uint );\n" "float4 __ovld __conv intel_sub_group_shuffle_up( float4 prev, float4 cur, uint );\n" "float8 __ovld __conv intel_sub_group_shuffle_up( float8 prev, float8 cur, uint );\n" "float16 __ovld __conv intel_sub_group_shuffle_up( float16 prev, float16 cur, uint );\n" "\n" "int __ovld __conv intel_sub_group_shuffle_up( int prev, int cur, uint );\n" "int2 __ovld __conv intel_sub_group_shuffle_up( int2 
prev, int2 cur, uint );\n" "int3 __ovld __conv intel_sub_group_shuffle_up( int3 prev, int3 cur, uint );\n" "int4 __ovld __conv intel_sub_group_shuffle_up( int4 prev, int4 cur, uint );\n" "int8 __ovld __conv intel_sub_group_shuffle_up( int8 prev, int8 cur, uint );\n" "int16 __ovld __conv intel_sub_group_shuffle_up( int16 prev, int16 cur, uint );\n" "\n" "uint __ovld __conv intel_sub_group_shuffle_up( uint prev, uint cur, uint );\n" "uint2 __ovld __conv intel_sub_group_shuffle_up( uint2 prev, uint2 cur, uint );\n" "uint3 __ovld __conv intel_sub_group_shuffle_up( uint3 prev, uint3 cur, uint );\n" "uint4 __ovld __conv intel_sub_group_shuffle_up( uint4 prev, uint4 cur, uint );\n" "uint8 __ovld __conv intel_sub_group_shuffle_up( uint8 prev, uint8 cur, uint );\n" "uint16 __ovld __conv intel_sub_group_shuffle_up( uint16 prev, uint16 cur, uint );\n" "\n" "long __ovld __conv intel_sub_group_shuffle_up( long prev, long cur, uint );\n" "ulong __ovld __conv intel_sub_group_shuffle_up( ulong prev, ulong cur, uint );\n" "\n" "float __ovld __conv intel_sub_group_shuffle_xor( float , uint );\n" "float2 __ovld __conv intel_sub_group_shuffle_xor( float2, uint );\n" "float3 __ovld __conv intel_sub_group_shuffle_xor( float3, uint );\n" "float4 __ovld __conv intel_sub_group_shuffle_xor( float4, uint );\n" "float8 __ovld __conv intel_sub_group_shuffle_xor( float8, uint );\n" "float16 __ovld __conv intel_sub_group_shuffle_xor( float16, uint );\n" "\n" "int __ovld __conv intel_sub_group_shuffle_xor( int , uint );\n" "int2 __ovld __conv intel_sub_group_shuffle_xor( int2, uint );\n" "int3 __ovld __conv intel_sub_group_shuffle_xor( int3, uint );\n" "int4 __ovld __conv intel_sub_group_shuffle_xor( int4, uint );\n" "int8 __ovld __conv intel_sub_group_shuffle_xor( int8, uint );\n" "int16 __ovld __conv intel_sub_group_shuffle_xor( int16, uint );\n" "\n" "uint __ovld __conv intel_sub_group_shuffle_xor( uint , uint );\n" "uint2 __ovld __conv intel_sub_group_shuffle_xor( uint2, uint );\n" "uint3 
__ovld __conv intel_sub_group_shuffle_xor( uint3, uint );\n" "uint4 __ovld __conv intel_sub_group_shuffle_xor( uint4, uint );\n" "uint8 __ovld __conv intel_sub_group_shuffle_xor( uint8, uint );\n" "uint16 __ovld __conv intel_sub_group_shuffle_xor( uint16, uint );\n" "\n" "long __ovld __conv intel_sub_group_shuffle_xor( long, uint );\n" "ulong __ovld __conv intel_sub_group_shuffle_xor( ulong, uint );\n" "\n" "#if defined(__opencl_c_images)\n" "uint __ovld __conv intel_sub_group_block_read(read_only image2d_t, int2);\n" "uint2 __ovld __conv intel_sub_group_block_read2(read_only image2d_t, int2);\n" "uint4 __ovld __conv intel_sub_group_block_read4(read_only image2d_t, int2);\n" "uint8 __ovld __conv intel_sub_group_block_read8(read_only image2d_t, int2);\n" "#endif\n" "\n" "#if defined(__opencl_c_read_write_images)\n" "uint __ovld __conv intel_sub_group_block_read(read_write image2d_t, int2);\n" "uint2 __ovld __conv intel_sub_group_block_read2(read_write image2d_t, int2);\n" "uint4 __ovld __conv intel_sub_group_block_read4(read_write image2d_t, int2);\n" "uint8 __ovld __conv intel_sub_group_block_read8(read_write image2d_t, int2);\n" "#endif // defined(__opencl_c_read_write_images)\n" "\n" "uint __ovld __conv intel_sub_group_block_read( const __global uint* p );\n" "uint2 __ovld __conv intel_sub_group_block_read2( const __global uint* p );\n" "uint4 __ovld __conv intel_sub_group_block_read4( const __global uint* p );\n" "uint8 __ovld __conv intel_sub_group_block_read8( const __global uint* p );\n" "\n" "#if defined(__opencl_c_images)\n" "void __ovld __conv intel_sub_group_block_write(write_only image2d_t, int2, uint);\n" "void __ovld __conv intel_sub_group_block_write2(write_only image2d_t, int2, uint2);\n" "void __ovld __conv intel_sub_group_block_write4(write_only image2d_t, int2, uint4);\n" "void __ovld __conv intel_sub_group_block_write8(write_only image2d_t, int2, uint8);\n" "#endif // defined(__opencl_c_images)\n" "\n" "#if 
defined(__opencl_c_read_write_images)\n" "void __ovld __conv intel_sub_group_block_write(read_write image2d_t, int2, uint);\n" "void __ovld __conv intel_sub_group_block_write2(read_write image2d_t, int2, uint2);\n" "void __ovld __conv intel_sub_group_block_write4(read_write image2d_t, int2, uint4);\n" "void __ovld __conv intel_sub_group_block_write8(read_write image2d_t, int2, uint8);\n" "#endif // defined(__opencl_c_read_write_images)\n" "\n" "void __ovld __conv intel_sub_group_block_write( __global uint* p, uint data );\n" "void __ovld __conv intel_sub_group_block_write2( __global uint* p, uint2 data );\n" "void __ovld __conv intel_sub_group_block_write4( __global uint* p, uint4 data );\n" "void __ovld __conv intel_sub_group_block_write8( __global uint* p, uint8 data );\n" "\n" "#ifdef cl_khr_fp16\n" "half __ovld __conv intel_sub_group_shuffle( half, uint );\n" "half __ovld __conv intel_sub_group_shuffle_down( half prev, half cur, uint );\n" "half __ovld __conv intel_sub_group_shuffle_up( half prev, half cur, uint );\n" "half __ovld __conv intel_sub_group_shuffle_xor( half, uint );\n" "#endif\n" "\n" "#if defined(cl_khr_fp64)\n" "double __ovld __conv intel_sub_group_shuffle( double, uint );\n" "double __ovld __conv intel_sub_group_shuffle_down( double prev, double cur, uint );\n" "double __ovld __conv intel_sub_group_shuffle_up( double prev, double cur, uint );\n" "double __ovld __conv intel_sub_group_shuffle_xor( double, uint );\n" "#endif\n" "\n" "#endif //cl_intel_subgroups\n" "\n" "#if defined(cl_intel_subgroups_short)\n" "short __ovld __conv intel_sub_group_broadcast( short , uint sub_group_local_id );\n" "short2 __ovld __conv intel_sub_group_broadcast( short2, uint sub_group_local_id );\n" "short3 __ovld __conv intel_sub_group_broadcast( short3, uint sub_group_local_id );\n" "short4 __ovld __conv intel_sub_group_broadcast( short4, uint sub_group_local_id );\n" "short8 __ovld __conv intel_sub_group_broadcast( short8, uint sub_group_local_id );\n" "\n" 
"ushort __ovld __conv intel_sub_group_broadcast( ushort , uint sub_group_local_id );\n" "ushort2 __ovld __conv intel_sub_group_broadcast( ushort2, uint sub_group_local_id );\n" "ushort3 __ovld __conv intel_sub_group_broadcast( ushort3, uint sub_group_local_id );\n" "ushort4 __ovld __conv intel_sub_group_broadcast( ushort4, uint sub_group_local_id );\n" "ushort8 __ovld __conv intel_sub_group_broadcast( ushort8, uint sub_group_local_id );\n" "\n" "short __ovld __conv intel_sub_group_shuffle( short , uint );\n" "short2 __ovld __conv intel_sub_group_shuffle( short2 , uint );\n" "short3 __ovld __conv intel_sub_group_shuffle( short3 , uint );\n" "short4 __ovld __conv intel_sub_group_shuffle( short4 , uint );\n" "short8 __ovld __conv intel_sub_group_shuffle( short8 , uint );\n" "short16 __ovld __conv intel_sub_group_shuffle( short16, uint);\n" "\n" "ushort __ovld __conv intel_sub_group_shuffle( ushort , uint );\n" "ushort2 __ovld __conv intel_sub_group_shuffle( ushort2 , uint );\n" "ushort3 __ovld __conv intel_sub_group_shuffle( ushort3 , uint );\n" "ushort4 __ovld __conv intel_sub_group_shuffle( ushort4 , uint );\n" "ushort8 __ovld __conv intel_sub_group_shuffle( ushort8 , uint );\n" "ushort16 __ovld __conv intel_sub_group_shuffle( ushort16, uint );\n" "\n" "short __ovld __conv intel_sub_group_shuffle_down( short cur, short next, uint );\n" "short2 __ovld __conv intel_sub_group_shuffle_down( short2 cur, short2 next, uint );\n" "short3 __ovld __conv intel_sub_group_shuffle_down( short3 cur, short3 next, uint );\n" "short4 __ovld __conv intel_sub_group_shuffle_down( short4 cur, short4 next, uint );\n" "short8 __ovld __conv intel_sub_group_shuffle_down( short8 cur, short8 next, uint );\n" "short16 __ovld __conv intel_sub_group_shuffle_down( short16 cur, short16 next, uint );\n" "\n" "ushort __ovld __conv intel_sub_group_shuffle_down( ushort cur, ushort next, uint );\n" "ushort2 __ovld __conv intel_sub_group_shuffle_down( ushort2 cur, ushort2 next, uint );\n" "ushort3 __ovld 
__conv intel_sub_group_shuffle_down( ushort3 cur, ushort3 next, uint );\n" "ushort4 __ovld __conv intel_sub_group_shuffle_down( ushort4 cur, ushort4 next, uint );\n" "ushort8 __ovld __conv intel_sub_group_shuffle_down( ushort8 cur, ushort8 next, uint );\n" "ushort16 __ovld __conv intel_sub_group_shuffle_down( ushort16 cur, ushort16 next, uint );\n" "\n" "short __ovld __conv intel_sub_group_shuffle_up( short cur, short next, uint );\n" "short2 __ovld __conv intel_sub_group_shuffle_up( short2 cur, short2 next, uint );\n" "short3 __ovld __conv intel_sub_group_shuffle_up( short3 cur, short3 next, uint );\n" "short4 __ovld __conv intel_sub_group_shuffle_up( short4 cur, short4 next, uint );\n" "short8 __ovld __conv intel_sub_group_shuffle_up( short8 cur, short8 next, uint );\n" "short16 __ovld __conv intel_sub_group_shuffle_up( short16 cur, short16 next, uint );\n" "\n" "ushort __ovld __conv intel_sub_group_shuffle_up( ushort cur, ushort next, uint );\n" "ushort2 __ovld __conv intel_sub_group_shuffle_up( ushort2 cur, ushort2 next, uint );\n" "ushort3 __ovld __conv intel_sub_group_shuffle_up( ushort3 cur, ushort3 next, uint );\n" "ushort4 __ovld __conv intel_sub_group_shuffle_up( ushort4 cur, ushort4 next, uint );\n" "ushort8 __ovld __conv intel_sub_group_shuffle_up( ushort8 cur, ushort8 next, uint );\n" "ushort16 __ovld __conv intel_sub_group_shuffle_up( ushort16 cur, ushort16 next, uint );\n" "\n" "short __ovld __conv intel_sub_group_shuffle_xor( short , uint );\n" "short2 __ovld __conv intel_sub_group_shuffle_xor( short2 , uint );\n" "short3 __ovld __conv intel_sub_group_shuffle_xor( short3 , uint );\n" "short4 __ovld __conv intel_sub_group_shuffle_xor( short4 , uint );\n" "short8 __ovld __conv intel_sub_group_shuffle_xor( short8 , uint );\n" "short16 __ovld __conv intel_sub_group_shuffle_xor( short16, uint );\n" "\n" "ushort __ovld __conv intel_sub_group_shuffle_xor( ushort , uint );\n" "ushort2 __ovld __conv intel_sub_group_shuffle_xor( ushort2 , uint );\n" "ushort3 
__ovld __conv intel_sub_group_shuffle_xor( ushort3 , uint );\n" "ushort4 __ovld __conv intel_sub_group_shuffle_xor( ushort4 , uint );\n" "ushort8 __ovld __conv intel_sub_group_shuffle_xor( ushort8 , uint );\n" "ushort16 __ovld __conv intel_sub_group_shuffle_xor( ushort16, uint );\n" "\n" "short __ovld __conv intel_sub_group_reduce_add( short x );\n" "ushort __ovld __conv intel_sub_group_reduce_add( ushort x );\n" "short __ovld __conv intel_sub_group_reduce_min( short x );\n" "ushort __ovld __conv intel_sub_group_reduce_min( ushort x );\n" "short __ovld __conv intel_sub_group_reduce_max( short x );\n" "ushort __ovld __conv intel_sub_group_reduce_max( ushort x );\n" "\n" "short __ovld __conv intel_sub_group_scan_exclusive_add( short x );\n" "ushort __ovld __conv intel_sub_group_scan_exclusive_add( ushort x );\n" "short __ovld __conv intel_sub_group_scan_exclusive_min( short x );\n" "ushort __ovld __conv intel_sub_group_scan_exclusive_min( ushort x );\n" "short __ovld __conv intel_sub_group_scan_exclusive_max( short x );\n" "ushort __ovld __conv intel_sub_group_scan_exclusive_max( ushort x );\n" "\n" "short __ovld __conv intel_sub_group_scan_inclusive_add( short x );\n" "ushort __ovld __conv intel_sub_group_scan_inclusive_add( ushort x );\n" "short __ovld __conv intel_sub_group_scan_inclusive_min( short x );\n" "ushort __ovld __conv intel_sub_group_scan_inclusive_min( ushort x );\n" "short __ovld __conv intel_sub_group_scan_inclusive_max( short x );\n" "ushort __ovld __conv intel_sub_group_scan_inclusive_max( ushort x );\n" "\n" "#if defined(__opencl_c_images)\n" "uint __ovld __conv intel_sub_group_block_read_ui(read_only image2d_t, int2);\n" "uint2 __ovld __conv intel_sub_group_block_read_ui2(read_only image2d_t, int2);\n" "uint4 __ovld __conv intel_sub_group_block_read_ui4(read_only image2d_t, int2);\n" "uint8 __ovld __conv intel_sub_group_block_read_ui8(read_only image2d_t, int2);\n" "#endif // defined(__opencl_c_images)\n" "\n" "#if 
defined(__opencl_c_read_write_images)\n" "uint __ovld __conv intel_sub_group_block_read_ui(read_write image2d_t, int2);\n" "uint2 __ovld __conv intel_sub_group_block_read_ui2(read_write image2d_t, int2);\n" "uint4 __ovld __conv intel_sub_group_block_read_ui4(read_write image2d_t, int2);\n" "uint8 __ovld __conv intel_sub_group_block_read_ui8(read_write image2d_t, int2);\n" "#endif // defined(__opencl_c_read_write_images)\n" "\n" "uint __ovld __conv intel_sub_group_block_read_ui( const __global uint* p );\n" "uint2 __ovld __conv intel_sub_group_block_read_ui2( const __global uint* p );\n" "uint4 __ovld __conv intel_sub_group_block_read_ui4( const __global uint* p );\n" "uint8 __ovld __conv intel_sub_group_block_read_ui8( const __global uint* p );\n" "\n" "#if defined(__opencl_c_images)\n" "void __ovld __conv intel_sub_group_block_write_ui(read_only image2d_t, int2, uint);\n" "void __ovld __conv intel_sub_group_block_write_ui2(read_only image2d_t, int2, uint2);\n" "void __ovld __conv intel_sub_group_block_write_ui4(read_only image2d_t, int2, uint4);\n" "void __ovld __conv intel_sub_group_block_write_ui8(read_only image2d_t, int2, uint8);\n" "#endif //defined(__opencl_c_images)\n" "\n" "#if defined(__opencl_c_read_write_images)\n" "void __ovld __conv intel_sub_group_block_write_ui(read_write image2d_t, int2, uint);\n" "void __ovld __conv intel_sub_group_block_write_ui2(read_write image2d_t, int2, uint2);\n" "void __ovld __conv intel_sub_group_block_write_ui4(read_write image2d_t, int2, uint4);\n" "void __ovld __conv intel_sub_group_block_write_ui8(read_write image2d_t, int2, uint8);\n" "#endif // defined(__opencl_c_read_write_images)\n" "\n" "void __ovld __conv intel_sub_group_block_write_ui( __global uint* p, uint data );\n" "void __ovld __conv intel_sub_group_block_write_ui2( __global uint* p, uint2 data );\n" "void __ovld __conv intel_sub_group_block_write_ui4( __global uint* p, uint4 data );\n" "void __ovld __conv intel_sub_group_block_write_ui8( __global uint* p, 
uint8 data );\n" "\n" "#if defined(__opencl_c_images)\n" "ushort __ovld __conv intel_sub_group_block_read_us(read_only image2d_t, int2);\n" "ushort2 __ovld __conv intel_sub_group_block_read_us2(read_only image2d_t, int2);\n" "ushort4 __ovld __conv intel_sub_group_block_read_us4(read_only image2d_t, int2);\n" "ushort8 __ovld __conv intel_sub_group_block_read_us8(read_only image2d_t, int2);\n" "#endif // defined(__opencl_c_images)\n" "\n" "#if defined(__opencl_c_read_write_images)\n" "ushort __ovld __conv intel_sub_group_block_read_us(read_write image2d_t, int2);\n" "ushort2 __ovld __conv intel_sub_group_block_read_us2(read_write image2d_t, int2);\n" "ushort4 __ovld __conv intel_sub_group_block_read_us4(read_write image2d_t, int2);\n" "ushort8 __ovld __conv intel_sub_group_block_read_us8(read_write image2d_t, int2);\n" "#endif // defined(__opencl_c_read_write_images)\n" "\n" "ushort __ovld __conv intel_sub_group_block_read_us( const __global ushort* p );\n" "ushort2 __ovld __conv intel_sub_group_block_read_us2( const __global ushort* p );\n" "ushort4 __ovld __conv intel_sub_group_block_read_us4( const __global ushort* p );\n" "ushort8 __ovld __conv intel_sub_group_block_read_us8( const __global ushort* p );\n" "\n" "#if defined(__opencl_c_images)\n" "void __ovld __conv intel_sub_group_block_write_us(write_only image2d_t, int2, ushort);\n" "void __ovld __conv intel_sub_group_block_write_us2(write_only image2d_t, int2, ushort2);\n" "void __ovld __conv intel_sub_group_block_write_us4(write_only image2d_t, int2, ushort4);\n" "void __ovld __conv intel_sub_group_block_write_us8(write_only image2d_t, int2, ushort8);\n" "#endif // defined(__opencl_c_images)\n" "\n" "#if defined(__opencl_c_read_write_images)\n" "void __ovld __conv intel_sub_group_block_write_us(read_write image2d_t, int2, ushort);\n" "void __ovld __conv intel_sub_group_block_write_us2(read_write image2d_t, int2, ushort2);\n" "void __ovld __conv intel_sub_group_block_write_us4(read_write image2d_t, int2, 
ushort4);\n" "void __ovld __conv intel_sub_group_block_write_us8(read_write image2d_t, int2, ushort8);\n" "#endif // defined(__opencl_c_read_write_images)\n" "\n" "void __ovld __conv intel_sub_group_block_write_us( __global ushort* p, ushort data );\n" "void __ovld __conv intel_sub_group_block_write_us2( __global ushort* p, ushort2 data );\n" "void __ovld __conv intel_sub_group_block_write_us4( __global ushort* p, ushort4 data );\n" "void __ovld __conv intel_sub_group_block_write_us8( __global ushort* p, ushort8 data );\n" "#endif // cl_intel_subgroups_short\n" "\n" "#ifdef cl_intel_device_side_avc_motion_estimation\n" "#pragma OPENCL EXTENSION cl_intel_device_side_avc_motion_estimation : begin\n" "\n" "// MCE built-in functions\n" "uchar __ovld\n" "intel_sub_group_avc_mce_get_default_inter_base_multi_reference_penalty(\n" " uchar slice_type, uchar qp);\n" "ulong __ovld intel_sub_group_avc_mce_get_default_inter_shape_penalty(\n" " uchar slice_type, uchar qp);\n" "uchar __ovld intel_sub_group_avc_mce_get_default_inter_direction_penalty(\n" " uchar slice_type, uchar qp);\n" "uint __ovld intel_sub_group_avc_mce_get_default_intra_luma_shape_penalty(\n" " uchar slice_type, uchar qp);\n" "uint2 __ovld\n" "intel_sub_group_avc_mce_get_default_inter_motion_vector_cost_table(\n" " uchar slice_type, uchar qp);\n" "uchar __ovld intel_sub_group_avc_mce_get_default_intra_luma_mode_penalty(\n" " uchar slice_type, uchar qp);\n" "\n" "uint2 __ovld intel_sub_group_avc_mce_get_default_high_penalty_cost_table();\n" "uint2 __ovld intel_sub_group_avc_mce_get_default_medium_penalty_cost_table();\n" "uint2 __ovld intel_sub_group_avc_mce_get_default_low_penalty_cost_table();\n" "uint __ovld intel_sub_group_avc_mce_get_default_non_dc_luma_intra_penalty();\n" "uchar __ovld\n" "intel_sub_group_avc_mce_get_default_intra_chroma_mode_base_penalty();\n" "\n" "intel_sub_group_avc_mce_payload_t __ovld\n" "intel_sub_group_avc_mce_set_inter_base_multi_reference_penalty(\n" " uchar 
reference_base_penalty, intel_sub_group_avc_mce_payload_t payload);\n" "intel_sub_group_avc_mce_payload_t __ovld\n" "intel_sub_group_avc_mce_set_inter_shape_penalty(\n" " ulong packed_shape_penalty, intel_sub_group_avc_mce_payload_t payload);\n" "intel_sub_group_avc_mce_payload_t __ovld\n" "intel_sub_group_avc_mce_set_inter_direction_penalty(\n" " uchar direction_cost, intel_sub_group_avc_mce_payload_t payload);\n" "intel_sub_group_avc_mce_payload_t __ovld\n" "intel_sub_group_avc_mce_set_motion_vector_cost_function(\n" " ulong packed_cost_center_delta, uint2 packed_cost_table,\n" " uchar cost_precision, intel_sub_group_avc_mce_payload_t payload);\n" "intel_sub_group_avc_mce_payload_t __ovld\n" "intel_sub_group_avc_mce_set_ac_only_haar(\n" " intel_sub_group_avc_mce_payload_t payload);\n" "intel_sub_group_avc_mce_payload_t __ovld\n" "intel_sub_group_avc_mce_set_source_interlaced_field_polarity(\n" " uchar src_field_polarity, intel_sub_group_avc_mce_payload_t payload);\n" "intel_sub_group_avc_mce_payload_t __ovld\n" "intel_sub_group_avc_mce_set_single_reference_interlaced_field_polarity(\n" " uchar ref_field_polarity, intel_sub_group_avc_mce_payload_t payload);\n" "intel_sub_group_avc_mce_payload_t __ovld\n" "intel_sub_group_avc_mce_set_dual_reference_interlaced_field_polarities(\n" " uchar fwd_ref_field_polarity, uchar bwd_ref_field_polarity,\n" " intel_sub_group_avc_mce_payload_t payload);\n" "\n" "ulong __ovld intel_sub_group_avc_mce_get_motion_vectors(\n" " intel_sub_group_avc_mce_result_t result);\n" "ushort __ovld intel_sub_group_avc_mce_get_inter_distortions(\n" " intel_sub_group_avc_mce_result_t result);\n" "ushort __ovld intel_sub_group_avc_mce_get_best_inter_distortion(\n" " intel_sub_group_avc_mce_result_t result);\n" "uchar __ovld intel_sub_group_avc_mce_get_inter_major_shape(\n" " intel_sub_group_avc_mce_result_t result);\n" "uchar __ovld intel_sub_group_avc_mce_get_inter_minor_shapes(\n" " intel_sub_group_avc_mce_result_t result);\n" "uchar __ovld 
intel_sub_group_avc_mce_get_inter_directions(\n" " intel_sub_group_avc_mce_result_t result);\n" "uchar __ovld intel_sub_group_avc_mce_get_inter_motion_vector_count(\n" " intel_sub_group_avc_mce_result_t result);\n" "uint __ovld intel_sub_group_avc_mce_get_inter_reference_ids(\n" " intel_sub_group_avc_mce_result_t result);\n" "uchar __ovld\n" "intel_sub_group_avc_mce_get_inter_reference_interlaced_field_polarities(\n" " uint packed_reference_ids, uint packed_reference_parameter_field_polarities,\n" " intel_sub_group_avc_mce_result_t result);\n" "\n" "// IME built-in functions\n" "intel_sub_group_avc_ime_payload_t __ovld\n" "intel_sub_group_avc_ime_initialize(\n" " ushort2 src_coord, uchar partition_mask, uchar sad_adjustment);\n" "intel_sub_group_avc_ime_payload_t __ovld\n" "intel_sub_group_avc_ime_set_single_reference(\n" " short2 ref_offset, uchar search_window_config,\n" " intel_sub_group_avc_ime_payload_t payload);\n" "intel_sub_group_avc_ime_payload_t __ovld\n" "intel_sub_group_avc_ime_set_dual_reference(\n" " short2 fwd_ref_offset, short2 bwd_ref_offset, uchar search_window_config,\n" " intel_sub_group_avc_ime_payload_t payload);\n" "intel_sub_group_avc_ime_payload_t __ovld\n" "intel_sub_group_avc_ime_set_max_motion_vector_count(\n" " uchar max_motion_vector_count, intel_sub_group_avc_ime_payload_t payload);\n" "intel_sub_group_avc_ime_payload_t __ovld\n" "intel_sub_group_avc_ime_set_unidirectional_mix_disable(\n" " intel_sub_group_avc_ime_payload_t payload);\n" "intel_sub_group_avc_ime_payload_t __ovld\n" "intel_sub_group_avc_ime_set_early_search_termination_threshold(\n" " uchar threshold, intel_sub_group_avc_ime_payload_t payload);\n" "intel_sub_group_avc_ime_payload_t __ovld\n" "intel_sub_group_avc_ime_set_weighted_sad(\n" " uint packed_sad_weights, intel_sub_group_avc_ime_payload_t payload);\n" "\n" "__attribute__((deprecated(\"If you use the latest Intel driver, please use \"\n" " \"intel_sub_group_avc_ime_ref_window_size instead\",\n" " 
\"intel_sub_group_avc_ime_ref_window_size\")))\n" "ushort2 __ovld\n" "intel_sub_group_ime_ref_window_size(uchar search_window_config, char dual_ref);\n" "ushort2 __ovld intel_sub_group_avc_ime_ref_window_size(\n" " uchar search_window_config, char dual_ref);\n" "short2 __ovld intel_sub_group_avc_ime_adjust_ref_offset(\n" " short2 ref_offset, ushort2 src_coord, ushort2 ref_window_size,\n" " ushort2 image_size);\n" "\n" "#if defined(__opencl_c_images)\n" "intel_sub_group_avc_ime_result_t __ovld\n" "intel_sub_group_avc_ime_evaluate_with_single_reference(\n" " read_only image2d_t src_image, read_only image2d_t ref_image,\n" " sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload);\n" "intel_sub_group_avc_ime_result_t __ovld\n" "intel_sub_group_avc_ime_evaluate_with_dual_reference(\n" " read_only image2d_t src_image, read_only image2d_t fwd_ref_image,\n" " read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,\n" " intel_sub_group_avc_ime_payload_t payload);\n" "intel_sub_group_avc_ime_result_single_reference_streamout_t __ovld\n" "intel_sub_group_avc_ime_evaluate_with_single_reference_streamout(\n" " read_only image2d_t src_image, read_only image2d_t ref_image,\n" " sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload);\n" "intel_sub_group_avc_ime_result_dual_reference_streamout_t __ovld\n" "intel_sub_group_avc_ime_evaluate_with_dual_reference_streamout(\n" " read_only image2d_t src_image, read_only image2d_t fwd_ref_image,\n" " read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,\n" " intel_sub_group_avc_ime_payload_t payload);\n" "intel_sub_group_avc_ime_result_t __ovld\n" "intel_sub_group_avc_ime_evaluate_with_single_reference_streamin(\n" " read_only image2d_t src_image, read_only image2d_t ref_image,\n" " sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload,\n" " intel_sub_group_avc_ime_single_reference_streamin_t streamin_components);\n" "intel_sub_group_avc_ime_result_t __ovld\n" 
"intel_sub_group_avc_ime_evaluate_with_dual_reference_streamin(\n" " read_only image2d_t src_image, read_only image2d_t fwd_ref_image,\n" " read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,\n" " intel_sub_group_avc_ime_payload_t payload,\n" " intel_sub_group_avc_ime_dual_reference_streamin_t streamin_components);\n" "intel_sub_group_avc_ime_result_single_reference_streamout_t __ovld\n" "intel_sub_group_avc_ime_evaluate_with_single_reference_streaminout(\n" " read_only image2d_t src_image, read_only image2d_t ref_image,\n" " sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload,\n" " intel_sub_group_avc_ime_single_reference_streamin_t streamin_components);\n" "intel_sub_group_avc_ime_result_dual_reference_streamout_t __ovld\n" "intel_sub_group_avc_ime_evaluate_with_dual_reference_streaminout(\n" " read_only image2d_t src_image, read_only image2d_t fwd_ref_image,\n" " read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,\n" " intel_sub_group_avc_ime_payload_t payload,\n" " intel_sub_group_avc_ime_dual_reference_streamin_t streamin_components);\n" "#endif\n" "\n" "intel_sub_group_avc_ime_single_reference_streamin_t __ovld\n" "intel_sub_group_avc_ime_get_single_reference_streamin(\n" " intel_sub_group_avc_ime_result_single_reference_streamout_t result);\n" "intel_sub_group_avc_ime_dual_reference_streamin_t __ovld\n" "intel_sub_group_avc_ime_get_dual_reference_streamin(\n" " intel_sub_group_avc_ime_result_dual_reference_streamout_t result);\n" "intel_sub_group_avc_ime_result_t __ovld\n" "intel_sub_group_avc_ime_strip_single_reference_streamout(\n" " intel_sub_group_avc_ime_result_single_reference_streamout_t result);\n" "intel_sub_group_avc_ime_result_t __ovld\n" "intel_sub_group_avc_ime_strip_dual_reference_streamout(\n" " intel_sub_group_avc_ime_result_dual_reference_streamout_t result);\n" "\n" "uint __ovld intel_sub_group_avc_ime_get_streamout_major_shape_motion_vectors(\n" " 
intel_sub_group_avc_ime_result_single_reference_streamout_t result,\n" " uchar major_shape);\n" "ushort __ovld intel_sub_group_avc_ime_get_streamout_major_shape_distortions(\n" " intel_sub_group_avc_ime_result_single_reference_streamout_t result,\n" " uchar major_shape);\n" "uchar __ovld intel_sub_group_avc_ime_get_streamout_major_shape_reference_ids(\n" " intel_sub_group_avc_ime_result_single_reference_streamout_t result,\n" " uchar major_shape);\n" "uint __ovld intel_sub_group_avc_ime_get_streamout_major_shape_motion_vectors(\n" " intel_sub_group_avc_ime_result_dual_reference_streamout_t result,\n" " uchar major_shape, uchar direction);\n" "ushort __ovld intel_sub_group_avc_ime_get_streamout_major_shape_distortions(\n" " intel_sub_group_avc_ime_result_dual_reference_streamout_t result,\n" " uchar major_shape, uchar direction);\n" "uchar __ovld intel_sub_group_avc_ime_get_streamout_major_shape_reference_ids(\n" " intel_sub_group_avc_ime_result_dual_reference_streamout_t result,\n" " uchar major_shape, uchar direction);\n" "\n" "uchar __ovld intel_sub_group_avc_ime_get_border_reached(\n" " uchar image_select, intel_sub_group_avc_ime_result_t result);\n" "uchar __ovld intel_sub_group_avc_ime_get_truncated_search_indication(\n" " intel_sub_group_avc_ime_result_t result);\n" "uchar __ovld\n" "intel_sub_group_avc_ime_get_unidirectional_early_search_termination(\n" " intel_sub_group_avc_ime_result_t result);\n" "uint __ovld intel_sub_group_avc_ime_get_weighting_pattern_minimum_motion_vector(\n" " intel_sub_group_avc_ime_result_t result);\n" "ushort __ovld intel_sub_group_avc_ime_get_weighting_pattern_minimum_distortion(\n" " intel_sub_group_avc_ime_result_t result);\n" "\n" "// REF built-in functions\n" "intel_sub_group_avc_ref_payload_t __ovld\n" "intel_sub_group_avc_fme_initialize(\n" " ushort2 src_coord, ulong motion_vectors, uchar major_shapes,\n" " uchar minor_shapes, uchar directions, uchar pixel_resolution,\n" " uchar sad_adjustment);\n" 
"intel_sub_group_avc_ref_payload_t __ovld\n" "intel_sub_group_avc_bme_initialize(\n" " ushort2 src_coord, ulong motion_vectors, uchar major_shapes,\n" " uchar minor_shapes, uchar directions, uchar pixel_resolution,\n" " uchar bidirectional_weight, uchar sad_adjustment);\n" "\n" "intel_sub_group_avc_ref_payload_t __ovld\n" "intel_sub_group_avc_ref_set_bidirectional_mix_disable(\n" " intel_sub_group_avc_ref_payload_t payload);\n" "intel_sub_group_avc_ref_payload_t __ovld\n" "intel_sub_group_avc_ref_set_bilinear_filter_enable(\n" " intel_sub_group_avc_ref_payload_t payload);\n" "\n" "#if defined(__opencl_c_images)\n" "intel_sub_group_avc_ref_result_t __ovld\n" "intel_sub_group_avc_ref_evaluate_with_single_reference(\n" " read_only image2d_t src_image, read_only image2d_t ref_image,\n" " sampler_t vme_media_sampler, intel_sub_group_avc_ref_payload_t payload);\n" "intel_sub_group_avc_ref_result_t __ovld\n" "intel_sub_group_avc_ref_evaluate_with_dual_reference(\n" " read_only image2d_t src_image, read_only image2d_t fwd_ref_image,\n" " read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,\n" " intel_sub_group_avc_ref_payload_t payload);\n" "intel_sub_group_avc_ref_result_t __ovld\n" "intel_sub_group_avc_ref_evaluate_with_multi_reference(\n" " read_only image2d_t src_image, uint packed_reference_ids,\n" " sampler_t vme_media_sampler, intel_sub_group_avc_ref_payload_t payload);\n" "intel_sub_group_avc_ref_result_t __ovld\n" "intel_sub_group_avc_ref_evaluate_with_multi_reference(\n" " read_only image2d_t src_image, uint packed_reference_ids,\n" " uchar packed_reference_field_polarities, sampler_t vme_media_sampler,\n" " intel_sub_group_avc_ref_payload_t payload);\n" "#endif //defined(__opencl_c_images)\n" "\n" "// SIC built-in functions\n" "intel_sub_group_avc_sic_payload_t __ovld\n" "intel_sub_group_avc_sic_initialize(\n" " ushort2 src_coord);\n" "intel_sub_group_avc_sic_payload_t __ovld\n" "intel_sub_group_avc_sic_configure_skc(\n" " uint 
skip_block_partition_type, uint skip_motion_vector_mask,\n" " ulong motion_vectors, uchar bidirectional_weight, uchar skip_sad_adjustment,\n" " intel_sub_group_avc_sic_payload_t payload);\n" "intel_sub_group_avc_sic_payload_t __ovld intel_sub_group_avc_sic_configure_ipe(\n" " uchar luma_intra_partition_mask, uchar intra_neighbour_availability,\n" " uchar left_edge_luma_pixels, uchar upper_left_corner_luma_pixel,\n" " uchar upper_edge_luma_pixels, uchar upper_right_edge_luma_pixels,\n" " uchar intra_sad_adjustment, intel_sub_group_avc_sic_payload_t payload);\n" "intel_sub_group_avc_sic_payload_t __ovld intel_sub_group_avc_sic_configure_ipe(\n" " uchar luma_intra_partition_mask, uchar intra_neighbour_availability,\n" " uchar left_edge_luma_pixels, uchar upper_left_corner_luma_pixel,\n" " uchar upper_edge_luma_pixels, uchar upper_right_edge_luma_pixels,\n" " ushort left_edge_chroma_pixels, ushort upper_left_corner_chroma_pixel,\n" " ushort upper_edge_chroma_pixels, uchar intra_sad_adjustment,\n" " intel_sub_group_avc_sic_payload_t payload);\n" "uint __ovld\n" "intel_sub_group_avc_sic_get_motion_vector_mask(\n" " uint skip_block_partition_type, uchar direction);\n" "\n" "intel_sub_group_avc_sic_payload_t __ovld\n" "intel_sub_group_avc_sic_set_intra_luma_shape_penalty(\n" " uint packed_shape_cost, intel_sub_group_avc_sic_payload_t payload);\n" "intel_sub_group_avc_sic_payload_t __ovld\n" "intel_sub_group_avc_sic_set_intra_luma_mode_cost_function(\n" " uchar luma_mode_penalty, uint luma_packed_neighbor_modes,\n" " uint luma_packed_non_dc_penalty, intel_sub_group_avc_sic_payload_t payload);\n" "intel_sub_group_avc_sic_payload_t __ovld\n" "intel_sub_group_avc_sic_set_intra_chroma_mode_cost_function(\n" " uchar chroma_mode_penalty, intel_sub_group_avc_sic_payload_t payload);\n" "\n" "intel_sub_group_avc_sic_payload_t __ovld\n" "intel_sub_group_avc_sic_set_skc_bilinear_filter_enable(\n" " intel_sub_group_avc_sic_payload_t payload);\n" "intel_sub_group_avc_sic_payload_t 
__ovld\n" "intel_sub_group_avc_sic_set_skc_forward_transform_enable(\n" " ulong packed_sad_coefficients, intel_sub_group_avc_sic_payload_t payload);\n" "intel_sub_group_avc_sic_payload_t __ovld\n" "intel_sub_group_avc_sic_set_block_based_raw_skip_sad(\n" " uchar block_based_skip_type,\n" " intel_sub_group_avc_sic_payload_t payload);\n" "\n" "#if defined(__opencl_c_images)\n" "intel_sub_group_avc_sic_result_t __ovld\n" "intel_sub_group_avc_sic_evaluate_ipe(\n" " read_only image2d_t src_image, sampler_t vme_media_sampler,\n" " intel_sub_group_avc_sic_payload_t payload);\n" "intel_sub_group_avc_sic_result_t __ovld\n" "intel_sub_group_avc_sic_evaluate_with_single_reference(\n" " read_only image2d_t src_image, read_only image2d_t ref_image,\n" " sampler_t vme_media_sampler, intel_sub_group_avc_sic_payload_t payload);\n" "intel_sub_group_avc_sic_result_t __ovld\n" "intel_sub_group_avc_sic_evaluate_with_dual_reference(\n" " read_only image2d_t src_image, read_only image2d_t fwd_ref_image,\n" " read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,\n" " intel_sub_group_avc_sic_payload_t payload);\n" "intel_sub_group_avc_sic_result_t __ovld\n" "intel_sub_group_avc_sic_evaluate_with_multi_reference(\n" " read_only image2d_t src_image, uint packed_reference_ids,\n" " sampler_t vme_media_sampler, intel_sub_group_avc_sic_payload_t payload);\n" "intel_sub_group_avc_sic_result_t __ovld\n" "intel_sub_group_avc_sic_evaluate_with_multi_reference(\n" " read_only image2d_t src_image, uint packed_reference_ids,\n" " uchar packed_reference_field_polarities, sampler_t vme_media_sampler,\n" " intel_sub_group_avc_sic_payload_t payload);\n" "#endif //defined(__opencl_c_images)\n" "\n" "uchar __ovld intel_sub_group_avc_sic_get_ipe_luma_shape(\n" " intel_sub_group_avc_sic_result_t result);\n" "ushort __ovld intel_sub_group_avc_sic_get_best_ipe_luma_distortion(\n" " intel_sub_group_avc_sic_result_t result);\n" "ushort __ovld intel_sub_group_avc_sic_get_best_ipe_chroma_distortion(\n" " 
intel_sub_group_avc_sic_result_t result);\n" "ulong __ovld intel_sub_group_avc_sic_get_packed_ipe_luma_modes(\n" " intel_sub_group_avc_sic_result_t result);\n" "uchar __ovld intel_sub_group_avc_sic_get_ipe_chroma_mode(\n" " intel_sub_group_avc_sic_result_t result);\n" "uint __ovld intel_sub_group_avc_sic_get_packed_skc_luma_count_threshold(\n" " intel_sub_group_avc_sic_result_t result);\n" "ulong __ovld intel_sub_group_avc_sic_get_packed_skc_luma_sum_threshold(\n" " intel_sub_group_avc_sic_result_t result);\n" "ushort __ovld intel_sub_group_avc_sic_get_inter_raw_sads(\n" " intel_sub_group_avc_sic_result_t result);\n" "\n" "// Wrappers\n" "intel_sub_group_avc_ime_payload_t __ovld\n" "intel_sub_group_avc_ime_set_inter_base_multi_reference_penalty(\n" " uchar reference_base_penalty, intel_sub_group_avc_ime_payload_t payload);\n" "intel_sub_group_avc_ref_payload_t __ovld\n" "intel_sub_group_avc_ref_set_inter_base_multi_reference_penalty(\n" " uchar reference_base_penalty, intel_sub_group_avc_ref_payload_t payload);\n" "intel_sub_group_avc_sic_payload_t __ovld\n" "intel_sub_group_avc_sic_set_inter_base_multi_reference_penalty(\n" " uchar reference_base_penalty, intel_sub_group_avc_sic_payload_t payload);\n" "\n" "intel_sub_group_avc_ime_payload_t __ovld\n" "intel_sub_group_avc_ime_set_inter_shape_penalty(\n" " ulong packed_shape_cost, intel_sub_group_avc_ime_payload_t payload);\n" "intel_sub_group_avc_ref_payload_t __ovld\n" "intel_sub_group_avc_ref_set_inter_shape_penalty(\n" " ulong packed_shape_cost, intel_sub_group_avc_ref_payload_t payload);\n" "intel_sub_group_avc_sic_payload_t __ovld\n" "intel_sub_group_avc_sic_set_inter_shape_penalty(\n" " ulong packed_shape_cost, intel_sub_group_avc_sic_payload_t payload);\n" "\n" "intel_sub_group_avc_ime_payload_t __ovld\n" "intel_sub_group_avc_ime_set_inter_direction_penalty(\n" " uchar direction_cost, intel_sub_group_avc_ime_payload_t payload);\n" "intel_sub_group_avc_ref_payload_t __ovld\n" 
"intel_sub_group_avc_ref_set_inter_direction_penalty(\n" " uchar direction_cost, intel_sub_group_avc_ref_payload_t payload);\n" "intel_sub_group_avc_sic_payload_t __ovld\n" "intel_sub_group_avc_sic_set_inter_direction_penalty(\n" " uchar direction_cost, intel_sub_group_avc_sic_payload_t payload);\n" "\n" "intel_sub_group_avc_ime_payload_t __ovld\n" "intel_sub_group_avc_ime_set_motion_vector_cost_function(\n" " ulong packed_cost_center_delta, uint2 packed_cost_table,\n" " uchar cost_precision, intel_sub_group_avc_ime_payload_t payload);\n" "intel_sub_group_avc_ref_payload_t __ovld\n" "intel_sub_group_avc_ref_set_motion_vector_cost_function(\n" " ulong packed_cost_center_delta, uint2 packed_cost_table,\n" " uchar cost_precision, intel_sub_group_avc_ref_payload_t payload);\n" "intel_sub_group_avc_sic_payload_t __ovld\n" "intel_sub_group_avc_sic_set_motion_vector_cost_function(\n" " ulong packed_cost_center_delta, uint2 packed_cost_table,\n" " uchar cost_precision, intel_sub_group_avc_sic_payload_t payload);\n" "\n" "intel_sub_group_avc_ime_payload_t __ovld\n" "intel_sub_group_avc_ime_set_source_interlaced_field_polarity(\n" " uchar src_field_polarity, intel_sub_group_avc_ime_payload_t payload);\n" "intel_sub_group_avc_ref_payload_t __ovld\n" "intel_sub_group_avc_ref_set_source_interlaced_field_polarity(\n" " uchar src_field_polarity, intel_sub_group_avc_ref_payload_t payload);\n" "intel_sub_group_avc_sic_payload_t __ovld\n" "intel_sub_group_avc_sic_set_source_interlaced_field_polarity(\n" " uchar src_field_polarity, intel_sub_group_avc_sic_payload_t payload);\n" "\n" "intel_sub_group_avc_ime_payload_t __ovld\n" "intel_sub_group_avc_ime_set_single_reference_interlaced_field_polarity(\n" " uchar ref_field_polarity, intel_sub_group_avc_ime_payload_t payload);\n" "intel_sub_group_avc_ref_payload_t __ovld\n" "intel_sub_group_avc_ref_set_single_reference_interlaced_field_polarity(\n" " uchar ref_field_polarity, intel_sub_group_avc_ref_payload_t payload);\n" 
"intel_sub_group_avc_sic_payload_t __ovld\n" "intel_sub_group_avc_sic_set_single_reference_interlaced_field_polarity(\n" " uchar ref_field_polarity, intel_sub_group_avc_sic_payload_t payload);\n" "intel_sub_group_avc_ime_payload_t __ovld\n" "intel_sub_group_avc_ime_set_dual_reference_interlaced_field_polarities(\n" " uchar fwd_ref_field_polarity, uchar bwd_ref_field_polarity,\n" " intel_sub_group_avc_ime_payload_t payload);\n" "intel_sub_group_avc_ref_payload_t __ovld\n" "intel_sub_group_avc_ref_set_dual_reference_interlaced_field_polarities(\n" " uchar fwd_ref_field_polarity, uchar bwd_ref_field_polarity,\n" " intel_sub_group_avc_ref_payload_t payload);\n" "intel_sub_group_avc_sic_payload_t __ovld\n" "intel_sub_group_avc_sic_set_dual_reference_interlaced_field_polarities(\n" " uchar fwd_ref_field_polarity, uchar bwd_ref_field_polarity,\n" " intel_sub_group_avc_sic_payload_t payload);\n" "\n" "intel_sub_group_avc_ime_payload_t __ovld\n" "intel_sub_group_avc_ime_set_ac_only_haar(\n" " intel_sub_group_avc_ime_payload_t payload);\n" "intel_sub_group_avc_ref_payload_t __ovld\n" "intel_sub_group_avc_ref_set_ac_only_haar(\n" " intel_sub_group_avc_ref_payload_t payload);\n" "intel_sub_group_avc_sic_payload_t __ovld\n" "intel_sub_group_avc_sic_set_ac_only_haar(\n" " intel_sub_group_avc_sic_payload_t payload);\n" "\n" "ulong __ovld intel_sub_group_avc_ime_get_motion_vectors(\n" " intel_sub_group_avc_ime_result_t result);\n" "ulong __ovld intel_sub_group_avc_ref_get_motion_vectors(\n" " intel_sub_group_avc_ref_result_t result);\n" "\n" "ushort __ovld intel_sub_group_avc_ime_get_inter_distortions(\n" " intel_sub_group_avc_ime_result_t result);\n" "ushort __ovld intel_sub_group_avc_ref_get_inter_distortions(\n" " intel_sub_group_avc_ref_result_t result);\n" "ushort __ovld intel_sub_group_avc_sic_get_inter_distortions(\n" " intel_sub_group_avc_sic_result_t result);\n" "\n" "ushort __ovld intel_sub_group_avc_ime_get_best_inter_distortion(\n" " intel_sub_group_avc_ime_result_t 
result);\n" "ushort __ovld intel_sub_group_avc_ref_get_best_inter_distortion(\n" " intel_sub_group_avc_ref_result_t result);\n" "\n" "uchar __ovld intel_sub_group_avc_ime_get_inter_major_shape(\n" " intel_sub_group_avc_ime_result_t result);\n" "uchar __ovld intel_sub_group_avc_ref_get_inter_major_shape(\n" " intel_sub_group_avc_ref_result_t result);\n" "uchar __ovld intel_sub_group_avc_ime_get_inter_minor_shapes(\n" " intel_sub_group_avc_ime_result_t result);\n" "uchar __ovld intel_sub_group_avc_ref_get_inter_minor_shapes(\n" " intel_sub_group_avc_ref_result_t result);\n" "\n" "uchar __ovld intel_sub_group_avc_ime_get_inter_directions(\n" " intel_sub_group_avc_ime_result_t result);\n" "uchar __ovld intel_sub_group_avc_ref_get_inter_directions(\n" " intel_sub_group_avc_ref_result_t result);\n" "\n" "uchar __ovld intel_sub_group_avc_ime_get_inter_motion_vector_count(\n" " intel_sub_group_avc_ime_result_t result);\n" "uchar __ovld intel_sub_group_avc_ref_get_inter_motion_vector_count(\n" " intel_sub_group_avc_ref_result_t result);\n" "\n" "uint __ovld intel_sub_group_avc_ime_get_inter_reference_ids(\n" " intel_sub_group_avc_ime_result_t result);\n" "uint __ovld intel_sub_group_avc_ref_get_inter_reference_ids(\n" " intel_sub_group_avc_ref_result_t result);\n" "\n" "uchar __ovld\n" "intel_sub_group_avc_ime_get_inter_reference_interlaced_field_polarities(\n" " uint packed_reference_ids, uint packed_reference_parameter_field_polarities,\n" " intel_sub_group_avc_ime_result_t result);\n" "uchar __ovld\n" "intel_sub_group_avc_ref_get_inter_reference_interlaced_field_polarities(\n" " uint packed_reference_ids, uint packed_reference_parameter_field_polarities,\n" " intel_sub_group_avc_ref_result_t result);\n" "\n" "// Type conversion functions\n" "intel_sub_group_avc_mce_payload_t __ovld\n" "intel_sub_group_avc_ime_convert_to_mce_payload(\n" " intel_sub_group_avc_ime_payload_t payload);\n" "intel_sub_group_avc_ime_payload_t __ovld\n" 
"intel_sub_group_avc_mce_convert_to_ime_payload(\n" " intel_sub_group_avc_mce_payload_t payload);\n" "intel_sub_group_avc_mce_payload_t __ovld\n" "intel_sub_group_avc_ref_convert_to_mce_payload(\n" " intel_sub_group_avc_ref_payload_t payload);\n" "intel_sub_group_avc_ref_payload_t __ovld\n" "intel_sub_group_avc_mce_convert_to_ref_payload(\n" " intel_sub_group_avc_mce_payload_t payload);\n" "intel_sub_group_avc_mce_payload_t __ovld\n" "intel_sub_group_avc_sic_convert_to_mce_payload(\n" " intel_sub_group_avc_sic_payload_t payload);\n" "intel_sub_group_avc_sic_payload_t __ovld\n" "intel_sub_group_avc_mce_convert_to_sic_payload(\n" " intel_sub_group_avc_mce_payload_t payload);\n" "\n" "intel_sub_group_avc_mce_result_t __ovld\n" "intel_sub_group_avc_ime_convert_to_mce_result(\n" " intel_sub_group_avc_ime_result_t result);\n" "intel_sub_group_avc_ime_result_t __ovld\n" "intel_sub_group_avc_mce_convert_to_ime_result(\n" " intel_sub_group_avc_mce_result_t result);\n" "intel_sub_group_avc_mce_result_t __ovld\n" "intel_sub_group_avc_ref_convert_to_mce_result(\n" " intel_sub_group_avc_ref_result_t result);\n" "intel_sub_group_avc_ref_result_t __ovld\n" "intel_sub_group_avc_mce_convert_to_ref_result(\n" " intel_sub_group_avc_mce_result_t result);\n" "intel_sub_group_avc_mce_result_t __ovld\n" "intel_sub_group_avc_sic_convert_to_mce_result(\n" " intel_sub_group_avc_sic_result_t result);\n" "intel_sub_group_avc_sic_result_t __ovld\n" "intel_sub_group_avc_mce_convert_to_sic_result(\n" " intel_sub_group_avc_mce_result_t result);\n" "#pragma OPENCL EXTENSION cl_intel_device_side_avc_motion_estimation : end\n" "#endif // cl_intel_device_side_avc_motion_estimation\n" "\n" "#ifdef cl_amd_media_ops\n" "uint __ovld amd_bitalign(uint, uint, uint);\n" "uint2 __ovld amd_bitalign(uint2, uint2, uint2);\n" "uint3 __ovld amd_bitalign(uint3, uint3, uint3);\n" "uint4 __ovld amd_bitalign(uint4, uint4, uint4);\n" "uint8 __ovld amd_bitalign(uint8, uint8, uint8);\n" "uint16 __ovld 
amd_bitalign(uint16, uint16, uint16);\n" "\n" "uint __ovld amd_bytealign(uint, uint, uint);\n" "uint2 __ovld amd_bytealign(uint2, uint2, uint2);\n" "uint3 __ovld amd_bytealign(uint3, uint3, uint3);\n" "uint4 __ovld amd_bytealign(uint4, uint4, uint4);\n" "uint8 __ovld amd_bytealign(uint8, uint8, uint8);\n" "uint16 __ovld amd_bytealign(uint16, uint16, uint16);\n" "\n" "uint __ovld amd_lerp(uint, uint, uint);\n" "uint2 __ovld amd_lerp(uint2, uint2, uint2);\n" "uint3 __ovld amd_lerp(uint3, uint3, uint3);\n" "uint4 __ovld amd_lerp(uint4, uint4, uint4);\n" "uint8 __ovld amd_lerp(uint8, uint8, uint8);\n" "uint16 __ovld amd_lerp(uint16, uint16, uint16);\n" "\n" "uint __ovld amd_pack(float4 v);\n" "\n" "uint __ovld amd_sad4(uint4, uint4, uint);\n" "\n" "uint __ovld amd_sadhi(uint, uint, uint);\n" "uint2 __ovld amd_sadhi(uint2, uint2, uint2);\n" "uint3 __ovld amd_sadhi(uint3, uint3, uint3);\n" "uint4 __ovld amd_sadhi(uint4, uint4, uint4);\n" "uint8 __ovld amd_sadhi(uint8, uint8, uint8);\n" "uint16 __ovld amd_sadhi(uint16, uint16, uint16);\n" "\n" "uint __ovld amd_sad(uint, uint, uint);\n" "uint2 __ovld amd_sad(uint2, uint2, uint2);\n" "uint3 __ovld amd_sad(uint3, uint3, uint3);\n" "uint4 __ovld amd_sad(uint4, uint4, uint4);\n" "uint8 __ovld amd_sad(uint8, uint8, uint8);\n" "uint16 __ovld amd_sad(uint16, uint16, uint16);\n" "\n" "float __ovld amd_unpack0(uint);\n" "float2 __ovld amd_unpack0(uint2);\n" "float3 __ovld amd_unpack0(uint3);\n" "float4 __ovld amd_unpack0(uint4);\n" "float8 __ovld amd_unpack0(uint8);\n" "float16 __ovld amd_unpack0(uint16);\n" "\n" "float __ovld amd_unpack1(uint);\n" "float2 __ovld amd_unpack1(uint2);\n" "float3 __ovld amd_unpack1(uint3);\n" "float4 __ovld amd_unpack1(uint4);\n" "float8 __ovld amd_unpack1(uint8);\n" "float16 __ovld amd_unpack1(uint16);\n" "\n" "float __ovld amd_unpack2(uint);\n" "float2 __ovld amd_unpack2(uint2);\n" "float3 __ovld amd_unpack2(uint3);\n" "float4 __ovld amd_unpack2(uint4);\n" "float8 __ovld amd_unpack2(uint8);\n" 
"float16 __ovld amd_unpack2(uint16);\n" "\n" "float __ovld amd_unpack3(uint);\n" "float2 __ovld amd_unpack3(uint2);\n" "float3 __ovld amd_unpack3(uint3);\n" "float4 __ovld amd_unpack3(uint4);\n" "float8 __ovld amd_unpack3(uint8);\n" "float16 __ovld amd_unpack3(uint16);\n" "#endif // cl_amd_media_ops\n" "\n" "#ifdef cl_amd_media_ops2\n" "int __ovld amd_bfe(int src0, uint src1, uint src2);\n" "int2 __ovld amd_bfe(int2 src0, uint2 src1, uint2 src2);\n" "int3 __ovld amd_bfe(int3 src0, uint3 src1, uint3 src2);\n" "int4 __ovld amd_bfe(int4 src0, uint4 src1, uint4 src2);\n" "int8 __ovld amd_bfe(int8 src0, uint8 src1, uint8 src2);\n" "int16 __ovld amd_bfe(int16 src0, uint16 src1, uint16 src2);\n" "\n" "uint __ovld amd_bfe(uint src0, uint src1, uint src2);\n" "uint2 __ovld amd_bfe(uint2 src0, uint2 src1, uint2 src2);\n" "uint3 __ovld amd_bfe(uint3 src0, uint3 src1, uint3 src2);\n" "uint4 __ovld amd_bfe(uint4 src0, uint4 src1, uint4 src2);\n" "uint8 __ovld amd_bfe(uint8 src0, uint8 src1, uint8 src2);\n" "uint16 __ovld amd_bfe(uint16 src0, uint16 src1, uint16 src2);\n" "\n" "uint __ovld amd_bfm(uint src0, uint src1);\n" "uint2 __ovld amd_bfm(uint2 src0, uint2 src1);\n" "uint3 __ovld amd_bfm(uint3 src0, uint3 src1);\n" "uint4 __ovld amd_bfm(uint4 src0, uint4 src1);\n" "uint8 __ovld amd_bfm(uint8 src0, uint8 src1);\n" "uint16 __ovld amd_bfm(uint16 src0, uint16 src1);\n" "\n" "float __ovld amd_max3(float src0, float src1, float src2);\n" "float2 __ovld amd_max3(float2 src0, float2 src1, float2 src2);\n" "float3 __ovld amd_max3(float3 src0, float3 src1, float3 src2);\n" "float4 __ovld amd_max3(float4 src0, float4 src1, float4 src2);\n" "float8 __ovld amd_max3(float8 src0, float8 src1, float8 src2);\n" "float16 __ovld amd_max3(float16 src0, float16 src1, float16 src2);\n" "\n" "int __ovld amd_max3(int src0, int src1, int src2);\n" "int2 __ovld amd_max3(int2 src0, int2 src1, int2 src2);\n" "int3 __ovld amd_max3(int3 src0, int3 src1, int3 src2);\n" "int4 __ovld amd_max3(int4 src0, 
int4 src1, int4 src2);\n" "int8 __ovld amd_max3(int8 src0, int8 src1, int8 src2);\n" "int16 __ovld amd_max3(int16 src0, int16 src1, int16 src2);\n" "\n" "uint __ovld amd_max3(uint src0, uint src1, uint src2);\n" "uint2 __ovld amd_max3(uint2 src0, uint2 src1, uint2 src2);\n" "uint3 __ovld amd_max3(uint3 src0, uint3 src1, uint3 src2);\n" "uint4 __ovld amd_max3(uint4 src0, uint4 src1, uint4 src2);\n" "uint8 __ovld amd_max3(uint8 src0, uint8 src1, uint8 src2);\n" "uint16 __ovld amd_max3(uint16 src0, uint16 src1, uint16 src2);\n" "\n" "float __ovld amd_median3(float src0, float src1, float src2);\n" "float2 __ovld amd_median3(float2 src0, float2 src1, float2 src2);\n" "float3 __ovld amd_median3(float3 src0, float3 src1, float3 src2);\n" "float4 __ovld amd_median3(float4 src0, float4 src1, float4 src2);\n" "float8 __ovld amd_median3(float8 src0, float8 src1, float8 src2);\n" "float16 __ovld amd_median3(float16 src0, float16 src1, float16 src2);\n" "\n" "int __ovld amd_median3(int src0, int src1, int src2);\n" "int2 __ovld amd_median3(int2 src0, int2 src1, int2 src2);\n" "int3 __ovld amd_median3(int3 src0, int3 src1, int3 src2);\n" "int4 __ovld amd_median3(int4 src0, int4 src1, int4 src2);\n" "int8 __ovld amd_median3(int8 src0, int8 src1, int8 src2);\n" "int16 __ovld amd_median3(int16 src0, int16 src1, int16 src2);\n" "\n" "uint __ovld amd_median3(uint src0, uint src1, uint src2);\n" "uint2 __ovld amd_median3(uint2 src0, uint2 src1, uint2 src2);\n" "uint3 __ovld amd_median3(uint3 src0, uint3 src1, uint3 src2);\n" "uint4 __ovld amd_median3(uint4 src0, uint4 src1, uint4 src2);\n" "uint8 __ovld amd_median3(uint8 src0, uint8 src1, uint8 src2);\n" "uint16 __ovld amd_median3(uint16 src0, uint16 src1, uint16 src2);\n" "\n" "float __ovld amd_min3(float src0, float src1, float src);\n" "float2 __ovld amd_min3(float2 src0, float2 src1, float2 src);\n" "float3 __ovld amd_min3(float3 src0, float3 src1, float3 src);\n" "float4 __ovld amd_min3(float4 src0, float4 src1, float4 src);\n" 
"float8 __ovld amd_min3(float8 src0, float8 src1, float8 src);\n" "float16 __ovld amd_min3(float16 src0, float16 src1, float16 src);\n" "\n" "int __ovld amd_min3(int src0, int src1, int src2);\n" "int2 __ovld amd_min3(int2 src0, int2 src1, int2 src2);\n" "int3 __ovld amd_min3(int3 src0, int3 src1, int3 src2);\n" "int4 __ovld amd_min3(int4 src0, int4 src1, int4 src2);\n" "int8 __ovld amd_min3(int8 src0, int8 src1, int8 src2);\n" "int16 __ovld amd_min3(int16 src0, int16 src1, int16 src2);\n" "\n" "uint __ovld amd_min3(uint src0, uint src1, uint src2);\n" "uint2 __ovld amd_min3(uint2 src0, uint2 src1, uint2 src2);\n" "uint3 __ovld amd_min3(uint3 src0, uint3 src1, uint3 src2);\n" "uint4 __ovld amd_min3(uint4 src0, uint4 src1, uint4 src2);\n" "uint8 __ovld amd_min3(uint8 src0, uint8 src1, uint8 src2);\n" "uint16 __ovld amd_min3(uint16 src0, uint16 src1, uint16 src2);\n" "\n" "ulong __ovld amd_mqsad(ulong src0, uint src1, ulong src2);\n" "ulong2 __ovld amd_mqsad(ulong2 src0, uint2 src1, ulong2 src2);\n" "ulong3 __ovld amd_mqsad(ulong3 src0, uint3 src1, ulong3 src2);\n" "ulong4 __ovld amd_mqsad(ulong4 src0, uint4 src1, ulong4 src2);\n" "ulong8 __ovld amd_mqsad(ulong8 src0, uint8 src1, ulong8 src2);\n" "ulong16 __ovld amd_mqsad(ulong16 src0, uint16 src1, ulong16 src2);\n" "\n" "ulong __ovld amd_qsad(ulong src0, uint src1, ulong src2);\n" "ulong2 __ovld amd_qsad(ulong2 src0, uint2 src1, ulong2 src2);\n" "ulong3 __ovld amd_qsad(ulong3 src0, uint3 src1, ulong3 src2);\n" "ulong4 __ovld amd_qsad(ulong4 src0, uint4 src1, ulong4 src2);\n" "ulong8 __ovld amd_qsad(ulong8 src0, uint8 src1, ulong8 src2);\n" "ulong16 __ovld amd_qsad(ulong16 src0, uint16 src1, ulong16 src2);\n" "\n" "uint __ovld amd_msad(uint src0, uint src1, uint src2);\n" "uint2 __ovld amd_msad(uint2 src0, uint2 src1, uint2 src2);\n" "uint3 __ovld amd_msad(uint3 src0, uint3 src1, uint3 src2);\n" "uint4 __ovld amd_msad(uint4 src0, uint4 src1, uint4 src2);\n" "uint8 __ovld amd_msad(uint8 src0, uint8 src1, uint8 
src2);\n" "uint16 __ovld amd_msad(uint16 src0, uint16 src1, uint16 src2);\n" "\n" "uint __ovld amd_sadd(uint src0, uint src1, uint src2);\n" "uint2 __ovld amd_sadd(uint2 src0, uint2 src1, uint2 src2);\n" "uint3 __ovld amd_sadd(uint3 src0, uint3 src1, uint3 src2);\n" "uint4 __ovld amd_sadd(uint4 src0, uint4 src1, uint4 src2);\n" "uint8 __ovld amd_sadd(uint8 src0, uint8 src1, uint8 src2);\n" "uint16 __ovld amd_sadd(uint16 src0, uint16 src1, uint16 src2);\n" "\n" "uint __ovld amd_sadw(uint src0, uint src1, uint src2);\n" "uint2 __ovld amd_sadw(uint2 src0, uint2 src1, uint2 src2);\n" "uint3 __ovld amd_sadw(uint3 src0, uint3 src1, uint3 src2);\n" "uint4 __ovld amd_sadw(uint4 src0, uint4 src1, uint4 src2);\n" "uint8 __ovld amd_sadw(uint8 src0, uint8 src1, uint8 src2);\n" "uint16 __ovld amd_sadw(uint16 src0, uint16 src1, uint16 src2);\n" "#endif // cl_amd_media_ops2\n" "\n" "#if defined(cl_arm_integer_dot_product_int8)\n" "uint __ovld arm_dot(uchar4, uchar4);\n" "int __ovld arm_dot(char4, char4);\n" "#endif // defined(cl_arm_integer_dot_product_int8)\n" "\n" "#if defined(cl_arm_integer_dot_product_accumulate_int8)\n" "uint __ovld arm_dot_acc(uchar4, uchar4, uint);\n" "int __ovld arm_dot_acc(char4, char4, int);\n" "#endif // defined(cl_arm_integer_dot_product_accumulate_int8)\n" "\n" "#if defined(cl_arm_integer_dot_product_accumulate_int16)\n" "uint __ovld arm_dot_acc(ushort2, ushort2, uint);\n" "int __ovld arm_dot_acc(short2, short2, int);\n" "#endif // defined(cl_arm_integer_dot_product_accumulate_int16)\n" "\n" "#if defined(cl_arm_integer_dot_product_accumulate_saturate_int8)\n" "uint __ovld arm_dot_acc_sat(uchar4, uchar4, uint);\n" "int __ovld arm_dot_acc_sat(char4, char4, int);\n" "#endif // defined(cl_arm_integer_dot_product_accumulate_saturate_int8)\n" "\n" "// Disable any extensions we may have enabled previously.\n" "#pragma OPENCL EXTENSION all : disable\n" "\n" "#undef __opencl_c_named_address_space_builtins\n" "\n" "#undef __cnfn\n" "#undef __ovld\n" "#endif 
//_OPENCL_H_\n" "" } , { "/builtins/pconfigintrin.h" , "/*===---- pconfigintrin.h - X86 platform configuration ---------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __PCONFIGINTRIN_H\n" "#define __PCONFIGINTRIN_H\n" "\n" "#define __PCONFIG_KEY_PROGRAM 0x00000001\n" "\n" "#if __has_extension(gnu_asm)\n" "\n" "/* Define the default attributes for the functions in this file. */\n" "#define __DEFAULT_FN_ATTRS \\\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"pconfig\")))\n" "\n" "static __inline unsigned int __DEFAULT_FN_ATTRS\n" "_pconfig_u32(unsigned int __leaf, __SIZE_TYPE__ __d[])\n" "{\n" " unsigned int __result;\n" " __asm__ (\"pconfig\"\n" " : \"=a\" (__result), \"=b\" (__d[0]), \"=c\" (__d[1]), \"=d\" (__d[2])\n" " : \"a\" (__leaf), \"b\" (__d[0]), \"c\" (__d[1]), \"d\" (__d[2])\n" " : \"cc\");\n" " return __result;\n" "}\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "\n" "#endif /* __has_extension(gnu_asm) */\n" "\n" "#endif\n" "" } , { "/builtins/pkuintrin.h" , "/*===---- pkuintrin.h - PKU intrinsics -------------------------------------===\n" " *\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "#ifndef __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __PKUINTRIN_H\n" "#define __PKUINTRIN_H\n" "\n" "/* Define the 
default attributes for the functions in this file. */\n" "#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__(\"pku\")))\n" "\n" "static __inline__ unsigned int __DEFAULT_FN_ATTRS\n" "_rdpkru_u32(void)\n" "{\n" " return __builtin_ia32_rdpkru();\n" "}\n" "\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_wrpkru(unsigned int __val)\n" "{\n" " __builtin_ia32_wrpkru(__val);\n" "}\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "\n" "#endif\n" "" } , { "/builtins/pmmintrin.h" , "/*===---- pmmintrin.h - SSE3 intrinsics ------------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __PMMINTRIN_H\n" "#define __PMMINTRIN_H\n" "\n" "#if !defined(__i386__) && !defined(__x86_64__)\n" "#error \"This header is only meant to be used on x86 and x64 architecture\"\n" "#endif\n" "\n" "#include \n" "\n" "/* Define the default attributes for the functions in this file. 
*/\n" "#define __DEFAULT_FN_ATTRS \\\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"sse3\"), __min_vector_width__(128)))\n" "\n" "/// Loads data from an unaligned memory location to elements in a 128-bit\n" "/// vector.\n" "///\n" "/// If the address of the data is not 16-byte aligned, the instruction may\n" "/// read two adjacent aligned blocks of memory to retrieve the requested\n" "/// data.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VLDDQU instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a 128-bit integer vector containing integer values.\n" "/// \\returns A 128-bit vector containing the moved values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_lddqu_si128(__m128i_u const *__p)\n" "{\n" " return (__m128i)__builtin_ia32_lddqu((char const *)__p);\n" "}\n" "\n" "/// Adds the even-indexed values and subtracts the odd-indexed values of\n" "/// two 128-bit vectors of [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VADDSUBPS instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float] containing the left source operand.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float] containing the right source operand.\n" "/// \\returns A 128-bit vector of [4 x float] containing the alternating sums and\n" "/// differences of both operands.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_addsub_ps(__m128 __a, __m128 __b)\n" "{\n" " return __builtin_ia32_addsubps((__v4sf)__a, (__v4sf)__b);\n" "}\n" "\n" "/// Horizontally adds the adjacent pairs of values contained in two\n" "/// 128-bit vectors of [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VHADDPS instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float] containing one of the source operands.\n" "/// The horizontal sums of the values are stored in the lower bits of the\n" "/// destination.\n" "/// 
\\param __b\n" "/// A 128-bit vector of [4 x float] containing one of the source operands.\n" "/// The horizontal sums of the values are stored in the upper bits of the\n" "/// destination.\n" "/// \\returns A 128-bit vector of [4 x float] containing the horizontal sums of\n" "/// both operands.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_hadd_ps(__m128 __a, __m128 __b)\n" "{\n" " return __builtin_ia32_haddps((__v4sf)__a, (__v4sf)__b);\n" "}\n" "\n" "/// Horizontally subtracts the adjacent pairs of values contained in two\n" "/// 128-bit vectors of [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VHSUBPS instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float] containing one of the source operands.\n" "/// The horizontal differences between the values are stored in the lower\n" "/// bits of the destination.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float] containing one of the source operands.\n" "/// The horizontal differences between the values are stored in the upper\n" "/// bits of the destination.\n" "/// \\returns A 128-bit vector of [4 x float] containing the horizontal\n" "/// differences of both operands.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_hsub_ps(__m128 __a, __m128 __b)\n" "{\n" " return __builtin_ia32_hsubps((__v4sf)__a, (__v4sf)__b);\n" "}\n" "\n" "/// Moves and duplicates odd-indexed values from a 128-bit vector\n" "/// of [4 x float] to float values stored in a 128-bit vector of\n" "/// [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVSHDUP instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float]. \\n\n" "/// Bits [127:96] of the source are written to bits [127:96] and [95:64] of\n" "/// the destination. 
\\n\n" "/// Bits [63:32] of the source are written to bits [63:32] and [31:0] of the\n" "/// destination.\n" "/// \\returns A 128-bit vector of [4 x float] containing the moved and duplicated\n" "/// values.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_movehdup_ps(__m128 __a)\n" "{\n" " return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 1, 1, 3, 3);\n" "}\n" "\n" "/// Duplicates even-indexed values from a 128-bit vector of\n" "/// [4 x float] to float values stored in a 128-bit vector of [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVSLDUP instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float] \\n\n" "/// Bits [95:64] of the source are written to bits [127:96] and [95:64] of\n" "/// the destination. \\n\n" "/// Bits [31:0] of the source are written to bits [63:32] and [31:0] of the\n" "/// destination.\n" "/// \\returns A 128-bit vector of [4 x float] containing the moved and duplicated\n" "/// values.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_moveldup_ps(__m128 __a)\n" "{\n" " return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 0, 2, 2);\n" "}\n" "\n" "/// Adds the even-indexed values and subtracts the odd-indexed values of\n" "/// two 128-bit vectors of [2 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VADDSUBPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double] containing the left source operand.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double] containing the right source operand.\n" "/// \\returns A 128-bit vector of [2 x double] containing the alternating sums\n" "/// and differences of both operands.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS\n" "_mm_addsub_pd(__m128d __a, __m128d __b)\n" "{\n" " return __builtin_ia32_addsubpd((__v2df)__a, (__v2df)__b);\n" "}\n" "\n" "/// Horizontally adds the pairs of values contained in two 128-bit\n" "/// vectors of [2 
x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VHADDPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double] containing one of the source operands.\n" "/// The horizontal sum of the values is stored in the lower bits of the\n" "/// destination.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double] containing one of the source operands.\n" "/// The horizontal sum of the values is stored in the upper bits of the\n" "/// destination.\n" "/// \\returns A 128-bit vector of [2 x double] containing the horizontal sums of\n" "/// both operands.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS\n" "_mm_hadd_pd(__m128d __a, __m128d __b)\n" "{\n" " return __builtin_ia32_haddpd((__v2df)__a, (__v2df)__b);\n" "}\n" "\n" "/// Horizontally subtracts the pairs of values contained in two 128-bit\n" "/// vectors of [2 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VHSUBPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double] containing one of the source operands.\n" "/// The horizontal difference of the values is stored in the lower bits of\n" "/// the destination.\n" "/// \\param __b\n" "/// A 128-bit vector of [2 x double] containing one of the source operands.\n" "/// The horizontal difference of the values is stored in the upper bits of\n" "/// the destination.\n" "/// \\returns A 128-bit vector of [2 x double] containing the horizontal\n" "/// differences of both operands.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS\n" "_mm_hsub_pd(__m128d __a, __m128d __b)\n" "{\n" " return __builtin_ia32_hsubpd((__v2df)__a, (__v2df)__b);\n" "}\n" "\n" "/// Moves and duplicates one double-precision value to double-precision\n" "/// values stored in a 128-bit vector of [2 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128d _mm_loaddup_pd(double const *dp);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds 
to the VMOVDDUP instruction.\n" "///\n" "/// \\param dp\n" "/// A pointer to a double-precision value to be moved and duplicated.\n" "/// \\returns A 128-bit vector of [2 x double] containing the moved and\n" "/// duplicated values.\n" "#define _mm_loaddup_pd(dp) _mm_load1_pd(dp)\n" "\n" "/// Moves and duplicates the double-precision value in the lower bits of\n" "/// a 128-bit vector of [2 x double] to double-precision values stored in a\n" "/// 128-bit vector of [2 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVDDUP instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [2 x double]. Bits [63:0] are written to bits\n" "/// [127:64] and [63:0] of the destination.\n" "/// \\returns A 128-bit vector of [2 x double] containing the moved and\n" "/// duplicated values.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS\n" "_mm_movedup_pd(__m128d __a)\n" "{\n" " return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0);\n" "}\n" "\n" "/// Establishes a linear address memory range to be monitored and puts\n" "/// the processor in the monitor event pending state. Data stored in the\n" "/// monitored address range causes the processor to exit the pending state.\n" "///\n" "/// The \\c MONITOR instruction can be used in kernel mode, and in other modes\n" "/// if MSR C001_0015h[MonMwaitUserEn] is set.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c MONITOR instruction.\n" "///\n" "/// \\param __p\n" "/// The memory range to be monitored. 
The size of the range is determined by\n" "/// CPUID function 0000_0005h.\n" "/// \\param __extensions\n" "/// Optional extensions for the monitoring state.\n" "/// \\param __hints\n" "/// Optional hints for the monitoring state.\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_mm_monitor(void const *__p, unsigned __extensions, unsigned __hints)\n" "{\n" " __builtin_ia32_monitor(__p, __extensions, __hints);\n" "}\n" "\n" "/// Used with the \\c MONITOR instruction to wait while the processor is in\n" "/// the monitor event pending state. Data stored in the monitored address\n" "/// range, or an interrupt, causes the processor to exit the pending state.\n" "///\n" "/// The \\c MWAIT instruction can be used in kernel mode, and in other modes if\n" "/// MSR C001_0015h[MonMwaitUserEn] is set.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c MWAIT instruction.\n" "///\n" "/// \\param __extensions\n" "/// Optional extensions for the monitoring state, which can vary by\n" "/// processor.\n" "/// \\param __hints\n" "/// Optional hints for the monitoring state, which can vary by processor.\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_mm_mwait(unsigned __extensions, unsigned __hints)\n" "{\n" " __builtin_ia32_mwait(__extensions, __hints);\n" "}\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "\n" "#endif /* __PMMINTRIN_H */\n" "" } , { "/builtins/popcntintrin.h" , "/*===---- popcntintrin.h - POPCNT intrinsics -------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __POPCNTINTRIN_H\n" "#define __POPCNTINTRIN_H\n" "\n" "/* Define the default attributes for the functions in this file. 
*/\n" "#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__(\"popcnt\")))\n" "\n" "#if defined(__cplusplus) && (__cplusplus >= 201103L)\n" "#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS constexpr\n" "#else\n" "#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS\n" "#endif\n" "\n" "/// Counts the number of bits in the source operand having a value of 1.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the POPCNT instruction.\n" "///\n" "/// \\param __A\n" "/// An unsigned 32-bit integer operand.\n" "/// \\returns A 32-bit integer containing the number of bits with value 1 in the\n" "/// source operand.\n" "static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR\n" "_mm_popcnt_u32(unsigned int __A)\n" "{\n" " return __builtin_popcount(__A);\n" "}\n" "\n" "#ifdef __x86_64__\n" "/// Counts the number of bits in the source operand having a value of 1.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the POPCNT instruction.\n" "///\n" "/// \\param __A\n" "/// An unsigned 64-bit integer operand.\n" "/// \\returns A 64-bit integer containing the number of bits with value 1 in the\n" "/// source operand.\n" "static __inline__ long long __DEFAULT_FN_ATTRS_CONSTEXPR\n" "_mm_popcnt_u64(unsigned long long __A)\n" "{\n" " return __builtin_popcountll(__A);\n" "}\n" "#endif /* __x86_64__ */\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "#undef __DEFAULT_FN_ATTRS_CONSTEXPR\n" "\n" "#endif /* __POPCNTINTRIN_H */\n" "" } , { "/builtins/prfchiintrin.h" , "/*===---- prfchiintrin.h - PREFETCHI intrinsic -----------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __PRFCHIINTRIN_H\n" "#define 
__PRFCHIINTRIN_H\n" "\n" "#ifdef __x86_64__\n" "\n" "/* Define the default attributes for the functions in this file. */\n" "#define __DEFAULT_FN_ATTRS \\\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"prefetchi\")))\n" "\n" "/// Loads an instruction sequence containing the specified memory address into\n" "/// all level cache.\n" "///\n" "/// Note that the effect of this intrinsic is dependent on the processor\n" "/// implementation.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c PREFETCHIT0 instruction.\n" "///\n" "/// \\param __P\n" "/// A pointer specifying the memory address to be prefetched.\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_m_prefetchit0(volatile const void *__P) {\n" "#pragma clang diagnostic push\n" "#pragma clang diagnostic ignored \"-Wcast-qual\"\n" " __builtin_ia32_prefetchi((const void *)__P, 3 /* _MM_HINT_T0 */);\n" "#pragma clang diagnostic pop\n" "}\n" "\n" "/// Loads an instruction sequence containing the specified memory address into\n" "/// all but the first-level cache.\n" "///\n" "/// Note that the effect of this intrinsic is dependent on the processor\n" "/// implementation.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c PREFETCHIT1 instruction.\n" "///\n" "/// \\param __P\n" "/// A pointer specifying the memory address to be prefetched.\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_m_prefetchit1(volatile const void *__P) {\n" "#pragma clang diagnostic push\n" "#pragma clang diagnostic ignored \"-Wcast-qual\"\n" " __builtin_ia32_prefetchi((const void *)__P, 2 /* _MM_HINT_T1 */);\n" "#pragma clang diagnostic pop\n" "}\n" "#endif /* __x86_64__ */\n" "#undef __DEFAULT_FN_ATTRS\n" "\n" "#endif /* __PRFCHWINTRIN_H */\n" "" } , { "/builtins/prfchwintrin.h" , "/*===---- prfchwintrin.h - PREFETCHW intrinsic -----------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * 
See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#if !defined(__X86INTRIN_H) && !defined(_MM3DNOW_H_INCLUDED)\n" "#error \"Never use directly; include or instead.\"\n" "#endif\n" "\n" "#ifndef __PRFCHWINTRIN_H\n" "#define __PRFCHWINTRIN_H\n" "\n" "/// Loads a memory sequence containing the specified memory address into\n" "/// all data cache levels. The cache-coherency state is set to exclusive.\n" "/// Data can be read from and written to the cache line without additional\n" "/// delay.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c PREFETCHT0 instruction.\n" "///\n" "/// \\param __P\n" "/// A pointer specifying the memory address to be prefetched.\n" "static __inline__ void __attribute__((__always_inline__, __nodebug__))\n" "_m_prefetch(void *__P)\n" "{\n" " __builtin_prefetch (__P, 0, 3 /* _MM_HINT_T0 */);\n" "}\n" "\n" "/// Loads a memory sequence containing the specified memory address into\n" "/// the L1 data cache and sets the cache-coherency to modified. 
This\n" "/// provides a hint to the processor that the cache line will be modified.\n" "/// It is intended for use when the cache line will be written to shortly\n" "/// after the prefetch is performed.\n" "///\n" "/// Note that the effect of this intrinsic is dependent on the processor\n" "/// implementation.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c PREFETCHW instruction.\n" "///\n" "/// \\param __P\n" "/// A pointer specifying the memory address to be prefetched.\n" "static __inline__ void __attribute__((__always_inline__, __nodebug__))\n" "_m_prefetchw(volatile const void *__P)\n" "{\n" "#pragma clang diagnostic push\n" "#pragma clang diagnostic ignored \"-Wcast-qual\"\n" " __builtin_prefetch ((const void*)__P, 1, 3 /* _MM_HINT_T0 */);\n" "#pragma clang diagnostic pop\n" "}\n" "\n" "#endif /* __PRFCHWINTRIN_H */\n" "" } , { "/builtins/ptwriteintrin.h" , "/*===------------ ptwriteintrin.h - PTWRITE intrinsic --------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __PTWRITEINTRIN_H\n" "#define __PTWRITEINTRIN_H\n" "\n" "/* Define the default attributes for the functions in this file. 
*/\n" "#define __DEFAULT_FN_ATTRS \\\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"ptwrite\")))\n" "\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_ptwrite32(unsigned int __value) {\n" " __builtin_ia32_ptwrite32(__value);\n" "}\n" "\n" "#ifdef __x86_64__\n" "\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_ptwrite64(unsigned long long __value) {\n" " __builtin_ia32_ptwrite64(__value);\n" "}\n" "\n" "#endif /* __x86_64__ */\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "\n" "#endif /* __PTWRITEINTRIN_H */\n" "" } , { "/builtins/raointintrin.h" , "/*===----------------------- raointintrin.h - RAOINT ------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __X86GPRINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif // __X86GPRINTRIN_H\n" "\n" "#ifndef __RAOINTINTRIN_H\n" "#define __RAOINTINTRIN_H\n" "\n" "#define __DEFAULT_FN_ATTRS \\\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"raoint\")))\n" "\n" "/// Atomically add a 32-bit value at memory operand \\a __A and a 32-bit \\a __B,\n" "/// and store the result to the same memory location.\n" "///\n" "/// This intrinsic should be used for contention or weak ordering. 
It may\n" "/// result in bad performance for hot data used by single thread only.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c AADD instruction.\n" "///\n" "/// \\param __A\n" "/// A pointer to a 32-bit memory location.\n" "/// \\param __B\n" "/// A 32-bit integer value.\n" "///\n" "/// \\code{.operation}\n" "/// MEM[__A+31:__A] := MEM[__A+31:__A] + __B[31:0]\n" "/// \\endcode\n" "static __inline__ void __DEFAULT_FN_ATTRS _aadd_i32(int *__A, int __B) {\n" " __builtin_ia32_aadd32((int *)__A, __B);\n" "}\n" "\n" "/// Atomically and a 32-bit value at memory operand \\a __A and a 32-bit \\a __B,\n" "/// and store the result to the same memory location.\n" "///\n" "/// This intrinsic should be used for contention or weak ordering. It may\n" "/// result in bad performance for hot data used by single thread only.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c AAND instruction.\n" "///\n" "/// \\param __A\n" "/// A pointer to a 32-bit memory location.\n" "/// \\param __B\n" "/// A 32-bit integer value.\n" "///\n" "/// \\code{.operation}\n" "/// MEM[__A+31:__A] := MEM[__A+31:__A] AND __B[31:0]\n" "/// \\endcode\n" "static __inline__ void __DEFAULT_FN_ATTRS _aand_i32(int *__A, int __B) {\n" " __builtin_ia32_aand32((int *)__A, __B);\n" "}\n" "\n" "/// Atomically or a 32-bit value at memory operand \\a __A and a 32-bit \\a __B,\n" "/// and store the result to the same memory location.\n" "///\n" "/// This intrinsic should be used for contention or weak ordering. 
It may\n" "/// result in bad performance for hot data used by single thread only.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c AOR instruction.\n" "///\n" "/// \\param __A\n" "/// A pointer to a 32-bit memory location.\n" "/// \\param __B\n" "/// A 32-bit integer value.\n" "///\n" "/// \\code{.operation}\n" "/// MEM[__A+31:__A] := MEM[__A+31:__A] OR __B[31:0]\n" "/// \\endcode\n" "static __inline__ void __DEFAULT_FN_ATTRS _aor_i32(int *__A, int __B) {\n" " __builtin_ia32_aor32((int *)__A, __B);\n" "}\n" "\n" "/// Atomically xor a 32-bit value at memory operand \\a __A and a 32-bit \\a __B,\n" "/// and store the result to the same memory location.\n" "///\n" "/// This intrinsic should be used for contention or weak ordering. It may\n" "/// result in bad performance for hot data used by single thread only.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c AXOR instruction.\n" "///\n" "/// \\param __A\n" "/// A pointer to a 32-bit memory location.\n" "/// \\param __B\n" "/// A 32-bit integer value.\n" "///\n" "/// \\code{.operation}\n" "/// MEM[__A+31:__A] := MEM[__A+31:__A] XOR __B[31:0]\n" "/// \\endcode\n" "static __inline__ void __DEFAULT_FN_ATTRS _axor_i32(int *__A, int __B) {\n" " __builtin_ia32_axor32((int *)__A, __B);\n" "}\n" "\n" "#ifdef __x86_64__\n" "/// Atomically add a 64-bit value at memory operand \\a __A and a 64-bit \\a __B,\n" "/// and store the result to the same memory location.\n" "///\n" "/// This intrinsic should be used for contention or weak ordering. 
It may\n" "/// result in bad performance for hot data used by single thread only.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c AADD instruction.\n" "///\n" "/// \\param __A\n" "/// A pointer to a 64-bit memory location.\n" "/// \\param __B\n" "/// A 64-bit integer value.\n" "///\n" "/// \\code{.operation}\n" "/// MEM[__A+63:__A] := MEM[__A+63:__A] + __B[63:0]\n" "/// \\endcode\n" "static __inline__ void __DEFAULT_FN_ATTRS _aadd_i64(long long *__A,\n" " long long __B) {\n" " __builtin_ia32_aadd64((long long *)__A, __B);\n" "}\n" "\n" "/// Atomically and a 64-bit value at memory operand \\a __A and a 64-bit \\a __B,\n" "/// and store the result to the same memory location.\n" "///\n" "/// This intrinsic should be used for contention or weak ordering. It may\n" "/// result in bad performance for hot data used by single thread only.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c AAND instruction.\n" "///\n" "/// \\param __A\n" "/// A pointer to a 64-bit memory location.\n" "/// \\param __B\n" "/// A 64-bit integer value.\n" "///\n" "/// \\code{.operation}\n" "/// MEM[__A+63:__A] := MEM[__A+63:__A] AND __B[63:0]\n" "/// \\endcode\n" "static __inline__ void __DEFAULT_FN_ATTRS _aand_i64(long long *__A,\n" " long long __B) {\n" " __builtin_ia32_aand64((long long *)__A, __B);\n" "}\n" "\n" "/// Atomically or a 64-bit value at memory operand \\a __A and a 64-bit \\a __B,\n" "/// and store the result to the same memory location.\n" "///\n" "/// This intrinsic should be used for contention or weak ordering. 
It may\n" "/// result in bad performance for hot data used by single thread only.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c AOR instruction.\n" "///\n" "/// \\param __A\n" "/// A pointer to a 64-bit memory location.\n" "/// \\param __B\n" "/// A 64-bit integer value.\n" "///\n" "/// \\code{.operation}\n" "/// MEM[__A+63:__A] := MEM[__A+63:__A] OR __B[63:0]\n" "/// \\endcode\n" "static __inline__ void __DEFAULT_FN_ATTRS _aor_i64(long long *__A,\n" " long long __B) {\n" " __builtin_ia32_aor64((long long *)__A, __B);\n" "}\n" "\n" "/// Atomically xor a 64-bit value at memory operand \\a __A and a 64-bit \\a __B,\n" "/// and store the result to the same memory location.\n" "///\n" "/// This intrinsic should be used for contention or weak ordering. It may\n" "/// result in bad performance for hot data used by single thread only.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c AXOR instruction.\n" "///\n" "/// \\param __A\n" "/// A pointer to a 64-bit memory location.\n" "/// \\param __B\n" "/// A 64-bit integer value.\n" "///\n" "/// \\code{.operation}\n" "/// MEM[__A+63:__A] := MEM[__A+63:__A] XOR __B[63:0]\n" "/// \\endcode\n" "static __inline__ void __DEFAULT_FN_ATTRS _axor_i64(long long *__A,\n" " long long __B) {\n" " __builtin_ia32_axor64((long long *)__A, __B);\n" "}\n" "#endif // __x86_64__\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "#endif // __RAOINTINTRIN_H\n" "" } , { "/builtins/rdpruintrin.h" , "/*===---- rdpruintrin.h - RDPRU intrinsics ---------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#if !defined __X86INTRIN_H\n" "#error \"Never use directly; include instead.\"\n" 
"#endif\n" "\n" "#ifndef __RDPRUINTRIN_H\n" "#define __RDPRUINTRIN_H\n" "\n" "/* Define the default attributes for the functions in this file. */\n" "#define __DEFAULT_FN_ATTRS \\\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"rdpru\")))\n" "\n" "\n" "/// Reads the content of a processor register.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the RDPRU instruction.\n" "///\n" "/// \\param reg_id\n" "/// A processor register identifier.\n" "static __inline__ unsigned long long __DEFAULT_FN_ATTRS\n" "__rdpru (int reg_id)\n" "{\n" " return __builtin_ia32_rdpru(reg_id);\n" "}\n" "\n" "#define __RDPRU_MPERF 0\n" "#define __RDPRU_APERF 1\n" "\n" "/// Reads the content of processor register MPERF.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic generates instruction RDPRU to read the value of\n" "/// register MPERF.\n" "#define __mperf() __builtin_ia32_rdpru(__RDPRU_MPERF)\n" "\n" "/// Reads the content of processor register APERF.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic generates instruction RDPRU to read the value of\n" "/// register APERF.\n" "#define __aperf() __builtin_ia32_rdpru(__RDPRU_APERF)\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "\n" "#endif /* __RDPRUINTRIN_H */\n" "" } , { "/builtins/rdseedintrin.h" , "/*===---- rdseedintrin.h - RDSEED intrinsics -------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __RDSEEDINTRIN_H\n" "#define __RDSEEDINTRIN_H\n" "\n" "/* Define the default attributes for the functions in this file. 
*/\n" "#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__(\"rdseed\")))\n" "\n" "/// Stores a hardware-generated 16-bit random value in the memory at \\a __p.\n" "///\n" "/// The random number generator complies with NIST SP800-90B and SP800-90C.\n" "///\n" "/// \\code{.operation}\n" "/// IF HW_NRND_GEN.ready == 1\n" "/// Store16(__p, HW_NRND_GEN.data)\n" "/// result := 1\n" "/// ELSE\n" "/// Store16(__p, 0)\n" "/// result := 0\n" "/// END\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c RDSEED instruction.\n" "///\n" "/// \\param __p\n" "/// Pointer to memory for storing the 16-bit random number.\n" "/// \\returns 1 if a random number was generated, 0 if not.\n" "static __inline__ int __DEFAULT_FN_ATTRS\n" "_rdseed16_step(unsigned short *__p)\n" "{\n" " return (int) __builtin_ia32_rdseed16_step(__p);\n" "}\n" "\n" "/// Stores a hardware-generated 32-bit random value in the memory at \\a __p.\n" "///\n" "/// The random number generator complies with NIST SP800-90B and SP800-90C.\n" "///\n" "/// \\code{.operation}\n" "/// IF HW_NRND_GEN.ready == 1\n" "/// Store32(__p, HW_NRND_GEN.data)\n" "/// result := 1\n" "/// ELSE\n" "/// Store32(__p, 0)\n" "/// result := 0\n" "/// END\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c RDSEED instruction.\n" "///\n" "/// \\param __p\n" "/// Pointer to memory for storing the 32-bit random number.\n" "/// \\returns 1 if a random number was generated, 0 if not.\n" "static __inline__ int __DEFAULT_FN_ATTRS\n" "_rdseed32_step(unsigned int *__p)\n" "{\n" " return (int) __builtin_ia32_rdseed32_step(__p);\n" "}\n" "\n" "#ifdef __x86_64__\n" "/// Stores a hardware-generated 64-bit random value in the memory at \\a __p.\n" "///\n" "/// The random number generator complies with NIST SP800-90B and SP800-90C.\n" "///\n" "/// \\code{.operation}\n" "/// IF HW_NRND_GEN.ready == 1\n" "/// Store64(__p, 
HW_NRND_GEN.data)\n" "/// result := 1\n" "/// ELSE\n" "/// Store64(__p, 0)\n" "/// result := 0\n" "/// END\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c RDSEED instruction.\n" "///\n" "/// \\param __p\n" "/// Pointer to memory for storing the 64-bit random number.\n" "/// \\returns 1 if a random number was generated, 0 if not.\n" "static __inline__ int __DEFAULT_FN_ATTRS\n" "_rdseed64_step(unsigned long long *__p)\n" "{\n" " return (int) __builtin_ia32_rdseed64_step(__p);\n" "}\n" "#endif\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "\n" "#endif /* __RDSEEDINTRIN_H */\n" "" } , { "/builtins/riscv_ntlh.h" , "/*===---- riscv_ntlh.h - RISC-V NTLH intrinsics ----------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __RISCV_NTLH_H\n" "#define __RISCV_NTLH_H\n" "\n" "#ifndef __riscv_zihintntl\n" "#error \"NTLH intrinsics require the NTLH extension.\"\n" "#endif\n" "\n" "enum {\n" " __RISCV_NTLH_INNERMOST_PRIVATE = 2,\n" " __RISCV_NTLH_ALL_PRIVATE,\n" " __RISCV_NTLH_INNERMOST_SHARED,\n" " __RISCV_NTLH_ALL\n" "};\n" "\n" "#define __riscv_ntl_load(PTR, DOMAIN) __builtin_riscv_ntl_load((PTR), (DOMAIN))\n" "#define __riscv_ntl_store(PTR, VAL, DOMAIN) \\\n" " __builtin_riscv_ntl_store((PTR), (VAL), (DOMAIN))\n" "\n" "#endif" } , { "/builtins/riscv_vector.h" , "/*===---- riscv_vector.h - RISC-V V-extension RVVIntrinsics -------------------===\n" " *\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " 
*===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __RISCV_VECTOR_H\n" "#define __RISCV_VECTOR_H\n" "\n" "#include \n" "#include \n" "\n" "#ifndef __riscv_vector\n" "#error \"Vector intrinsics require the vector extension.\"\n" "#endif\n" "\n" "#ifdef __cplusplus\n" "extern \"C\" {\n" "#endif\n" "\n" "#pragma clang riscv intrinsic vector\n" "\n" "\n" "enum __RISCV_FRM {\n" " __RISCV_FRM_RNE = 0,\n" " __RISCV_FRM_RTZ = 1,\n" " __RISCV_FRM_RDN = 2,\n" " __RISCV_FRM_RUP = 3,\n" " __RISCV_FRM_RMM = 4,\n" "};\n" "\n" "#define __riscv_vlenb() __builtin_rvv_vlenb()\n" "\n" "#define __riscv_vsetvl_e8mf4(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 6)\n" "#define __riscv_vsetvl_e8mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 7)\n" "#define __riscv_vsetvl_e8m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 0)\n" "#define __riscv_vsetvl_e8m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 1)\n" "#define __riscv_vsetvl_e8m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 2)\n" "#define __riscv_vsetvl_e8m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 3)\n" "\n" "#define __riscv_vsetvl_e16mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 7)\n" "#define __riscv_vsetvl_e16m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 0)\n" "#define __riscv_vsetvl_e16m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 1)\n" "#define __riscv_vsetvl_e16m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 2)\n" "#define __riscv_vsetvl_e16m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 3)\n" "\n" "#define __riscv_vsetvl_e32m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 0)\n" "#define __riscv_vsetvl_e32m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 1)\n" "#define __riscv_vsetvl_e32m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 2)\n" "#define __riscv_vsetvl_e32m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 3)\n" "\n" "#if __riscv_v_elen >= 64\n" "#define __riscv_vsetvl_e8mf8(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 5)\n" "#define __riscv_vsetvl_e16mf4(avl) 
__builtin_rvv_vsetvli((size_t)(avl), 1, 6)\n" "#define __riscv_vsetvl_e32mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 7)\n" "\n" "#define __riscv_vsetvl_e64m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 0)\n" "#define __riscv_vsetvl_e64m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 1)\n" "#define __riscv_vsetvl_e64m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 2)\n" "#define __riscv_vsetvl_e64m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 3)\n" "#endif\n" "\n" "#define __riscv_vsetvlmax_e8mf4() __builtin_rvv_vsetvlimax(0, 6)\n" "#define __riscv_vsetvlmax_e8mf2() __builtin_rvv_vsetvlimax(0, 7)\n" "#define __riscv_vsetvlmax_e8m1() __builtin_rvv_vsetvlimax(0, 0)\n" "#define __riscv_vsetvlmax_e8m2() __builtin_rvv_vsetvlimax(0, 1)\n" "#define __riscv_vsetvlmax_e8m4() __builtin_rvv_vsetvlimax(0, 2)\n" "#define __riscv_vsetvlmax_e8m8() __builtin_rvv_vsetvlimax(0, 3)\n" "\n" "#define __riscv_vsetvlmax_e16mf2() __builtin_rvv_vsetvlimax(1, 7)\n" "#define __riscv_vsetvlmax_e16m1() __builtin_rvv_vsetvlimax(1, 0)\n" "#define __riscv_vsetvlmax_e16m2() __builtin_rvv_vsetvlimax(1, 1)\n" "#define __riscv_vsetvlmax_e16m4() __builtin_rvv_vsetvlimax(1, 2)\n" "#define __riscv_vsetvlmax_e16m8() __builtin_rvv_vsetvlimax(1, 3)\n" "\n" "#define __riscv_vsetvlmax_e32m1() __builtin_rvv_vsetvlimax(2, 0)\n" "#define __riscv_vsetvlmax_e32m2() __builtin_rvv_vsetvlimax(2, 1)\n" "#define __riscv_vsetvlmax_e32m4() __builtin_rvv_vsetvlimax(2, 2)\n" "#define __riscv_vsetvlmax_e32m8() __builtin_rvv_vsetvlimax(2, 3)\n" "\n" "#if __riscv_v_elen >= 64\n" "#define __riscv_vsetvlmax_e8mf8() __builtin_rvv_vsetvlimax(0, 5)\n" "#define __riscv_vsetvlmax_e16mf4() __builtin_rvv_vsetvlimax(1, 6)\n" "#define __riscv_vsetvlmax_e32mf2() __builtin_rvv_vsetvlimax(2, 7)\n" "\n" "#define __riscv_vsetvlmax_e64m1() __builtin_rvv_vsetvlimax(3, 0)\n" "#define __riscv_vsetvlmax_e64m2() __builtin_rvv_vsetvlimax(3, 1)\n" "#define __riscv_vsetvlmax_e64m4() __builtin_rvv_vsetvlimax(3, 2)\n" "#define 
__riscv_vsetvlmax_e64m8() __builtin_rvv_vsetvlimax(3, 3)\n" "#endif\n" "\n" "\n" "enum __RISCV_VXRM {\n" " __RISCV_VXRM_RNU = 0,\n" " __RISCV_VXRM_RNE = 1,\n" " __RISCV_VXRM_RDN = 2,\n" " __RISCV_VXRM_ROD = 3,\n" "};\n" "typedef __rvv_bool64_t vbool64_t;\n" "typedef __rvv_bool32_t vbool32_t;\n" "typedef __rvv_bool16_t vbool16_t;\n" "typedef __rvv_bool8_t vbool8_t;\n" "typedef __rvv_bool4_t vbool4_t;\n" "typedef __rvv_bool2_t vbool2_t;\n" "typedef __rvv_bool1_t vbool1_t;\n" "typedef __rvv_int8mf8_t vint8mf8_t;\n" "typedef __rvv_uint8mf8_t vuint8mf8_t;\n" "typedef __rvv_int8mf8x2_t vint8mf8x2_t;\n" "typedef __rvv_uint8mf8x2_t vuint8mf8x2_t;\n" "typedef __rvv_int8mf8x3_t vint8mf8x3_t;\n" "typedef __rvv_uint8mf8x3_t vuint8mf8x3_t;\n" "typedef __rvv_int8mf8x4_t vint8mf8x4_t;\n" "typedef __rvv_uint8mf8x4_t vuint8mf8x4_t;\n" "typedef __rvv_int8mf8x5_t vint8mf8x5_t;\n" "typedef __rvv_uint8mf8x5_t vuint8mf8x5_t;\n" "typedef __rvv_int8mf8x6_t vint8mf8x6_t;\n" "typedef __rvv_uint8mf8x6_t vuint8mf8x6_t;\n" "typedef __rvv_int8mf8x7_t vint8mf8x7_t;\n" "typedef __rvv_uint8mf8x7_t vuint8mf8x7_t;\n" "typedef __rvv_int8mf8x8_t vint8mf8x8_t;\n" "typedef __rvv_uint8mf8x8_t vuint8mf8x8_t;\n" "typedef __rvv_int8mf4_t vint8mf4_t;\n" "typedef __rvv_uint8mf4_t vuint8mf4_t;\n" "typedef __rvv_int8mf4x2_t vint8mf4x2_t;\n" "typedef __rvv_uint8mf4x2_t vuint8mf4x2_t;\n" "typedef __rvv_int8mf4x3_t vint8mf4x3_t;\n" "typedef __rvv_uint8mf4x3_t vuint8mf4x3_t;\n" "typedef __rvv_int8mf4x4_t vint8mf4x4_t;\n" "typedef __rvv_uint8mf4x4_t vuint8mf4x4_t;\n" "typedef __rvv_int8mf4x5_t vint8mf4x5_t;\n" "typedef __rvv_uint8mf4x5_t vuint8mf4x5_t;\n" "typedef __rvv_int8mf4x6_t vint8mf4x6_t;\n" "typedef __rvv_uint8mf4x6_t vuint8mf4x6_t;\n" "typedef __rvv_int8mf4x7_t vint8mf4x7_t;\n" "typedef __rvv_uint8mf4x7_t vuint8mf4x7_t;\n" "typedef __rvv_int8mf4x8_t vint8mf4x8_t;\n" "typedef __rvv_uint8mf4x8_t vuint8mf4x8_t;\n" "typedef __rvv_int8mf2_t vint8mf2_t;\n" "typedef __rvv_uint8mf2_t vuint8mf2_t;\n" "typedef 
__rvv_int8mf2x2_t vint8mf2x2_t;\n" "typedef __rvv_uint8mf2x2_t vuint8mf2x2_t;\n" "typedef __rvv_int8mf2x3_t vint8mf2x3_t;\n" "typedef __rvv_uint8mf2x3_t vuint8mf2x3_t;\n" "typedef __rvv_int8mf2x4_t vint8mf2x4_t;\n" "typedef __rvv_uint8mf2x4_t vuint8mf2x4_t;\n" "typedef __rvv_int8mf2x5_t vint8mf2x5_t;\n" "typedef __rvv_uint8mf2x5_t vuint8mf2x5_t;\n" "typedef __rvv_int8mf2x6_t vint8mf2x6_t;\n" "typedef __rvv_uint8mf2x6_t vuint8mf2x6_t;\n" "typedef __rvv_int8mf2x7_t vint8mf2x7_t;\n" "typedef __rvv_uint8mf2x7_t vuint8mf2x7_t;\n" "typedef __rvv_int8mf2x8_t vint8mf2x8_t;\n" "typedef __rvv_uint8mf2x8_t vuint8mf2x8_t;\n" "typedef __rvv_int8m1_t vint8m1_t;\n" "typedef __rvv_uint8m1_t vuint8m1_t;\n" "typedef __rvv_int8m1x2_t vint8m1x2_t;\n" "typedef __rvv_uint8m1x2_t vuint8m1x2_t;\n" "typedef __rvv_int8m1x3_t vint8m1x3_t;\n" "typedef __rvv_uint8m1x3_t vuint8m1x3_t;\n" "typedef __rvv_int8m1x4_t vint8m1x4_t;\n" "typedef __rvv_uint8m1x4_t vuint8m1x4_t;\n" "typedef __rvv_int8m1x5_t vint8m1x5_t;\n" "typedef __rvv_uint8m1x5_t vuint8m1x5_t;\n" "typedef __rvv_int8m1x6_t vint8m1x6_t;\n" "typedef __rvv_uint8m1x6_t vuint8m1x6_t;\n" "typedef __rvv_int8m1x7_t vint8m1x7_t;\n" "typedef __rvv_uint8m1x7_t vuint8m1x7_t;\n" "typedef __rvv_int8m1x8_t vint8m1x8_t;\n" "typedef __rvv_uint8m1x8_t vuint8m1x8_t;\n" "typedef __rvv_int8m2_t vint8m2_t;\n" "typedef __rvv_uint8m2_t vuint8m2_t;\n" "typedef __rvv_int8m2x2_t vint8m2x2_t;\n" "typedef __rvv_uint8m2x2_t vuint8m2x2_t;\n" "typedef __rvv_int8m2x3_t vint8m2x3_t;\n" "typedef __rvv_uint8m2x3_t vuint8m2x3_t;\n" "typedef __rvv_int8m2x4_t vint8m2x4_t;\n" "typedef __rvv_uint8m2x4_t vuint8m2x4_t;\n" "typedef __rvv_int8m4_t vint8m4_t;\n" "typedef __rvv_uint8m4_t vuint8m4_t;\n" "typedef __rvv_int8m4x2_t vint8m4x2_t;\n" "typedef __rvv_uint8m4x2_t vuint8m4x2_t;\n" "typedef __rvv_int8m8_t vint8m8_t;\n" "typedef __rvv_uint8m8_t vuint8m8_t;\n" "typedef __rvv_int16mf4_t vint16mf4_t;\n" "typedef __rvv_uint16mf4_t vuint16mf4_t;\n" "typedef __rvv_int16mf4x2_t 
vint16mf4x2_t;\n" "typedef __rvv_uint16mf4x2_t vuint16mf4x2_t;\n" "typedef __rvv_int16mf4x3_t vint16mf4x3_t;\n" "typedef __rvv_uint16mf4x3_t vuint16mf4x3_t;\n" "typedef __rvv_int16mf4x4_t vint16mf4x4_t;\n" "typedef __rvv_uint16mf4x4_t vuint16mf4x4_t;\n" "typedef __rvv_int16mf4x5_t vint16mf4x5_t;\n" "typedef __rvv_uint16mf4x5_t vuint16mf4x5_t;\n" "typedef __rvv_int16mf4x6_t vint16mf4x6_t;\n" "typedef __rvv_uint16mf4x6_t vuint16mf4x6_t;\n" "typedef __rvv_int16mf4x7_t vint16mf4x7_t;\n" "typedef __rvv_uint16mf4x7_t vuint16mf4x7_t;\n" "typedef __rvv_int16mf4x8_t vint16mf4x8_t;\n" "typedef __rvv_uint16mf4x8_t vuint16mf4x8_t;\n" "typedef __rvv_int16mf2_t vint16mf2_t;\n" "typedef __rvv_uint16mf2_t vuint16mf2_t;\n" "typedef __rvv_int16mf2x2_t vint16mf2x2_t;\n" "typedef __rvv_uint16mf2x2_t vuint16mf2x2_t;\n" "typedef __rvv_int16mf2x3_t vint16mf2x3_t;\n" "typedef __rvv_uint16mf2x3_t vuint16mf2x3_t;\n" "typedef __rvv_int16mf2x4_t vint16mf2x4_t;\n" "typedef __rvv_uint16mf2x4_t vuint16mf2x4_t;\n" "typedef __rvv_int16mf2x5_t vint16mf2x5_t;\n" "typedef __rvv_uint16mf2x5_t vuint16mf2x5_t;\n" "typedef __rvv_int16mf2x6_t vint16mf2x6_t;\n" "typedef __rvv_uint16mf2x6_t vuint16mf2x6_t;\n" "typedef __rvv_int16mf2x7_t vint16mf2x7_t;\n" "typedef __rvv_uint16mf2x7_t vuint16mf2x7_t;\n" "typedef __rvv_int16mf2x8_t vint16mf2x8_t;\n" "typedef __rvv_uint16mf2x8_t vuint16mf2x8_t;\n" "typedef __rvv_int16m1_t vint16m1_t;\n" "typedef __rvv_uint16m1_t vuint16m1_t;\n" "typedef __rvv_int16m1x2_t vint16m1x2_t;\n" "typedef __rvv_uint16m1x2_t vuint16m1x2_t;\n" "typedef __rvv_int16m1x3_t vint16m1x3_t;\n" "typedef __rvv_uint16m1x3_t vuint16m1x3_t;\n" "typedef __rvv_int16m1x4_t vint16m1x4_t;\n" "typedef __rvv_uint16m1x4_t vuint16m1x4_t;\n" "typedef __rvv_int16m1x5_t vint16m1x5_t;\n" "typedef __rvv_uint16m1x5_t vuint16m1x5_t;\n" "typedef __rvv_int16m1x6_t vint16m1x6_t;\n" "typedef __rvv_uint16m1x6_t vuint16m1x6_t;\n" "typedef __rvv_int16m1x7_t vint16m1x7_t;\n" "typedef __rvv_uint16m1x7_t vuint16m1x7_t;\n" 
"typedef __rvv_int16m1x8_t vint16m1x8_t;\n" "typedef __rvv_uint16m1x8_t vuint16m1x8_t;\n" "typedef __rvv_int16m2_t vint16m2_t;\n" "typedef __rvv_uint16m2_t vuint16m2_t;\n" "typedef __rvv_int16m2x2_t vint16m2x2_t;\n" "typedef __rvv_uint16m2x2_t vuint16m2x2_t;\n" "typedef __rvv_int16m2x3_t vint16m2x3_t;\n" "typedef __rvv_uint16m2x3_t vuint16m2x3_t;\n" "typedef __rvv_int16m2x4_t vint16m2x4_t;\n" "typedef __rvv_uint16m2x4_t vuint16m2x4_t;\n" "typedef __rvv_int16m4_t vint16m4_t;\n" "typedef __rvv_uint16m4_t vuint16m4_t;\n" "typedef __rvv_int16m4x2_t vint16m4x2_t;\n" "typedef __rvv_uint16m4x2_t vuint16m4x2_t;\n" "typedef __rvv_int16m8_t vint16m8_t;\n" "typedef __rvv_uint16m8_t vuint16m8_t;\n" "typedef __rvv_int32mf2_t vint32mf2_t;\n" "typedef __rvv_uint32mf2_t vuint32mf2_t;\n" "typedef __rvv_int32mf2x2_t vint32mf2x2_t;\n" "typedef __rvv_uint32mf2x2_t vuint32mf2x2_t;\n" "typedef __rvv_int32mf2x3_t vint32mf2x3_t;\n" "typedef __rvv_uint32mf2x3_t vuint32mf2x3_t;\n" "typedef __rvv_int32mf2x4_t vint32mf2x4_t;\n" "typedef __rvv_uint32mf2x4_t vuint32mf2x4_t;\n" "typedef __rvv_int32mf2x5_t vint32mf2x5_t;\n" "typedef __rvv_uint32mf2x5_t vuint32mf2x5_t;\n" "typedef __rvv_int32mf2x6_t vint32mf2x6_t;\n" "typedef __rvv_uint32mf2x6_t vuint32mf2x6_t;\n" "typedef __rvv_int32mf2x7_t vint32mf2x7_t;\n" "typedef __rvv_uint32mf2x7_t vuint32mf2x7_t;\n" "typedef __rvv_int32mf2x8_t vint32mf2x8_t;\n" "typedef __rvv_uint32mf2x8_t vuint32mf2x8_t;\n" "typedef __rvv_int32m1_t vint32m1_t;\n" "typedef __rvv_uint32m1_t vuint32m1_t;\n" "typedef __rvv_int32m1x2_t vint32m1x2_t;\n" "typedef __rvv_uint32m1x2_t vuint32m1x2_t;\n" "typedef __rvv_int32m1x3_t vint32m1x3_t;\n" "typedef __rvv_uint32m1x3_t vuint32m1x3_t;\n" "typedef __rvv_int32m1x4_t vint32m1x4_t;\n" "typedef __rvv_uint32m1x4_t vuint32m1x4_t;\n" "typedef __rvv_int32m1x5_t vint32m1x5_t;\n" "typedef __rvv_uint32m1x5_t vuint32m1x5_t;\n" "typedef __rvv_int32m1x6_t vint32m1x6_t;\n" "typedef __rvv_uint32m1x6_t vuint32m1x6_t;\n" "typedef __rvv_int32m1x7_t 
vint32m1x7_t;\n" "typedef __rvv_uint32m1x7_t vuint32m1x7_t;\n" "typedef __rvv_int32m1x8_t vint32m1x8_t;\n" "typedef __rvv_uint32m1x8_t vuint32m1x8_t;\n" "typedef __rvv_int32m2_t vint32m2_t;\n" "typedef __rvv_uint32m2_t vuint32m2_t;\n" "typedef __rvv_int32m2x2_t vint32m2x2_t;\n" "typedef __rvv_uint32m2x2_t vuint32m2x2_t;\n" "typedef __rvv_int32m2x3_t vint32m2x3_t;\n" "typedef __rvv_uint32m2x3_t vuint32m2x3_t;\n" "typedef __rvv_int32m2x4_t vint32m2x4_t;\n" "typedef __rvv_uint32m2x4_t vuint32m2x4_t;\n" "typedef __rvv_int32m4_t vint32m4_t;\n" "typedef __rvv_uint32m4_t vuint32m4_t;\n" "typedef __rvv_int32m4x2_t vint32m4x2_t;\n" "typedef __rvv_uint32m4x2_t vuint32m4x2_t;\n" "typedef __rvv_int32m8_t vint32m8_t;\n" "typedef __rvv_uint32m8_t vuint32m8_t;\n" "typedef __rvv_int64m1_t vint64m1_t;\n" "typedef __rvv_uint64m1_t vuint64m1_t;\n" "typedef __rvv_int64m1x2_t vint64m1x2_t;\n" "typedef __rvv_uint64m1x2_t vuint64m1x2_t;\n" "typedef __rvv_int64m1x3_t vint64m1x3_t;\n" "typedef __rvv_uint64m1x3_t vuint64m1x3_t;\n" "typedef __rvv_int64m1x4_t vint64m1x4_t;\n" "typedef __rvv_uint64m1x4_t vuint64m1x4_t;\n" "typedef __rvv_int64m1x5_t vint64m1x5_t;\n" "typedef __rvv_uint64m1x5_t vuint64m1x5_t;\n" "typedef __rvv_int64m1x6_t vint64m1x6_t;\n" "typedef __rvv_uint64m1x6_t vuint64m1x6_t;\n" "typedef __rvv_int64m1x7_t vint64m1x7_t;\n" "typedef __rvv_uint64m1x7_t vuint64m1x7_t;\n" "typedef __rvv_int64m1x8_t vint64m1x8_t;\n" "typedef __rvv_uint64m1x8_t vuint64m1x8_t;\n" "typedef __rvv_int64m2_t vint64m2_t;\n" "typedef __rvv_uint64m2_t vuint64m2_t;\n" "typedef __rvv_int64m2x2_t vint64m2x2_t;\n" "typedef __rvv_uint64m2x2_t vuint64m2x2_t;\n" "typedef __rvv_int64m2x3_t vint64m2x3_t;\n" "typedef __rvv_uint64m2x3_t vuint64m2x3_t;\n" "typedef __rvv_int64m2x4_t vint64m2x4_t;\n" "typedef __rvv_uint64m2x4_t vuint64m2x4_t;\n" "typedef __rvv_int64m4_t vint64m4_t;\n" "typedef __rvv_uint64m4_t vuint64m4_t;\n" "typedef __rvv_int64m4x2_t vint64m4x2_t;\n" "typedef __rvv_uint64m4x2_t vuint64m4x2_t;\n" 
"typedef __rvv_int64m8_t vint64m8_t;\n" "typedef __rvv_uint64m8_t vuint64m8_t;\n" "typedef __rvv_float16mf4_t vfloat16mf4_t;\n" "typedef __rvv_float16mf4x2_t vfloat16mf4x2_t;\n" "typedef __rvv_float16mf4x3_t vfloat16mf4x3_t;\n" "typedef __rvv_float16mf4x4_t vfloat16mf4x4_t;\n" "typedef __rvv_float16mf4x5_t vfloat16mf4x5_t;\n" "typedef __rvv_float16mf4x6_t vfloat16mf4x6_t;\n" "typedef __rvv_float16mf4x7_t vfloat16mf4x7_t;\n" "typedef __rvv_float16mf4x8_t vfloat16mf4x8_t;\n" "typedef __rvv_float16mf2_t vfloat16mf2_t;\n" "typedef __rvv_float16mf2x2_t vfloat16mf2x2_t;\n" "typedef __rvv_float16mf2x3_t vfloat16mf2x3_t;\n" "typedef __rvv_float16mf2x4_t vfloat16mf2x4_t;\n" "typedef __rvv_float16mf2x5_t vfloat16mf2x5_t;\n" "typedef __rvv_float16mf2x6_t vfloat16mf2x6_t;\n" "typedef __rvv_float16mf2x7_t vfloat16mf2x7_t;\n" "typedef __rvv_float16mf2x8_t vfloat16mf2x8_t;\n" "typedef __rvv_float16m1_t vfloat16m1_t;\n" "typedef __rvv_float16m1x2_t vfloat16m1x2_t;\n" "typedef __rvv_float16m1x3_t vfloat16m1x3_t;\n" "typedef __rvv_float16m1x4_t vfloat16m1x4_t;\n" "typedef __rvv_float16m1x5_t vfloat16m1x5_t;\n" "typedef __rvv_float16m1x6_t vfloat16m1x6_t;\n" "typedef __rvv_float16m1x7_t vfloat16m1x7_t;\n" "typedef __rvv_float16m1x8_t vfloat16m1x8_t;\n" "typedef __rvv_float16m2_t vfloat16m2_t;\n" "typedef __rvv_float16m2x2_t vfloat16m2x2_t;\n" "typedef __rvv_float16m2x3_t vfloat16m2x3_t;\n" "typedef __rvv_float16m2x4_t vfloat16m2x4_t;\n" "typedef __rvv_float16m4_t vfloat16m4_t;\n" "typedef __rvv_float16m4x2_t vfloat16m4x2_t;\n" "typedef __rvv_float16m8_t vfloat16m8_t;\n" "typedef __rvv_float32mf2_t vfloat32mf2_t;\n" "typedef __rvv_float32mf2x2_t vfloat32mf2x2_t;\n" "typedef __rvv_float32mf2x3_t vfloat32mf2x3_t;\n" "typedef __rvv_float32mf2x4_t vfloat32mf2x4_t;\n" "typedef __rvv_float32mf2x5_t vfloat32mf2x5_t;\n" "typedef __rvv_float32mf2x6_t vfloat32mf2x6_t;\n" "typedef __rvv_float32mf2x7_t vfloat32mf2x7_t;\n" "typedef __rvv_float32mf2x8_t vfloat32mf2x8_t;\n" "typedef 
__rvv_float32m1_t vfloat32m1_t;\n" "typedef __rvv_float32m1x2_t vfloat32m1x2_t;\n" "typedef __rvv_float32m1x3_t vfloat32m1x3_t;\n" "typedef __rvv_float32m1x4_t vfloat32m1x4_t;\n" "typedef __rvv_float32m1x5_t vfloat32m1x5_t;\n" "typedef __rvv_float32m1x6_t vfloat32m1x6_t;\n" "typedef __rvv_float32m1x7_t vfloat32m1x7_t;\n" "typedef __rvv_float32m1x8_t vfloat32m1x8_t;\n" "typedef __rvv_float32m2_t vfloat32m2_t;\n" "typedef __rvv_float32m2x2_t vfloat32m2x2_t;\n" "typedef __rvv_float32m2x3_t vfloat32m2x3_t;\n" "typedef __rvv_float32m2x4_t vfloat32m2x4_t;\n" "typedef __rvv_float32m4_t vfloat32m4_t;\n" "typedef __rvv_float32m4x2_t vfloat32m4x2_t;\n" "typedef __rvv_float32m8_t vfloat32m8_t;\n" "typedef __rvv_float64m1_t vfloat64m1_t;\n" "typedef __rvv_float64m1x2_t vfloat64m1x2_t;\n" "typedef __rvv_float64m1x3_t vfloat64m1x3_t;\n" "typedef __rvv_float64m1x4_t vfloat64m1x4_t;\n" "typedef __rvv_float64m1x5_t vfloat64m1x5_t;\n" "typedef __rvv_float64m1x6_t vfloat64m1x6_t;\n" "typedef __rvv_float64m1x7_t vfloat64m1x7_t;\n" "typedef __rvv_float64m1x8_t vfloat64m1x8_t;\n" "typedef __rvv_float64m2_t vfloat64m2_t;\n" "typedef __rvv_float64m2x2_t vfloat64m2x2_t;\n" "typedef __rvv_float64m2x3_t vfloat64m2x3_t;\n" "typedef __rvv_float64m2x4_t vfloat64m2x4_t;\n" "typedef __rvv_float64m4_t vfloat64m4_t;\n" "typedef __rvv_float64m4x2_t vfloat64m4x2_t;\n" "typedef __rvv_float64m8_t vfloat64m8_t;\n" "#define __riscv_v_intrinsic_overloading 1\n" "\n" "#ifdef __cplusplus\n" "}\n" "#endif // __cplusplus\n" "#endif // __RISCV_VECTOR_H\n" "" } , { "/builtins/rtmintrin.h" , "/*===---- rtmintrin.h - RTM intrinsics -------------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef 
__IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __RTMINTRIN_H\n" "#define __RTMINTRIN_H\n" "\n" "#define _XBEGIN_STARTED (~0u)\n" "#define _XABORT_EXPLICIT (1 << 0)\n" "#define _XABORT_RETRY (1 << 1)\n" "#define _XABORT_CONFLICT (1 << 2)\n" "#define _XABORT_CAPACITY (1 << 3)\n" "#define _XABORT_DEBUG (1 << 4)\n" "#define _XABORT_NESTED (1 << 5)\n" "#define _XABORT_CODE(x) (((x) >> 24) & 0xFF)\n" "\n" "/* Define the default attributes for the functions in this file. */\n" "#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__(\"rtm\")))\n" "\n" "static __inline__ unsigned int __DEFAULT_FN_ATTRS\n" "_xbegin(void)\n" "{\n" " return (unsigned int)__builtin_ia32_xbegin();\n" "}\n" "\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_xend(void)\n" "{\n" " __builtin_ia32_xend();\n" "}\n" "\n" "#define _xabort(imm) __builtin_ia32_xabort((imm))\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "\n" "#endif /* __RTMINTRIN_H */\n" "" } , { "/builtins/s390intrin.h" , "/*===---- s390intrin.h - SystemZ intrinsics --------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __S390INTRIN_H\n" "#define __S390INTRIN_H\n" "\n" "#ifndef __s390__\n" "#error \" is for s390 only\"\n" "#endif\n" "\n" "#ifdef __HTM__\n" "#include \n" "#endif\n" "\n" "#ifdef __VEC__\n" "#include \n" "#endif\n" "\n" "#endif /* __S390INTRIN_H*/\n" "" } , { "/builtins/serializeintrin.h" , "/*===--------------- serializeintrin.h - serialize intrinsics --------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * 
SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __SERIALIZEINTRIN_H\n" "#define __SERIALIZEINTRIN_H\n" "\n" "/// Serialize instruction fetch and execution.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the SERIALIZE instruction.\n" "///\n" "static __inline__ void\n" "__attribute__((__always_inline__, __nodebug__, __target__(\"serialize\")))\n" "_serialize (void)\n" "{\n" " __builtin_ia32_serialize ();\n" "}\n" "\n" "#endif /* __SERIALIZEINTRIN_H */\n" "" } , { "/builtins/sgxintrin.h" , "/*===---- sgxintrin.h - X86 SGX intrinsics configuration -------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __SGXINTRIN_H\n" "#define __SGXINTRIN_H\n" "\n" "#if __has_extension(gnu_asm)\n" "\n" "/* Define the default attributes for the functions in this file. 
*/\n" "#define __DEFAULT_FN_ATTRS \\\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"sgx\")))\n" "\n" "static __inline unsigned int __DEFAULT_FN_ATTRS\n" "_enclu_u32(unsigned int __leaf, __SIZE_TYPE__ __d[])\n" "{\n" " unsigned int __result;\n" " __asm__ (\"enclu\"\n" " : \"=a\" (__result), \"=b\" (__d[0]), \"=c\" (__d[1]), \"=d\" (__d[2])\n" " : \"a\" (__leaf), \"b\" (__d[0]), \"c\" (__d[1]), \"d\" (__d[2])\n" " : \"cc\");\n" " return __result;\n" "}\n" "\n" "static __inline unsigned int __DEFAULT_FN_ATTRS\n" "_encls_u32(unsigned int __leaf, __SIZE_TYPE__ __d[])\n" "{\n" " unsigned int __result;\n" " __asm__ (\"encls\"\n" " : \"=a\" (__result), \"=b\" (__d[0]), \"=c\" (__d[1]), \"=d\" (__d[2])\n" " : \"a\" (__leaf), \"b\" (__d[0]), \"c\" (__d[1]), \"d\" (__d[2])\n" " : \"cc\");\n" " return __result;\n" "}\n" "\n" "static __inline unsigned int __DEFAULT_FN_ATTRS\n" "_enclv_u32(unsigned int __leaf, __SIZE_TYPE__ __d[])\n" "{\n" " unsigned int __result;\n" " __asm__ (\"enclv\"\n" " : \"=a\" (__result), \"=b\" (__d[0]), \"=c\" (__d[1]), \"=d\" (__d[2])\n" " : \"a\" (__leaf), \"b\" (__d[0]), \"c\" (__d[1]), \"d\" (__d[2])\n" " : \"cc\");\n" " return __result;\n" "}\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "\n" "#endif /* __has_extension(gnu_asm) */\n" "\n" "#endif\n" "" } , { "/builtins/sha512intrin.h" , "/*===--------------- sha512intrin.h - SHA512 intrinsics -----------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif // __IMMINTRIN_H\n" "\n" "#ifndef __SHA512INTRIN_H\n" "#define __SHA512INTRIN_H\n" "\n" "#define __DEFAULT_FN_ATTRS256 \\\n" " __attribute__((__always_inline__, 
__nodebug__, __target__(\"sha512\"), \\\n" " __min_vector_width__(256)))\n" "\n" "/// This intrinisc is one of the two SHA512 message scheduling instructions.\n" "/// The intrinsic performs an intermediate calculation for the next four\n" "/// SHA512 message qwords. The calculated results are stored in \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256i _mm256_sha512msg1_epi64(__m256i __A, __m128i __B)\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VSHA512MSG1 instruction.\n" "///\n" "/// \\param __A\n" "/// A 256-bit vector of [4 x long long].\n" "/// \\param __B\n" "/// A 128-bit vector of [2 x long long].\n" "/// \\returns\n" "/// A 256-bit vector of [4 x long long].\n" "///\n" "/// \\code{.operation}\n" "/// DEFINE ROR64(qword, n) {\n" "/// count := n % 64\n" "/// dest := (qword >> count) | (qword << (64 - count))\n" "/// RETURN dest\n" "/// }\n" "/// DEFINE SHR64(qword, n) {\n" "/// RETURN qword >> n\n" "/// }\n" "/// DEFINE s0(qword):\n" "/// RETURN ROR64(qword,1) ^ ROR64(qword, 8) ^ SHR64(qword, 7)\n" "/// }\n" "/// W[4] := __B.qword[0]\n" "/// W[3] := __A.qword[3]\n" "/// W[2] := __A.qword[2]\n" "/// W[1] := __A.qword[1]\n" "/// W[0] := __A.qword[0]\n" "/// dst.qword[3] := W[3] + s0(W[4])\n" "/// dst.qword[2] := W[2] + s0(W[3])\n" "/// dst.qword[1] := W[1] + s0(W[2])\n" "/// dst.qword[0] := W[0] + s0(W[1])\n" "/// dst[MAX:256] := 0\n" "/// \\endcode\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_sha512msg1_epi64(__m256i __A, __m128i __B) {\n" " return (__m256i)__builtin_ia32_vsha512msg1((__v4du)__A, (__v2du)__B);\n" "}\n" "\n" "/// This intrinisc is one of the two SHA512 message scheduling instructions.\n" "/// The intrinsic performs the final calculation for the next four SHA512\n" "/// message qwords. 
The calculated results are stored in \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256i _mm256_sha512msg2_epi64(__m256i __A, __m256i __B)\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VSHA512MSG2 instruction.\n" "///\n" "/// \\param __A\n" "/// A 256-bit vector of [4 x long long].\n" "/// \\param __B\n" "/// A 256-bit vector of [4 x long long].\n" "/// \\returns\n" "/// A 256-bit vector of [4 x long long].\n" "///\n" "/// \\code{.operation}\n" "/// DEFINE ROR64(qword, n) {\n" "/// count := n % 64\n" "/// dest := (qword >> count) | (qword << (64 - count))\n" "/// RETURN dest\n" "/// }\n" "/// DEFINE SHR64(qword, n) {\n" "/// RETURN qword >> n\n" "/// }\n" "/// DEFINE s1(qword) {\n" "/// RETURN ROR64(qword,19) ^ ROR64(qword, 61) ^ SHR64(qword, 6)\n" "/// }\n" "/// W[14] := __B.qword[2]\n" "/// W[15] := __B.qword[3]\n" "/// W[16] := __A.qword[0] + s1(W[14])\n" "/// W[17] := __A.qword[1] + s1(W[15])\n" "/// W[18] := __A.qword[2] + s1(W[16])\n" "/// W[19] := __A.qword[3] + s1(W[17])\n" "/// dst.qword[3] := W[19]\n" "/// dst.qword[2] := W[18]\n" "/// dst.qword[1] := W[17]\n" "/// dst.qword[0] := W[16]\n" "/// dst[MAX:256] := 0\n" "/// \\endcode\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_sha512msg2_epi64(__m256i __A, __m256i __B) {\n" " return (__m256i)__builtin_ia32_vsha512msg2((__v4du)__A, (__v4du)__B);\n" "}\n" "\n" "/// This intrinisc performs two rounds of SHA512 operation using initial SHA512\n" "/// state (C,D,G,H) from \\a __A, an initial SHA512 state (A,B,E,F) from\n" "/// \\a __A, and a pre-computed sum of the next two round message qwords and\n" "/// the corresponding round constants from \\a __C (only the two lower qwords\n" "/// of the third operand). 
The updated SHA512 state (A,B,E,F) is written to\n" "/// \\a __A, and \\a __A can be used as the updated state (C,D,G,H) in later\n" "/// rounds.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256i _mm256_sha512rnds2_epi64(__m256i __A, __m256i __B, __m128i __C)\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VSHA512RNDS2 instruction.\n" "///\n" "/// \\param __A\n" "/// A 256-bit vector of [4 x long long].\n" "/// \\param __B\n" "/// A 256-bit vector of [4 x long long].\n" "/// \\param __C\n" "/// A 128-bit vector of [2 x long long].\n" "/// \\returns\n" "/// A 256-bit vector of [4 x long long].\n" "///\n" "/// \\code{.operation}\n" "/// DEFINE ROR64(qword, n) {\n" "/// count := n % 64\n" "/// dest := (qword >> count) | (qword << (64 - count))\n" "/// RETURN dest\n" "/// }\n" "/// DEFINE SHR64(qword, n) {\n" "/// RETURN qword >> n\n" "/// }\n" "/// DEFINE cap_sigma0(qword) {\n" "/// RETURN ROR64(qword,28) ^ ROR64(qword, 34) ^ ROR64(qword, 39)\n" "/// }\n" "/// DEFINE cap_sigma1(qword) {\n" "/// RETURN ROR64(qword,14) ^ ROR64(qword, 18) ^ ROR64(qword, 41)\n" "/// }\n" "/// DEFINE MAJ(a,b,c) {\n" "/// RETURN (a & b) ^ (a & c) ^ (b & c)\n" "/// }\n" "/// DEFINE CH(e,f,g) {\n" "/// RETURN (e & f) ^ (g & ~e)\n" "/// }\n" "/// A[0] := __B.qword[3]\n" "/// B[0] := __B.qword[2]\n" "/// C[0] := __C.qword[3]\n" "/// D[0] := __C.qword[2]\n" "/// E[0] := __B.qword[1]\n" "/// F[0] := __B.qword[0]\n" "/// G[0] := __C.qword[1]\n" "/// H[0] := __C.qword[0]\n" "/// WK[0]:= __A.qword[0]\n" "/// WK[1]:= __A.qword[1]\n" "/// FOR i := 0 to 1:\n" "/// A[i+1] := CH(E[i], F[i], G[i]) +\n" "/// cap_sigma1(E[i]) + WK[i] + H[i] +\n" "/// MAJ(A[i], B[i], C[i]) +\n" "/// cap_sigma0(A[i])\n" "/// B[i+1] := A[i]\n" "/// C[i+1] := B[i]\n" "/// D[i+1] := C[i]\n" "/// E[i+1] := CH(E[i], F[i], G[i]) +\n" "/// cap_sigma1(E[i]) + WK[i] + H[i] + D[i]\n" "/// F[i+1] := E[i]\n" "/// G[i+1] := F[i]\n" "/// H[i+1] := G[i]\n" "/// ENDFOR\n" "/// dst.qword[3] := 
A[2]\n" "/// dst.qword[2] := B[2]\n" "/// dst.qword[1] := E[2]\n" "/// dst.qword[0] := F[2]\n" "/// dst[MAX:256] := 0\n" "/// \\endcode\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_sha512rnds2_epi64(__m256i __A, __m256i __B, __m128i __C) {\n" " return (__m256i)__builtin_ia32_vsha512rnds2((__v4du)__A, (__v4du)__B,\n" " (__v2du)__C);\n" "}\n" "\n" "#undef __DEFAULT_FN_ATTRS256\n" "\n" "#endif // __SHA512INTRIN_H\n" "" } , { "/builtins/shaintrin.h" , "/*===---- shaintrin.h - SHA intrinsics -------------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __SHAINTRIN_H\n" "#define __SHAINTRIN_H\n" "\n" "/* Define the default attributes for the functions in this file. */\n" "#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__(\"sha\"), __min_vector_width__(128)))\n" "\n" "/// Performs four iterations of the inner loop of the SHA-1 message digest\n" "/// algorithm using the starting SHA-1 state (A, B, C, D) from the 128-bit\n" "/// vector of [4 x i32] in \\a V1 and the next four 32-bit elements of the\n" "/// message from the 128-bit vector of [4 x i32] in \\a V2. Note that the\n" "/// SHA-1 state variable E must have already been added to \\a V2\n" "/// (\\c _mm_sha1nexte_epu32() can perform this step). Returns the updated\n" "/// SHA-1 state (A, B, C, D) as a 128-bit vector of [4 x i32].\n" "///\n" "/// The SHA-1 algorithm has an inner loop of 80 iterations, twenty each\n" "/// with a different combining function and rounding constant. 
This\n" "/// intrinsic performs four iterations using a combining function and\n" "/// rounding constant selected by \\a M[1:0].\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm_sha1rnds4_epu32(__m128i V1, __m128i V2, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c SHA1RNDS4 instruction.\n" "///\n" "/// \\param V1\n" "/// A 128-bit vector of [4 x i32] containing the initial SHA-1 state.\n" "/// \\param V2\n" "/// A 128-bit vector of [4 x i32] containing the next four elements of\n" "/// the message, plus SHA-1 state variable E.\n" "/// \\param M\n" "/// An immediate value where bits [1:0] select among four possible\n" "/// combining functions and rounding constants (not specified here).\n" "/// \\returns A 128-bit vector of [4 x i32] containing the updated SHA-1 state.\n" "#define _mm_sha1rnds4_epu32(V1, V2, M) \\\n" " __builtin_ia32_sha1rnds4((__v4si)(__m128i)(V1), (__v4si)(__m128i)(V2), (M))\n" "\n" "/// Calculates the SHA-1 state variable E from the SHA-1 state variables in\n" "/// the 128-bit vector of [4 x i32] in \\a __X, adds that to the next set of\n" "/// four message elements in the 128-bit vector of [4 x i32] in \\a __Y, and\n" "/// returns the result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c SHA1NEXTE instruction.\n" "///\n" "/// \\param __X\n" "/// A 128-bit vector of [4 x i32] containing the current SHA-1 state.\n" "/// \\param __Y\n" "/// A 128-bit vector of [4 x i32] containing the next four elements of the\n" "/// message.\n" "/// \\returns A 128-bit vector of [4 x i32] containing the updated SHA-1\n" "/// values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_sha1nexte_epu32(__m128i __X, __m128i __Y)\n" "{\n" " return (__m128i)__builtin_ia32_sha1nexte((__v4si)__X, (__v4si)__Y);\n" "}\n" "\n" "/// Performs an intermediate calculation for deriving the next four SHA-1\n" "/// message elements using previous message elements from the 
128-bit\n" "/// vectors of [4 x i32] in \\a __X and \\a __Y, and returns the result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c SHA1MSG1 instruction.\n" "///\n" "/// \\param __X\n" "/// A 128-bit vector of [4 x i32] containing previous message elements.\n" "/// \\param __Y\n" "/// A 128-bit vector of [4 x i32] containing previous message elements.\n" "/// \\returns A 128-bit vector of [4 x i32] containing the derived SHA-1\n" "/// elements.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_sha1msg1_epu32(__m128i __X, __m128i __Y)\n" "{\n" " return (__m128i)__builtin_ia32_sha1msg1((__v4si)__X, (__v4si)__Y);\n" "}\n" "\n" "/// Performs the final calculation for deriving the next four SHA-1 message\n" "/// elements using previous message elements from the 128-bit vectors of\n" "/// [4 x i32] in \\a __X and \\a __Y, and returns the result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c SHA1MSG2 instruction.\n" "///\n" "/// \\param __X\n" "/// A 128-bit vector of [4 x i32] containing an intermediate result.\n" "/// \\param __Y\n" "/// A 128-bit vector of [4 x i32] containing previous message values.\n" "/// \\returns A 128-bit vector of [4 x i32] containing the updated SHA-1\n" "/// values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_sha1msg2_epu32(__m128i __X, __m128i __Y)\n" "{\n" " return (__m128i)__builtin_ia32_sha1msg2((__v4si)__X, (__v4si)__Y);\n" "}\n" "\n" "/// Performs two rounds of SHA-256 operation using the following inputs: a\n" "/// starting SHA-256 state (C, D, G, H) from the 128-bit vector of\n" "/// [4 x i32] in \\a __X; a starting SHA-256 state (A, B, E, F) from the\n" "/// 128-bit vector of [4 x i32] in \\a __Y; and a pre-computed sum of the\n" "/// next two message elements (unsigned 32-bit integers) and corresponding\n" "/// rounding constants from the 128-bit vector of [4 x i32] in \\a __Z.\n" "/// Returns the updated SHA-256 state (A, B, E, F) as a 
128-bit vector of\n" "/// [4 x i32].\n" "///\n" "/// The SHA-256 algorithm has a core loop of 64 iterations. This intrinsic\n" "/// performs two of those iterations.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c SHA256RNDS2 instruction.\n" "///\n" "/// \\param __X\n" "/// A 128-bit vector of [4 x i32] containing part of the initial SHA-256\n" "/// state.\n" "/// \\param __Y\n" "/// A 128-bit vector of [4 x i32] containing part of the initial SHA-256\n" "/// state.\n" "/// \\param __Z\n" "/// A 128-bit vector of [4 x i32] containing additional input to the\n" "/// SHA-256 operation.\n" "/// \\returns A 128-bit vector of [4 x i32] containing the updated SHA-1 state.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_sha256rnds2_epu32(__m128i __X, __m128i __Y, __m128i __Z)\n" "{\n" " return (__m128i)__builtin_ia32_sha256rnds2((__v4si)__X, (__v4si)__Y, (__v4si)__Z);\n" "}\n" "\n" "/// Performs an intermediate calculation for deriving the next four SHA-256\n" "/// message elements using previous message elements from the 128-bit\n" "/// vectors of [4 x i32] in \\a __X and \\a __Y, and returns the result.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c SHA256MSG1 instruction.\n" "///\n" "/// \\param __X\n" "/// A 128-bit vector of [4 x i32] containing previous message elements.\n" "/// \\param __Y\n" "/// A 128-bit vector of [4 x i32] containing previous message elements.\n" "/// \\returns A 128-bit vector of [4 x i32] containing the updated SHA-256\n" "/// values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_sha256msg1_epu32(__m128i __X, __m128i __Y)\n" "{\n" " return (__m128i)__builtin_ia32_sha256msg1((__v4si)__X, (__v4si)__Y);\n" "}\n" "\n" "/// Performs the final calculation for deriving the next four SHA-256 message\n" "/// elements using previous message elements from the 128-bit vectors of\n" "/// [4 x i32] in \\a __X and \\a __Y, and returns the result.\n" "///\n" "/// 
\\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c SHA256MSG2 instruction.\n" "///\n" "/// \\param __X\n" "/// A 128-bit vector of [4 x i32] containing an intermediate result.\n" "/// \\param __Y\n" "/// A 128-bit vector of [4 x i32] containing previous message values.\n" "/// \\returns A 128-bit vector of [4 x i32] containing the updated SHA-256\n" "/// values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_sha256msg2_epu32(__m128i __X, __m128i __Y)\n" "{\n" " return (__m128i)__builtin_ia32_sha256msg2((__v4si)__X, (__v4si)__Y);\n" "}\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "\n" "#endif /* __SHAINTRIN_H */\n" "" } , { "/builtins/sifive_vector.h" , "//===----- sifive_vector.h - SiFive Vector definitions --------------------===//\n" "//\n" "// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" "// See https://llvm.org/LICENSE.txt for license information.\n" "// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" "//\n" "//===----------------------------------------------------------------------===//\n" "\n" "#ifndef _SIFIVE_VECTOR_H_\n" "#define _SIFIVE_VECTOR_H_\n" "\n" "#include \"riscv_vector.h\"\n" "\n" "#pragma clang riscv intrinsic sifive_vector\n" "\n" "#endif //_SIFIVE_VECTOR_H_\n" "" } , { "/builtins/sm3intrin.h" , "/*===-------------------- sm3intrin.h - SM3 intrinsics ---------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif // __IMMINTRIN_H\n" "\n" "#ifndef __SM3INTRIN_H\n" "#define __SM3INTRIN_H\n" "\n" "#define __DEFAULT_FN_ATTRS128 \\\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"sm3\"), \\\n" " 
__min_vector_width__(128)))\n" "\n" "/// This intrinisc is one of the two SM3 message scheduling intrinsics. The\n" "/// intrinsic performs an initial calculation for the next four SM3 message\n" "/// words. The calculated results are stored in \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm_sm3msg1_epi32(__m128i __A, __m128i __B, __m128i __C)\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VSM3MSG1 instruction.\n" "///\n" "/// \\param __A\n" "/// A 128-bit vector of [4 x int].\n" "/// \\param __B\n" "/// A 128-bit vector of [4 x int].\n" "/// \\param __C\n" "/// A 128-bit vector of [4 x int].\n" "/// \\returns\n" "/// A 128-bit vector of [4 x int].\n" "///\n" "/// \\code{.operation}\n" "/// DEFINE ROL32(dword, n) {\n" "/// count := n % 32\n" "/// dest := (dword << count) | (dword >> (32 - count))\n" "/// RETURN dest\n" "/// }\n" "/// DEFINE P1(x) {\n" "/// RETURN x ^ ROL32(x, 15) ^ ROL32(x, 23)\n" "/// }\n" "/// W[0] := __C.dword[0]\n" "/// W[1] := __C.dword[1]\n" "/// W[2] := __C.dword[2]\n" "/// W[3] := __C.dword[3]\n" "/// W[7] := __A.dword[0]\n" "/// W[8] := __A.dword[1]\n" "/// W[9] := __A.dword[2]\n" "/// W[10] := __A.dword[3]\n" "/// W[13] := __B.dword[0]\n" "/// W[14] := __B.dword[1]\n" "/// W[15] := __B.dword[2]\n" "/// TMP0 := W[7] ^ W[0] ^ ROL32(W[13], 15)\n" "/// TMP1 := W[8] ^ W[1] ^ ROL32(W[14], 15)\n" "/// TMP2 := W[9] ^ W[2] ^ ROL32(W[15], 15)\n" "/// TMP3 := W[10] ^ W[3]\n" "/// dst.dword[0] := P1(TMP0)\n" "/// dst.dword[1] := P1(TMP1)\n" "/// dst.dword[2] := P1(TMP2)\n" "/// dst.dword[3] := P1(TMP3)\n" "/// dst[MAX:128] := 0\n" "/// \\endcode\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_sm3msg1_epi32(__m128i __A,\n" " __m128i __B,\n" " __m128i __C) {\n" " return (__m128i)__builtin_ia32_vsm3msg1((__v4su)__A, (__v4su)__B,\n" " (__v4su)__C);\n" "}\n" "\n" "/// This intrinisc is one of the two SM3 message scheduling intrinsics. 
The\n" "/// intrinsic performs the final calculation for the next four SM3 message\n" "/// words. The calculated results are stored in \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm_sm3msg2_epi32(__m128i __A, __m128i __B, __m128i __C)\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VSM3MSG2 instruction.\n" "///\n" "/// \\param __A\n" "/// A 128-bit vector of [4 x int].\n" "/// \\param __B\n" "/// A 128-bit vector of [4 x int].\n" "/// \\param __C\n" "/// A 128-bit vector of [4 x int].\n" "/// \\returns\n" "/// A 128-bit vector of [4 x int].\n" "///\n" "/// \\code{.operation}\n" "/// DEFINE ROL32(dword, n) {\n" "/// count := n % 32\n" "/// dest := (dword << count) | (dword >> (32-count))\n" "/// RETURN dest\n" "/// }\n" "/// WTMP[0] := __A.dword[0]\n" "/// WTMP[1] := __A.dword[1]\n" "/// WTMP[2] := __A.dword[2]\n" "/// WTMP[3] := __A.dword[3]\n" "/// W[3] := __B.dword[0]\n" "/// W[4] := __B.dword[1]\n" "/// W[5] := __B.dword[2]\n" "/// W[6] := __B.dword[3]\n" "/// W[10] := __C.dword[0]\n" "/// W[11] := __C.dword[1]\n" "/// W[12] := __C.dword[2]\n" "/// W[13] := __C.dword[3]\n" "/// W[16] := ROL32(W[3], 7) ^ W[10] ^ WTMP[0]\n" "/// W[17] := ROL32(W[4], 7) ^ W[11] ^ WTMP[1]\n" "/// W[18] := ROL32(W[5], 7) ^ W[12] ^ WTMP[2]\n" "/// W[19] := ROL32(W[6], 7) ^ W[13] ^ WTMP[3]\n" "/// W[19] := W[19] ^ ROL32(W[16], 6) ^ ROL32(W[16], 15) ^ ROL32(W[16], 30)\n" "/// dst.dword[0] := W[16]\n" "/// dst.dword[1] := W[17]\n" "/// dst.dword[2] := W[18]\n" "/// dst.dword[3] := W[19]\n" "/// dst[MAX:128] := 0\n" "/// \\endcode\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_sm3msg2_epi32(__m128i __A,\n" " __m128i __B,\n" " __m128i __C) {\n" " return (__m128i)__builtin_ia32_vsm3msg2((__v4su)__A, (__v4su)__B,\n" " (__v4su)__C);\n" "}\n" "\n" "/// This intrinsic performs two rounds of SM3 operation using initial SM3 state\n" "/// (C, D, G, H) from \\a __A, an initial SM3 states (A, B, E, F)\n" "/// from \\a __B and a 
pre-computed words from the \\a __C. \\a __A with\n" "/// initial SM3 state of (C, D, G, H) assumes input of non-rotated left\n" "/// variables from previous state. The updated SM3 state (A, B, E, F) is\n" "/// written to \\a __A. The \\a imm8 should contain the even round number\n" "/// for the first of the two rounds computed by this instruction. The\n" "/// computation masks the \\a imm8 value by AND’ing it with 0x3E so that only\n" "/// even round numbers from 0 through 62 are used for this operation. The\n" "/// calculated results are stored in \\a dst.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm_sm3rnds2_epi32(__m128i __A, __m128i __B, __m128i __C, const int\n" "/// imm8) \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VSM3RNDS2 instruction.\n" "///\n" "/// \\param __A\n" "/// A 128-bit vector of [4 x int].\n" "/// \\param __B\n" "/// A 128-bit vector of [4 x int].\n" "/// \\param __C\n" "/// A 128-bit vector of [4 x int].\n" "/// \\param imm8\n" "/// A 8-bit constant integer.\n" "/// \\returns\n" "/// A 128-bit vector of [4 x int].\n" "///\n" "/// \\code{.operation}\n" "/// DEFINE ROL32(dword, n) {\n" "/// count := n % 32\n" "/// dest := (dword << count) | (dword >> (32-count))\n" "/// RETURN dest\n" "/// }\n" "/// DEFINE P0(dword) {\n" "/// RETURN dword ^ ROL32(dword, 9) ^ ROL32(dword, 17)\n" "/// }\n" "/// DEFINE FF(x,y,z, round){\n" "/// IF round < 16\n" "/// RETURN (x ^ y ^ z)\n" "/// ELSE\n" "/// RETURN (x & y) | (x & z) | (y & z)\n" "/// FI\n" "/// }\n" "/// DEFINE GG(x, y, z, round){\n" "/// IF round < 16\n" "/// RETURN (x ^ y ^ z)\n" "/// ELSE\n" "/// RETURN (x & y) | (~x & z)\n" "/// FI\n" "/// }\n" "/// A[0] := __B.dword[3]\n" "/// B[0] := __B.dword[2]\n" "/// C[0] := __A.dword[3]\n" "/// D[0] := __A.dword[2]\n" "/// E[0] := __B.dword[1]\n" "/// F[0] := __B.dword[0]\n" "/// G[0] := __A.dword[1]\n" "/// H[0] := __A.dword[0]\n" "/// W[0] := __C.dword[0]\n" "/// W[1] := __C.dword[1]\n" "/// W[4] := 
__C.dword[2]\n" "/// W[5] := __C.dword[3]\n" "/// C[0] := ROL32(C[0], 9)\n" "/// D[0] := ROL32(D[0], 9)\n" "/// G[0] := ROL32(G[0], 19)\n" "/// H[0] := ROL32(H[0], 19)\n" "/// ROUND := __D & 0x3E\n" "/// IF ROUND < 16\n" "/// CONST := 0x79CC4519\n" "/// ELSE\n" "/// CONST := 0x7A879D8A\n" "/// FI\n" "/// CONST := ROL32(CONST,ROUND)\n" "/// FOR i:= 0 to 1\n" "/// S1 := ROL32((ROL32(A[i], 12) + E[i] + CONST), 7)\n" "/// S2 := S1 ^ ROL32(A[i], 12)\n" "/// T1 := FF(A[i], B[i], C[i], ROUND) + D[i] + S2 + (W[i] ^ W[i+4])\n" "/// T2 := GG(E[i], F[i], G[i], ROUND) + H[i] + S1 + W[i]\n" "/// D[i+1] := C[i]\n" "/// C[i+1] := ROL32(B[i],9)\n" "/// B[i+1] := A[i]\n" "/// A[i+1] := T1\n" "/// H[i+1] := G[i]\n" "/// G[i+1] := ROL32(F[i], 19)\n" "/// F[i+1] := E[i]\n" "/// E[i+1] := P0(T2)\n" "/// CONST := ROL32(CONST, 1)\n" "/// ENDFOR\n" "/// dst.dword[3] := A[2]\n" "/// dst.dword[2] := B[2]\n" "/// dst.dword[1] := E[2]\n" "/// dst.dword[0] := F[2]\n" "/// dst[MAX:128] := 0\n" "/// \\endcode\n" "#define _mm_sm3rnds2_epi32(A, B, C, D) \\\n" " (__m128i) __builtin_ia32_vsm3rnds2((__v4su)A, (__v4su)B, (__v4su)C, (int)D)\n" "\n" "#undef __DEFAULT_FN_ATTRS128\n" "\n" "#endif // __SM3INTRIN_H\n" "" } , { "/builtins/sm4intrin.h" , "/*===--------------- sm4intrin.h - SM4 intrinsics -----------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif // __IMMINTRIN_H\n" "\n" "#ifndef __SM4INTRIN_H\n" "#define __SM4INTRIN_H\n" "\n" "/// This intrinsic performs four rounds of SM4 key expansion. The intrinsic\n" "/// operates on independent 128-bit lanes. 
The calculated results are\n" "/// stored in \\a dst.\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm_sm4key4_epi32(__m128i __A, __m128i __B)\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VSM4KEY4 instruction.\n" "///\n" "/// \\param __A\n" "/// A 128-bit vector of [4 x int].\n" "/// \\param __B\n" "/// A 128-bit vector of [4 x int].\n" "/// \\returns\n" "/// A 128-bit vector of [4 x int].\n" "///\n" "/// \\code{.operation}\n" "/// DEFINE ROL32(dword, n) {\n" "/// count := n % 32\n" "/// dest := (dword << count) | (dword >> (32-count))\n" "/// RETURN dest\n" "/// }\n" "/// DEFINE SBOX_BYTE(dword, i) {\n" "/// RETURN sbox[dword.byte[i]]\n" "/// }\n" "/// DEFINE lower_t(dword) {\n" "/// tmp.byte[0] := SBOX_BYTE(dword, 0)\n" "/// tmp.byte[1] := SBOX_BYTE(dword, 1)\n" "/// tmp.byte[2] := SBOX_BYTE(dword, 2)\n" "/// tmp.byte[3] := SBOX_BYTE(dword, 3)\n" "/// RETURN tmp\n" "/// }\n" "/// DEFINE L_KEY(dword) {\n" "/// RETURN dword ^ ROL32(dword, 13) ^ ROL32(dword, 23)\n" "/// }\n" "/// DEFINE T_KEY(dword) {\n" "/// RETURN L_KEY(lower_t(dword))\n" "/// }\n" "/// DEFINE F_KEY(X0, X1, X2, X3, round_key) {\n" "/// RETURN X0 ^ T_KEY(X1 ^ X2 ^ X3 ^ round_key)\n" "/// }\n" "/// FOR i:= 0 to 0\n" "/// P[0] := __B.xmm[i].dword[0]\n" "/// P[1] := __B.xmm[i].dword[1]\n" "/// P[2] := __B.xmm[i].dword[2]\n" "/// P[3] := __B.xmm[i].dword[3]\n" "/// C[0] := F_KEY(P[0], P[1], P[2], P[3], __A.xmm[i].dword[0])\n" "/// C[1] := F_KEY(P[1], P[2], P[3], C[0], __A.xmm[i].dword[1])\n" "/// C[2] := F_KEY(P[2], P[3], C[0], C[1], __A.xmm[i].dword[2])\n" "/// C[3] := F_KEY(P[3], C[0], C[1], C[2], __A.xmm[i].dword[3])\n" "/// DEST.xmm[i].dword[0] := C[0]\n" "/// DEST.xmm[i].dword[1] := C[1]\n" "/// DEST.xmm[i].dword[2] := C[2]\n" "/// DEST.xmm[i].dword[3] := C[3]\n" "/// ENDFOR\n" "/// DEST[MAX:128] := 0\n" "/// \\endcode\n" "#define _mm_sm4key4_epi32(A, B) \\\n" " (__m128i) __builtin_ia32_vsm4key4128((__v4su)A, (__v4su)B)\n" "\n" "/// This intrinsic 
performs four rounds of SM4 key expansion. The intrinsic\n" "/// operates on independent 128-bit lanes. The calculated results are\n" "/// stored in \\a dst.\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256i _mm256_sm4key4_epi32(__m256i __A, __m256i __B)\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VSM4KEY4 instruction.\n" "///\n" "/// \\param __A\n" "/// A 256-bit vector of [8 x int].\n" "/// \\param __B\n" "/// A 256-bit vector of [8 x int].\n" "/// \\returns\n" "/// A 256-bit vector of [8 x int].\n" "///\n" "/// \\code{.operation}\n" "/// DEFINE ROL32(dword, n) {\n" "/// count := n % 32\n" "/// dest := (dword << count) | (dword >> (32-count))\n" "/// RETURN dest\n" "/// }\n" "/// DEFINE SBOX_BYTE(dword, i) {\n" "/// RETURN sbox[dword.byte[i]]\n" "/// }\n" "/// DEFINE lower_t(dword) {\n" "/// tmp.byte[0] := SBOX_BYTE(dword, 0)\n" "/// tmp.byte[1] := SBOX_BYTE(dword, 1)\n" "/// tmp.byte[2] := SBOX_BYTE(dword, 2)\n" "/// tmp.byte[3] := SBOX_BYTE(dword, 3)\n" "/// RETURN tmp\n" "/// }\n" "/// DEFINE L_KEY(dword) {\n" "/// RETURN dword ^ ROL32(dword, 13) ^ ROL32(dword, 23)\n" "/// }\n" "/// DEFINE T_KEY(dword) {\n" "/// RETURN L_KEY(lower_t(dword))\n" "/// }\n" "/// DEFINE F_KEY(X0, X1, X2, X3, round_key) {\n" "/// RETURN X0 ^ T_KEY(X1 ^ X2 ^ X3 ^ round_key)\n" "/// }\n" "/// FOR i:= 0 to 1\n" "/// P[0] := __B.xmm[i].dword[0]\n" "/// P[1] := __B.xmm[i].dword[1]\n" "/// P[2] := __B.xmm[i].dword[2]\n" "/// P[3] := __B.xmm[i].dword[3]\n" "/// C[0] := F_KEY(P[0], P[1], P[2], P[3], __A.xmm[i].dword[0])\n" "/// C[1] := F_KEY(P[1], P[2], P[3], C[0], __A.xmm[i].dword[1])\n" "/// C[2] := F_KEY(P[2], P[3], C[0], C[1], __A.xmm[i].dword[2])\n" "/// C[3] := F_KEY(P[3], C[0], C[1], C[2], __A.xmm[i].dword[3])\n" "/// DEST.xmm[i].dword[0] := C[0]\n" "/// DEST.xmm[i].dword[1] := C[1]\n" "/// DEST.xmm[i].dword[2] := C[2]\n" "/// DEST.xmm[i].dword[3] := C[3]\n" "/// ENDFOR\n" "/// DEST[MAX:256] := 0\n" "/// \\endcode\n" "#define 
_mm256_sm4key4_epi32(A, B) \\\n" " (__m256i) __builtin_ia32_vsm4key4256((__v8su)A, (__v8su)B)\n" "\n" "/// This intrinisc performs four rounds of SM4 encryption. The intrinisc\n" "/// operates on independent 128-bit lanes. The calculated results are\n" "/// stored in \\a dst.\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm_sm4rnds4_epi32(__m128i __A, __m128i __B)\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VSM4RNDS4 instruction.\n" "///\n" "/// \\param __A\n" "/// A 128-bit vector of [4 x int].\n" "/// \\param __B\n" "/// A 128-bit vector of [4 x int].\n" "/// \\returns\n" "/// A 128-bit vector of [4 x int].\n" "///\n" "/// \\code{.operation}\n" "/// DEFINE ROL32(dword, n) {\n" "/// count := n % 32\n" "/// dest := (dword << count) | (dword >> (32-count))\n" "/// RETURN dest\n" "/// }\n" "/// DEFINE lower_t(dword) {\n" "/// tmp.byte[0] := SBOX_BYTE(dword, 0)\n" "/// tmp.byte[1] := SBOX_BYTE(dword, 1)\n" "/// tmp.byte[2] := SBOX_BYTE(dword, 2)\n" "/// tmp.byte[3] := SBOX_BYTE(dword, 3)\n" "/// RETURN tmp\n" "/// }\n" "/// DEFINE L_RND(dword) {\n" "/// tmp := dword\n" "/// tmp := tmp ^ ROL32(dword, 2)\n" "/// tmp := tmp ^ ROL32(dword, 10)\n" "/// tmp := tmp ^ ROL32(dword, 18)\n" "/// tmp := tmp ^ ROL32(dword, 24)\n" "/// RETURN tmp\n" "/// }\n" "/// DEFINE T_RND(dword) {\n" "/// RETURN L_RND(lower_t(dword))\n" "/// }\n" "/// DEFINE F_RND(X0, X1, X2, X3, round_key) {\n" "/// RETURN X0 ^ T_RND(X1 ^ X2 ^ X3 ^ round_key)\n" "/// }\n" "/// FOR i:= 0 to 0\n" "/// P[0] := __B.xmm[i].dword[0]\n" "/// P[1] := __B.xmm[i].dword[1]\n" "/// P[2] := __B.xmm[i].dword[2]\n" "/// P[3] := __B.xmm[i].dword[3]\n" "/// C[0] := F_RND(P[0], P[1], P[2], P[3], __A.xmm[i].dword[0])\n" "/// C[1] := F_RND(P[1], P[2], P[3], C[0], __A.xmm[i].dword[1])\n" "/// C[2] := F_RND(P[2], P[3], C[0], C[1], __A.xmm[i].dword[2])\n" "/// C[3] := F_RND(P[3], C[0], C[1], C[2], __A.xmm[i].dword[3])\n" "/// DEST.xmm[i].dword[0] := C[0]\n" "/// DEST.xmm[i].dword[1] := 
C[1]\n" "/// DEST.xmm[i].dword[2] := C[2]\n" "/// DEST.xmm[i].dword[3] := C[3]\n" "/// ENDFOR\n" "/// DEST[MAX:128] := 0\n" "/// \\endcode\n" "#define _mm_sm4rnds4_epi32(A, B) \\\n" " (__m128i) __builtin_ia32_vsm4rnds4128((__v4su)A, (__v4su)B)\n" "\n" "/// This intrinisc performs four rounds of SM4 encryption. The intrinisc\n" "/// operates on independent 128-bit lanes. The calculated results are\n" "/// stored in \\a dst.\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m256i _mm256_sm4rnds4_epi32(__m256i __A, __m256i __B)\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c VSM4RNDS4 instruction.\n" "///\n" "/// \\param __A\n" "/// A 256-bit vector of [8 x int].\n" "/// \\param __B\n" "/// A 256-bit vector of [8 x int].\n" "/// \\returns\n" "/// A 256-bit vector of [8 x int].\n" "///\n" "/// \\code{.operation}\n" "/// DEFINE ROL32(dword, n) {\n" "/// count := n % 32\n" "/// dest := (dword << count) | (dword >> (32-count))\n" "/// RETURN dest\n" "/// }\n" "/// DEFINE lower_t(dword) {\n" "/// tmp.byte[0] := SBOX_BYTE(dword, 0)\n" "/// tmp.byte[1] := SBOX_BYTE(dword, 1)\n" "/// tmp.byte[2] := SBOX_BYTE(dword, 2)\n" "/// tmp.byte[3] := SBOX_BYTE(dword, 3)\n" "/// RETURN tmp\n" "/// }\n" "/// DEFINE L_RND(dword) {\n" "/// tmp := dword\n" "/// tmp := tmp ^ ROL32(dword, 2)\n" "/// tmp := tmp ^ ROL32(dword, 10)\n" "/// tmp := tmp ^ ROL32(dword, 18)\n" "/// tmp := tmp ^ ROL32(dword, 24)\n" "/// RETURN tmp\n" "/// }\n" "/// DEFINE T_RND(dword) {\n" "/// RETURN L_RND(lower_t(dword))\n" "/// }\n" "/// DEFINE F_RND(X0, X1, X2, X3, round_key) {\n" "/// RETURN X0 ^ T_RND(X1 ^ X2 ^ X3 ^ round_key)\n" "/// }\n" "/// FOR i:= 0 to 0\n" "/// P[0] := __B.xmm[i].dword[0]\n" "/// P[1] := __B.xmm[i].dword[1]\n" "/// P[2] := __B.xmm[i].dword[2]\n" "/// P[3] := __B.xmm[i].dword[3]\n" "/// C[0] := F_RND(P[0], P[1], P[2], P[3], __A.xmm[i].dword[0])\n" "/// C[1] := F_RND(P[1], P[2], P[3], C[0], __A.xmm[i].dword[1])\n" "/// C[2] := F_RND(P[2], P[3], C[0], C[1], 
__A.xmm[i].dword[2])\n" "/// C[3] := F_RND(P[3], C[0], C[1], C[2], __A.xmm[i].dword[3])\n" "/// DEST.xmm[i].dword[0] := C[0]\n" "/// DEST.xmm[i].dword[1] := C[1]\n" "/// DEST.xmm[i].dword[2] := C[2]\n" "/// DEST.xmm[i].dword[3] := C[3]\n" "/// ENDFOR\n" "/// DEST[MAX:256] := 0\n" "/// \\endcode\n" "#define _mm256_sm4rnds4_epi32(A, B) \\\n" " (__m256i) __builtin_ia32_vsm4rnds4256((__v8su)A, (__v8su)B)\n" "\n" "#endif // __SM4INTRIN_H\n" "" } , { "/builtins/smmintrin.h" , "/*===---- smmintrin.h - SSE4 intrinsics ------------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __SMMINTRIN_H\n" "#define __SMMINTRIN_H\n" "\n" "#if !defined(__i386__) && !defined(__x86_64__)\n" "#error \"This header is only meant to be used on x86 and x64 architecture\"\n" "#endif\n" "\n" "#include \n" "\n" "/* Define the default attributes for the functions in this file. */\n" "#define __DEFAULT_FN_ATTRS \\\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"sse4.1\"), \\\n" " __min_vector_width__(128)))\n" "\n" "/* SSE4 Rounding macros. 
*/\n" "#define _MM_FROUND_TO_NEAREST_INT 0x00\n" "#define _MM_FROUND_TO_NEG_INF 0x01\n" "#define _MM_FROUND_TO_POS_INF 0x02\n" "#define _MM_FROUND_TO_ZERO 0x03\n" "#define _MM_FROUND_CUR_DIRECTION 0x04\n" "\n" "#define _MM_FROUND_RAISE_EXC 0x00\n" "#define _MM_FROUND_NO_EXC 0x08\n" "\n" "#define _MM_FROUND_NINT (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEAREST_INT)\n" "#define _MM_FROUND_FLOOR (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEG_INF)\n" "#define _MM_FROUND_CEIL (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_POS_INF)\n" "#define _MM_FROUND_TRUNC (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_ZERO)\n" "#define _MM_FROUND_RINT (_MM_FROUND_RAISE_EXC | _MM_FROUND_CUR_DIRECTION)\n" "#define _MM_FROUND_NEARBYINT (_MM_FROUND_NO_EXC | _MM_FROUND_CUR_DIRECTION)\n" "\n" "/// Rounds up each element of the 128-bit vector of [4 x float] to an\n" "/// integer and returns the rounded values in a 128-bit vector of\n" "/// [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128 _mm_ceil_ps(__m128 X);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VROUNDPS / ROUNDPS instruction.\n" "///\n" "/// \\param X\n" "/// A 128-bit vector of [4 x float] values to be rounded up.\n" "/// \\returns A 128-bit vector of [4 x float] containing the rounded values.\n" "#define _mm_ceil_ps(X) _mm_round_ps((X), _MM_FROUND_CEIL)\n" "\n" "/// Rounds up each element of the 128-bit vector of [2 x double] to an\n" "/// integer and returns the rounded values in a 128-bit vector of\n" "/// [2 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128d _mm_ceil_pd(__m128d X);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VROUNDPD / ROUNDPD instruction.\n" "///\n" "/// \\param X\n" "/// A 128-bit vector of [2 x double] values to be rounded up.\n" "/// \\returns A 128-bit vector of [2 x double] containing the rounded values.\n" "#define _mm_ceil_pd(X) _mm_round_pd((X), _MM_FROUND_CEIL)\n" "\n" "/// Copies three upper elements of the first 
128-bit vector operand to\n" "/// the corresponding three upper elements of the 128-bit result vector of\n" "/// [4 x float]. Rounds up the lowest element of the second 128-bit vector\n" "/// operand to an integer and copies it to the lowest element of the 128-bit\n" "/// result vector of [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128 _mm_ceil_ss(__m128 X, __m128 Y);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VROUNDSS / ROUNDSS instruction.\n" "///\n" "/// \\param X\n" "/// A 128-bit vector of [4 x float]. The values stored in bits [127:32] are\n" "/// copied to the corresponding bits of the result.\n" "/// \\param Y\n" "/// A 128-bit vector of [4 x float]. The value stored in bits [31:0] is\n" "/// rounded up to the nearest integer and copied to the corresponding bits\n" "/// of the result.\n" "/// \\returns A 128-bit vector of [4 x float] containing the copied and rounded\n" "/// values.\n" "#define _mm_ceil_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_CEIL)\n" "\n" "/// Copies the upper element of the first 128-bit vector operand to the\n" "/// corresponding upper element of the 128-bit result vector of [2 x double].\n" "/// Rounds up the lower element of the second 128-bit vector operand to an\n" "/// integer and copies it to the lower element of the 128-bit result vector\n" "/// of [2 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128d _mm_ceil_sd(__m128d X, __m128d Y);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VROUNDSD / ROUNDSD instruction.\n" "///\n" "/// \\param X\n" "/// A 128-bit vector of [2 x double]. The value stored in bits [127:64] is\n" "/// copied to the corresponding bits of the result.\n" "/// \\param Y\n" "/// A 128-bit vector of [2 x double]. 
The value stored in bits [63:0] is\n" "/// rounded up to the nearest integer and copied to the corresponding bits\n" "/// of the result.\n" "/// \\returns A 128-bit vector of [2 x double] containing the copied and rounded\n" "/// values.\n" "#define _mm_ceil_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_CEIL)\n" "\n" "/// Rounds down each element of the 128-bit vector of [4 x float] to an\n" "/// an integer and returns the rounded values in a 128-bit vector of\n" "/// [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128 _mm_floor_ps(__m128 X);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VROUNDPS / ROUNDPS instruction.\n" "///\n" "/// \\param X\n" "/// A 128-bit vector of [4 x float] values to be rounded down.\n" "/// \\returns A 128-bit vector of [4 x float] containing the rounded values.\n" "#define _mm_floor_ps(X) _mm_round_ps((X), _MM_FROUND_FLOOR)\n" "\n" "/// Rounds down each element of the 128-bit vector of [2 x double] to an\n" "/// integer and returns the rounded values in a 128-bit vector of\n" "/// [2 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128d _mm_floor_pd(__m128d X);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VROUNDPD / ROUNDPD instruction.\n" "///\n" "/// \\param X\n" "/// A 128-bit vector of [2 x double].\n" "/// \\returns A 128-bit vector of [2 x double] containing the rounded values.\n" "#define _mm_floor_pd(X) _mm_round_pd((X), _MM_FROUND_FLOOR)\n" "\n" "/// Copies three upper elements of the first 128-bit vector operand to\n" "/// the corresponding three upper elements of the 128-bit result vector of\n" "/// [4 x float]. 
Rounds down the lowest element of the second 128-bit vector\n" "/// operand to an integer and copies it to the lowest element of the 128-bit\n" "/// result vector of [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128 _mm_floor_ss(__m128 X, __m128 Y);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VROUNDSS / ROUNDSS instruction.\n" "///\n" "/// \\param X\n" "/// A 128-bit vector of [4 x float]. The values stored in bits [127:32] are\n" "/// copied to the corresponding bits of the result.\n" "/// \\param Y\n" "/// A 128-bit vector of [4 x float]. The value stored in bits [31:0] is\n" "/// rounded down to the nearest integer and copied to the corresponding bits\n" "/// of the result.\n" "/// \\returns A 128-bit vector of [4 x float] containing the copied and rounded\n" "/// values.\n" "#define _mm_floor_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_FLOOR)\n" "\n" "/// Copies the upper element of the first 128-bit vector operand to the\n" "/// corresponding upper element of the 128-bit result vector of [2 x double].\n" "/// Rounds down the lower element of the second 128-bit vector operand to an\n" "/// integer and copies it to the lower element of the 128-bit result vector\n" "/// of [2 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128d _mm_floor_sd(__m128d X, __m128d Y);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VROUNDSD / ROUNDSD instruction.\n" "///\n" "/// \\param X\n" "/// A 128-bit vector of [2 x double]. The value stored in bits [127:64] is\n" "/// copied to the corresponding bits of the result.\n" "/// \\param Y\n" "/// A 128-bit vector of [2 x double]. 
The value stored in bits [63:0] is\n" "/// rounded down to the nearest integer and copied to the corresponding bits\n" "/// of the result.\n" "/// \\returns A 128-bit vector of [2 x double] containing the copied and rounded\n" "/// values.\n" "#define _mm_floor_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_FLOOR)\n" "\n" "/// Rounds each element of the 128-bit vector of [4 x float] to an\n" "/// integer value according to the rounding control specified by the second\n" "/// argument and returns the rounded values in a 128-bit vector of\n" "/// [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128 _mm_round_ps(__m128 X, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VROUNDPS / ROUNDPS instruction.\n" "///\n" "/// \\param X\n" "/// A 128-bit vector of [4 x float].\n" "/// \\param M\n" "/// An integer value that specifies the rounding operation. \\n\n" "/// Bits [7:4] are reserved. \\n\n" "/// Bit [3] is a precision exception value: \\n\n" "/// 0: A normal PE exception is used \\n\n" "/// 1: The PE field is not updated \\n\n" "/// Bit [2] is the rounding control source: \\n\n" "/// 0: Use bits [1:0] of \\a M \\n\n" "/// 1: Use the current MXCSR setting \\n\n" "/// Bits [1:0] contain the rounding control definition: \\n\n" "/// 00: Nearest \\n\n" "/// 01: Downward (toward negative infinity) \\n\n" "/// 10: Upward (toward positive infinity) \\n\n" "/// 11: Truncated\n" "/// \\returns A 128-bit vector of [4 x float] containing the rounded values.\n" "#define _mm_round_ps(X, M) \\\n" " ((__m128)__builtin_ia32_roundps((__v4sf)(__m128)(X), (M)))\n" "\n" "/// Copies three upper elements of the first 128-bit vector operand to\n" "/// the corresponding three upper elements of the 128-bit result vector of\n" "/// [4 x float]. 
Rounds the lowest element of the second 128-bit vector\n" "/// operand to an integer value according to the rounding control specified\n" "/// by the third argument and copies it to the lowest element of the 128-bit\n" "/// result vector of [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128 _mm_round_ss(__m128 X, __m128 Y, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VROUNDSS / ROUNDSS instruction.\n" "///\n" "/// \\param X\n" "/// A 128-bit vector of [4 x float]. The values stored in bits [127:32] are\n" "/// copied to the corresponding bits of the result.\n" "/// \\param Y\n" "/// A 128-bit vector of [4 x float]. The value stored in bits [31:0] is\n" "/// rounded to the nearest integer using the specified rounding control and\n" "/// copied to the corresponding bits of the result.\n" "/// \\param M\n" "/// An integer value that specifies the rounding operation. \\n\n" "/// Bits [7:4] are reserved. \\n\n" "/// Bit [3] is a precision exception value: \\n\n" "/// 0: A normal PE exception is used \\n\n" "/// 1: The PE field is not updated \\n\n" "/// Bit [2] is the rounding control source: \\n\n" "/// 0: Use bits [1:0] of \\a M \\n\n" "/// 1: Use the current MXCSR setting \\n\n" "/// Bits [1:0] contain the rounding control definition: \\n\n" "/// 00: Nearest \\n\n" "/// 01: Downward (toward negative infinity) \\n\n" "/// 10: Upward (toward positive infinity) \\n\n" "/// 11: Truncated\n" "/// \\returns A 128-bit vector of [4 x float] containing the copied and rounded\n" "/// values.\n" "#define _mm_round_ss(X, Y, M) \\\n" " ((__m128)__builtin_ia32_roundss((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), \\\n" " (M)))\n" "\n" "/// Rounds each element of the 128-bit vector of [2 x double] to an\n" "/// integer value according to the rounding control specified by the second\n" "/// argument and returns the rounded values in a 128-bit vector of\n" "/// [2 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// 
\\code\n" "/// __m128d _mm_round_pd(__m128d X, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VROUNDPD / ROUNDPD instruction.\n" "///\n" "/// \\param X\n" "/// A 128-bit vector of [2 x double].\n" "/// \\param M\n" "/// An integer value that specifies the rounding operation. \\n\n" "/// Bits [7:4] are reserved. \\n\n" "/// Bit [3] is a precision exception value: \\n\n" "/// 0: A normal PE exception is used \\n\n" "/// 1: The PE field is not updated \\n\n" "/// Bit [2] is the rounding control source: \\n\n" "/// 0: Use bits [1:0] of \\a M \\n\n" "/// 1: Use the current MXCSR setting \\n\n" "/// Bits [1:0] contain the rounding control definition: \\n\n" "/// 00: Nearest \\n\n" "/// 01: Downward (toward negative infinity) \\n\n" "/// 10: Upward (toward positive infinity) \\n\n" "/// 11: Truncated\n" "/// \\returns A 128-bit vector of [2 x double] containing the rounded values.\n" "#define _mm_round_pd(X, M) \\\n" " ((__m128d)__builtin_ia32_roundpd((__v2df)(__m128d)(X), (M)))\n" "\n" "/// Copies the upper element of the first 128-bit vector operand to the\n" "/// corresponding upper element of the 128-bit result vector of [2 x double].\n" "/// Rounds the lower element of the second 128-bit vector operand to an\n" "/// integer value according to the rounding control specified by the third\n" "/// argument and copies it to the lower element of the 128-bit result vector\n" "/// of [2 x double].\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128d _mm_round_sd(__m128d X, __m128d Y, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VROUNDSD / ROUNDSD instruction.\n" "///\n" "/// \\param X\n" "/// A 128-bit vector of [2 x double]. The value stored in bits [127:64] is\n" "/// copied to the corresponding bits of the result.\n" "/// \\param Y\n" "/// A 128-bit vector of [2 x double]. 
The value stored in bits [63:0] is\n" "/// rounded to the nearest integer using the specified rounding control and\n" "/// copied to the corresponding bits of the result.\n" "/// \\param M\n" "/// An integer value that specifies the rounding operation. \\n\n" "/// Bits [7:4] are reserved. \\n\n" "/// Bit [3] is a precision exception value: \\n\n" "/// 0: A normal PE exception is used \\n\n" "/// 1: The PE field is not updated \\n\n" "/// Bit [2] is the rounding control source: \\n\n" "/// 0: Use bits [1:0] of \\a M \\n\n" "/// 1: Use the current MXCSR setting \\n\n" "/// Bits [1:0] contain the rounding control definition: \\n\n" "/// 00: Nearest \\n\n" "/// 01: Downward (toward negative infinity) \\n\n" "/// 10: Upward (toward positive infinity) \\n\n" "/// 11: Truncated\n" "/// \\returns A 128-bit vector of [2 x double] containing the copied and rounded\n" "/// values.\n" "#define _mm_round_sd(X, Y, M) \\\n" " ((__m128d)__builtin_ia32_roundsd((__v2df)(__m128d)(X), (__v2df)(__m128d)(Y), \\\n" " (M)))\n" "\n" "/* SSE4 Packed Blending Intrinsics. */\n" "/// Returns a 128-bit vector of [2 x double] where the values are\n" "/// selected from either the first or second operand as specified by the\n" "/// third operand, the control mask.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128d _mm_blend_pd(__m128d V1, __m128d V2, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VBLENDPD / BLENDPD instruction.\n" "///\n" "/// \\param V1\n" "/// A 128-bit vector of [2 x double].\n" "/// \\param V2\n" "/// A 128-bit vector of [2 x double].\n" "/// \\param M\n" "/// An immediate integer operand, with mask bits [1:0] specifying how the\n" "/// values are to be copied. The position of the mask bit corresponds to the\n" "/// index of a copied value. 
When a mask bit is 0, the corresponding 64-bit\n" "/// element in operand \\a V1 is copied to the same position in the result.\n" "/// When a mask bit is 1, the corresponding 64-bit element in operand \\a V2\n" "/// is copied to the same position in the result.\n" "/// \\returns A 128-bit vector of [2 x double] containing the copied values.\n" "#define _mm_blend_pd(V1, V2, M) \\\n" " ((__m128d)__builtin_ia32_blendpd((__v2df)(__m128d)(V1), \\\n" " (__v2df)(__m128d)(V2), (int)(M)))\n" "\n" "/// Returns a 128-bit vector of [4 x float] where the values are selected\n" "/// from either the first or second operand as specified by the third\n" "/// operand, the control mask.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128 _mm_blend_ps(__m128 V1, __m128 V2, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VBLENDPS / BLENDPS instruction.\n" "///\n" "/// \\param V1\n" "/// A 128-bit vector of [4 x float].\n" "/// \\param V2\n" "/// A 128-bit vector of [4 x float].\n" "/// \\param M\n" "/// An immediate integer operand, with mask bits [3:0] specifying how the\n" "/// values are to be copied. The position of the mask bit corresponds to the\n" "/// index of a copied value. 
When a mask bit is 0, the corresponding 32-bit\n" "/// element in operand \\a V1 is copied to the same position in the result.\n" "/// When a mask bit is 1, the corresponding 32-bit element in operand \\a V2\n" "/// is copied to the same position in the result.\n" "/// \\returns A 128-bit vector of [4 x float] containing the copied values.\n" "#define _mm_blend_ps(V1, V2, M) \\\n" " ((__m128)__builtin_ia32_blendps((__v4sf)(__m128)(V1), (__v4sf)(__m128)(V2), \\\n" " (int)(M)))\n" "\n" "/// Returns a 128-bit vector of [2 x double] where the values are\n" "/// selected from either the first or second operand as specified by the\n" "/// third operand, the control mask.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VBLENDVPD / BLENDVPD instruction.\n" "///\n" "/// \\param __V1\n" "/// A 128-bit vector of [2 x double].\n" "/// \\param __V2\n" "/// A 128-bit vector of [2 x double].\n" "/// \\param __M\n" "/// A 128-bit vector operand, with mask bits 127 and 63 specifying how the\n" "/// values are to be copied. The position of the mask bit corresponds to the\n" "/// most significant bit of a copied value. When a mask bit is 0, the\n" "/// corresponding 64-bit element in operand \\a __V1 is copied to the same\n" "/// position in the result. 
When a mask bit is 1, the corresponding 64-bit\n" "/// element in operand \\a __V2 is copied to the same position in the result.\n" "/// \\returns A 128-bit vector of [2 x double] containing the copied values.\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_blendv_pd(__m128d __V1,\n" " __m128d __V2,\n" " __m128d __M) {\n" " return (__m128d)__builtin_ia32_blendvpd((__v2df)__V1, (__v2df)__V2,\n" " (__v2df)__M);\n" "}\n" "\n" "/// Returns a 128-bit vector of [4 x float] where the values are\n" "/// selected from either the first or second operand as specified by the\n" "/// third operand, the control mask.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VBLENDVPS / BLENDVPS instruction.\n" "///\n" "/// \\param __V1\n" "/// A 128-bit vector of [4 x float].\n" "/// \\param __V2\n" "/// A 128-bit vector of [4 x float].\n" "/// \\param __M\n" "/// A 128-bit vector operand, with mask bits 127, 95, 63, and 31 specifying\n" "/// how the values are to be copied. The position of the mask bit corresponds\n" "/// to the most significant bit of a copied value. When a mask bit is 0, the\n" "/// corresponding 32-bit element in operand \\a __V1 is copied to the same\n" "/// position in the result. 
When a mask bit is 1, the corresponding 32-bit\n" "/// element in operand \\a __V2 is copied to the same position in the result.\n" "/// \\returns A 128-bit vector of [4 x float] containing the copied values.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_blendv_ps(__m128 __V1,\n" " __m128 __V2,\n" " __m128 __M) {\n" " return (__m128)__builtin_ia32_blendvps((__v4sf)__V1, (__v4sf)__V2,\n" " (__v4sf)__M);\n" "}\n" "\n" "/// Returns a 128-bit vector of [16 x i8] where the values are selected\n" "/// from either of the first or second operand as specified by the third\n" "/// operand, the control mask.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPBLENDVB / PBLENDVB instruction.\n" "///\n" "/// \\param __V1\n" "/// A 128-bit vector of [16 x i8].\n" "/// \\param __V2\n" "/// A 128-bit vector of [16 x i8].\n" "/// \\param __M\n" "/// A 128-bit vector operand, with mask bits 127, 119, 111...7 specifying\n" "/// how the values are to be copied. The position of the mask bit corresponds\n" "/// to the most significant bit of a copied value. When a mask bit is 0, the\n" "/// corresponding 8-bit element in operand \\a __V1 is copied to the same\n" "/// position in the result. 
When a mask bit is 1, the corresponding 8-bit\n" "/// element in operand \\a __V2 is copied to the same position in the result.\n" "/// \\returns A 128-bit vector of [16 x i8] containing the copied values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_blendv_epi8(__m128i __V1,\n" " __m128i __V2,\n" " __m128i __M) {\n" " return (__m128i)__builtin_ia32_pblendvb128((__v16qi)__V1, (__v16qi)__V2,\n" " (__v16qi)__M);\n" "}\n" "\n" "/// Returns a 128-bit vector of [8 x i16] where the values are selected\n" "/// from either of the first or second operand as specified by the third\n" "/// operand, the control mask.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm_blend_epi16(__m128i V1, __m128i V2, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VPBLENDW / PBLENDW instruction.\n" "///\n" "/// \\param V1\n" "/// A 128-bit vector of [8 x i16].\n" "/// \\param V2\n" "/// A 128-bit vector of [8 x i16].\n" "/// \\param M\n" "/// An immediate integer operand, with mask bits [7:0] specifying how the\n" "/// values are to be copied. The position of the mask bit corresponds to the\n" "/// index of a copied value. When a mask bit is 0, the corresponding 16-bit\n" "/// element in operand \\a V1 is copied to the same position in the result.\n" "/// When a mask bit is 1, the corresponding 16-bit element in operand \\a V2\n" "/// is copied to the same position in the result.\n" "/// \\returns A 128-bit vector of [8 x i16] containing the copied values.\n" "#define _mm_blend_epi16(V1, V2, M) \\\n" " ((__m128i)__builtin_ia32_pblendw128((__v8hi)(__m128i)(V1), \\\n" " (__v8hi)(__m128i)(V2), (int)(M)))\n" "\n" "/* SSE4 Dword Multiply Instructions. 
*/\n" "/// Multiples corresponding elements of two 128-bit vectors of [4 x i32]\n" "/// and returns the lower 32 bits of the each product in a 128-bit vector of\n" "/// [4 x i32].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPMULLD / PMULLD instruction.\n" "///\n" "/// \\param __V1\n" "/// A 128-bit integer vector.\n" "/// \\param __V2\n" "/// A 128-bit integer vector.\n" "/// \\returns A 128-bit integer vector containing the products of both operands.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mullo_epi32(__m128i __V1,\n" " __m128i __V2) {\n" " return (__m128i)((__v4su)__V1 * (__v4su)__V2);\n" "}\n" "\n" "/// Multiplies corresponding even-indexed elements of two 128-bit\n" "/// vectors of [4 x i32] and returns a 128-bit vector of [2 x i64]\n" "/// containing the products.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPMULDQ / PMULDQ instruction.\n" "///\n" "/// \\param __V1\n" "/// A 128-bit vector of [4 x i32].\n" "/// \\param __V2\n" "/// A 128-bit vector of [4 x i32].\n" "/// \\returns A 128-bit vector of [2 x i64] containing the products of both\n" "/// operands.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mul_epi32(__m128i __V1,\n" " __m128i __V2) {\n" " return (__m128i)__builtin_ia32_pmuldq128((__v4si)__V1, (__v4si)__V2);\n" "}\n" "\n" "/* SSE4 Floating Point Dot Product Instructions. 
*/\n" "/// Computes the dot product of the two 128-bit vectors of [4 x float]\n" "/// and returns it in the elements of the 128-bit result vector of\n" "/// [4 x float].\n" "///\n" "/// The immediate integer operand controls which input elements\n" "/// will contribute to the dot product, and where the final results are\n" "/// returned.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128 _mm_dp_ps(__m128 X, __m128 Y, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VDPPS / DPPS instruction.\n" "///\n" "/// \\param X\n" "/// A 128-bit vector of [4 x float].\n" "/// \\param Y\n" "/// A 128-bit vector of [4 x float].\n" "/// \\param M\n" "/// An immediate integer operand. Mask bits [7:4] determine which elements\n" "/// of the input vectors are used, with bit [4] corresponding to the lowest\n" "/// element and bit [7] corresponding to the highest element of each [4 x\n" "/// float] vector. If a bit is set, the corresponding elements from the two\n" "/// input vectors are used as an input for dot product; otherwise that input\n" "/// is treated as zero. Bits [3:0] determine which elements of the result\n" "/// will receive a copy of the final dot product, with bit [0] corresponding\n" "/// to the lowest element and bit [3] corresponding to the highest element of\n" "/// each [4 x float] subvector. 
If a bit is set, the dot product is returned\n" "/// in the corresponding element; otherwise that element is set to zero.\n" "/// \\returns A 128-bit vector of [4 x float] containing the dot product.\n" "#define _mm_dp_ps(X, Y, M) \\\n" " ((__m128)__builtin_ia32_dpps((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), (M)))\n" "\n" "/// Computes the dot product of the two 128-bit vectors of [2 x double]\n" "/// and returns it in the elements of the 128-bit result vector of\n" "/// [2 x double].\n" "///\n" "/// The immediate integer operand controls which input\n" "/// elements will contribute to the dot product, and where the final results\n" "/// are returned.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128d _mm_dp_pd(__m128d X, __m128d Y, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VDPPD / DPPD instruction.\n" "///\n" "/// \\param X\n" "/// A 128-bit vector of [2 x double].\n" "/// \\param Y\n" "/// A 128-bit vector of [2 x double].\n" "/// \\param M\n" "/// An immediate integer operand. Mask bits [5:4] determine which elements\n" "/// of the input vectors are used, with bit [4] corresponding to the lowest\n" "/// element and bit [5] corresponding to the highest element of each of [2 x\n" "/// double] vector. If a bit is set, the corresponding elements from the two\n" "/// input vectors are used as an input for dot product; otherwise that input\n" "/// is treated as zero. Bits [1:0] determine which elements of the result\n" "/// will receive a copy of the final dot product, with bit [0] corresponding\n" "/// to the lowest element and bit [1] corresponding to the highest element of\n" "/// each [2 x double] vector. If a bit is set, the dot product is returned in\n" "/// the corresponding element; otherwise that element is set to zero.\n" "#define _mm_dp_pd(X, Y, M) \\\n" " ((__m128d)__builtin_ia32_dppd((__v2df)(__m128d)(X), (__v2df)(__m128d)(Y), \\\n" " (M)))\n" "\n" "/* SSE4 Streaming Load Hint Instruction. 
*/\n" "/// Loads integer values from a 128-bit aligned memory location to a\n" "/// 128-bit integer vector.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVNTDQA / MOVNTDQA instruction.\n" "///\n" "/// \\param __V\n" "/// A pointer to a 128-bit aligned memory location that contains the integer\n" "/// values.\n" "/// \\returns A 128-bit integer vector containing the data stored at the\n" "/// specified memory location.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_stream_load_si128(__m128i const *__V) {\n" " return (__m128i)__builtin_nontemporal_load((const __v2di *)__V);\n" "}\n" "\n" "/* SSE4 Packed Integer Min/Max Instructions. */\n" "/// Compares the corresponding elements of two 128-bit vectors of\n" "/// [16 x i8] and returns a 128-bit vector of [16 x i8] containing the lesser\n" "/// of the two values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPMINSB / PMINSB instruction.\n" "///\n" "/// \\param __V1\n" "/// A 128-bit vector of [16 x i8].\n" "/// \\param __V2\n" "/// A 128-bit vector of [16 x i8]\n" "/// \\returns A 128-bit vector of [16 x i8] containing the lesser values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epi8(__m128i __V1,\n" " __m128i __V2) {\n" " return (__m128i)__builtin_elementwise_min((__v16qs)__V1, (__v16qs)__V2);\n" "}\n" "\n" "/// Compares the corresponding elements of two 128-bit vectors of\n" "/// [16 x i8] and returns a 128-bit vector of [16 x i8] containing the\n" "/// greater value of the two.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPMAXSB / PMAXSB instruction.\n" "///\n" "/// \\param __V1\n" "/// A 128-bit vector of [16 x i8].\n" "/// \\param __V2\n" "/// A 128-bit vector of [16 x i8].\n" "/// \\returns A 128-bit vector of [16 x i8] containing the greater values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epi8(__m128i __V1,\n" " __m128i __V2) {\n" " return 
(__m128i)__builtin_elementwise_max((__v16qs)__V1, (__v16qs)__V2);\n" "}\n" "\n" "/// Compares the corresponding elements of two 128-bit vectors of\n" "/// [8 x u16] and returns a 128-bit vector of [8 x u16] containing the lesser\n" "/// value of the two.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPMINUW / PMINUW instruction.\n" "///\n" "/// \\param __V1\n" "/// A 128-bit vector of [8 x u16].\n" "/// \\param __V2\n" "/// A 128-bit vector of [8 x u16].\n" "/// \\returns A 128-bit vector of [8 x u16] containing the lesser values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epu16(__m128i __V1,\n" " __m128i __V2) {\n" " return (__m128i)__builtin_elementwise_min((__v8hu)__V1, (__v8hu)__V2);\n" "}\n" "\n" "/// Compares the corresponding elements of two 128-bit vectors of\n" "/// [8 x u16] and returns a 128-bit vector of [8 x u16] containing the\n" "/// greater value of the two.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPMAXUW / PMAXUW instruction.\n" "///\n" "/// \\param __V1\n" "/// A 128-bit vector of [8 x u16].\n" "/// \\param __V2\n" "/// A 128-bit vector of [8 x u16].\n" "/// \\returns A 128-bit vector of [8 x u16] containing the greater values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epu16(__m128i __V1,\n" " __m128i __V2) {\n" " return (__m128i)__builtin_elementwise_max((__v8hu)__V1, (__v8hu)__V2);\n" "}\n" "\n" "/// Compares the corresponding elements of two 128-bit vectors of\n" "/// [4 x i32] and returns a 128-bit vector of [4 x i32] containing the lesser\n" "/// value of the two.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPMINSD / PMINSD instruction.\n" "///\n" "/// \\param __V1\n" "/// A 128-bit vector of [4 x i32].\n" "/// \\param __V2\n" "/// A 128-bit vector of [4 x i32].\n" "/// \\returns A 128-bit vector of [4 x i32] containing the lesser values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS 
_mm_min_epi32(__m128i __V1,\n" " __m128i __V2) {\n" " return (__m128i)__builtin_elementwise_min((__v4si)__V1, (__v4si)__V2);\n" "}\n" "\n" "/// Compares the corresponding elements of two 128-bit vectors of\n" "/// [4 x i32] and returns a 128-bit vector of [4 x i32] containing the\n" "/// greater value of the two.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPMAXSD / PMAXSD instruction.\n" "///\n" "/// \\param __V1\n" "/// A 128-bit vector of [4 x i32].\n" "/// \\param __V2\n" "/// A 128-bit vector of [4 x i32].\n" "/// \\returns A 128-bit vector of [4 x i32] containing the greater values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epi32(__m128i __V1,\n" " __m128i __V2) {\n" " return (__m128i)__builtin_elementwise_max((__v4si)__V1, (__v4si)__V2);\n" "}\n" "\n" "/// Compares the corresponding elements of two 128-bit vectors of\n" "/// [4 x u32] and returns a 128-bit vector of [4 x u32] containing the lesser\n" "/// value of the two.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPMINUD / PMINUD instruction.\n" "///\n" "/// \\param __V1\n" "/// A 128-bit vector of [4 x u32].\n" "/// \\param __V2\n" "/// A 128-bit vector of [4 x u32].\n" "/// \\returns A 128-bit vector of [4 x u32] containing the lesser values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epu32(__m128i __V1,\n" " __m128i __V2) {\n" " return (__m128i)__builtin_elementwise_min((__v4su)__V1, (__v4su)__V2);\n" "}\n" "\n" "/// Compares the corresponding elements of two 128-bit vectors of\n" "/// [4 x u32] and returns a 128-bit vector of [4 x u32] containing the\n" "/// greater value of the two.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPMAXUD / PMAXUD instruction.\n" "///\n" "/// \\param __V1\n" "/// A 128-bit vector of [4 x u32].\n" "/// \\param __V2\n" "/// A 128-bit vector of [4 x u32].\n" "/// \\returns A 128-bit vector of [4 x u32] containing the greater values.\n" 
"static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epu32(__m128i __V1,\n" " __m128i __V2) {\n" " return (__m128i)__builtin_elementwise_max((__v4su)__V1, (__v4su)__V2);\n" "}\n" "\n" "/* SSE4 Insertion and Extraction from XMM Register Instructions. */\n" "/// Takes the first argument \\a X and inserts an element from the second\n" "/// argument \\a Y as selected by the third argument \\a N. That result then\n" "/// has elements zeroed out also as selected by the third argument \\a N. The\n" "/// resulting 128-bit vector of [4 x float] is then returned.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128 _mm_insert_ps(__m128 X, __m128 Y, const int N);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VINSERTPS instruction.\n" "///\n" "/// \\param X\n" "/// A 128-bit vector source operand of [4 x float]. With the exception of\n" "/// those bits in the result copied from parameter \\a Y and zeroed by bits\n" "/// [3:0] of \\a N, all bits from this parameter are copied to the result.\n" "/// \\param Y\n" "/// A 128-bit vector source operand of [4 x float]. One single-precision\n" "/// floating-point element from this source, as determined by the immediate\n" "/// parameter, is copied to the result.\n" "/// \\param N\n" "/// Specifies which bits from operand \\a Y will be copied, which bits in the\n" "/// result they will be copied to, and which bits in the result will be\n" "/// cleared. The following assignments are made: \\n\n" "/// Bits [7:6] specify the bits to copy from operand \\a Y: \\n\n" "/// 00: Selects bits [31:0] from operand \\a Y. \\n\n" "/// 01: Selects bits [63:32] from operand \\a Y. \\n\n" "/// 10: Selects bits [95:64] from operand \\a Y. \\n\n" "/// 11: Selects bits [127:96] from operand \\a Y. \\n\n" "/// Bits [5:4] specify the bits in the result to which the selected bits\n" "/// from operand \\a Y are copied: \\n\n" "/// 00: Copies the selected bits from \\a Y to result bits [31:0]. 
\\n\n" "/// 01: Copies the selected bits from \\a Y to result bits [63:32]. \\n\n" "/// 10: Copies the selected bits from \\a Y to result bits [95:64]. \\n\n" "/// 11: Copies the selected bits from \\a Y to result bits [127:96]. \\n\n" "/// Bits[3:0]: If any of these bits are set, the corresponding result\n" "/// element is cleared.\n" "/// \\returns A 128-bit vector of [4 x float] containing the copied\n" "/// single-precision floating point elements from the operands.\n" "#define _mm_insert_ps(X, Y, N) __builtin_ia32_insertps128((X), (Y), (N))\n" "\n" "/// Extracts a 32-bit integer from a 128-bit vector of [4 x float] and\n" "/// returns it, using the immediate value parameter \\a N as a selector.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// int _mm_extract_ps(__m128 X, const int N);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VEXTRACTPS / EXTRACTPS \n" "/// instruction.\n" "///\n" "/// \\param X\n" "/// A 128-bit vector of [4 x float].\n" "/// \\param N\n" "/// An immediate value. Bits [1:0] determines which bits from the argument\n" "/// \\a X are extracted and returned: \\n\n" "/// 00: Bits [31:0] of parameter \\a X are returned. \\n\n" "/// 01: Bits [63:32] of parameter \\a X are returned. \\n\n" "/// 10: Bits [95:64] of parameter \\a X are returned. \\n\n" "/// 11: Bits [127:96] of parameter \\a X are returned.\n" "/// \\returns A 32-bit integer containing the extracted 32 bits of float data.\n" "#define _mm_extract_ps(X, N) \\\n" " __builtin_bit_cast( \\\n" " int, __builtin_ia32_vec_ext_v4sf((__v4sf)(__m128)(X), (int)(N)))\n" "\n" "/* Miscellaneous insert and extract macros. */\n" "/* Extract a single-precision float from X at index N into D. */\n" "#define _MM_EXTRACT_FLOAT(D, X, N) \\\n" " do { \\\n" " (D) = __builtin_ia32_vec_ext_v4sf((__v4sf)(__m128)(X), (int)(N)); \\\n" " } while (0)\n" "\n" "/* Or together 2 sets of indexes (X and Y) with the zeroing bits (Z) to create\n" " an index suitable for _mm_insert_ps. 
*/\n" "#define _MM_MK_INSERTPS_NDX(X, Y, Z) (((X) << 6) | ((Y) << 4) | (Z))\n" "\n" "/* Extract a float from X at index N into the first index of the return. */\n" "#define _MM_PICK_OUT_PS(X, N) \\\n" " _mm_insert_ps(_mm_setzero_ps(), (X), _MM_MK_INSERTPS_NDX((N), 0, 0x0e))\n" "\n" "/* Insert int into packed integer array at index. */\n" "/// Constructs a 128-bit vector of [16 x i8] by first making a copy of\n" "/// the 128-bit integer vector parameter, and then inserting the lower 8 bits\n" "/// of an integer parameter \\a I into an offset specified by the immediate\n" "/// value parameter \\a N.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm_insert_epi8(__m128i X, int I, const int N);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VPINSRB / PINSRB instruction.\n" "///\n" "/// \\param X\n" "/// A 128-bit integer vector of [16 x i8]. This vector is copied to the\n" "/// result and then one of the sixteen elements in the result vector is\n" "/// replaced by the lower 8 bits of \\a I.\n" "/// \\param I\n" "/// An integer. The lower 8 bits of this operand are written to the result\n" "/// beginning at the offset specified by \\a N.\n" "/// \\param N\n" "/// An immediate value. Bits [3:0] specify the bit offset in the result at\n" "/// which the lower 8 bits of \\a I are written. \\n\n" "/// 0000: Bits [7:0] of the result are used for insertion. \\n\n" "/// 0001: Bits [15:8] of the result are used for insertion. \\n\n" "/// 0010: Bits [23:16] of the result are used for insertion. \\n\n" "/// 0011: Bits [31:24] of the result are used for insertion. \\n\n" "/// 0100: Bits [39:32] of the result are used for insertion. \\n\n" "/// 0101: Bits [47:40] of the result are used for insertion. \\n\n" "/// 0110: Bits [55:48] of the result are used for insertion. \\n\n" "/// 0111: Bits [63:56] of the result are used for insertion. \\n\n" "/// 1000: Bits [71:64] of the result are used for insertion. 
\\n\n" "/// 1001: Bits [79:72] of the result are used for insertion. \\n\n" "/// 1010: Bits [87:80] of the result are used for insertion. \\n\n" "/// 1011: Bits [95:88] of the result are used for insertion. \\n\n" "/// 1100: Bits [103:96] of the result are used for insertion. \\n\n" "/// 1101: Bits [111:104] of the result are used for insertion. \\n\n" "/// 1110: Bits [119:112] of the result are used for insertion. \\n\n" "/// 1111: Bits [127:120] of the result are used for insertion.\n" "/// \\returns A 128-bit integer vector containing the constructed values.\n" "#define _mm_insert_epi8(X, I, N) \\\n" " ((__m128i)__builtin_ia32_vec_set_v16qi((__v16qi)(__m128i)(X), (int)(I), \\\n" " (int)(N)))\n" "\n" "/// Constructs a 128-bit vector of [4 x i32] by first making a copy of\n" "/// the 128-bit integer vector parameter, and then inserting the 32-bit\n" "/// integer parameter \\a I at the offset specified by the immediate value\n" "/// parameter \\a N.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm_insert_epi32(__m128i X, int I, const int N);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VPINSRD / PINSRD instruction.\n" "///\n" "/// \\param X\n" "/// A 128-bit integer vector of [4 x i32]. This vector is copied to the\n" "/// result and then one of the four elements in the result vector is\n" "/// replaced by \\a I.\n" "/// \\param I\n" "/// A 32-bit integer that is written to the result beginning at the offset\n" "/// specified by \\a N.\n" "/// \\param N\n" "/// An immediate value. Bits [1:0] specify the bit offset in the result at\n" "/// which the integer \\a I is written. \\n\n" "/// 00: Bits [31:0] of the result are used for insertion. \\n\n" "/// 01: Bits [63:32] of the result are used for insertion. \\n\n" "/// 10: Bits [95:64] of the result are used for insertion. 
\\n\n" "/// 11: Bits [127:96] of the result are used for insertion.\n" "/// \\returns A 128-bit integer vector containing the constructed values.\n" "#define _mm_insert_epi32(X, I, N) \\\n" " ((__m128i)__builtin_ia32_vec_set_v4si((__v4si)(__m128i)(X), (int)(I), \\\n" " (int)(N)))\n" "\n" "#ifdef __x86_64__\n" "/// Constructs a 128-bit vector of [2 x i64] by first making a copy of\n" "/// the 128-bit integer vector parameter, and then inserting the 64-bit\n" "/// integer parameter \\a I, using the immediate value parameter \\a N as an\n" "/// insertion location selector.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm_insert_epi64(__m128i X, long long I, const int N);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VPINSRQ / PINSRQ instruction.\n" "///\n" "/// \\param X\n" "/// A 128-bit integer vector of [2 x i64]. This vector is copied to the\n" "/// result and then one of the two elements in the result vector is replaced\n" "/// by \\a I.\n" "/// \\param I\n" "/// A 64-bit integer that is written to the result beginning at the offset\n" "/// specified by \\a N.\n" "/// \\param N\n" "/// An immediate value. Bit [0] specifies the bit offset in the result at\n" "/// which the integer \\a I is written. \\n\n" "/// 0: Bits [63:0] of the result are used for insertion. \\n\n" "/// 1: Bits [127:64] of the result are used for insertion. \\n\n" "/// \\returns A 128-bit integer vector containing the constructed values.\n" "#define _mm_insert_epi64(X, I, N) \\\n" " ((__m128i)__builtin_ia32_vec_set_v2di((__v2di)(__m128i)(X), (long long)(I), \\\n" " (int)(N)))\n" "#endif /* __x86_64__ */\n" "\n" "/* Extract int from packed integer array at index. 
This returns the element\n" " * as a zero extended value, so it is unsigned.\n" " */\n" "/// Extracts an 8-bit element from the 128-bit integer vector of\n" "/// [16 x i8], using the immediate value parameter \\a N as a selector.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// int _mm_extract_epi8(__m128i X, const int N);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VPEXTRB / PEXTRB instruction.\n" "///\n" "/// \\param X\n" "/// A 128-bit integer vector.\n" "/// \\param N\n" "/// An immediate value. Bits [3:0] specify which 8-bit vector element from\n" "/// the argument \\a X to extract and copy to the result. \\n\n" "/// 0000: Bits [7:0] of parameter \\a X are extracted. \\n\n" "/// 0001: Bits [15:8] of the parameter \\a X are extracted. \\n\n" "/// 0010: Bits [23:16] of the parameter \\a X are extracted. \\n\n" "/// 0011: Bits [31:24] of the parameter \\a X are extracted. \\n\n" "/// 0100: Bits [39:32] of the parameter \\a X are extracted. \\n\n" "/// 0101: Bits [47:40] of the parameter \\a X are extracted. \\n\n" "/// 0110: Bits [55:48] of the parameter \\a X are extracted. \\n\n" "/// 0111: Bits [63:56] of the parameter \\a X are extracted. \\n\n" "/// 1000: Bits [71:64] of the parameter \\a X are extracted. \\n\n" "/// 1001: Bits [79:72] of the parameter \\a X are extracted. \\n\n" "/// 1010: Bits [87:80] of the parameter \\a X are extracted. \\n\n" "/// 1011: Bits [95:88] of the parameter \\a X are extracted. \\n\n" "/// 1100: Bits [103:96] of the parameter \\a X are extracted. \\n\n" "/// 1101: Bits [111:104] of the parameter \\a X are extracted. \\n\n" "/// 1110: Bits [119:112] of the parameter \\a X are extracted. 
\\n\n" "/// 1111: Bits [127:120] of the parameter \\a X are extracted.\n" "/// \\returns An unsigned integer, whose lower 8 bits are selected from the\n" "/// 128-bit integer vector parameter and the remaining bits are assigned\n" "/// zeros.\n" "#define _mm_extract_epi8(X, N) \\\n" " ((int)(unsigned char)__builtin_ia32_vec_ext_v16qi((__v16qi)(__m128i)(X), \\\n" " (int)(N)))\n" "\n" "/// Extracts a 32-bit element from the 128-bit integer vector of\n" "/// [4 x i32], using the immediate value parameter \\a N as a selector.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// int _mm_extract_epi32(__m128i X, const int N);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VPEXTRD / PEXTRD instruction.\n" "///\n" "/// \\param X\n" "/// A 128-bit integer vector.\n" "/// \\param N\n" "/// An immediate value. Bits [1:0] specify which 32-bit vector element from\n" "/// the argument \\a X to extract and copy to the result. \\n\n" "/// 00: Bits [31:0] of the parameter \\a X are extracted. \\n\n" "/// 01: Bits [63:32] of the parameter \\a X are extracted. \\n\n" "/// 10: Bits [95:64] of the parameter \\a X are extracted. \\n\n" "/// 11: Bits [127:96] of the parameter \\a X are exracted.\n" "/// \\returns An integer, whose lower 32 bits are selected from the 128-bit\n" "/// integer vector parameter and the remaining bits are assigned zeros.\n" "#define _mm_extract_epi32(X, N) \\\n" " ((int)__builtin_ia32_vec_ext_v4si((__v4si)(__m128i)(X), (int)(N)))\n" "\n" "/// Extracts a 64-bit element from the 128-bit integer vector of\n" "/// [2 x i64], using the immediate value parameter \\a N as a selector.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// long long _mm_extract_epi64(__m128i X, const int N);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VPEXTRQ / PEXTRQ instruction\n" "/// in 64-bit mode.\n" "///\n" "/// \\param X\n" "/// A 128-bit integer vector.\n" "/// \\param N\n" "/// An immediate value. 
Bit [0] specifies which 64-bit vector element from\n" "/// the argument \\a X to return. \\n\n" "/// 0: Bits [63:0] are returned. \\n\n" "/// 1: Bits [127:64] are returned. \\n\n" "/// \\returns A 64-bit integer.\n" "#define _mm_extract_epi64(X, N) \\\n" " ((long long)__builtin_ia32_vec_ext_v2di((__v2di)(__m128i)(X), (int)(N)))\n" "\n" "/* SSE4 128-bit Packed Integer Comparisons. */\n" "/// Tests whether the specified bits in a 128-bit integer vector are all\n" "/// zeros.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPTEST / PTEST instruction.\n" "///\n" "/// \\param __M\n" "/// A 128-bit integer vector containing the bits to be tested.\n" "/// \\param __V\n" "/// A 128-bit integer vector selecting which bits to test in operand \\a __M.\n" "/// \\returns TRUE if the specified bits are all zeros; FALSE otherwise.\n" "static __inline__ int __DEFAULT_FN_ATTRS _mm_testz_si128(__m128i __M,\n" " __m128i __V) {\n" " return __builtin_ia32_ptestz128((__v2di)__M, (__v2di)__V);\n" "}\n" "\n" "/// Tests whether the specified bits in a 128-bit integer vector are all\n" "/// ones.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPTEST / PTEST instruction.\n" "///\n" "/// \\param __M\n" "/// A 128-bit integer vector containing the bits to be tested.\n" "/// \\param __V\n" "/// A 128-bit integer vector selecting which bits to test in operand \\a __M.\n" "/// \\returns TRUE if the specified bits are all ones; FALSE otherwise.\n" "static __inline__ int __DEFAULT_FN_ATTRS _mm_testc_si128(__m128i __M,\n" " __m128i __V) {\n" " return __builtin_ia32_ptestc128((__v2di)__M, (__v2di)__V);\n" "}\n" "\n" "/// Tests whether the specified bits in a 128-bit integer vector are\n" "/// neither all zeros nor all ones.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPTEST / PTEST instruction.\n" "///\n" "/// \\param __M\n" "/// A 128-bit integer vector containing the bits to be tested.\n" "/// 
\\param __V\n" "/// A 128-bit integer vector selecting which bits to test in operand \\a __M.\n" "/// \\returns TRUE if the specified bits are neither all zeros nor all ones;\n" "/// FALSE otherwise.\n" "static __inline__ int __DEFAULT_FN_ATTRS _mm_testnzc_si128(__m128i __M,\n" " __m128i __V) {\n" " return __builtin_ia32_ptestnzc128((__v2di)__M, (__v2di)__V);\n" "}\n" "\n" "/// Tests whether the specified bits in a 128-bit integer vector are all\n" "/// ones.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// int _mm_test_all_ones(__m128i V);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VPTEST / PTEST instruction.\n" "///\n" "/// \\param V\n" "/// A 128-bit integer vector containing the bits to be tested.\n" "/// \\returns TRUE if the bits specified in the operand are all set to 1; FALSE\n" "/// otherwise.\n" "#define _mm_test_all_ones(V) _mm_testc_si128((V), _mm_set1_epi32(-1))\n" "\n" "/// Tests whether the specified bits in a 128-bit integer vector are\n" "/// neither all zeros nor all ones.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// int _mm_test_mix_ones_zeros(__m128i M, __m128i V);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VPTEST / PTEST instruction.\n" "///\n" "/// \\param M\n" "/// A 128-bit integer vector containing the bits to be tested.\n" "/// \\param V\n" "/// A 128-bit integer vector selecting which bits to test in operand \\a M.\n" "/// \\returns TRUE if the specified bits are neither all zeros nor all ones;\n" "/// FALSE otherwise.\n" "#define _mm_test_mix_ones_zeros(M, V) _mm_testnzc_si128((M), (V))\n" "\n" "/// Tests whether the specified bits in a 128-bit integer vector are all\n" "/// zeros.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// int _mm_test_all_zeros(__m128i M, __m128i V);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VPTEST / PTEST instruction.\n" "///\n" "/// \\param M\n" "/// A 128-bit integer vector containing the bits to 
be tested.\n" "/// \\param V\n" "/// A 128-bit integer vector selecting which bits to test in operand \\a M.\n" "/// \\returns TRUE if the specified bits are all zeros; FALSE otherwise.\n" "#define _mm_test_all_zeros(M, V) _mm_testz_si128((M), (V))\n" "\n" "/* SSE4 64-bit Packed Integer Comparisons. */\n" "/// Compares each of the corresponding 64-bit values of the 128-bit\n" "/// integer vectors for equality.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPCMPEQQ / PCMPEQQ instruction.\n" "///\n" "/// \\param __V1\n" "/// A 128-bit integer vector.\n" "/// \\param __V2\n" "/// A 128-bit integer vector.\n" "/// \\returns A 128-bit integer vector containing the comparison results.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi64(__m128i __V1,\n" " __m128i __V2) {\n" " return (__m128i)((__v2di)__V1 == (__v2di)__V2);\n" "}\n" "\n" "/* SSE4 Packed Integer Sign-Extension. */\n" "/// Sign-extends each of the lower eight 8-bit integer elements of a\n" "/// 128-bit vector of [16 x i8] to 16-bit values and returns them in a\n" "/// 128-bit vector of [8 x i16]. The upper eight elements of the input vector\n" "/// are unused.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPMOVSXBW / PMOVSXBW instruction.\n" "///\n" "/// \\param __V\n" "/// A 128-bit vector of [16 x i8]. The lower eight 8-bit elements are\n" "/// sign-extended to 16-bit values.\n" "/// \\returns A 128-bit vector of [8 x i16] containing the sign-extended values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi16(__m128i __V) {\n" " /* This function always performs a signed extension, but __v16qi is a char\n" " which may be signed or unsigned, so use __v16qs. 
*/\n" " return (__m128i) __builtin_convertvector(\n" " __builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3, 4, 5, 6,\n" " 7),\n" " __v8hi);\n" "}\n" "\n" "/// Sign-extends each of the lower four 8-bit integer elements of a\n" "/// 128-bit vector of [16 x i8] to 32-bit values and returns them in a\n" "/// 128-bit vector of [4 x i32]. The upper twelve elements of the input\n" "/// vector are unused.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPMOVSXBD / PMOVSXBD instruction.\n" "///\n" "/// \\param __V\n" "/// A 128-bit vector of [16 x i8]. The lower four 8-bit elements are\n" "/// sign-extended to 32-bit values.\n" "/// \\returns A 128-bit vector of [4 x i32] containing the sign-extended values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi32(__m128i __V) {\n" " /* This function always performs a signed extension, but __v16qi is a char\n" " which may be signed or unsigned, so use __v16qs. */\n" " return (__m128i) __builtin_convertvector(\n" " __builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3), __v4si);\n" "}\n" "\n" "/// Sign-extends each of the lower two 8-bit integer elements of a\n" "/// 128-bit integer vector of [16 x i8] to 64-bit values and returns them in\n" "/// a 128-bit vector of [2 x i64]. The upper fourteen elements of the input\n" "/// vector are unused.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPMOVSXBQ / PMOVSXBQ instruction.\n" "///\n" "/// \\param __V\n" "/// A 128-bit vector of [16 x i8]. The lower two 8-bit elements are\n" "/// sign-extended to 64-bit values.\n" "/// \\returns A 128-bit vector of [2 x i64] containing the sign-extended values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi64(__m128i __V) {\n" " /* This function always performs a signed extension, but __v16qi is a char\n" " which may be signed or unsigned, so use __v16qs. 
*/\n" " return (__m128i) __builtin_convertvector(\n" " __builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1), __v2di);\n" "}\n" "\n" "/// Sign-extends each of the lower four 16-bit integer elements of a\n" "/// 128-bit integer vector of [8 x i16] to 32-bit values and returns them in\n" "/// a 128-bit vector of [4 x i32]. The upper four elements of the input\n" "/// vector are unused.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPMOVSXWD / PMOVSXWD instruction.\n" "///\n" "/// \\param __V\n" "/// A 128-bit vector of [8 x i16]. The lower four 16-bit elements are\n" "/// sign-extended to 32-bit values.\n" "/// \\returns A 128-bit vector of [4 x i32] containing the sign-extended values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi16_epi32(__m128i __V) {\n" " return (__m128i) __builtin_convertvector(\n" " __builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1, 2, 3), __v4si);\n" "}\n" "\n" "/// Sign-extends each of the lower two 16-bit integer elements of a\n" "/// 128-bit integer vector of [8 x i16] to 64-bit values and returns them in\n" "/// a 128-bit vector of [2 x i64]. The upper six elements of the input\n" "/// vector are unused.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPMOVSXWQ / PMOVSXWQ instruction.\n" "///\n" "/// \\param __V\n" "/// A 128-bit vector of [8 x i16]. The lower two 16-bit elements are\n" "/// sign-extended to 64-bit values.\n" "/// \\returns A 128-bit vector of [2 x i64] containing the sign-extended values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi16_epi64(__m128i __V) {\n" " return (__m128i) __builtin_convertvector(\n" " __builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1), __v2di);\n" "}\n" "\n" "/// Sign-extends each of the lower two 32-bit integer elements of a\n" "/// 128-bit integer vector of [4 x i32] to 64-bit values and returns them in\n" "/// a 128-bit vector of [2 x i64]. 
The upper two elements of the input vector\n" "/// are unused.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPMOVSXDQ / PMOVSXDQ instruction.\n" "///\n" "/// \\param __V\n" "/// A 128-bit vector of [4 x i32]. The lower two 32-bit elements are\n" "/// sign-extended to 64-bit values.\n" "/// \\returns A 128-bit vector of [2 x i64] containing the sign-extended values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi32_epi64(__m128i __V) {\n" " return (__m128i) __builtin_convertvector(\n" " __builtin_shufflevector((__v4si)__V, (__v4si)__V, 0, 1), __v2di);\n" "}\n" "\n" "/* SSE4 Packed Integer Zero-Extension. */\n" "/// Zero-extends each of the lower eight 8-bit integer elements of a\n" "/// 128-bit vector of [16 x i8] to 16-bit values and returns them in a\n" "/// 128-bit vector of [8 x i16]. The upper eight elements of the input vector\n" "/// are unused.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPMOVZXBW / PMOVZXBW instruction.\n" "///\n" "/// \\param __V\n" "/// A 128-bit vector of [16 x i8]. The lower eight 8-bit elements are\n" "/// zero-extended to 16-bit values.\n" "/// \\returns A 128-bit vector of [8 x i16] containing the zero-extended values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi16(__m128i __V) {\n" " return (__m128i) __builtin_convertvector(\n" " __builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3, 4, 5, 6,\n" " 7),\n" " __v8hi);\n" "}\n" "\n" "/// Zero-extends each of the lower four 8-bit integer elements of a\n" "/// 128-bit vector of [16 x i8] to 32-bit values and returns them in a\n" "/// 128-bit vector of [4 x i32]. The upper twelve elements of the input\n" "/// vector are unused.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPMOVZXBD / PMOVZXBD instruction.\n" "///\n" "/// \\param __V\n" "/// A 128-bit vector of [16 x i8]. 
The lower four 8-bit elements are\n" "/// zero-extended to 32-bit values.\n" "/// \\returns A 128-bit vector of [4 x i32] containing the zero-extended values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi32(__m128i __V) {\n" " return (__m128i) __builtin_convertvector(\n" " __builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3), __v4si);\n" "}\n" "\n" "/// Zero-extends each of the lower two 8-bit integer elements of a\n" "/// 128-bit integer vector of [16 x i8] to 64-bit values and returns them in\n" "/// a 128-bit vector of [2 x i64]. The upper fourteen elements of the input\n" "/// vector are unused.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPMOVZXBQ / PMOVZXBQ instruction.\n" "///\n" "/// \\param __V\n" "/// A 128-bit vector of [16 x i8]. The lower two 8-bit elements are\n" "/// zero-extended to 64-bit values.\n" "/// \\returns A 128-bit vector of [2 x i64] containing the zero-extended values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi64(__m128i __V) {\n" " return (__m128i) __builtin_convertvector(\n" " __builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1), __v2di);\n" "}\n" "\n" "/// Zero-extends each of the lower four 16-bit integer elements of a\n" "/// 128-bit integer vector of [8 x i16] to 32-bit values and returns them in\n" "/// a 128-bit vector of [4 x i32]. The upper four elements of the input\n" "/// vector are unused.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPMOVZXWD / PMOVZXWD instruction.\n" "///\n" "/// \\param __V\n" "/// A 128-bit vector of [8 x i16]. 
The lower four 16-bit elements are\n" "/// zero-extended to 32-bit values.\n" "/// \\returns A 128-bit vector of [4 x i32] containing the zero-extended values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu16_epi32(__m128i __V) {\n" " return (__m128i) __builtin_convertvector(\n" " __builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1, 2, 3), __v4si);\n" "}\n" "\n" "/// Zero-extends each of the lower two 16-bit integer elements of a\n" "/// 128-bit integer vector of [8 x i16] to 64-bit values and returns them in\n" "/// a 128-bit vector of [2 x i64]. The upper six elements of the input vector\n" "/// are unused.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPMOVZXWQ / PMOVZXWQ instruction.\n" "///\n" "/// \\param __V\n" "/// A 128-bit vector of [8 x i16]. The lower two 16-bit elements are\n" "/// zero-extended to 64-bit values.\n" "/// \\returns A 128-bit vector of [2 x i64] containing the zero-extended values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu16_epi64(__m128i __V) {\n" " return (__m128i) __builtin_convertvector(\n" " __builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1), __v2di);\n" "}\n" "\n" "/// Zero-extends each of the lower two 32-bit integer elements of a\n" "/// 128-bit integer vector of [4 x i32] to 64-bit values and returns them in\n" "/// a 128-bit vector of [2 x i64]. The upper two elements of the input vector\n" "/// are unused.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPMOVZXDQ / PMOVZXDQ instruction.\n" "///\n" "/// \\param __V\n" "/// A 128-bit vector of [4 x i32]. 
The lower two 32-bit elements are\n" "/// zero-extended to 64-bit values.\n" "/// \\returns A 128-bit vector of [2 x i64] containing the zero-extended values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu32_epi64(__m128i __V) {\n" " return (__m128i) __builtin_convertvector(\n" " __builtin_shufflevector((__v4su)__V, (__v4su)__V, 0, 1), __v2di);\n" "}\n" "\n" "/* SSE4 Pack with Unsigned Saturation. */\n" "/// Converts 32-bit signed integers from both 128-bit integer vector\n" "/// operands into 16-bit unsigned integers, and returns the packed result.\n" "/// Values greater than 0xFFFF are saturated to 0xFFFF. Values less than\n" "/// 0x0000 are saturated to 0x0000.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPACKUSDW / PACKUSDW instruction.\n" "///\n" "/// \\param __V1\n" "/// A 128-bit vector of [4 x i32]. Each 32-bit element is treated as a\n" "/// signed integer and is converted to a 16-bit unsigned integer with\n" "/// saturation. Values greater than 0xFFFF are saturated to 0xFFFF. Values\n" "/// less than 0x0000 are saturated to 0x0000. The converted [4 x i16] values\n" "/// are written to the lower 64 bits of the result.\n" "/// \\param __V2\n" "/// A 128-bit vector of [4 x i32]. Each 32-bit element is treated as a\n" "/// signed integer and is converted to a 16-bit unsigned integer with\n" "/// saturation. Values greater than 0xFFFF are saturated to 0xFFFF. Values\n" "/// less than 0x0000 are saturated to 0x0000. The converted [4 x i16] values\n" "/// are written to the higher 64 bits of the result.\n" "/// \\returns A 128-bit vector of [8 x i16] containing the converted values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packus_epi32(__m128i __V1,\n" " __m128i __V2) {\n" " return (__m128i)__builtin_ia32_packusdw128((__v4si)__V1, (__v4si)__V2);\n" "}\n" "\n" "/* SSE4 Multiple Packed Sums of Absolute Difference. 
*/\n" "/// Subtracts 8-bit unsigned integer values and computes the absolute\n" "/// values of the differences to the corresponding bits in the destination.\n" "/// Then sums of the absolute differences are returned according to the bit\n" "/// fields in the immediate operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm_mpsadbw_epu8(__m128i X, __m128i Y, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VMPSADBW / MPSADBW instruction.\n" "///\n" "/// \\param X\n" "/// A 128-bit vector of [16 x i8].\n" "/// \\param Y\n" "/// A 128-bit vector of [16 x i8].\n" "/// \\param M\n" "/// An 8-bit immediate operand specifying how the absolute differences are to\n" "/// be calculated, according to the following algorithm:\n" "/// \\code\n" "/// // M2 represents bit 2 of the immediate operand\n" "/// // M10 represents bits [1:0] of the immediate operand\n" "/// i = M2 * 4;\n" "/// j = M10 * 4;\n" "/// for (k = 0; k < 8; k = k + 1) {\n" "/// d0 = abs(X[i + k + 0] - Y[j + 0]);\n" "/// d1 = abs(X[i + k + 1] - Y[j + 1]);\n" "/// d2 = abs(X[i + k + 2] - Y[j + 2]);\n" "/// d3 = abs(X[i + k + 3] - Y[j + 3]);\n" "/// r[k] = d0 + d1 + d2 + d3;\n" "/// }\n" "/// \\endcode\n" "/// \\returns A 128-bit integer vector containing the sums of the sets of\n" "/// absolute differences between both operands.\n" "#define _mm_mpsadbw_epu8(X, Y, M) \\\n" " ((__m128i)__builtin_ia32_mpsadbw128((__v16qi)(__m128i)(X), \\\n" " (__v16qi)(__m128i)(Y), (M)))\n" "\n" "/// Finds the minimum unsigned 16-bit element in the input 128-bit\n" "/// vector of [8 x u16] and returns it and along with its index.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPHMINPOSUW / PHMINPOSUW \n" "/// instruction.\n" "///\n" "/// \\param __V\n" "/// A 128-bit vector of [8 x u16].\n" "/// \\returns A 128-bit value where bits [15:0] contain the minimum value found\n" "/// in parameter \\a __V, bits [18:16] contain the index of the 
minimum value\n" "/// and the remaining bits are set to 0.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_minpos_epu16(__m128i __V) {\n" " return (__m128i)__builtin_ia32_phminposuw128((__v8hi)__V);\n" "}\n" "\n" "/* Handle the sse4.2 definitions here. */\n" "\n" "/* These definitions are normally in nmmintrin.h, but gcc puts them in here\n" " so we'll do the same. */\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "#define __DEFAULT_FN_ATTRS \\\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"sse4.2\")))\n" "\n" "/* These specify the type of data that we're comparing. */\n" "#define _SIDD_UBYTE_OPS 0x00\n" "#define _SIDD_UWORD_OPS 0x01\n" "#define _SIDD_SBYTE_OPS 0x02\n" "#define _SIDD_SWORD_OPS 0x03\n" "\n" "/* These specify the type of comparison operation. */\n" "#define _SIDD_CMP_EQUAL_ANY 0x00\n" "#define _SIDD_CMP_RANGES 0x04\n" "#define _SIDD_CMP_EQUAL_EACH 0x08\n" "#define _SIDD_CMP_EQUAL_ORDERED 0x0c\n" "\n" "/* These macros specify the polarity of the operation. */\n" "#define _SIDD_POSITIVE_POLARITY 0x00\n" "#define _SIDD_NEGATIVE_POLARITY 0x10\n" "#define _SIDD_MASKED_POSITIVE_POLARITY 0x20\n" "#define _SIDD_MASKED_NEGATIVE_POLARITY 0x30\n" "\n" "/* These macros are used in _mm_cmpXstri() to specify the return. */\n" "#define _SIDD_LEAST_SIGNIFICANT 0x00\n" "#define _SIDD_MOST_SIGNIFICANT 0x40\n" "\n" "/* These macros are used in _mm_cmpXstri() to specify the return. */\n" "#define _SIDD_BIT_MASK 0x00\n" "#define _SIDD_UNIT_MASK 0x40\n" "\n" "/* SSE4.2 Packed Comparison Intrinsics. */\n" "/// Uses the immediate operand \\a M to perform a comparison of string\n" "/// data with implicitly defined lengths that is contained in source operands\n" "/// \\a A and \\a B. 
Returns a 128-bit integer vector representing the result\n" "/// mask of the comparison.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm_cmpistrm(__m128i A, __m128i B, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VPCMPISTRM / PCMPISTRM \n" "/// instruction.\n" "///\n" "/// \\param A\n" "/// A 128-bit integer vector containing one of the source operands to be\n" "/// compared.\n" "/// \\param B\n" "/// A 128-bit integer vector containing one of the source operands to be\n" "/// compared.\n" "/// \\param M\n" "/// An 8-bit immediate operand specifying whether the characters are bytes or\n" "/// words, the type of comparison to perform, and the format of the return\n" "/// value. \\n\n" "/// Bits [1:0]: Determine source data format. \\n\n" "/// 00: 16 unsigned bytes \\n\n" "/// 01: 8 unsigned words \\n\n" "/// 10: 16 signed bytes \\n\n" "/// 11: 8 signed words \\n\n" "/// Bits [3:2]: Determine comparison type and aggregation method. \\n\n" "/// 00: Subset: Each character in \\a B is compared for equality with all\n" "/// the characters in \\a A. \\n\n" "/// 01: Ranges: Each character in \\a B is compared to \\a A. The comparison\n" "/// basis is greater than or equal for even-indexed elements in \\a A,\n" "/// and less than or equal for odd-indexed elements in \\a A. \\n\n" "/// 10: Match: Compare each pair of corresponding characters in \\a A and\n" "/// \\a B for equality. \\n\n" "/// 11: Substring: Search \\a B for substring matches of \\a A. \\n\n" "/// Bits [5:4]: Determine whether to perform a one's complement on the bit\n" "/// mask of the comparison results. \\n\n" "/// 00: No effect. \\n\n" "/// 01: Negate the bit mask. \\n\n" "/// 10: No effect. \\n\n" "/// 11: Negate the bit mask only for bits with an index less than or equal\n" "/// to the size of \\a A or \\a B. \\n\n" "/// Bit [6]: Determines whether the result is zero-extended or expanded to 16\n" "/// bytes. 
\\n\n" "/// 0: The result is zero-extended to 16 bytes. \\n\n" "/// 1: The result is expanded to 16 bytes (this expansion is performed by\n" "/// repeating each bit 8 or 16 times).\n" "/// \\returns Returns a 128-bit integer vector representing the result mask of\n" "/// the comparison.\n" "#define _mm_cmpistrm(A, B, M) \\\n" " ((__m128i)__builtin_ia32_pcmpistrm128((__v16qi)(__m128i)(A), \\\n" " (__v16qi)(__m128i)(B), (int)(M)))\n" "\n" "/// Uses the immediate operand \\a M to perform a comparison of string\n" "/// data with implicitly defined lengths that is contained in source operands\n" "/// \\a A and \\a B. Returns an integer representing the result index of the\n" "/// comparison.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// int _mm_cmpistri(__m128i A, __m128i B, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VPCMPISTRI / PCMPISTRI \n" "/// instruction.\n" "///\n" "/// \\param A\n" "/// A 128-bit integer vector containing one of the source operands to be\n" "/// compared.\n" "/// \\param B\n" "/// A 128-bit integer vector containing one of the source operands to be\n" "/// compared.\n" "/// \\param M\n" "/// An 8-bit immediate operand specifying whether the characters are bytes or\n" "/// words, the type of comparison to perform, and the format of the return\n" "/// value. \\n\n" "/// Bits [1:0]: Determine source data format. \\n\n" "/// 00: 16 unsigned bytes \\n\n" "/// 01: 8 unsigned words \\n\n" "/// 10: 16 signed bytes \\n\n" "/// 11: 8 signed words \\n\n" "/// Bits [3:2]: Determine comparison type and aggregation method. \\n\n" "/// 00: Subset: Each character in \\a B is compared for equality with all\n" "/// the characters in \\a A. \\n\n" "/// 01: Ranges: Each character in \\a B is compared to \\a A. The comparison\n" "/// basis is greater than or equal for even-indexed elements in \\a A,\n" "/// and less than or equal for odd-indexed elements in \\a A. 
\\n\n" "/// 10: Match: Compare each pair of corresponding characters in \\a A and\n" "/// \\a B for equality. \\n\n" "/// 11: Substring: Search B for substring matches of \\a A. \\n\n" "/// Bits [5:4]: Determine whether to perform a one's complement on the bit\n" "/// mask of the comparison results. \\n\n" "/// 00: No effect. \\n\n" "/// 01: Negate the bit mask. \\n\n" "/// 10: No effect. \\n\n" "/// 11: Negate the bit mask only for bits with an index less than or equal\n" "/// to the size of \\a A or \\a B. \\n\n" "/// Bit [6]: Determines whether the index of the lowest set bit or the\n" "/// highest set bit is returned. \\n\n" "/// 0: The index of the least significant set bit. \\n\n" "/// 1: The index of the most significant set bit. \\n\n" "/// \\returns Returns an integer representing the result index of the comparison.\n" "#define _mm_cmpistri(A, B, M) \\\n" " ((int)__builtin_ia32_pcmpistri128((__v16qi)(__m128i)(A), \\\n" " (__v16qi)(__m128i)(B), (int)(M)))\n" "\n" "/// Uses the immediate operand \\a M to perform a comparison of string\n" "/// data with explicitly defined lengths that is contained in source operands\n" "/// \\a A and \\a B. 
Returns a 128-bit integer vector representing the result\n" "/// mask of the comparison.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm_cmpestrm(__m128i A, int LA, __m128i B, int LB, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VPCMPESTRM / PCMPESTRM \n" "/// instruction.\n" "///\n" "/// \\param A\n" "/// A 128-bit integer vector containing one of the source operands to be\n" "/// compared.\n" "/// \\param LA\n" "/// An integer that specifies the length of the string in \\a A.\n" "/// \\param B\n" "/// A 128-bit integer vector containing one of the source operands to be\n" "/// compared.\n" "/// \\param LB\n" "/// An integer that specifies the length of the string in \\a B.\n" "/// \\param M\n" "/// An 8-bit immediate operand specifying whether the characters are bytes or\n" "/// words, the type of comparison to perform, and the format of the return\n" "/// value. \\n\n" "/// Bits [1:0]: Determine source data format. \\n\n" "/// 00: 16 unsigned bytes \\n\n" "/// 01: 8 unsigned words \\n\n" "/// 10: 16 signed bytes \\n\n" "/// 11: 8 signed words \\n\n" "/// Bits [3:2]: Determine comparison type and aggregation method. \\n\n" "/// 00: Subset: Each character in \\a B is compared for equality with all\n" "/// the characters in \\a A. \\n\n" "/// 01: Ranges: Each character in \\a B is compared to \\a A. The comparison\n" "/// basis is greater than or equal for even-indexed elements in \\a A,\n" "/// and less than or equal for odd-indexed elements in \\a A. \\n\n" "/// 10: Match: Compare each pair of corresponding characters in \\a A and\n" "/// \\a B for equality. \\n\n" "/// 11: Substring: Search \\a B for substring matches of \\a A. \\n\n" "/// Bits [5:4]: Determine whether to perform a one's complement on the bit\n" "/// mask of the comparison results. \\n\n" "/// 00: No effect. \\n\n" "/// 01: Negate the bit mask. \\n\n" "/// 10: No effect. 
\\n\n" "/// 11: Negate the bit mask only for bits with an index less than or equal\n" "/// to the size of \\a A or \\a B. \\n\n" "/// Bit [6]: Determines whether the result is zero-extended or expanded to 16\n" "/// bytes. \\n\n" "/// 0: The result is zero-extended to 16 bytes. \\n\n" "/// 1: The result is expanded to 16 bytes (this expansion is performed by\n" "/// repeating each bit 8 or 16 times). \\n\n" "/// \\returns Returns a 128-bit integer vector representing the result mask of\n" "/// the comparison.\n" "#define _mm_cmpestrm(A, LA, B, LB, M) \\\n" " ((__m128i)__builtin_ia32_pcmpestrm128((__v16qi)(__m128i)(A), (int)(LA), \\\n" " (__v16qi)(__m128i)(B), (int)(LB), \\\n" " (int)(M)))\n" "\n" "/// Uses the immediate operand \\a M to perform a comparison of string\n" "/// data with explicitly defined lengths that is contained in source operands\n" "/// \\a A and \\a B. Returns an integer representing the result index of the\n" "/// comparison.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// int _mm_cmpestri(__m128i A, int LA, __m128i B, int LB, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VPCMPESTRI / PCMPESTRI \n" "/// instruction.\n" "///\n" "/// \\param A\n" "/// A 128-bit integer vector containing one of the source operands to be\n" "/// compared.\n" "/// \\param LA\n" "/// An integer that specifies the length of the string in \\a A.\n" "/// \\param B\n" "/// A 128-bit integer vector containing one of the source operands to be\n" "/// compared.\n" "/// \\param LB\n" "/// An integer that specifies the length of the string in \\a B.\n" "/// \\param M\n" "/// An 8-bit immediate operand specifying whether the characters are bytes or\n" "/// words, the type of comparison to perform, and the format of the return\n" "/// value. \\n\n" "/// Bits [1:0]: Determine source data format. 
\\n\n" "/// 00: 16 unsigned bytes \\n\n" "/// 01: 8 unsigned words \\n\n" "/// 10: 16 signed bytes \\n\n" "/// 11: 8 signed words \\n\n" "/// Bits [3:2]: Determine comparison type and aggregation method. \\n\n" "/// 00: Subset: Each character in \\a B is compared for equality with all\n" "/// the characters in \\a A. \\n\n" "/// 01: Ranges: Each character in \\a B is compared to \\a A. The comparison\n" "/// basis is greater than or equal for even-indexed elements in \\a A,\n" "/// and less than or equal for odd-indexed elements in \\a A. \\n\n" "/// 10: Match: Compare each pair of corresponding characters in \\a A and\n" "/// \\a B for equality. \\n\n" "/// 11: Substring: Search B for substring matches of \\a A. \\n\n" "/// Bits [5:4]: Determine whether to perform a one's complement on the bit\n" "/// mask of the comparison results. \\n\n" "/// 00: No effect. \\n\n" "/// 01: Negate the bit mask. \\n\n" "/// 10: No effect. \\n\n" "/// 11: Negate the bit mask only for bits with an index less than or equal\n" "/// to the size of \\a A or \\a B. \\n\n" "/// Bit [6]: Determines whether the index of the lowest set bit or the\n" "/// highest set bit is returned. \\n\n" "/// 0: The index of the least significant set bit. \\n\n" "/// 1: The index of the most significant set bit. \\n\n" "/// \\returns Returns an integer representing the result index of the comparison.\n" "#define _mm_cmpestri(A, LA, B, LB, M) \\\n" " ((int)__builtin_ia32_pcmpestri128((__v16qi)(__m128i)(A), (int)(LA), \\\n" " (__v16qi)(__m128i)(B), (int)(LB), \\\n" " (int)(M)))\n" "\n" "/* SSE4.2 Packed Comparison Intrinsics and EFlag Reading. */\n" "/// Uses the immediate operand \\a M to perform a comparison of string\n" "/// data with implicitly defined lengths that is contained in source operands\n" "/// \\a A and \\a B. 
Returns 1 if the bit mask is zero and the length of the\n" "/// string in \\a B is the maximum, otherwise, returns 0.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// int _mm_cmpistra(__m128i A, __m128i B, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VPCMPISTRI / PCMPISTRI \n" "/// instruction.\n" "///\n" "/// \\param A\n" "/// A 128-bit integer vector containing one of the source operands to be\n" "/// compared.\n" "/// \\param B\n" "/// A 128-bit integer vector containing one of the source operands to be\n" "/// compared.\n" "/// \\param M\n" "/// An 8-bit immediate operand specifying whether the characters are bytes or\n" "/// words and the type of comparison to perform. \\n\n" "/// Bits [1:0]: Determine source data format. \\n\n" "/// 00: 16 unsigned bytes \\n\n" "/// 01: 8 unsigned words \\n\n" "/// 10: 16 signed bytes \\n\n" "/// 11: 8 signed words \\n\n" "/// Bits [3:2]: Determine comparison type and aggregation method. \\n\n" "/// 00: Subset: Each character in \\a B is compared for equality with all\n" "/// the characters in \\a A. \\n\n" "/// 01: Ranges: Each character in \\a B is compared to \\a A. The comparison\n" "/// basis is greater than or equal for even-indexed elements in \\a A,\n" "/// and less than or equal for odd-indexed elements in \\a A. \\n\n" "/// 10: Match: Compare each pair of corresponding characters in \\a A and\n" "/// \\a B for equality. \\n\n" "/// 11: Substring: Search \\a B for substring matches of \\a A. \\n\n" "/// Bits [5:4]: Determine whether to perform a one's complement on the bit\n" "/// mask of the comparison results. \\n\n" "/// 00: No effect. \\n\n" "/// 01: Negate the bit mask. \\n\n" "/// 10: No effect. \\n\n" "/// 11: Negate the bit mask only for bits with an index less than or equal\n" "/// to the size of \\a A or \\a B. 
\\n\n" "/// \\returns Returns 1 if the bit mask is zero and the length of the string in\n" "/// \\a B is the maximum; otherwise, returns 0.\n" "#define _mm_cmpistra(A, B, M) \\\n" " ((int)__builtin_ia32_pcmpistria128((__v16qi)(__m128i)(A), \\\n" " (__v16qi)(__m128i)(B), (int)(M)))\n" "\n" "/// Uses the immediate operand \\a M to perform a comparison of string\n" "/// data with implicitly defined lengths that is contained in source operands\n" "/// \\a A and \\a B. Returns 1 if the bit mask is non-zero, otherwise, returns\n" "/// 0.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// int _mm_cmpistrc(__m128i A, __m128i B, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VPCMPISTRI / PCMPISTRI \n" "/// instruction.\n" "///\n" "/// \\param A\n" "/// A 128-bit integer vector containing one of the source operands to be\n" "/// compared.\n" "/// \\param B\n" "/// A 128-bit integer vector containing one of the source operands to be\n" "/// compared.\n" "/// \\param M\n" "/// An 8-bit immediate operand specifying whether the characters are bytes or\n" "/// words and the type of comparison to perform. \\n\n" "/// Bits [1:0]: Determine source data format. \\n\n" "/// 00: 16 unsigned bytes \\n\n" "/// 01: 8 unsigned words \\n\n" "/// 10: 16 signed bytes \\n\n" "/// 11: 8 signed words \\n\n" "/// Bits [3:2]: Determine comparison type and aggregation method. \\n\n" "/// 00: Subset: Each character in \\a B is compared for equality with all\n" "/// the characters in \\a A. \\n\n" "/// 01: Ranges: Each character in \\a B is compared to \\a A. The comparison\n" "/// basis is greater than or equal for even-indexed elements in \\a A,\n" "/// and less than or equal for odd-indexed elements in \\a A. \\n\n" "/// 10: Match: Compare each pair of corresponding characters in \\a A and\n" "/// \\a B for equality. \\n\n" "/// 11: Substring: Search B for substring matches of \\a A. 
\\n\n" "/// Bits [5:4]: Determine whether to perform a one's complement on the bit\n" "/// mask of the comparison results. \\n\n" "/// 00: No effect. \\n\n" "/// 01: Negate the bit mask. \\n\n" "/// 10: No effect. \\n\n" "/// 11: Negate the bit mask only for bits with an index less than or equal\n" "/// to the size of \\a A or \\a B.\n" "/// \\returns Returns 1 if the bit mask is non-zero, otherwise, returns 0.\n" "#define _mm_cmpistrc(A, B, M) \\\n" " ((int)__builtin_ia32_pcmpistric128((__v16qi)(__m128i)(A), \\\n" " (__v16qi)(__m128i)(B), (int)(M)))\n" "\n" "/// Uses the immediate operand \\a M to perform a comparison of string\n" "/// data with implicitly defined lengths that is contained in source operands\n" "/// \\a A and \\a B. Returns bit 0 of the resulting bit mask.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// int _mm_cmpistro(__m128i A, __m128i B, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VPCMPISTRI / PCMPISTRI \n" "/// instruction.\n" "///\n" "/// \\param A\n" "/// A 128-bit integer vector containing one of the source operands to be\n" "/// compared.\n" "/// \\param B\n" "/// A 128-bit integer vector containing one of the source operands to be\n" "/// compared.\n" "/// \\param M\n" "/// An 8-bit immediate operand specifying whether the characters are bytes or\n" "/// words and the type of comparison to perform. \\n\n" "/// Bits [1:0]: Determine source data format. \\n\n" "/// 00: 16 unsigned bytes \\n\n" "/// 01: 8 unsigned words \\n\n" "/// 10: 16 signed bytes \\n\n" "/// 11: 8 signed words \\n\n" "/// Bits [3:2]: Determine comparison type and aggregation method. \\n\n" "/// 00: Subset: Each character in \\a B is compared for equality with all\n" "/// the characters in \\a A. \\n\n" "/// 01: Ranges: Each character in \\a B is compared to \\a A. The comparison\n" "/// basis is greater than or equal for even-indexed elements in \\a A,\n" "/// and less than or equal for odd-indexed elements in \\a A. 
\\n\n" "/// 10: Match: Compare each pair of corresponding characters in \\a A and\n" "/// \\a B for equality. \\n\n" "/// 11: Substring: Search B for substring matches of \\a A. \\n\n" "/// Bits [5:4]: Determine whether to perform a one's complement on the bit\n" "/// mask of the comparison results. \\n\n" "/// 00: No effect. \\n\n" "/// 01: Negate the bit mask. \\n\n" "/// 10: No effect. \\n\n" "/// 11: Negate the bit mask only for bits with an index less than or equal\n" "/// to the size of \\a A or \\a B. \\n\n" "/// \\returns Returns bit 0 of the resulting bit mask.\n" "#define _mm_cmpistro(A, B, M) \\\n" " ((int)__builtin_ia32_pcmpistrio128((__v16qi)(__m128i)(A), \\\n" " (__v16qi)(__m128i)(B), (int)(M)))\n" "\n" "/// Uses the immediate operand \\a M to perform a comparison of string\n" "/// data with implicitly defined lengths that is contained in source operands\n" "/// \\a A and \\a B. Returns 1 if the length of the string in \\a A is less than\n" "/// the maximum, otherwise, returns 0.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// int _mm_cmpistrs(__m128i A, __m128i B, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VPCMPISTRI / PCMPISTRI \n" "/// instruction.\n" "///\n" "/// \\param A\n" "/// A 128-bit integer vector containing one of the source operands to be\n" "/// compared.\n" "/// \\param B\n" "/// A 128-bit integer vector containing one of the source operands to be\n" "/// compared.\n" "/// \\param M\n" "/// An 8-bit immediate operand specifying whether the characters are bytes or\n" "/// words and the type of comparison to perform. \\n\n" "/// Bits [1:0]: Determine source data format. \\n\n" "/// 00: 16 unsigned bytes \\n\n" "/// 01: 8 unsigned words \\n\n" "/// 10: 16 signed bytes \\n\n" "/// 11: 8 signed words \\n\n" "/// Bits [3:2]: Determine comparison type and aggregation method. \\n\n" "/// 00: Subset: Each character in \\a B is compared for equality with all\n" "/// the characters in \\a A. 
\\n\n" "/// 01: Ranges: Each character in \\a B is compared to \\a A. The comparison\n" "/// basis is greater than or equal for even-indexed elements in \\a A,\n" "/// and less than or equal for odd-indexed elements in \\a A. \\n\n" "/// 10: Match: Compare each pair of corresponding characters in \\a A and\n" "/// \\a B for equality. \\n\n" "/// 11: Substring: Search \\a B for substring matches of \\a A. \\n\n" "/// Bits [5:4]: Determine whether to perform a one's complement on the bit\n" "/// mask of the comparison results. \\n\n" "/// 00: No effect. \\n\n" "/// 01: Negate the bit mask. \\n\n" "/// 10: No effect. \\n\n" "/// 11: Negate the bit mask only for bits with an index less than or equal\n" "/// to the size of \\a A or \\a B. \\n\n" "/// \\returns Returns 1 if the length of the string in \\a A is less than the\n" "/// maximum, otherwise, returns 0.\n" "#define _mm_cmpistrs(A, B, M) \\\n" " ((int)__builtin_ia32_pcmpistris128((__v16qi)(__m128i)(A), \\\n" " (__v16qi)(__m128i)(B), (int)(M)))\n" "\n" "/// Uses the immediate operand \\a M to perform a comparison of string\n" "/// data with implicitly defined lengths that is contained in source operands\n" "/// \\a A and \\a B. Returns 1 if the length of the string in \\a B is less than\n" "/// the maximum, otherwise, returns 0.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// int _mm_cmpistrz(__m128i A, __m128i B, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VPCMPISTRI / PCMPISTRI \n" "/// instruction.\n" "///\n" "/// \\param A\n" "/// A 128-bit integer vector containing one of the source operands to be\n" "/// compared.\n" "/// \\param B\n" "/// A 128-bit integer vector containing one of the source operands to be\n" "/// compared.\n" "/// \\param M\n" "/// An 8-bit immediate operand specifying whether the characters are bytes or\n" "/// words and the type of comparison to perform. \\n\n" "/// Bits [1:0]: Determine source data format. 
\\n\n" "/// 00: 16 unsigned bytes \\n\n" "/// 01: 8 unsigned words \\n\n" "/// 10: 16 signed bytes \\n\n" "/// 11: 8 signed words \\n\n" "/// Bits [3:2]: Determine comparison type and aggregation method. \\n\n" "/// 00: Subset: Each character in \\a B is compared for equality with all\n" "/// the characters in \\a A. \\n\n" "/// 01: Ranges: Each character in \\a B is compared to \\a A. The comparison\n" "/// basis is greater than or equal for even-indexed elements in \\a A,\n" "/// and less than or equal for odd-indexed elements in \\a A. \\n\n" "/// 10: Match: Compare each pair of corresponding characters in \\a A and\n" "/// \\a B for equality. \\n\n" "/// 11: Substring: Search \\a B for substring matches of \\a A. \\n\n" "/// Bits [5:4]: Determine whether to perform a one's complement on the bit\n" "/// mask of the comparison results. \\n\n" "/// 00: No effect. \\n\n" "/// 01: Negate the bit mask. \\n\n" "/// 10: No effect. \\n\n" "/// 11: Negate the bit mask only for bits with an index less than or equal\n" "/// to the size of \\a A or \\a B.\n" "/// \\returns Returns 1 if the length of the string in \\a B is less than the\n" "/// maximum, otherwise, returns 0.\n" "#define _mm_cmpistrz(A, B, M) \\\n" " ((int)__builtin_ia32_pcmpistriz128((__v16qi)(__m128i)(A), \\\n" " (__v16qi)(__m128i)(B), (int)(M)))\n" "\n" "/// Uses the immediate operand \\a M to perform a comparison of string\n" "/// data with explicitly defined lengths that is contained in source operands\n" "/// \\a A and \\a B. 
Returns 1 if the bit mask is zero and the length of the\n" "/// string in \\a B is the maximum, otherwise, returns 0.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// int _mm_cmpestra(__m128i A, int LA, __m128i B, int LB, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VPCMPESTRI / PCMPESTRI \n" "/// instruction.\n" "///\n" "/// \\param A\n" "/// A 128-bit integer vector containing one of the source operands to be\n" "/// compared.\n" "/// \\param LA\n" "/// An integer that specifies the length of the string in \\a A.\n" "/// \\param B\n" "/// A 128-bit integer vector containing one of the source operands to be\n" "/// compared.\n" "/// \\param LB\n" "/// An integer that specifies the length of the string in \\a B.\n" "/// \\param M\n" "/// An 8-bit immediate operand specifying whether the characters are bytes or\n" "/// words and the type of comparison to perform. \\n\n" "/// Bits [1:0]: Determine source data format. \\n\n" "/// 00: 16 unsigned bytes \\n\n" "/// 01: 8 unsigned words \\n\n" "/// 10: 16 signed bytes \\n\n" "/// 11: 8 signed words \\n\n" "/// Bits [3:2]: Determine comparison type and aggregation method. \\n\n" "/// 00: Subset: Each character in \\a B is compared for equality with all\n" "/// the characters in \\a A. \\n\n" "/// 01: Ranges: Each character in \\a B is compared to \\a A. The comparison\n" "/// basis is greater than or equal for even-indexed elements in \\a A,\n" "/// and less than or equal for odd-indexed elements in \\a A. \\n\n" "/// 10: Match: Compare each pair of corresponding characters in \\a A and\n" "/// \\a B for equality. \\n\n" "/// 11: Substring: Search \\a B for substring matches of \\a A. \\n\n" "/// Bits [5:4]: Determine whether to perform a one's complement on the bit\n" "/// mask of the comparison results. \\n\n" "/// 00: No effect. \\n\n" "/// 01: Negate the bit mask. \\n\n" "/// 10: No effect. 
\\n\n" "/// 11: Negate the bit mask only for bits with an index less than or equal\n" "/// to the size of \\a A or \\a B.\n" "/// \\returns Returns 1 if the bit mask is zero and the length of the string in\n" "/// \\a B is the maximum, otherwise, returns 0.\n" "#define _mm_cmpestra(A, LA, B, LB, M) \\\n" " ((int)__builtin_ia32_pcmpestria128((__v16qi)(__m128i)(A), (int)(LA), \\\n" " (__v16qi)(__m128i)(B), (int)(LB), \\\n" " (int)(M)))\n" "\n" "/// Uses the immediate operand \\a M to perform a comparison of string\n" "/// data with explicitly defined lengths that is contained in source operands\n" "/// \\a A and \\a B. Returns 1 if the resulting mask is non-zero, otherwise,\n" "/// returns 0.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// int _mm_cmpestrc(__m128i A, int LA, __m128i B, int LB, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VPCMPESTRI / PCMPESTRI \n" "/// instruction.\n" "///\n" "/// \\param A\n" "/// A 128-bit integer vector containing one of the source operands to be\n" "/// compared.\n" "/// \\param LA\n" "/// An integer that specifies the length of the string in \\a A.\n" "/// \\param B\n" "/// A 128-bit integer vector containing one of the source operands to be\n" "/// compared.\n" "/// \\param LB\n" "/// An integer that specifies the length of the string in \\a B.\n" "/// \\param M\n" "/// An 8-bit immediate operand specifying whether the characters are bytes or\n" "/// words and the type of comparison to perform. \\n\n" "/// Bits [1:0]: Determine source data format. \\n\n" "/// 00: 16 unsigned bytes \\n\n" "/// 01: 8 unsigned words \\n\n" "/// 10: 16 signed bytes \\n\n" "/// 11: 8 signed words \\n\n" "/// Bits [3:2]: Determine comparison type and aggregation method. \\n\n" "/// 00: Subset: Each character in \\a B is compared for equality with all\n" "/// the characters in \\a A. \\n\n" "/// 01: Ranges: Each character in \\a B is compared to \\a A. 
The comparison\n" "/// basis is greater than or equal for even-indexed elements in \\a A,\n" "/// and less than or equal for odd-indexed elements in \\a A. \\n\n" "/// 10: Match: Compare each pair of corresponding characters in \\a A and\n" "/// \\a B for equality. \\n\n" "/// 11: Substring: Search \\a B for substring matches of \\a A. \\n\n" "/// Bits [5:4]: Determine whether to perform a one's complement on the bit\n" "/// mask of the comparison results. \\n\n" "/// 00: No effect. \\n\n" "/// 01: Negate the bit mask. \\n\n" "/// 10: No effect. \\n\n" "/// 11: Negate the bit mask only for bits with an index less than or equal\n" "/// to the size of \\a A or \\a B. \\n\n" "/// \\returns Returns 1 if the resulting mask is non-zero, otherwise, returns 0.\n" "#define _mm_cmpestrc(A, LA, B, LB, M) \\\n" " ((int)__builtin_ia32_pcmpestric128((__v16qi)(__m128i)(A), (int)(LA), \\\n" " (__v16qi)(__m128i)(B), (int)(LB), \\\n" " (int)(M)))\n" "\n" "/// Uses the immediate operand \\a M to perform a comparison of string\n" "/// data with explicitly defined lengths that is contained in source operands\n" "/// \\a A and \\a B. Returns bit 0 of the resulting bit mask.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// int _mm_cmpestro(__m128i A, int LA, __m128i B, int LB, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VPCMPESTRI / PCMPESTRI \n" "/// instruction.\n" "///\n" "/// \\param A\n" "/// A 128-bit integer vector containing one of the source operands to be\n" "/// compared.\n" "/// \\param LA\n" "/// An integer that specifies the length of the string in \\a A.\n" "/// \\param B\n" "/// A 128-bit integer vector containing one of the source operands to be\n" "/// compared.\n" "/// \\param LB\n" "/// An integer that specifies the length of the string in \\a B.\n" "/// \\param M\n" "/// An 8-bit immediate operand specifying whether the characters are bytes or\n" "/// words and the type of comparison to perform. 
\\n\n" "/// Bits [1:0]: Determine source data format. \\n\n" "/// 00: 16 unsigned bytes \\n\n" "/// 01: 8 unsigned words \\n\n" "/// 10: 16 signed bytes \\n\n" "/// 11: 8 signed words \\n\n" "/// Bits [3:2]: Determine comparison type and aggregation method. \\n\n" "/// 00: Subset: Each character in \\a B is compared for equality with all\n" "/// the characters in \\a A. \\n\n" "/// 01: Ranges: Each character in \\a B is compared to \\a A. The comparison\n" "/// basis is greater than or equal for even-indexed elements in \\a A,\n" "/// and less than or equal for odd-indexed elements in \\a A. \\n\n" "/// 10: Match: Compare each pair of corresponding characters in \\a A and\n" "/// \\a B for equality. \\n\n" "/// 11: Substring: Search \\a B for substring matches of \\a A. \\n\n" "/// Bits [5:4]: Determine whether to perform a one's complement on the bit\n" "/// mask of the comparison results. \\n\n" "/// 00: No effect. \\n\n" "/// 01: Negate the bit mask. \\n\n" "/// 10: No effect. \\n\n" "/// 11: Negate the bit mask only for bits with an index less than or equal\n" "/// to the size of \\a A or \\a B.\n" "/// \\returns Returns bit 0 of the resulting bit mask.\n" "#define _mm_cmpestro(A, LA, B, LB, M) \\\n" " ((int)__builtin_ia32_pcmpestrio128((__v16qi)(__m128i)(A), (int)(LA), \\\n" " (__v16qi)(__m128i)(B), (int)(LB), \\\n" " (int)(M)))\n" "\n" "/// Uses the immediate operand \\a M to perform a comparison of string\n" "/// data with explicitly defined lengths that is contained in source operands\n" "/// \\a A and \\a B. 
Returns 1 if the length of the string in \\a A is less than\n" "/// the maximum, otherwise, returns 0.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// int _mm_cmpestrs(__m128i A, int LA, __m128i B, int LB, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VPCMPESTRI / PCMPESTRI \n" "/// instruction.\n" "///\n" "/// \\param A\n" "/// A 128-bit integer vector containing one of the source operands to be\n" "/// compared.\n" "/// \\param LA\n" "/// An integer that specifies the length of the string in \\a A.\n" "/// \\param B\n" "/// A 128-bit integer vector containing one of the source operands to be\n" "/// compared.\n" "/// \\param LB\n" "/// An integer that specifies the length of the string in \\a B.\n" "/// \\param M\n" "/// An 8-bit immediate operand specifying whether the characters are bytes or\n" "/// words and the type of comparison to perform. \\n\n" "/// Bits [1:0]: Determine source data format. \\n\n" "/// 00: 16 unsigned bytes \\n\n" "/// 01: 8 unsigned words \\n\n" "/// 10: 16 signed bytes \\n\n" "/// 11: 8 signed words \\n\n" "/// Bits [3:2]: Determine comparison type and aggregation method. \\n\n" "/// 00: Subset: Each character in \\a B is compared for equality with all\n" "/// the characters in \\a A. \\n\n" "/// 01: Ranges: Each character in \\a B is compared to \\a A. The comparison\n" "/// basis is greater than or equal for even-indexed elements in \\a A,\n" "/// and less than or equal for odd-indexed elements in \\a A. \\n\n" "/// 10: Match: Compare each pair of corresponding characters in \\a A and\n" "/// \\a B for equality. \\n\n" "/// 11: Substring: Search \\a B for substring matches of \\a A. \\n\n" "/// Bits [5:4]: Determine whether to perform a one's complement in the bit\n" "/// mask of the comparison results. \\n\n" "/// 00: No effect. \\n\n" "/// 01: Negate the bit mask. \\n\n" "/// 10: No effect. 
\\n\n" "/// 11: Negate the bit mask only for bits with an index less than or equal\n" "/// to the size of \\a A or \\a B. \\n\n" "/// \\returns Returns 1 if the length of the string in \\a A is less than the\n" "/// maximum, otherwise, returns 0.\n" "#define _mm_cmpestrs(A, LA, B, LB, M) \\\n" " ((int)__builtin_ia32_pcmpestris128((__v16qi)(__m128i)(A), (int)(LA), \\\n" " (__v16qi)(__m128i)(B), (int)(LB), \\\n" " (int)(M)))\n" "\n" "/// Uses the immediate operand \\a M to perform a comparison of string\n" "/// data with explicitly defined lengths that is contained in source operands\n" "/// \\a A and \\a B. Returns 1 if the length of the string in \\a B is less than\n" "/// the maximum, otherwise, returns 0.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// int _mm_cmpestrz(__m128i A, int LA, __m128i B, int LB, const int M);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VPCMPESTRI instruction.\n" "///\n" "/// \\param A\n" "/// A 128-bit integer vector containing one of the source operands to be\n" "/// compared.\n" "/// \\param LA\n" "/// An integer that specifies the length of the string in \\a A.\n" "/// \\param B\n" "/// A 128-bit integer vector containing one of the source operands to be\n" "/// compared.\n" "/// \\param LB\n" "/// An integer that specifies the length of the string in \\a B.\n" "/// \\param M\n" "/// An 8-bit immediate operand specifying whether the characters are bytes or\n" "/// words and the type of comparison to perform. \\n\n" "/// Bits [1:0]: Determine source data format. \\n\n" "/// 00: 16 unsigned bytes \\n\n" "/// 01: 8 unsigned words \\n\n" "/// 10: 16 signed bytes \\n\n" "/// 11: 8 signed words \\n\n" "/// Bits [3:2]: Determine comparison type and aggregation method. \\n\n" "/// 00: Subset: Each character in \\a B is compared for equality with all\n" "/// the characters in \\a A. \\n\n" "/// 01: Ranges: Each character in \\a B is compared to \\a A. 
The comparison\n" "/// basis is greater than or equal for even-indexed elements in \\a A,\n" "/// and less than or equal for odd-indexed elements in \\a A. \\n\n" "/// 10: Match: Compare each pair of corresponding characters in \\a A and\n" "/// \\a B for equality. \\n\n" "/// 11: Substring: Search \\a B for substring matches of \\a A. \\n\n" "/// Bits [5:4]: Determine whether to perform a one's complement on the bit\n" "/// mask of the comparison results. \\n\n" "/// 00: No effect. \\n\n" "/// 01: Negate the bit mask. \\n\n" "/// 10: No effect. \\n\n" "/// 11: Negate the bit mask only for bits with an index less than or equal\n" "/// to the size of \\a A or \\a B.\n" "/// \\returns Returns 1 if the length of the string in \\a B is less than the\n" "/// maximum, otherwise, returns 0.\n" "#define _mm_cmpestrz(A, LA, B, LB, M) \\\n" " ((int)__builtin_ia32_pcmpestriz128((__v16qi)(__m128i)(A), (int)(LA), \\\n" " (__v16qi)(__m128i)(B), (int)(LB), \\\n" " (int)(M)))\n" "\n" "/* SSE4.2 Compare Packed Data -- Greater Than. 
*/\n" "/// Compares each of the corresponding 64-bit values of the 128-bit\n" "/// integer vectors to determine if the values in the first operand are\n" "/// greater than those in the second operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPCMPGTQ / PCMPGTQ instruction.\n" "///\n" "/// \\param __V1\n" "/// A 128-bit integer vector.\n" "/// \\param __V2\n" "/// A 128-bit integer vector.\n" "/// \\returns A 128-bit integer vector containing the comparison results.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi64(__m128i __V1,\n" " __m128i __V2) {\n" " return (__m128i)((__v2di)__V1 > (__v2di)__V2);\n" "}\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "\n" "#include \n" "\n" "#include \n" "\n" "#endif /* __SMMINTRIN_H */\n" "" } , { "/builtins/stdalign.h" , "/*===---- stdalign.h - Standard header for alignment ------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __STDALIGN_H\n" "#define __STDALIGN_H\n" "\n" "/* FIXME: This is using the placeholder dates Clang produces for these macros\n" " in C2x mode; switch to the correct values once they've been published. 
*/\n" "#if defined(__cplusplus) || \\\n" " (defined(__STDC_VERSION__) && __STDC_VERSION__ < 202000L)\n" "#ifndef __cplusplus\n" "#define alignas _Alignas\n" "#define alignof _Alignof\n" "#endif\n" "\n" "#define __alignas_is_defined 1\n" "#define __alignof_is_defined 1\n" "#endif /* __STDC_VERSION__ */\n" "\n" "#endif /* __STDALIGN_H */\n" "" } , { "/builtins/stdarg.h" , "/*===---- stdarg.h - Variable argument handling ----------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __STDARG_H\n" "\n" "#ifndef __GNUC_VA_LIST\n" "#define __GNUC_VA_LIST\n" "typedef __builtin_va_list __gnuc_va_list;\n" "#endif\n" "\n" "#ifdef __need___va_list\n" "#undef __need___va_list\n" "#else\n" "#define __STDARG_H\n" "#ifndef _VA_LIST\n" "typedef __builtin_va_list va_list;\n" "#define _VA_LIST\n" "#endif\n" "\n" "/* FIXME: This is using the placeholder dates Clang produces for these macros\n" " in C2x mode; switch to the correct values once they've been published. */\n" "#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L\n" "/* C2x does not require the second parameter for va_start. */\n" "#define va_start(ap, ...) __builtin_va_start(ap, 0)\n" "#else\n" "/* Versions before C2x do require the second parameter. 
*/\n" "#define va_start(ap, param) __builtin_va_start(ap, param)\n" "#endif\n" "#define va_end(ap) __builtin_va_end(ap)\n" "#define va_arg(ap, type) __builtin_va_arg(ap, type)\n" "\n" "/* GCC always defines __va_copy, but does not define va_copy unless in c99 mode\n" " * or -ansi is not specified, since it was not part of C90.\n" " */\n" "#define __va_copy(d,s) __builtin_va_copy(d,s)\n" "\n" "#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || \\\n" " (defined(__cplusplus) && __cplusplus >= 201103L) || \\\n" " !defined(__STRICT_ANSI__)\n" "#define va_copy(dest, src) __builtin_va_copy(dest, src)\n" "#endif\n" "\n" "#endif /* __STDARG_H */\n" "\n" "#endif /* not __STDARG_H */\n" "" } , { "/builtins/stdatomic.h" , "/*===---- stdatomic.h - Standard header for atomic types and operations -----===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __CLANG_STDATOMIC_H\n" "#define __CLANG_STDATOMIC_H\n" "\n" "/* If we're hosted, fall back to the system's stdatomic.h. FreeBSD, for\n" " * example, already has a Clang-compatible stdatomic.h header.\n" " *\n" " * Exclude the MSVC path as well as the MSVC header as of the 14.31.30818\n" " * explicitly disallows `stdatomic.h` in the C mode via an `#error`. Fallback\n" " * to the clang resource header until that is fully supported. 
The\n" " * `stdatomic.h` header requires C++ 23 or newer.\n" " */\n" "#if __STDC_HOSTED__ && \\\n" " __has_include_next() && \\\n" " (!defined(_MSC_VER) || (defined(__cplusplus) && __cplusplus >= 202002L))\n" "# include_next \n" "#else\n" "\n" "#include \n" "#include \n" "\n" "#ifdef __cplusplus\n" "extern \"C\" {\n" "#endif\n" "\n" "/* 7.17.1 Introduction */\n" "\n" "#define ATOMIC_BOOL_LOCK_FREE __CLANG_ATOMIC_BOOL_LOCK_FREE\n" "#define ATOMIC_CHAR_LOCK_FREE __CLANG_ATOMIC_CHAR_LOCK_FREE\n" "#define ATOMIC_CHAR16_T_LOCK_FREE __CLANG_ATOMIC_CHAR16_T_LOCK_FREE\n" "#define ATOMIC_CHAR32_T_LOCK_FREE __CLANG_ATOMIC_CHAR32_T_LOCK_FREE\n" "#define ATOMIC_WCHAR_T_LOCK_FREE __CLANG_ATOMIC_WCHAR_T_LOCK_FREE\n" "#define ATOMIC_SHORT_LOCK_FREE __CLANG_ATOMIC_SHORT_LOCK_FREE\n" "#define ATOMIC_INT_LOCK_FREE __CLANG_ATOMIC_INT_LOCK_FREE\n" "#define ATOMIC_LONG_LOCK_FREE __CLANG_ATOMIC_LONG_LOCK_FREE\n" "#define ATOMIC_LLONG_LOCK_FREE __CLANG_ATOMIC_LLONG_LOCK_FREE\n" "#define ATOMIC_POINTER_LOCK_FREE __CLANG_ATOMIC_POINTER_LOCK_FREE\n" "\n" "/* 7.17.2 Initialization */\n" "/* FIXME: This is using the placeholder dates Clang produces for these macros\n" " in C2x mode; switch to the correct values once they've been published. */\n" "#if (defined(__STDC_VERSION__) && __STDC_VERSION__ < 202000L) || \\\n" " defined(__cplusplus)\n" "/* ATOMIC_VAR_INIT was removed in C2x, but still remains in C++23. */\n" "#define ATOMIC_VAR_INIT(value) (value)\n" "#endif\n" "\n" "#if ((defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201710L && \\\n" " __STDC_VERSION__ < 202000L) || \\\n" " (defined(__cplusplus) && __cplusplus >= 202002L)) && \\\n" " !defined(_CLANG_DISABLE_CRT_DEPRECATION_WARNINGS)\n" "/* ATOMIC_VAR_INIT was deprecated in C17 and C++20. 
*/\n" "#pragma clang deprecated(ATOMIC_VAR_INIT)\n" "#endif\n" "#define atomic_init __c11_atomic_init\n" "\n" "/* 7.17.3 Order and consistency */\n" "\n" "typedef enum memory_order {\n" " memory_order_relaxed = __ATOMIC_RELAXED,\n" " memory_order_consume = __ATOMIC_CONSUME,\n" " memory_order_acquire = __ATOMIC_ACQUIRE,\n" " memory_order_release = __ATOMIC_RELEASE,\n" " memory_order_acq_rel = __ATOMIC_ACQ_REL,\n" " memory_order_seq_cst = __ATOMIC_SEQ_CST\n" "} memory_order;\n" "\n" "#define kill_dependency(y) (y)\n" "\n" "/* 7.17.4 Fences */\n" "\n" "/* These should be provided by the libc implementation. */\n" "void atomic_thread_fence(memory_order);\n" "void atomic_signal_fence(memory_order);\n" "\n" "#define atomic_thread_fence(order) __c11_atomic_thread_fence(order)\n" "#define atomic_signal_fence(order) __c11_atomic_signal_fence(order)\n" "\n" "/* 7.17.5 Lock-free property */\n" "\n" "#define atomic_is_lock_free(obj) __c11_atomic_is_lock_free(sizeof(*(obj)))\n" "\n" "/* 7.17.6 Atomic integer types */\n" "\n" "#ifdef __cplusplus\n" "typedef _Atomic(bool) atomic_bool;\n" "#else\n" "typedef _Atomic(_Bool) atomic_bool;\n" "#endif\n" "typedef _Atomic(char) atomic_char;\n" "typedef _Atomic(signed char) atomic_schar;\n" "typedef _Atomic(unsigned char) atomic_uchar;\n" "typedef _Atomic(short) atomic_short;\n" "typedef _Atomic(unsigned short) atomic_ushort;\n" "typedef _Atomic(int) atomic_int;\n" "typedef _Atomic(unsigned int) atomic_uint;\n" "typedef _Atomic(long) atomic_long;\n" "typedef _Atomic(unsigned long) atomic_ulong;\n" "typedef _Atomic(long long) atomic_llong;\n" "typedef _Atomic(unsigned long long) atomic_ullong;\n" "typedef _Atomic(uint_least16_t) atomic_char16_t;\n" "typedef _Atomic(uint_least32_t) atomic_char32_t;\n" "typedef _Atomic(wchar_t) atomic_wchar_t;\n" "typedef _Atomic(int_least8_t) atomic_int_least8_t;\n" "typedef _Atomic(uint_least8_t) atomic_uint_least8_t;\n" "typedef _Atomic(int_least16_t) atomic_int_least16_t;\n" "typedef 
_Atomic(uint_least16_t) atomic_uint_least16_t;\n" "typedef _Atomic(int_least32_t) atomic_int_least32_t;\n" "typedef _Atomic(uint_least32_t) atomic_uint_least32_t;\n" "typedef _Atomic(int_least64_t) atomic_int_least64_t;\n" "typedef _Atomic(uint_least64_t) atomic_uint_least64_t;\n" "typedef _Atomic(int_fast8_t) atomic_int_fast8_t;\n" "typedef _Atomic(uint_fast8_t) atomic_uint_fast8_t;\n" "typedef _Atomic(int_fast16_t) atomic_int_fast16_t;\n" "typedef _Atomic(uint_fast16_t) atomic_uint_fast16_t;\n" "typedef _Atomic(int_fast32_t) atomic_int_fast32_t;\n" "typedef _Atomic(uint_fast32_t) atomic_uint_fast32_t;\n" "typedef _Atomic(int_fast64_t) atomic_int_fast64_t;\n" "typedef _Atomic(uint_fast64_t) atomic_uint_fast64_t;\n" "typedef _Atomic(intptr_t) atomic_intptr_t;\n" "typedef _Atomic(uintptr_t) atomic_uintptr_t;\n" "typedef _Atomic(size_t) atomic_size_t;\n" "typedef _Atomic(ptrdiff_t) atomic_ptrdiff_t;\n" "typedef _Atomic(intmax_t) atomic_intmax_t;\n" "typedef _Atomic(uintmax_t) atomic_uintmax_t;\n" "\n" "/* 7.17.7 Operations on atomic types */\n" "\n" "#define atomic_store(object, desired) __c11_atomic_store(object, desired, __ATOMIC_SEQ_CST)\n" "#define atomic_store_explicit __c11_atomic_store\n" "\n" "#define atomic_load(object) __c11_atomic_load(object, __ATOMIC_SEQ_CST)\n" "#define atomic_load_explicit __c11_atomic_load\n" "\n" "#define atomic_exchange(object, desired) __c11_atomic_exchange(object, desired, __ATOMIC_SEQ_CST)\n" "#define atomic_exchange_explicit __c11_atomic_exchange\n" "\n" "#define atomic_compare_exchange_strong(object, expected, desired) __c11_atomic_compare_exchange_strong(object, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)\n" "#define atomic_compare_exchange_strong_explicit __c11_atomic_compare_exchange_strong\n" "\n" "#define atomic_compare_exchange_weak(object, expected, desired) __c11_atomic_compare_exchange_weak(object, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)\n" "#define atomic_compare_exchange_weak_explicit 
__c11_atomic_compare_exchange_weak\n" "\n" "#define atomic_fetch_add(object, operand) __c11_atomic_fetch_add(object, operand, __ATOMIC_SEQ_CST)\n" "#define atomic_fetch_add_explicit __c11_atomic_fetch_add\n" "\n" "#define atomic_fetch_sub(object, operand) __c11_atomic_fetch_sub(object, operand, __ATOMIC_SEQ_CST)\n" "#define atomic_fetch_sub_explicit __c11_atomic_fetch_sub\n" "\n" "#define atomic_fetch_or(object, operand) __c11_atomic_fetch_or(object, operand, __ATOMIC_SEQ_CST)\n" "#define atomic_fetch_or_explicit __c11_atomic_fetch_or\n" "\n" "#define atomic_fetch_xor(object, operand) __c11_atomic_fetch_xor(object, operand, __ATOMIC_SEQ_CST)\n" "#define atomic_fetch_xor_explicit __c11_atomic_fetch_xor\n" "\n" "#define atomic_fetch_and(object, operand) __c11_atomic_fetch_and(object, operand, __ATOMIC_SEQ_CST)\n" "#define atomic_fetch_and_explicit __c11_atomic_fetch_and\n" "\n" "/* 7.17.8 Atomic flag type and operations */\n" "\n" "typedef struct atomic_flag { atomic_bool _Value; } atomic_flag;\n" "\n" "#define ATOMIC_FLAG_INIT { 0 }\n" "\n" "/* These should be provided by the libc implementation. 
*/\n" "#ifdef __cplusplus\n" "bool atomic_flag_test_and_set(volatile atomic_flag *);\n" "bool atomic_flag_test_and_set_explicit(volatile atomic_flag *, memory_order);\n" "#else\n" "_Bool atomic_flag_test_and_set(volatile atomic_flag *);\n" "_Bool atomic_flag_test_and_set_explicit(volatile atomic_flag *, memory_order);\n" "#endif\n" "void atomic_flag_clear(volatile atomic_flag *);\n" "void atomic_flag_clear_explicit(volatile atomic_flag *, memory_order);\n" "\n" "#define atomic_flag_test_and_set(object) __c11_atomic_exchange(&(object)->_Value, 1, __ATOMIC_SEQ_CST)\n" "#define atomic_flag_test_and_set_explicit(object, order) __c11_atomic_exchange(&(object)->_Value, 1, order)\n" "\n" "#define atomic_flag_clear(object) __c11_atomic_store(&(object)->_Value, 0, __ATOMIC_SEQ_CST)\n" "#define atomic_flag_clear_explicit(object, order) __c11_atomic_store(&(object)->_Value, 0, order)\n" "\n" "#ifdef __cplusplus\n" "}\n" "#endif\n" "\n" "#endif /* __STDC_HOSTED__ */\n" "#endif /* __CLANG_STDATOMIC_H */\n" "\n" "" } , { "/builtins/stdbool.h" , "/*===---- stdbool.h - Standard header for booleans -------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __STDBOOL_H\n" "#define __STDBOOL_H\n" "\n" "#define __bool_true_false_are_defined 1\n" "\n" "#if defined(__STDC_VERSION__) && __STDC_VERSION__ > 201710L\n" "/* FIXME: We should be issuing a deprecation warning here, but cannot yet due\n" " * to system headers which include this header file unconditionally.\n" " */\n" "#elif !defined(__cplusplus)\n" "#define bool _Bool\n" "#define true 1\n" "#define false 0\n" "#elif defined(__GNUC__) && !defined(__STRICT_ANSI__)\n" "/* Define _Bool as a GNU extension. 
*/\n" "#define _Bool bool\n" "#if defined(__cplusplus) && __cplusplus < 201103L\n" "/* For C++98, define bool, false, true as a GNU extension. */\n" "#define bool bool\n" "#define false false\n" "#define true true\n" "#endif\n" "#endif\n" "\n" "#endif /* __STDBOOL_H */\n" "" } , { "/builtins/stddef.h" , "/*===---- stddef.h - Basic type definitions --------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#if !defined(__STDDEF_H) || defined(__need_ptrdiff_t) || \\\n" " defined(__need_size_t) || defined(__need_wchar_t) || \\\n" " defined(__need_NULL) || defined(__need_wint_t)\n" "\n" "#if !defined(__need_ptrdiff_t) && !defined(__need_size_t) && \\\n" " !defined(__need_wchar_t) && !defined(__need_NULL) && \\\n" " !defined(__need_wint_t)\n" "/* Always define miscellaneous pieces when modules are available. */\n" "#if !__has_feature(modules)\n" "#define __STDDEF_H\n" "#endif\n" "#define __need_ptrdiff_t\n" "#define __need_size_t\n" "#define __need_wchar_t\n" "#define __need_NULL\n" "#define __need_STDDEF_H_misc\n" "/* __need_wint_t is intentionally not defined here. */\n" "#endif\n" "\n" "#if defined(__need_ptrdiff_t)\n" "#if !defined(_PTRDIFF_T) || __has_feature(modules)\n" "/* Always define ptrdiff_t when modules are available. */\n" "#if !__has_feature(modules)\n" "#define _PTRDIFF_T\n" "#endif\n" "typedef __PTRDIFF_TYPE__ ptrdiff_t;\n" "#endif\n" "#undef __need_ptrdiff_t\n" "#endif /* defined(__need_ptrdiff_t) */\n" "\n" "#if defined(__need_size_t)\n" "#if !defined(_SIZE_T) || __has_feature(modules)\n" "/* Always define size_t when modules are available. 
*/\n" "#if !__has_feature(modules)\n" "#define _SIZE_T\n" "#endif\n" "typedef __SIZE_TYPE__ size_t;\n" "#endif\n" "#undef __need_size_t\n" "#endif /*defined(__need_size_t) */\n" "\n" "#if defined(__need_STDDEF_H_misc)\n" "/* ISO9899:2011 7.20 (C11 Annex K): Define rsize_t if __STDC_WANT_LIB_EXT1__ is\n" " * enabled. */\n" "#if (defined(__STDC_WANT_LIB_EXT1__) && __STDC_WANT_LIB_EXT1__ >= 1 && \\\n" " !defined(_RSIZE_T)) || __has_feature(modules)\n" "/* Always define rsize_t when modules are available. */\n" "#if !__has_feature(modules)\n" "#define _RSIZE_T\n" "#endif\n" "typedef __SIZE_TYPE__ rsize_t;\n" "#endif\n" "#endif /* defined(__need_STDDEF_H_misc) */\n" "\n" "#if defined(__need_wchar_t)\n" "#if !defined(__cplusplus) || (defined(_MSC_VER) && !_NATIVE_WCHAR_T_DEFINED)\n" "/* Always define wchar_t when modules are available. */\n" "#if !defined(_WCHAR_T) || __has_feature(modules)\n" "#if !__has_feature(modules)\n" "#define _WCHAR_T\n" "#if defined(_MSC_EXTENSIONS)\n" "#define _WCHAR_T_DEFINED\n" "#endif\n" "#endif\n" "typedef __WCHAR_TYPE__ wchar_t;\n" "#endif\n" "#endif\n" "#undef __need_wchar_t\n" "#endif /* defined(__need_wchar_t) */\n" "\n" "#if defined(__need_NULL)\n" "#undef NULL\n" "#ifdef __cplusplus\n" "# if !defined(__MINGW32__) && !defined(_MSC_VER)\n" "# define NULL __null\n" "# else\n" "# define NULL 0\n" "# endif\n" "#else\n" "# define NULL ((void*)0)\n" "#endif\n" "#ifdef __cplusplus\n" "#if defined(_MSC_EXTENSIONS) && defined(_NATIVE_NULLPTR_SUPPORTED)\n" "namespace std { typedef decltype(nullptr) nullptr_t; }\n" "using ::std::nullptr_t;\n" "#endif\n" "#endif\n" "#undef __need_NULL\n" "#endif /* defined(__need_NULL) */\n" "\n" "/* FIXME: This is using the placeholder dates Clang produces for these macros\n" " in C2x mode; switch to the correct values once they've been published. 
*/\n" "#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L\n" "typedef typeof(nullptr) nullptr_t;\n" "#endif /* defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L */\n" "\n" "#if defined(__need_STDDEF_H_misc) && defined(__STDC_VERSION__) && \\\n" " __STDC_VERSION__ >= 202000L\n" "#define unreachable() __builtin_unreachable()\n" "#endif /* defined(__need_STDDEF_H_misc) && >= C23 */\n" "\n" "#if defined(__need_STDDEF_H_misc)\n" "#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) || \\\n" " (defined(__cplusplus) && __cplusplus >= 201103L)\n" "#include \"__stddef_max_align_t.h\"\n" "#endif\n" "#define offsetof(t, d) __builtin_offsetof(t, d)\n" "#undef __need_STDDEF_H_misc\n" "#endif /* defined(__need_STDDEF_H_misc) */\n" "\n" "/* Some C libraries expect to see a wint_t here. Others (notably MinGW) will use\n" "__WINT_TYPE__ directly; accommodate both by requiring __need_wint_t */\n" "#if defined(__need_wint_t)\n" "/* Always define wint_t when modules are available. 
*/\n" "#if !defined(_WINT_T) || __has_feature(modules)\n" "#if !__has_feature(modules)\n" "#define _WINT_T\n" "#endif\n" "typedef __WINT_TYPE__ wint_t;\n" "#endif\n" "#undef __need_wint_t\n" "#endif /* __need_wint_t */\n" "\n" "#endif\n" "" } , { "/builtins/stdint.h" , "/*===---- stdint.h - Standard header for sized integer types --------------===*\\\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" "\\*===----------------------------------------------------------------------===*/\n" "\n" "#ifndef __CLANG_STDINT_H2\n" "// AIX system headers need stdint.h to be re-enterable while _STD_TYPES_T\n" "// is defined until an inclusion of it without _STD_TYPES_T occurs, in which\n" "// case the header guard macro is defined.\n" "#if !defined(_AIX) || !defined(_STD_TYPES_T) || !defined(__STDC_HOSTED__)\n" "#define __CLANG_STDINT_H2\n" "#endif\n" "\n" "/* If we're hosted, fall back to the system's stdint.h, which might have\n" " * additional definitions.\n" " */\n" "#if __STDC_HOSTED__ && __has_include_next()\n" "\n" "// C99 7.18.3 Limits of other integer types\n" "//\n" "// Footnote 219, 220: C++ implementations should define these macros only when\n" "// __STDC_LIMIT_MACROS is defined before is included.\n" "//\n" "// Footnote 222: C++ implementations should define these macros only when\n" "// __STDC_CONSTANT_MACROS is defined before is included.\n" "//\n" "// C++11 [cstdint.syn]p2:\n" "//\n" "// The macros defined by are provided unconditionally. 
In particular,\n" "// the symbols __STDC_LIMIT_MACROS and __STDC_CONSTANT_MACROS (mentioned in\n" "// footnotes 219, 220, and 222 in the C standard) play no role in C++.\n" "//\n" "// C11 removed the problematic footnotes.\n" "//\n" "// Work around this inconsistency by always defining those macros in C++ mode,\n" "// so that a C library implementation which follows the C99 standard can be\n" "// used in C++.\n" "# ifdef __cplusplus\n" "# if !defined(__STDC_LIMIT_MACROS)\n" "# define __STDC_LIMIT_MACROS\n" "# define __STDC_LIMIT_MACROS_DEFINED_BY_CLANG\n" "# endif\n" "# if !defined(__STDC_CONSTANT_MACROS)\n" "# define __STDC_CONSTANT_MACROS\n" "# define __STDC_CONSTANT_MACROS_DEFINED_BY_CLANG\n" "# endif\n" "# endif\n" "\n" "# include_next \n" "\n" "# ifdef __STDC_LIMIT_MACROS_DEFINED_BY_CLANG\n" "# undef __STDC_LIMIT_MACROS\n" "# undef __STDC_LIMIT_MACROS_DEFINED_BY_CLANG\n" "# endif\n" "# ifdef __STDC_CONSTANT_MACROS_DEFINED_BY_CLANG\n" "# undef __STDC_CONSTANT_MACROS\n" "# undef __STDC_CONSTANT_MACROS_DEFINED_BY_CLANG\n" "# endif\n" "\n" "#else\n" "\n" "/* C99 7.18.1.1 Exact-width integer types.\n" " * C99 7.18.1.2 Minimum-width integer types.\n" " * C99 7.18.1.3 Fastest minimum-width integer types.\n" " *\n" " * The standard requires that exact-width type be defined for 8-, 16-, 32-, and\n" " * 64-bit types if they are implemented. Other exact width types are optional.\n" " * This implementation defines an exact-width types for every integer width\n" " * that is represented in the standard integer types.\n" " *\n" " * The standard also requires minimum-width types be defined for 8-, 16-, 32-,\n" " * and 64-bit widths regardless of whether there are corresponding exact-width\n" " * types.\n" " *\n" " * To accommodate targets that are missing types that are exactly 8, 16, 32, or\n" " * 64 bits wide, this implementation takes an approach of cascading\n" " * redefinitions, redefining __int_leastN_t to successively smaller exact-width\n" " * types. 
It is therefore important that the types are defined in order of\n" " * descending widths.\n" " *\n" " * We currently assume that the minimum-width types and the fastest\n" " * minimum-width types are the same. This is allowed by the standard, but is\n" " * suboptimal.\n" " *\n" " * In violation of the standard, some targets do not implement a type that is\n" " * wide enough to represent all of the required widths (8-, 16-, 32-, 64-bit).\n" " * To accommodate these targets, a required minimum-width type is only\n" " * defined if there exists an exact-width type of equal or greater width.\n" " */\n" "\n" "#ifdef __INT64_TYPE__\n" "# ifndef __int8_t_defined /* glibc sys/types.h also defines int64_t*/\n" "typedef __INT64_TYPE__ int64_t;\n" "# endif /* __int8_t_defined */\n" "typedef __UINT64_TYPE__ uint64_t;\n" "# undef __int_least64_t\n" "# define __int_least64_t int64_t\n" "# undef __uint_least64_t\n" "# define __uint_least64_t uint64_t\n" "# undef __int_least32_t\n" "# define __int_least32_t int64_t\n" "# undef __uint_least32_t\n" "# define __uint_least32_t uint64_t\n" "# undef __int_least16_t\n" "# define __int_least16_t int64_t\n" "# undef __uint_least16_t\n" "# define __uint_least16_t uint64_t\n" "# undef __int_least8_t\n" "# define __int_least8_t int64_t\n" "# undef __uint_least8_t\n" "# define __uint_least8_t uint64_t\n" "#endif /* __INT64_TYPE__ */\n" "\n" "#ifdef __int_least64_t\n" "typedef __int_least64_t int_least64_t;\n" "typedef __uint_least64_t uint_least64_t;\n" "typedef __int_least64_t int_fast64_t;\n" "typedef __uint_least64_t uint_fast64_t;\n" "#endif /* __int_least64_t */\n" "\n" "#ifdef __INT56_TYPE__\n" "typedef __INT56_TYPE__ int56_t;\n" "typedef __UINT56_TYPE__ uint56_t;\n" "typedef int56_t int_least56_t;\n" "typedef uint56_t uint_least56_t;\n" "typedef int56_t int_fast56_t;\n" "typedef uint56_t uint_fast56_t;\n" "# undef __int_least32_t\n" "# define __int_least32_t int56_t\n" "# undef __uint_least32_t\n" "# define __uint_least32_t uint56_t\n" 
"# undef __int_least16_t\n" "# define __int_least16_t int56_t\n" "# undef __uint_least16_t\n" "# define __uint_least16_t uint56_t\n" "# undef __int_least8_t\n" "# define __int_least8_t int56_t\n" "# undef __uint_least8_t\n" "# define __uint_least8_t uint56_t\n" "#endif /* __INT56_TYPE__ */\n" "\n" "\n" "#ifdef __INT48_TYPE__\n" "typedef __INT48_TYPE__ int48_t;\n" "typedef __UINT48_TYPE__ uint48_t;\n" "typedef int48_t int_least48_t;\n" "typedef uint48_t uint_least48_t;\n" "typedef int48_t int_fast48_t;\n" "typedef uint48_t uint_fast48_t;\n" "# undef __int_least32_t\n" "# define __int_least32_t int48_t\n" "# undef __uint_least32_t\n" "# define __uint_least32_t uint48_t\n" "# undef __int_least16_t\n" "# define __int_least16_t int48_t\n" "# undef __uint_least16_t\n" "# define __uint_least16_t uint48_t\n" "# undef __int_least8_t\n" "# define __int_least8_t int48_t\n" "# undef __uint_least8_t\n" "# define __uint_least8_t uint48_t\n" "#endif /* __INT48_TYPE__ */\n" "\n" "\n" "#ifdef __INT40_TYPE__\n" "typedef __INT40_TYPE__ int40_t;\n" "typedef __UINT40_TYPE__ uint40_t;\n" "typedef int40_t int_least40_t;\n" "typedef uint40_t uint_least40_t;\n" "typedef int40_t int_fast40_t;\n" "typedef uint40_t uint_fast40_t;\n" "# undef __int_least32_t\n" "# define __int_least32_t int40_t\n" "# undef __uint_least32_t\n" "# define __uint_least32_t uint40_t\n" "# undef __int_least16_t\n" "# define __int_least16_t int40_t\n" "# undef __uint_least16_t\n" "# define __uint_least16_t uint40_t\n" "# undef __int_least8_t\n" "# define __int_least8_t int40_t\n" "# undef __uint_least8_t\n" "# define __uint_least8_t uint40_t\n" "#endif /* __INT40_TYPE__ */\n" "\n" "\n" "#ifdef __INT32_TYPE__\n" "\n" "# ifndef __int8_t_defined /* glibc sys/types.h also defines int32_t*/\n" "typedef __INT32_TYPE__ int32_t;\n" "# endif /* __int8_t_defined */\n" "\n" "# ifndef __uint32_t_defined /* more glibc compatibility */\n" "# define __uint32_t_defined\n" "typedef __UINT32_TYPE__ uint32_t;\n" "# endif /* 
__uint32_t_defined */\n" "\n" "# undef __int_least32_t\n" "# define __int_least32_t int32_t\n" "# undef __uint_least32_t\n" "# define __uint_least32_t uint32_t\n" "# undef __int_least16_t\n" "# define __int_least16_t int32_t\n" "# undef __uint_least16_t\n" "# define __uint_least16_t uint32_t\n" "# undef __int_least8_t\n" "# define __int_least8_t int32_t\n" "# undef __uint_least8_t\n" "# define __uint_least8_t uint32_t\n" "#endif /* __INT32_TYPE__ */\n" "\n" "#ifdef __int_least32_t\n" "typedef __int_least32_t int_least32_t;\n" "typedef __uint_least32_t uint_least32_t;\n" "typedef __int_least32_t int_fast32_t;\n" "typedef __uint_least32_t uint_fast32_t;\n" "#endif /* __int_least32_t */\n" "\n" "#ifdef __INT24_TYPE__\n" "typedef __INT24_TYPE__ int24_t;\n" "typedef __UINT24_TYPE__ uint24_t;\n" "typedef int24_t int_least24_t;\n" "typedef uint24_t uint_least24_t;\n" "typedef int24_t int_fast24_t;\n" "typedef uint24_t uint_fast24_t;\n" "# undef __int_least16_t\n" "# define __int_least16_t int24_t\n" "# undef __uint_least16_t\n" "# define __uint_least16_t uint24_t\n" "# undef __int_least8_t\n" "# define __int_least8_t int24_t\n" "# undef __uint_least8_t\n" "# define __uint_least8_t uint24_t\n" "#endif /* __INT24_TYPE__ */\n" "\n" "#ifdef __INT16_TYPE__\n" "#ifndef __int8_t_defined /* glibc sys/types.h also defines int16_t*/\n" "typedef __INT16_TYPE__ int16_t;\n" "#endif /* __int8_t_defined */\n" "typedef __UINT16_TYPE__ uint16_t;\n" "# undef __int_least16_t\n" "# define __int_least16_t int16_t\n" "# undef __uint_least16_t\n" "# define __uint_least16_t uint16_t\n" "# undef __int_least8_t\n" "# define __int_least8_t int16_t\n" "# undef __uint_least8_t\n" "# define __uint_least8_t uint16_t\n" "#endif /* __INT16_TYPE__ */\n" "\n" "#ifdef __int_least16_t\n" "typedef __int_least16_t int_least16_t;\n" "typedef __uint_least16_t uint_least16_t;\n" "typedef __int_least16_t int_fast16_t;\n" "typedef __uint_least16_t uint_fast16_t;\n" "#endif /* __int_least16_t */\n" "\n" "\n" "#ifdef 
__INT8_TYPE__\n" "#ifndef __int8_t_defined /* glibc sys/types.h also defines int8_t*/\n" "typedef __INT8_TYPE__ int8_t;\n" "#endif /* __int8_t_defined */\n" "typedef __UINT8_TYPE__ uint8_t;\n" "# undef __int_least8_t\n" "# define __int_least8_t int8_t\n" "# undef __uint_least8_t\n" "# define __uint_least8_t uint8_t\n" "#endif /* __INT8_TYPE__ */\n" "\n" "#ifdef __int_least8_t\n" "typedef __int_least8_t int_least8_t;\n" "typedef __uint_least8_t uint_least8_t;\n" "typedef __int_least8_t int_fast8_t;\n" "typedef __uint_least8_t uint_fast8_t;\n" "#endif /* __int_least8_t */\n" "\n" "/* prevent glibc sys/types.h from defining conflicting types */\n" "#ifndef __int8_t_defined\n" "# define __int8_t_defined\n" "#endif /* __int8_t_defined */\n" "\n" "/* C99 7.18.1.4 Integer types capable of holding object pointers.\n" " */\n" "#define __stdint_join3(a,b,c) a ## b ## c\n" "\n" "#ifndef _INTPTR_T\n" "#ifndef __intptr_t_defined\n" "typedef __INTPTR_TYPE__ intptr_t;\n" "#define __intptr_t_defined\n" "#define _INTPTR_T\n" "#endif\n" "#endif\n" "\n" "#ifndef _UINTPTR_T\n" "typedef __UINTPTR_TYPE__ uintptr_t;\n" "#define _UINTPTR_T\n" "#endif\n" "\n" "/* C99 7.18.1.5 Greatest-width integer types.\n" " */\n" "typedef __INTMAX_TYPE__ intmax_t;\n" "typedef __UINTMAX_TYPE__ uintmax_t;\n" "\n" "/* C99 7.18.4 Macros for minimum-width integer constants.\n" " *\n" " * The standard requires that integer constant macros be defined for all the\n" " * minimum-width types defined above. As 8-, 16-, 32-, and 64-bit minimum-width\n" " * types are required, the corresponding integer constant macros are defined\n" " * here. This implementation also defines minimum-width types for every other\n" " * integer width that the target implements, so corresponding macros are\n" " * defined below, too.\n" " *\n" " * These macros are defined using the same successive-shrinking approach as\n" " * the type definitions above. 
It is likewise important that macros are defined\n" " * in order of decending width.\n" " *\n" " * Note that C++ should not check __STDC_CONSTANT_MACROS here, contrary to the\n" " * claims of the C standard (see C++ 18.3.1p2, [cstdint.syn]).\n" " */\n" "\n" "#define __int_c_join(a, b) a ## b\n" "#define __int_c(v, suffix) __int_c_join(v, suffix)\n" "#define __uint_c(v, suffix) __int_c_join(v##U, suffix)\n" "\n" "\n" "#ifdef __INT64_TYPE__\n" "# undef __int64_c_suffix\n" "# undef __int32_c_suffix\n" "# undef __int16_c_suffix\n" "# undef __int8_c_suffix\n" "# ifdef __INT64_C_SUFFIX__\n" "# define __int64_c_suffix __INT64_C_SUFFIX__\n" "# define __int32_c_suffix __INT64_C_SUFFIX__\n" "# define __int16_c_suffix __INT64_C_SUFFIX__\n" "# define __int8_c_suffix __INT64_C_SUFFIX__\n" "# endif /* __INT64_C_SUFFIX__ */\n" "#endif /* __INT64_TYPE__ */\n" "\n" "#ifdef __int_least64_t\n" "# ifdef __int64_c_suffix\n" "# define INT64_C(v) __int_c(v, __int64_c_suffix)\n" "# define UINT64_C(v) __uint_c(v, __int64_c_suffix)\n" "# else\n" "# define INT64_C(v) v\n" "# define UINT64_C(v) v ## U\n" "# endif /* __int64_c_suffix */\n" "#endif /* __int_least64_t */\n" "\n" "\n" "#ifdef __INT56_TYPE__\n" "# undef __int32_c_suffix\n" "# undef __int16_c_suffix\n" "# undef __int8_c_suffix\n" "# ifdef __INT56_C_SUFFIX__\n" "# define INT56_C(v) __int_c(v, __INT56_C_SUFFIX__)\n" "# define UINT56_C(v) __uint_c(v, __INT56_C_SUFFIX__)\n" "# define __int32_c_suffix __INT56_C_SUFFIX__\n" "# define __int16_c_suffix __INT56_C_SUFFIX__\n" "# define __int8_c_suffix __INT56_C_SUFFIX__\n" "# else\n" "# define INT56_C(v) v\n" "# define UINT56_C(v) v ## U\n" "# endif /* __INT56_C_SUFFIX__ */\n" "#endif /* __INT56_TYPE__ */\n" "\n" "\n" "#ifdef __INT48_TYPE__\n" "# undef __int32_c_suffix\n" "# undef __int16_c_suffix\n" "# undef __int8_c_suffix\n" "# ifdef __INT48_C_SUFFIX__\n" "# define INT48_C(v) __int_c(v, __INT48_C_SUFFIX__)\n" "# define UINT48_C(v) __uint_c(v, __INT48_C_SUFFIX__)\n" "# define 
__int32_c_suffix __INT48_C_SUFFIX__\n" "# define __int16_c_suffix __INT48_C_SUFFIX__\n" "# define __int8_c_suffix __INT48_C_SUFFIX__\n" "# else\n" "# define INT48_C(v) v\n" "# define UINT48_C(v) v ## U\n" "# endif /* __INT48_C_SUFFIX__ */\n" "#endif /* __INT48_TYPE__ */\n" "\n" "\n" "#ifdef __INT40_TYPE__\n" "# undef __int32_c_suffix\n" "# undef __int16_c_suffix\n" "# undef __int8_c_suffix\n" "# ifdef __INT40_C_SUFFIX__\n" "# define INT40_C(v) __int_c(v, __INT40_C_SUFFIX__)\n" "# define UINT40_C(v) __uint_c(v, __INT40_C_SUFFIX__)\n" "# define __int32_c_suffix __INT40_C_SUFFIX__\n" "# define __int16_c_suffix __INT40_C_SUFFIX__\n" "# define __int8_c_suffix __INT40_C_SUFFIX__\n" "# else\n" "# define INT40_C(v) v\n" "# define UINT40_C(v) v ## U\n" "# endif /* __INT40_C_SUFFIX__ */\n" "#endif /* __INT40_TYPE__ */\n" "\n" "\n" "#ifdef __INT32_TYPE__\n" "# undef __int32_c_suffix\n" "# undef __int16_c_suffix\n" "# undef __int8_c_suffix\n" "# ifdef __INT32_C_SUFFIX__\n" "# define __int32_c_suffix __INT32_C_SUFFIX__\n" "# define __int16_c_suffix __INT32_C_SUFFIX__\n" "# define __int8_c_suffix __INT32_C_SUFFIX__\n" "# endif /* __INT32_C_SUFFIX__ */\n" "#endif /* __INT32_TYPE__ */\n" "\n" "#ifdef __int_least32_t\n" "# ifdef __int32_c_suffix\n" "# define INT32_C(v) __int_c(v, __int32_c_suffix)\n" "# define UINT32_C(v) __uint_c(v, __int32_c_suffix)\n" "# else\n" "# define INT32_C(v) v\n" "# define UINT32_C(v) v ## U\n" "# endif /* __int32_c_suffix */\n" "#endif /* __int_least32_t */\n" "\n" "\n" "#ifdef __INT24_TYPE__\n" "# undef __int16_c_suffix\n" "# undef __int8_c_suffix\n" "# ifdef __INT24_C_SUFFIX__\n" "# define INT24_C(v) __int_c(v, __INT24_C_SUFFIX__)\n" "# define UINT24_C(v) __uint_c(v, __INT24_C_SUFFIX__)\n" "# define __int16_c_suffix __INT24_C_SUFFIX__\n" "# define __int8_c_suffix __INT24_C_SUFFIX__\n" "# else\n" "# define INT24_C(v) v\n" "# define UINT24_C(v) v ## U\n" "# endif /* __INT24_C_SUFFIX__ */\n" "#endif /* __INT24_TYPE__ */\n" "\n" "\n" "#ifdef 
__INT16_TYPE__\n" "# undef __int16_c_suffix\n" "# undef __int8_c_suffix\n" "# ifdef __INT16_C_SUFFIX__\n" "# define __int16_c_suffix __INT16_C_SUFFIX__\n" "# define __int8_c_suffix __INT16_C_SUFFIX__\n" "# endif /* __INT16_C_SUFFIX__ */\n" "#endif /* __INT16_TYPE__ */\n" "\n" "#ifdef __int_least16_t\n" "# ifdef __int16_c_suffix\n" "# define INT16_C(v) __int_c(v, __int16_c_suffix)\n" "# define UINT16_C(v) __uint_c(v, __int16_c_suffix)\n" "# else\n" "# define INT16_C(v) v\n" "# define UINT16_C(v) v ## U\n" "# endif /* __int16_c_suffix */\n" "#endif /* __int_least16_t */\n" "\n" "\n" "#ifdef __INT8_TYPE__\n" "# undef __int8_c_suffix\n" "# ifdef __INT8_C_SUFFIX__\n" "# define __int8_c_suffix __INT8_C_SUFFIX__\n" "# endif /* __INT8_C_SUFFIX__ */\n" "#endif /* __INT8_TYPE__ */\n" "\n" "#ifdef __int_least8_t\n" "# ifdef __int8_c_suffix\n" "# define INT8_C(v) __int_c(v, __int8_c_suffix)\n" "# define UINT8_C(v) __uint_c(v, __int8_c_suffix)\n" "# else\n" "# define INT8_C(v) v\n" "# define UINT8_C(v) v ## U\n" "# endif /* __int8_c_suffix */\n" "#endif /* __int_least8_t */\n" "\n" "\n" "/* C99 7.18.2.1 Limits of exact-width integer types.\n" " * C99 7.18.2.2 Limits of minimum-width integer types.\n" " * C99 7.18.2.3 Limits of fastest minimum-width integer types.\n" " *\n" " * The presence of limit macros are completely optional in C99. This\n" " * implementation defines limits for all of the types (exact- and\n" " * minimum-width) that it defines above, using the limits of the minimum-width\n" " * type for any types that do not have exact-width representations.\n" " *\n" " * As in the type definitions, this section takes an approach of\n" " * successive-shrinking to determine which limits to use for the standard (8,\n" " * 16, 32, 64) bit widths when they don't have exact representations. 
It is\n" " * therefore important that the definitions be kept in order of decending\n" " * widths.\n" " *\n" " * Note that C++ should not check __STDC_LIMIT_MACROS here, contrary to the\n" " * claims of the C standard (see C++ 18.3.1p2, [cstdint.syn]).\n" " */\n" "\n" "#ifdef __INT64_TYPE__\n" "# define INT64_MAX INT64_C( 9223372036854775807)\n" "# define INT64_MIN (-INT64_C( 9223372036854775807)-1)\n" "# define UINT64_MAX UINT64_C(18446744073709551615)\n" "/* FIXME: This is using the placeholder dates Clang produces for these macros\n" " in C2x mode; switch to the correct values once they've been published. */\n" "#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L\n" "# define UINT64_WIDTH 64\n" "# define INT64_WIDTH UINT64_WIDTH\n" "\n" "# define __UINT_LEAST64_WIDTH UINT64_WIDTH\n" "# undef __UINT_LEAST32_WIDTH\n" "# define __UINT_LEAST32_WIDTH UINT64_WIDTH\n" "# undef __UINT_LEAST16_WIDTH\n" "# define __UINT_LEAST16_WIDTH UINT64_WIDTH\n" "# undef __UINT_LEAST8_MAX\n" "# define __UINT_LEAST8_MAX UINT64_MAX\n" "#endif /* __STDC_VERSION__ */\n" "\n" "# define __INT_LEAST64_MIN INT64_MIN\n" "# define __INT_LEAST64_MAX INT64_MAX\n" "# define __UINT_LEAST64_MAX UINT64_MAX\n" "# undef __INT_LEAST32_MIN\n" "# define __INT_LEAST32_MIN INT64_MIN\n" "# undef __INT_LEAST32_MAX\n" "# define __INT_LEAST32_MAX INT64_MAX\n" "# undef __UINT_LEAST32_MAX\n" "# define __UINT_LEAST32_MAX UINT64_MAX\n" "# undef __INT_LEAST16_MIN\n" "# define __INT_LEAST16_MIN INT64_MIN\n" "# undef __INT_LEAST16_MAX\n" "# define __INT_LEAST16_MAX INT64_MAX\n" "# undef __UINT_LEAST16_MAX\n" "# define __UINT_LEAST16_MAX UINT64_MAX\n" "# undef __INT_LEAST8_MIN\n" "# define __INT_LEAST8_MIN INT64_MIN\n" "# undef __INT_LEAST8_MAX\n" "# define __INT_LEAST8_MAX INT64_MAX\n" "# undef __UINT_LEAST8_MAX\n" "# define __UINT_LEAST8_MAX UINT64_MAX\n" "#endif /* __INT64_TYPE__ */\n" "\n" "#ifdef __INT_LEAST64_MIN\n" "# define INT_LEAST64_MIN __INT_LEAST64_MIN\n" "# define INT_LEAST64_MAX 
__INT_LEAST64_MAX\n" "# define UINT_LEAST64_MAX __UINT_LEAST64_MAX\n" "# define INT_FAST64_MIN __INT_LEAST64_MIN\n" "# define INT_FAST64_MAX __INT_LEAST64_MAX\n" "# define UINT_FAST64_MAX __UINT_LEAST64_MAX\n" "\n" "/* FIXME: This is using the placeholder dates Clang produces for these macros\n" " in C2x mode; switch to the correct values once they've been published. */\n" "#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L\n" "# define UINT_LEAST64_WIDTH __UINT_LEAST64_WIDTH\n" "# define INT_LEAST64_WIDTH UINT_LEAST64_WIDTH\n" "# define UINT_FAST64_WIDTH __UINT_LEAST64_WIDTH\n" "# define INT_FAST64_WIDTH UINT_FAST64_WIDTH\n" "#endif /* __STDC_VERSION__ */\n" "#endif /* __INT_LEAST64_MIN */\n" "\n" "\n" "#ifdef __INT56_TYPE__\n" "# define INT56_MAX INT56_C(36028797018963967)\n" "# define INT56_MIN (-INT56_C(36028797018963967)-1)\n" "# define UINT56_MAX UINT56_C(72057594037927935)\n" "# define INT_LEAST56_MIN INT56_MIN\n" "# define INT_LEAST56_MAX INT56_MAX\n" "# define UINT_LEAST56_MAX UINT56_MAX\n" "# define INT_FAST56_MIN INT56_MIN\n" "# define INT_FAST56_MAX INT56_MAX\n" "# define UINT_FAST56_MAX UINT56_MAX\n" "\n" "# undef __INT_LEAST32_MIN\n" "# define __INT_LEAST32_MIN INT56_MIN\n" "# undef __INT_LEAST32_MAX\n" "# define __INT_LEAST32_MAX INT56_MAX\n" "# undef __UINT_LEAST32_MAX\n" "# define __UINT_LEAST32_MAX UINT56_MAX\n" "# undef __INT_LEAST16_MIN\n" "# define __INT_LEAST16_MIN INT56_MIN\n" "# undef __INT_LEAST16_MAX\n" "# define __INT_LEAST16_MAX INT56_MAX\n" "# undef __UINT_LEAST16_MAX\n" "# define __UINT_LEAST16_MAX UINT56_MAX\n" "# undef __INT_LEAST8_MIN\n" "# define __INT_LEAST8_MIN INT56_MIN\n" "# undef __INT_LEAST8_MAX\n" "# define __INT_LEAST8_MAX INT56_MAX\n" "# undef __UINT_LEAST8_MAX\n" "# define __UINT_LEAST8_MAX UINT56_MAX\n" "\n" "/* FIXME: This is using the placeholder dates Clang produces for these macros\n" " in C2x mode; switch to the correct values once they've been published. 
*/\n" "#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L\n" "# define UINT56_WIDTH 56\n" "# define INT56_WIDTH UINT56_WIDTH\n" "# define UINT_LEAST56_WIDTH UINT56_WIDTH\n" "# define INT_LEAST56_WIDTH UINT_LEAST56_WIDTH\n" "# define UINT_FAST56_WIDTH UINT56_WIDTH\n" "# define INT_FAST56_WIDTH UINT_FAST56_WIDTH\n" "# undef __UINT_LEAST32_WIDTH\n" "# define __UINT_LEAST32_WIDTH UINT56_WIDTH\n" "# undef __UINT_LEAST16_WIDTH\n" "# define __UINT_LEAST16_WIDTH UINT56_WIDTH\n" "# undef __UINT_LEAST8_WIDTH\n" "# define __UINT_LEAST8_WIDTH UINT56_WIDTH\n" "#endif /* __STDC_VERSION__ */\n" "#endif /* __INT56_TYPE__ */\n" "\n" "\n" "#ifdef __INT48_TYPE__\n" "# define INT48_MAX INT48_C(140737488355327)\n" "# define INT48_MIN (-INT48_C(140737488355327)-1)\n" "# define UINT48_MAX UINT48_C(281474976710655)\n" "# define INT_LEAST48_MIN INT48_MIN\n" "# define INT_LEAST48_MAX INT48_MAX\n" "# define UINT_LEAST48_MAX UINT48_MAX\n" "# define INT_FAST48_MIN INT48_MIN\n" "# define INT_FAST48_MAX INT48_MAX\n" "# define UINT_FAST48_MAX UINT48_MAX\n" "\n" "# undef __INT_LEAST32_MIN\n" "# define __INT_LEAST32_MIN INT48_MIN\n" "# undef __INT_LEAST32_MAX\n" "# define __INT_LEAST32_MAX INT48_MAX\n" "# undef __UINT_LEAST32_MAX\n" "# define __UINT_LEAST32_MAX UINT48_MAX\n" "# undef __INT_LEAST16_MIN\n" "# define __INT_LEAST16_MIN INT48_MIN\n" "# undef __INT_LEAST16_MAX\n" "# define __INT_LEAST16_MAX INT48_MAX\n" "# undef __UINT_LEAST16_MAX\n" "# define __UINT_LEAST16_MAX UINT48_MAX\n" "# undef __INT_LEAST8_MIN\n" "# define __INT_LEAST8_MIN INT48_MIN\n" "# undef __INT_LEAST8_MAX\n" "# define __INT_LEAST8_MAX INT48_MAX\n" "# undef __UINT_LEAST8_MAX\n" "# define __UINT_LEAST8_MAX UINT48_MAX\n" "\n" "/* FIXME: This is using the placeholder dates Clang produces for these macros\n" " in C2x mode; switch to the correct values once they've been published. 
*/\n" "#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L\n" "#define UINT48_WIDTH 48\n" "#define INT48_WIDTH UINT48_WIDTH\n" "#define UINT_LEAST48_WIDTH UINT48_WIDTH\n" "#define INT_LEAST48_WIDTH UINT_LEAST48_WIDTH\n" "#define UINT_FAST48_WIDTH UINT48_WIDTH\n" "#define INT_FAST48_WIDTH UINT_FAST48_WIDTH\n" "#undef __UINT_LEAST32_WIDTH\n" "#define __UINT_LEAST32_WIDTH UINT48_WIDTH\n" "# undef __UINT_LEAST16_WIDTH\n" "#define __UINT_LEAST16_WIDTH UINT48_WIDTH\n" "# undef __UINT_LEAST8_WIDTH\n" "#define __UINT_LEAST8_WIDTH UINT48_WIDTH\n" "#endif /* __STDC_VERSION__ */\n" "#endif /* __INT48_TYPE__ */\n" "\n" "\n" "#ifdef __INT40_TYPE__\n" "# define INT40_MAX INT40_C(549755813887)\n" "# define INT40_MIN (-INT40_C(549755813887)-1)\n" "# define UINT40_MAX UINT40_C(1099511627775)\n" "# define INT_LEAST40_MIN INT40_MIN\n" "# define INT_LEAST40_MAX INT40_MAX\n" "# define UINT_LEAST40_MAX UINT40_MAX\n" "# define INT_FAST40_MIN INT40_MIN\n" "# define INT_FAST40_MAX INT40_MAX\n" "# define UINT_FAST40_MAX UINT40_MAX\n" "\n" "# undef __INT_LEAST32_MIN\n" "# define __INT_LEAST32_MIN INT40_MIN\n" "# undef __INT_LEAST32_MAX\n" "# define __INT_LEAST32_MAX INT40_MAX\n" "# undef __UINT_LEAST32_MAX\n" "# define __UINT_LEAST32_MAX UINT40_MAX\n" "# undef __INT_LEAST16_MIN\n" "# define __INT_LEAST16_MIN INT40_MIN\n" "# undef __INT_LEAST16_MAX\n" "# define __INT_LEAST16_MAX INT40_MAX\n" "# undef __UINT_LEAST16_MAX\n" "# define __UINT_LEAST16_MAX UINT40_MAX\n" "# undef __INT_LEAST8_MIN\n" "# define __INT_LEAST8_MIN INT40_MIN\n" "# undef __INT_LEAST8_MAX\n" "# define __INT_LEAST8_MAX INT40_MAX\n" "# undef __UINT_LEAST8_MAX\n" "# define __UINT_LEAST8_MAX UINT40_MAX\n" "\n" "/* FIXME: This is using the placeholder dates Clang produces for these macros\n" " in C2x mode; switch to the correct values once they've been published. 
*/\n" "#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L\n" "# define UINT40_WIDTH 40\n" "# define INT40_WIDTH UINT40_WIDTH\n" "# define UINT_LEAST40_WIDTH UINT40_WIDTH\n" "# define INT_LEAST40_WIDTH UINT_LEAST40_WIDTH\n" "# define UINT_FAST40_WIDTH UINT40_WIDTH\n" "# define INT_FAST40_WIDTH UINT_FAST40_WIDTH\n" "# undef __UINT_LEAST32_WIDTH\n" "# define __UINT_LEAST32_WIDTH UINT40_WIDTH\n" "# undef __UINT_LEAST16_WIDTH\n" "# define __UINT_LEAST16_WIDTH UINT40_WIDTH\n" "# undef __UINT_LEAST8_WIDTH\n" "# define __UINT_LEAST8_WIDTH UINT40_WIDTH\n" "#endif /* __STDC_VERSION__ */\n" "#endif /* __INT40_TYPE__ */\n" "\n" "\n" "#ifdef __INT32_TYPE__\n" "# define INT32_MAX INT32_C(2147483647)\n" "# define INT32_MIN (-INT32_C(2147483647)-1)\n" "# define UINT32_MAX UINT32_C(4294967295)\n" "\n" "# undef __INT_LEAST32_MIN\n" "# define __INT_LEAST32_MIN INT32_MIN\n" "# undef __INT_LEAST32_MAX\n" "# define __INT_LEAST32_MAX INT32_MAX\n" "# undef __UINT_LEAST32_MAX\n" "# define __UINT_LEAST32_MAX UINT32_MAX\n" "# undef __INT_LEAST16_MIN\n" "# define __INT_LEAST16_MIN INT32_MIN\n" "# undef __INT_LEAST16_MAX\n" "# define __INT_LEAST16_MAX INT32_MAX\n" "# undef __UINT_LEAST16_MAX\n" "# define __UINT_LEAST16_MAX UINT32_MAX\n" "# undef __INT_LEAST8_MIN\n" "# define __INT_LEAST8_MIN INT32_MIN\n" "# undef __INT_LEAST8_MAX\n" "# define __INT_LEAST8_MAX INT32_MAX\n" "# undef __UINT_LEAST8_MAX\n" "# define __UINT_LEAST8_MAX UINT32_MAX\n" "\n" "/* FIXME: This is using the placeholder dates Clang produces for these macros\n" " in C2x mode; switch to the correct values once they've been published. 
*/\n" "#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L\n" "# define UINT32_WIDTH 32\n" "# define INT32_WIDTH UINT32_WIDTH\n" "# undef __UINT_LEAST32_WIDTH\n" "# define __UINT_LEAST32_WIDTH UINT32_WIDTH\n" "# undef __UINT_LEAST16_WIDTH\n" "# define __UINT_LEAST16_WIDTH UINT32_WIDTH\n" "# undef __UINT_LEAST8_WIDTH\n" "# define __UINT_LEAST8_WIDTH UINT32_WIDTH\n" "#endif /* __STDC_VERSION__ */\n" "#endif /* __INT32_TYPE__ */\n" "\n" "#ifdef __INT_LEAST32_MIN\n" "# define INT_LEAST32_MIN __INT_LEAST32_MIN\n" "# define INT_LEAST32_MAX __INT_LEAST32_MAX\n" "# define UINT_LEAST32_MAX __UINT_LEAST32_MAX\n" "# define INT_FAST32_MIN __INT_LEAST32_MIN\n" "# define INT_FAST32_MAX __INT_LEAST32_MAX\n" "# define UINT_FAST32_MAX __UINT_LEAST32_MAX\n" "\n" "/* FIXME: This is using the placeholder dates Clang produces for these macros\n" " in C2x mode; switch to the correct values once they've been published. */\n" "#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L\n" "# define UINT_LEAST32_WIDTH __UINT_LEAST32_WIDTH\n" "# define INT_LEAST32_WIDTH UINT_LEAST32_WIDTH\n" "# define UINT_FAST32_WIDTH __UINT_LEAST32_WIDTH\n" "# define INT_FAST32_WIDTH UINT_FAST32_WIDTH\n" "#endif /* __STDC_VERSION__ */\n" "#endif /* __INT_LEAST32_MIN */\n" "\n" "\n" "#ifdef __INT24_TYPE__\n" "# define INT24_MAX INT24_C(8388607)\n" "# define INT24_MIN (-INT24_C(8388607)-1)\n" "# define UINT24_MAX UINT24_C(16777215)\n" "# define INT_LEAST24_MIN INT24_MIN\n" "# define INT_LEAST24_MAX INT24_MAX\n" "# define UINT_LEAST24_MAX UINT24_MAX\n" "# define INT_FAST24_MIN INT24_MIN\n" "# define INT_FAST24_MAX INT24_MAX\n" "# define UINT_FAST24_MAX UINT24_MAX\n" "\n" "# undef __INT_LEAST16_MIN\n" "# define __INT_LEAST16_MIN INT24_MIN\n" "# undef __INT_LEAST16_MAX\n" "# define __INT_LEAST16_MAX INT24_MAX\n" "# undef __UINT_LEAST16_MAX\n" "# define __UINT_LEAST16_MAX UINT24_MAX\n" "# undef __INT_LEAST8_MIN\n" "# define __INT_LEAST8_MIN INT24_MIN\n" "# undef __INT_LEAST8_MAX\n" "# define 
__INT_LEAST8_MAX INT24_MAX\n" "# undef __UINT_LEAST8_MAX\n" "# define __UINT_LEAST8_MAX UINT24_MAX\n" "\n" "/* FIXME: This is using the placeholder dates Clang produces for these macros\n" " in C2x mode; switch to the correct values once they've been published. */\n" "#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L\n" "# define UINT24_WIDTH 24\n" "# define INT24_WIDTH UINT24_WIDTH\n" "# define UINT_LEAST24_WIDTH UINT24_WIDTH\n" "# define INT_LEAST24_WIDTH UINT_LEAST24_WIDTH\n" "# define UINT_FAST24_WIDTH UINT24_WIDTH\n" "# define INT_FAST24_WIDTH UINT_FAST24_WIDTH\n" "# undef __UINT_LEAST16_WIDTH\n" "# define __UINT_LEAST16_WIDTH UINT24_WIDTH\n" "# undef __UINT_LEAST8_WIDTH\n" "# define __UINT_LEAST8_WIDTH UINT24_WIDTH\n" "#endif /* __STDC_VERSION__ */\n" "#endif /* __INT24_TYPE__ */\n" "\n" "\n" "#ifdef __INT16_TYPE__\n" "#define INT16_MAX INT16_C(32767)\n" "#define INT16_MIN (-INT16_C(32767)-1)\n" "#define UINT16_MAX UINT16_C(65535)\n" "\n" "# undef __INT_LEAST16_MIN\n" "# define __INT_LEAST16_MIN INT16_MIN\n" "# undef __INT_LEAST16_MAX\n" "# define __INT_LEAST16_MAX INT16_MAX\n" "# undef __UINT_LEAST16_MAX\n" "# define __UINT_LEAST16_MAX UINT16_MAX\n" "# undef __INT_LEAST8_MIN\n" "# define __INT_LEAST8_MIN INT16_MIN\n" "# undef __INT_LEAST8_MAX\n" "# define __INT_LEAST8_MAX INT16_MAX\n" "# undef __UINT_LEAST8_MAX\n" "# define __UINT_LEAST8_MAX UINT16_MAX\n" "\n" "/* FIXME: This is using the placeholder dates Clang produces for these macros\n" " in C2x mode; switch to the correct values once they've been published. 
*/\n" "#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L\n" "# define UINT16_WIDTH 16\n" "# define INT16_WIDTH UINT16_WIDTH\n" "# undef __UINT_LEAST16_WIDTH\n" "# define __UINT_LEAST16_WIDTH UINT16_WIDTH\n" "# undef __UINT_LEAST8_WIDTH\n" "# define __UINT_LEAST8_WIDTH UINT16_WIDTH\n" "#endif /* __STDC_VERSION__ */\n" "#endif /* __INT16_TYPE__ */\n" "\n" "#ifdef __INT_LEAST16_MIN\n" "# define INT_LEAST16_MIN __INT_LEAST16_MIN\n" "# define INT_LEAST16_MAX __INT_LEAST16_MAX\n" "# define UINT_LEAST16_MAX __UINT_LEAST16_MAX\n" "# define INT_FAST16_MIN __INT_LEAST16_MIN\n" "# define INT_FAST16_MAX __INT_LEAST16_MAX\n" "# define UINT_FAST16_MAX __UINT_LEAST16_MAX\n" "\n" "/* FIXME: This is using the placeholder dates Clang produces for these macros\n" " in C2x mode; switch to the correct values once they've been published. */\n" "#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L\n" "# define UINT_LEAST16_WIDTH __UINT_LEAST16_WIDTH\n" "# define INT_LEAST16_WIDTH UINT_LEAST16_WIDTH\n" "# define UINT_FAST16_WIDTH __UINT_LEAST16_WIDTH\n" "# define INT_FAST16_WIDTH UINT_FAST16_WIDTH\n" "#endif /* __STDC_VERSION__ */\n" "#endif /* __INT_LEAST16_MIN */\n" "\n" "\n" "#ifdef __INT8_TYPE__\n" "# define INT8_MAX INT8_C(127)\n" "# define INT8_MIN (-INT8_C(127)-1)\n" "# define UINT8_MAX UINT8_C(255)\n" "\n" "# undef __INT_LEAST8_MIN\n" "# define __INT_LEAST8_MIN INT8_MIN\n" "# undef __INT_LEAST8_MAX\n" "# define __INT_LEAST8_MAX INT8_MAX\n" "# undef __UINT_LEAST8_MAX\n" "# define __UINT_LEAST8_MAX UINT8_MAX\n" "\n" "/* FIXME: This is using the placeholder dates Clang produces for these macros\n" " in C2x mode; switch to the correct values once they've been published. 
*/\n" "#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L\n" "# define UINT8_WIDTH 8\n" "# define INT8_WIDTH UINT8_WIDTH\n" "# undef __UINT_LEAST8_WIDTH\n" "# define __UINT_LEAST8_WIDTH UINT8_WIDTH\n" "#endif /* __STDC_VERSION__ */\n" "#endif /* __INT8_TYPE__ */\n" "\n" "#ifdef __INT_LEAST8_MIN\n" "# define INT_LEAST8_MIN __INT_LEAST8_MIN\n" "# define INT_LEAST8_MAX __INT_LEAST8_MAX\n" "# define UINT_LEAST8_MAX __UINT_LEAST8_MAX\n" "# define INT_FAST8_MIN __INT_LEAST8_MIN\n" "# define INT_FAST8_MAX __INT_LEAST8_MAX\n" "# define UINT_FAST8_MAX __UINT_LEAST8_MAX\n" "\n" "/* FIXME: This is using the placeholder dates Clang produces for these macros\n" " in C2x mode; switch to the correct values once they've been published. */\n" "#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L\n" "# define UINT_LEAST8_WIDTH __UINT_LEAST8_WIDTH\n" "# define INT_LEAST8_WIDTH UINT_LEAST8_WIDTH\n" "# define UINT_FAST8_WIDTH __UINT_LEAST8_WIDTH\n" "# define INT_FAST8_WIDTH UINT_FAST8_WIDTH\n" "#endif /* __STDC_VERSION__ */\n" "#endif /* __INT_LEAST8_MIN */\n" "\n" "/* Some utility macros */\n" "#define __INTN_MIN(n) __stdint_join3( INT, n, _MIN)\n" "#define __INTN_MAX(n) __stdint_join3( INT, n, _MAX)\n" "#define __UINTN_MAX(n) __stdint_join3(UINT, n, _MAX)\n" "#define __INTN_C(n, v) __stdint_join3( INT, n, _C(v))\n" "#define __UINTN_C(n, v) __stdint_join3(UINT, n, _C(v))\n" "\n" "/* C99 7.18.2.4 Limits of integer types capable of holding object pointers. */\n" "/* C99 7.18.3 Limits of other integer types. */\n" "\n" "#define INTPTR_MIN (-__INTPTR_MAX__-1)\n" "#define INTPTR_MAX __INTPTR_MAX__\n" "#define UINTPTR_MAX __UINTPTR_MAX__\n" "#define PTRDIFF_MIN (-__PTRDIFF_MAX__-1)\n" "#define PTRDIFF_MAX __PTRDIFF_MAX__\n" "#define SIZE_MAX __SIZE_MAX__\n" "\n" "/* C2x 7.20.2.4 Width of integer types capable of holding object pointers. 
*/\n" "/* FIXME: This is using the placeholder dates Clang produces for these macros\n" " in C2x mode; switch to the correct values once they've been published. */\n" "#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L\n" "/* NB: The C standard requires that these be the same value, but the compiler\n" " exposes separate internal width macros. */\n" "#define INTPTR_WIDTH __INTPTR_WIDTH__\n" "#define UINTPTR_WIDTH __UINTPTR_WIDTH__\n" "#endif\n" "\n" "/* ISO9899:2011 7.20 (C11 Annex K): Define RSIZE_MAX if __STDC_WANT_LIB_EXT1__\n" " * is enabled. */\n" "#if defined(__STDC_WANT_LIB_EXT1__) && __STDC_WANT_LIB_EXT1__ >= 1\n" "#define RSIZE_MAX (SIZE_MAX >> 1)\n" "#endif\n" "\n" "/* C99 7.18.2.5 Limits of greatest-width integer types. */\n" "#define INTMAX_MIN (-__INTMAX_MAX__-1)\n" "#define INTMAX_MAX __INTMAX_MAX__\n" "#define UINTMAX_MAX __UINTMAX_MAX__\n" "\n" "/* C2x 7.20.2.5 Width of greatest-width integer types. */\n" "/* FIXME: This is using the placeholder dates Clang produces for these macros\n" " in C2x mode; switch to the correct values once they've been published. */\n" "#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L\n" "/* NB: The C standard requires that these be the same value, but the compiler\n" " exposes separate internal width macros. */\n" "#define INTMAX_WIDTH __INTMAX_WIDTH__\n" "#define UINTMAX_WIDTH __UINTMAX_WIDTH__\n" "#endif\n" "\n" "/* C99 7.18.3 Limits of other integer types. 
*/\n" "#define SIG_ATOMIC_MIN __INTN_MIN(__SIG_ATOMIC_WIDTH__)\n" "#define SIG_ATOMIC_MAX __INTN_MAX(__SIG_ATOMIC_WIDTH__)\n" "#ifdef __WINT_UNSIGNED__\n" "# define WINT_MIN __UINTN_C(__WINT_WIDTH__, 0)\n" "# define WINT_MAX __UINTN_MAX(__WINT_WIDTH__)\n" "#else\n" "# define WINT_MIN __INTN_MIN(__WINT_WIDTH__)\n" "# define WINT_MAX __INTN_MAX(__WINT_WIDTH__)\n" "#endif\n" "\n" "#ifndef WCHAR_MAX\n" "# define WCHAR_MAX __WCHAR_MAX__\n" "#endif\n" "#ifndef WCHAR_MIN\n" "# if __WCHAR_MAX__ == __INTN_MAX(__WCHAR_WIDTH__)\n" "# define WCHAR_MIN __INTN_MIN(__WCHAR_WIDTH__)\n" "# else\n" "# define WCHAR_MIN __UINTN_C(__WCHAR_WIDTH__, 0)\n" "# endif\n" "#endif\n" "\n" "/* 7.18.4.2 Macros for greatest-width integer constants. */\n" "#define INTMAX_C(v) __int_c(v, __INTMAX_C_SUFFIX__)\n" "#define UINTMAX_C(v) __int_c(v, __UINTMAX_C_SUFFIX__)\n" "\n" "/* C2x 7.20.3.x Width of other integer types. */\n" "/* FIXME: This is using the placeholder dates Clang produces for these macros\n" " in C2x mode; switch to the correct values once they've been published. 
*/\n" "#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L\n" "#define PTRDIFF_WIDTH __PTRDIFF_WIDTH__\n" "#define SIG_ATOMIC_WIDTH __SIG_ATOMIC_WIDTH__\n" "#define SIZE_WIDTH __SIZE_WIDTH__\n" "#define WCHAR_WIDTH __WCHAR_WIDTH__\n" "#define WINT_WIDTH __WINT_WIDTH__\n" "#endif\n" "\n" "#endif /* __STDC_HOSTED__ */\n" "#endif /* __CLANG_STDINT_H2 */\n" "" } , { "/builtins/stdnoreturn.h" , "/*===---- stdnoreturn.h - Standard header for noreturn macro ---------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __STDNORETURN_H\n" "#define __STDNORETURN_H\n" "\n" "#define noreturn _Noreturn\n" "#define __noreturn_is_defined 1\n" "\n" "#if (defined(__STDC_VERSION__) && __STDC_VERSION__ > 201710L) && \\\n" " !defined(_CLANG_DISABLE_CRT_DEPRECATION_WARNINGS)\n" "/* The noreturn macro is deprecated in C2x. We do not mark it as such because\n" " including the header file in C2x is also deprecated and we do not want to\n" " issue a confusing diagnostic for code which includes \n" " followed by code that writes [[noreturn]]. The issue with such code is not\n" " with the attribute, or the use of 'noreturn', but the inclusion of the\n" " header. 
*/\n" "/* FIXME: We should be issuing a deprecation warning here, but cannot yet due\n" " * to system headers which include this header file unconditionally.\n" " */\n" "#endif\n" "\n" "#endif /* __STDNORETURN_H */\n" "" } , { "/builtins/tbmintrin.h" , "/*===---- tbmintrin.h - TBM intrinsics -------------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __X86INTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __TBMINTRIN_H\n" "#define __TBMINTRIN_H\n" "\n" "/* Define the default attributes for the functions in this file. */\n" "#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__(\"tbm\")))\n" "\n" "#define __bextri_u32(a, b) \\\n" " ((unsigned int)__builtin_ia32_bextri_u32((unsigned int)(a), \\\n" " (unsigned int)(b)))\n" "\n" "static __inline__ unsigned int __DEFAULT_FN_ATTRS\n" "__blcfill_u32(unsigned int __a)\n" "{\n" " return __a & (__a + 1);\n" "}\n" "\n" "static __inline__ unsigned int __DEFAULT_FN_ATTRS\n" "__blci_u32(unsigned int __a)\n" "{\n" " return __a | ~(__a + 1);\n" "}\n" "\n" "static __inline__ unsigned int __DEFAULT_FN_ATTRS\n" "__blcic_u32(unsigned int __a)\n" "{\n" " return ~__a & (__a + 1);\n" "}\n" "\n" "static __inline__ unsigned int __DEFAULT_FN_ATTRS\n" "__blcmsk_u32(unsigned int __a)\n" "{\n" " return __a ^ (__a + 1);\n" "}\n" "\n" "static __inline__ unsigned int __DEFAULT_FN_ATTRS\n" "__blcs_u32(unsigned int __a)\n" "{\n" " return __a | (__a + 1);\n" "}\n" "\n" "static __inline__ unsigned int __DEFAULT_FN_ATTRS\n" "__blsfill_u32(unsigned int __a)\n" "{\n" " return __a | (__a - 1);\n" "}\n" "\n" "static __inline__ unsigned int __DEFAULT_FN_ATTRS\n" 
"__blsic_u32(unsigned int __a)\n" "{\n" " return ~__a | (__a - 1);\n" "}\n" "\n" "static __inline__ unsigned int __DEFAULT_FN_ATTRS\n" "__t1mskc_u32(unsigned int __a)\n" "{\n" " return ~__a | (__a + 1);\n" "}\n" "\n" "static __inline__ unsigned int __DEFAULT_FN_ATTRS\n" "__tzmsk_u32(unsigned int __a)\n" "{\n" " return ~__a & (__a - 1);\n" "}\n" "\n" "#ifdef __x86_64__\n" "#define __bextri_u64(a, b) \\\n" " ((unsigned long long)__builtin_ia32_bextri_u64((unsigned long long)(a), \\\n" " (unsigned long long)(b)))\n" "\n" "static __inline__ unsigned long long __DEFAULT_FN_ATTRS\n" "__blcfill_u64(unsigned long long __a)\n" "{\n" " return __a & (__a + 1);\n" "}\n" "\n" "static __inline__ unsigned long long __DEFAULT_FN_ATTRS\n" "__blci_u64(unsigned long long __a)\n" "{\n" " return __a | ~(__a + 1);\n" "}\n" "\n" "static __inline__ unsigned long long __DEFAULT_FN_ATTRS\n" "__blcic_u64(unsigned long long __a)\n" "{\n" " return ~__a & (__a + 1);\n" "}\n" "\n" "static __inline__ unsigned long long __DEFAULT_FN_ATTRS\n" "__blcmsk_u64(unsigned long long __a)\n" "{\n" " return __a ^ (__a + 1);\n" "}\n" "\n" "static __inline__ unsigned long long __DEFAULT_FN_ATTRS\n" "__blcs_u64(unsigned long long __a)\n" "{\n" " return __a | (__a + 1);\n" "}\n" "\n" "static __inline__ unsigned long long __DEFAULT_FN_ATTRS\n" "__blsfill_u64(unsigned long long __a)\n" "{\n" " return __a | (__a - 1);\n" "}\n" "\n" "static __inline__ unsigned long long __DEFAULT_FN_ATTRS\n" "__blsic_u64(unsigned long long __a)\n" "{\n" " return ~__a | (__a - 1);\n" "}\n" "\n" "static __inline__ unsigned long long __DEFAULT_FN_ATTRS\n" "__t1mskc_u64(unsigned long long __a)\n" "{\n" " return ~__a | (__a + 1);\n" "}\n" "\n" "static __inline__ unsigned long long __DEFAULT_FN_ATTRS\n" "__tzmsk_u64(unsigned long long __a)\n" "{\n" " return ~__a & (__a - 1);\n" "}\n" "#endif\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "\n" "#endif /* __TBMINTRIN_H */\n" "" } , { "/builtins/tgmath.h" , "/*===---- tgmath.h - Standard header for 
type generic math ----------------===*\\\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" "\\*===----------------------------------------------------------------------===*/\n" "\n" "#ifndef __CLANG_TGMATH_H\n" "#define __CLANG_TGMATH_H\n" "\n" "/* C99 7.22 Type-generic math . */\n" "#include \n" "\n" "/*\n" " * Allow additional definitions and implementation-defined values on Apple\n" " * platforms. This is done after #include to avoid depcycle conflicts\n" " * between libcxx and darwin in C++ modules builds.\n" " */\n" "#if defined(__APPLE__) && __STDC_HOSTED__ && __has_include_next()\n" "# include_next \n" "#else\n" "\n" "/* C++ handles type genericity with overloading in math.h. */\n" "#ifndef __cplusplus\n" "#include \n" "\n" "#define _TG_ATTRSp __attribute__((__overloadable__))\n" "#define _TG_ATTRS __attribute__((__overloadable__, __always_inline__))\n" "\n" "// promotion\n" "\n" "typedef void _Argument_type_is_not_arithmetic;\n" "static _Argument_type_is_not_arithmetic __tg_promote(...)\n" " __attribute__((__unavailable__,__overloadable__));\n" "static double _TG_ATTRSp __tg_promote(int);\n" "static double _TG_ATTRSp __tg_promote(unsigned int);\n" "static double _TG_ATTRSp __tg_promote(long);\n" "static double _TG_ATTRSp __tg_promote(unsigned long);\n" "static double _TG_ATTRSp __tg_promote(long long);\n" "static double _TG_ATTRSp __tg_promote(unsigned long long);\n" "static float _TG_ATTRSp __tg_promote(float);\n" "static double _TG_ATTRSp __tg_promote(double);\n" "static long double _TG_ATTRSp __tg_promote(long double);\n" "static float _Complex _TG_ATTRSp __tg_promote(float _Complex);\n" "static double _Complex _TG_ATTRSp __tg_promote(double _Complex);\n" "static long double _Complex _TG_ATTRSp __tg_promote(long double _Complex);\n" "\n" "#define __tg_promote1(__x) 
(__typeof__(__tg_promote(__x)))\n" "#define __tg_promote2(__x, __y) (__typeof__(__tg_promote(__x) + \\\n" " __tg_promote(__y)))\n" "#define __tg_promote3(__x, __y, __z) (__typeof__(__tg_promote(__x) + \\\n" " __tg_promote(__y) + \\\n" " __tg_promote(__z)))\n" "\n" "// acos\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_acos(float __x) {return acosf(__x);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_acos(double __x) {return acos(__x);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_acos(long double __x) {return acosl(__x);}\n" "\n" "static float _Complex\n" " _TG_ATTRS\n" " __tg_acos(float _Complex __x) {return cacosf(__x);}\n" "\n" "static double _Complex\n" " _TG_ATTRS\n" " __tg_acos(double _Complex __x) {return cacos(__x);}\n" "\n" "static long double _Complex\n" " _TG_ATTRS\n" " __tg_acos(long double _Complex __x) {return cacosl(__x);}\n" "\n" "#undef acos\n" "#define acos(__x) __tg_acos(__tg_promote1((__x))(__x))\n" "\n" "// asin\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_asin(float __x) {return asinf(__x);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_asin(double __x) {return asin(__x);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_asin(long double __x) {return asinl(__x);}\n" "\n" "static float _Complex\n" " _TG_ATTRS\n" " __tg_asin(float _Complex __x) {return casinf(__x);}\n" "\n" "static double _Complex\n" " _TG_ATTRS\n" " __tg_asin(double _Complex __x) {return casin(__x);}\n" "\n" "static long double _Complex\n" " _TG_ATTRS\n" " __tg_asin(long double _Complex __x) {return casinl(__x);}\n" "\n" "#undef asin\n" "#define asin(__x) __tg_asin(__tg_promote1((__x))(__x))\n" "\n" "// atan\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_atan(float __x) {return atanf(__x);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_atan(double __x) {return atan(__x);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_atan(long double __x) {return atanl(__x);}\n" "\n" "static float _Complex\n" " _TG_ATTRS\n" " __tg_atan(float _Complex __x) {return 
catanf(__x);}\n" "\n" "static double _Complex\n" " _TG_ATTRS\n" " __tg_atan(double _Complex __x) {return catan(__x);}\n" "\n" "static long double _Complex\n" " _TG_ATTRS\n" " __tg_atan(long double _Complex __x) {return catanl(__x);}\n" "\n" "#undef atan\n" "#define atan(__x) __tg_atan(__tg_promote1((__x))(__x))\n" "\n" "// acosh\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_acosh(float __x) {return acoshf(__x);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_acosh(double __x) {return acosh(__x);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_acosh(long double __x) {return acoshl(__x);}\n" "\n" "static float _Complex\n" " _TG_ATTRS\n" " __tg_acosh(float _Complex __x) {return cacoshf(__x);}\n" "\n" "static double _Complex\n" " _TG_ATTRS\n" " __tg_acosh(double _Complex __x) {return cacosh(__x);}\n" "\n" "static long double _Complex\n" " _TG_ATTRS\n" " __tg_acosh(long double _Complex __x) {return cacoshl(__x);}\n" "\n" "#undef acosh\n" "#define acosh(__x) __tg_acosh(__tg_promote1((__x))(__x))\n" "\n" "// asinh\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_asinh(float __x) {return asinhf(__x);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_asinh(double __x) {return asinh(__x);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_asinh(long double __x) {return asinhl(__x);}\n" "\n" "static float _Complex\n" " _TG_ATTRS\n" " __tg_asinh(float _Complex __x) {return casinhf(__x);}\n" "\n" "static double _Complex\n" " _TG_ATTRS\n" " __tg_asinh(double _Complex __x) {return casinh(__x);}\n" "\n" "static long double _Complex\n" " _TG_ATTRS\n" " __tg_asinh(long double _Complex __x) {return casinhl(__x);}\n" "\n" "#undef asinh\n" "#define asinh(__x) __tg_asinh(__tg_promote1((__x))(__x))\n" "\n" "// atanh\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_atanh(float __x) {return atanhf(__x);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_atanh(double __x) {return atanh(__x);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_atanh(long double __x) {return 
atanhl(__x);}\n" "\n" "static float _Complex\n" " _TG_ATTRS\n" " __tg_atanh(float _Complex __x) {return catanhf(__x);}\n" "\n" "static double _Complex\n" " _TG_ATTRS\n" " __tg_atanh(double _Complex __x) {return catanh(__x);}\n" "\n" "static long double _Complex\n" " _TG_ATTRS\n" " __tg_atanh(long double _Complex __x) {return catanhl(__x);}\n" "\n" "#undef atanh\n" "#define atanh(__x) __tg_atanh(__tg_promote1((__x))(__x))\n" "\n" "// cos\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_cos(float __x) {return cosf(__x);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_cos(double __x) {return cos(__x);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_cos(long double __x) {return cosl(__x);}\n" "\n" "static float _Complex\n" " _TG_ATTRS\n" " __tg_cos(float _Complex __x) {return ccosf(__x);}\n" "\n" "static double _Complex\n" " _TG_ATTRS\n" " __tg_cos(double _Complex __x) {return ccos(__x);}\n" "\n" "static long double _Complex\n" " _TG_ATTRS\n" " __tg_cos(long double _Complex __x) {return ccosl(__x);}\n" "\n" "#undef cos\n" "#define cos(__x) __tg_cos(__tg_promote1((__x))(__x))\n" "\n" "// sin\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_sin(float __x) {return sinf(__x);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_sin(double __x) {return sin(__x);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_sin(long double __x) {return sinl(__x);}\n" "\n" "static float _Complex\n" " _TG_ATTRS\n" " __tg_sin(float _Complex __x) {return csinf(__x);}\n" "\n" "static double _Complex\n" " _TG_ATTRS\n" " __tg_sin(double _Complex __x) {return csin(__x);}\n" "\n" "static long double _Complex\n" " _TG_ATTRS\n" " __tg_sin(long double _Complex __x) {return csinl(__x);}\n" "\n" "#undef sin\n" "#define sin(__x) __tg_sin(__tg_promote1((__x))(__x))\n" "\n" "// tan\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_tan(float __x) {return tanf(__x);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_tan(double __x) {return tan(__x);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " 
__tg_tan(long double __x) {return tanl(__x);}\n" "\n" "static float _Complex\n" " _TG_ATTRS\n" " __tg_tan(float _Complex __x) {return ctanf(__x);}\n" "\n" "static double _Complex\n" " _TG_ATTRS\n" " __tg_tan(double _Complex __x) {return ctan(__x);}\n" "\n" "static long double _Complex\n" " _TG_ATTRS\n" " __tg_tan(long double _Complex __x) {return ctanl(__x);}\n" "\n" "#undef tan\n" "#define tan(__x) __tg_tan(__tg_promote1((__x))(__x))\n" "\n" "// cosh\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_cosh(float __x) {return coshf(__x);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_cosh(double __x) {return cosh(__x);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_cosh(long double __x) {return coshl(__x);}\n" "\n" "static float _Complex\n" " _TG_ATTRS\n" " __tg_cosh(float _Complex __x) {return ccoshf(__x);}\n" "\n" "static double _Complex\n" " _TG_ATTRS\n" " __tg_cosh(double _Complex __x) {return ccosh(__x);}\n" "\n" "static long double _Complex\n" " _TG_ATTRS\n" " __tg_cosh(long double _Complex __x) {return ccoshl(__x);}\n" "\n" "#undef cosh\n" "#define cosh(__x) __tg_cosh(__tg_promote1((__x))(__x))\n" "\n" "// sinh\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_sinh(float __x) {return sinhf(__x);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_sinh(double __x) {return sinh(__x);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_sinh(long double __x) {return sinhl(__x);}\n" "\n" "static float _Complex\n" " _TG_ATTRS\n" " __tg_sinh(float _Complex __x) {return csinhf(__x);}\n" "\n" "static double _Complex\n" " _TG_ATTRS\n" " __tg_sinh(double _Complex __x) {return csinh(__x);}\n" "\n" "static long double _Complex\n" " _TG_ATTRS\n" " __tg_sinh(long double _Complex __x) {return csinhl(__x);}\n" "\n" "#undef sinh\n" "#define sinh(__x) __tg_sinh(__tg_promote1((__x))(__x))\n" "\n" "// tanh\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_tanh(float __x) {return tanhf(__x);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_tanh(double __x) {return tanh(__x);}\n" 
"\n" "static long double\n" " _TG_ATTRS\n" " __tg_tanh(long double __x) {return tanhl(__x);}\n" "\n" "static float _Complex\n" " _TG_ATTRS\n" " __tg_tanh(float _Complex __x) {return ctanhf(__x);}\n" "\n" "static double _Complex\n" " _TG_ATTRS\n" " __tg_tanh(double _Complex __x) {return ctanh(__x);}\n" "\n" "static long double _Complex\n" " _TG_ATTRS\n" " __tg_tanh(long double _Complex __x) {return ctanhl(__x);}\n" "\n" "#undef tanh\n" "#define tanh(__x) __tg_tanh(__tg_promote1((__x))(__x))\n" "\n" "// exp\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_exp(float __x) {return expf(__x);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_exp(double __x) {return exp(__x);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_exp(long double __x) {return expl(__x);}\n" "\n" "static float _Complex\n" " _TG_ATTRS\n" " __tg_exp(float _Complex __x) {return cexpf(__x);}\n" "\n" "static double _Complex\n" " _TG_ATTRS\n" " __tg_exp(double _Complex __x) {return cexp(__x);}\n" "\n" "static long double _Complex\n" " _TG_ATTRS\n" " __tg_exp(long double _Complex __x) {return cexpl(__x);}\n" "\n" "#undef exp\n" "#define exp(__x) __tg_exp(__tg_promote1((__x))(__x))\n" "\n" "// log\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_log(float __x) {return logf(__x);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_log(double __x) {return log(__x);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_log(long double __x) {return logl(__x);}\n" "\n" "static float _Complex\n" " _TG_ATTRS\n" " __tg_log(float _Complex __x) {return clogf(__x);}\n" "\n" "static double _Complex\n" " _TG_ATTRS\n" " __tg_log(double _Complex __x) {return clog(__x);}\n" "\n" "static long double _Complex\n" " _TG_ATTRS\n" " __tg_log(long double _Complex __x) {return clogl(__x);}\n" "\n" "#undef log\n" "#define log(__x) __tg_log(__tg_promote1((__x))(__x))\n" "\n" "// pow\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_pow(float __x, float __y) {return powf(__x, __y);}\n" "\n" "static double\n" " _TG_ATTRS\n" " 
__tg_pow(double __x, double __y) {return pow(__x, __y);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_pow(long double __x, long double __y) {return powl(__x, __y);}\n" "\n" "static float _Complex\n" " _TG_ATTRS\n" " __tg_pow(float _Complex __x, float _Complex __y) {return cpowf(__x, __y);}\n" "\n" "static double _Complex\n" " _TG_ATTRS\n" " __tg_pow(double _Complex __x, double _Complex __y) {return cpow(__x, __y);}\n" "\n" "static long double _Complex\n" " _TG_ATTRS\n" " __tg_pow(long double _Complex __x, long double _Complex __y)\n" " {return cpowl(__x, __y);}\n" "\n" "#undef pow\n" "#define pow(__x, __y) __tg_pow(__tg_promote2((__x), (__y))(__x), \\\n" " __tg_promote2((__x), (__y))(__y))\n" "\n" "// sqrt\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_sqrt(float __x) {return sqrtf(__x);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_sqrt(double __x) {return sqrt(__x);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_sqrt(long double __x) {return sqrtl(__x);}\n" "\n" "static float _Complex\n" " _TG_ATTRS\n" " __tg_sqrt(float _Complex __x) {return csqrtf(__x);}\n" "\n" "static double _Complex\n" " _TG_ATTRS\n" " __tg_sqrt(double _Complex __x) {return csqrt(__x);}\n" "\n" "static long double _Complex\n" " _TG_ATTRS\n" " __tg_sqrt(long double _Complex __x) {return csqrtl(__x);}\n" "\n" "#undef sqrt\n" "#define sqrt(__x) __tg_sqrt(__tg_promote1((__x))(__x))\n" "\n" "// fabs\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_fabs(float __x) {return fabsf(__x);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_fabs(double __x) {return fabs(__x);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_fabs(long double __x) {return fabsl(__x);}\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_fabs(float _Complex __x) {return cabsf(__x);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_fabs(double _Complex __x) {return cabs(__x);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_fabs(long double _Complex __x) {return cabsl(__x);}\n" "\n" "#undef fabs\n" "#define 
fabs(__x) __tg_fabs(__tg_promote1((__x))(__x))\n" "\n" "// atan2\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_atan2(float __x, float __y) {return atan2f(__x, __y);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_atan2(double __x, double __y) {return atan2(__x, __y);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_atan2(long double __x, long double __y) {return atan2l(__x, __y);}\n" "\n" "#undef atan2\n" "#define atan2(__x, __y) __tg_atan2(__tg_promote2((__x), (__y))(__x), \\\n" " __tg_promote2((__x), (__y))(__y))\n" "\n" "// cbrt\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_cbrt(float __x) {return cbrtf(__x);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_cbrt(double __x) {return cbrt(__x);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_cbrt(long double __x) {return cbrtl(__x);}\n" "\n" "#undef cbrt\n" "#define cbrt(__x) __tg_cbrt(__tg_promote1((__x))(__x))\n" "\n" "// ceil\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_ceil(float __x) {return ceilf(__x);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_ceil(double __x) {return ceil(__x);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_ceil(long double __x) {return ceill(__x);}\n" "\n" "#undef ceil\n" "#define ceil(__x) __tg_ceil(__tg_promote1((__x))(__x))\n" "\n" "// copysign\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_copysign(float __x, float __y) {return copysignf(__x, __y);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_copysign(double __x, double __y) {return copysign(__x, __y);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_copysign(long double __x, long double __y) {return copysignl(__x, __y);}\n" "\n" "#undef copysign\n" "#define copysign(__x, __y) __tg_copysign(__tg_promote2((__x), (__y))(__x), \\\n" " __tg_promote2((__x), (__y))(__y))\n" "\n" "// erf\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_erf(float __x) {return erff(__x);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_erf(double __x) {return erf(__x);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " 
__tg_erf(long double __x) {return erfl(__x);}\n" "\n" "#undef erf\n" "#define erf(__x) __tg_erf(__tg_promote1((__x))(__x))\n" "\n" "// erfc\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_erfc(float __x) {return erfcf(__x);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_erfc(double __x) {return erfc(__x);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_erfc(long double __x) {return erfcl(__x);}\n" "\n" "#undef erfc\n" "#define erfc(__x) __tg_erfc(__tg_promote1((__x))(__x))\n" "\n" "// exp2\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_exp2(float __x) {return exp2f(__x);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_exp2(double __x) {return exp2(__x);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_exp2(long double __x) {return exp2l(__x);}\n" "\n" "#undef exp2\n" "#define exp2(__x) __tg_exp2(__tg_promote1((__x))(__x))\n" "\n" "// expm1\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_expm1(float __x) {return expm1f(__x);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_expm1(double __x) {return expm1(__x);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_expm1(long double __x) {return expm1l(__x);}\n" "\n" "#undef expm1\n" "#define expm1(__x) __tg_expm1(__tg_promote1((__x))(__x))\n" "\n" "// fdim\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_fdim(float __x, float __y) {return fdimf(__x, __y);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_fdim(double __x, double __y) {return fdim(__x, __y);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_fdim(long double __x, long double __y) {return fdiml(__x, __y);}\n" "\n" "#undef fdim\n" "#define fdim(__x, __y) __tg_fdim(__tg_promote2((__x), (__y))(__x), \\\n" " __tg_promote2((__x), (__y))(__y))\n" "\n" "// floor\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_floor(float __x) {return floorf(__x);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_floor(double __x) {return floor(__x);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_floor(long double __x) {return floorl(__x);}\n" "\n" "#undef 
floor\n" "#define floor(__x) __tg_floor(__tg_promote1((__x))(__x))\n" "\n" "// fma\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_fma(float __x, float __y, float __z)\n" " {return fmaf(__x, __y, __z);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_fma(double __x, double __y, double __z)\n" " {return fma(__x, __y, __z);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_fma(long double __x,long double __y, long double __z)\n" " {return fmal(__x, __y, __z);}\n" "\n" "#undef fma\n" "#define fma(__x, __y, __z) \\\n" " __tg_fma(__tg_promote3((__x), (__y), (__z))(__x), \\\n" " __tg_promote3((__x), (__y), (__z))(__y), \\\n" " __tg_promote3((__x), (__y), (__z))(__z))\n" "\n" "// fmax\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_fmax(float __x, float __y) {return fmaxf(__x, __y);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_fmax(double __x, double __y) {return fmax(__x, __y);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_fmax(long double __x, long double __y) {return fmaxl(__x, __y);}\n" "\n" "#undef fmax\n" "#define fmax(__x, __y) __tg_fmax(__tg_promote2((__x), (__y))(__x), \\\n" " __tg_promote2((__x), (__y))(__y))\n" "\n" "// fmin\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_fmin(float __x, float __y) {return fminf(__x, __y);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_fmin(double __x, double __y) {return fmin(__x, __y);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_fmin(long double __x, long double __y) {return fminl(__x, __y);}\n" "\n" "#undef fmin\n" "#define fmin(__x, __y) __tg_fmin(__tg_promote2((__x), (__y))(__x), \\\n" " __tg_promote2((__x), (__y))(__y))\n" "\n" "// fmod\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_fmod(float __x, float __y) {return fmodf(__x, __y);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_fmod(double __x, double __y) {return fmod(__x, __y);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_fmod(long double __x, long double __y) {return fmodl(__x, __y);}\n" "\n" "#undef fmod\n" "#define fmod(__x, __y) 
__tg_fmod(__tg_promote2((__x), (__y))(__x), \\\n" " __tg_promote2((__x), (__y))(__y))\n" "\n" "// frexp\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_frexp(float __x, int* __y) {return frexpf(__x, __y);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_frexp(double __x, int* __y) {return frexp(__x, __y);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_frexp(long double __x, int* __y) {return frexpl(__x, __y);}\n" "\n" "#undef frexp\n" "#define frexp(__x, __y) __tg_frexp(__tg_promote1((__x))(__x), __y)\n" "\n" "// hypot\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_hypot(float __x, float __y) {return hypotf(__x, __y);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_hypot(double __x, double __y) {return hypot(__x, __y);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_hypot(long double __x, long double __y) {return hypotl(__x, __y);}\n" "\n" "#undef hypot\n" "#define hypot(__x, __y) __tg_hypot(__tg_promote2((__x), (__y))(__x), \\\n" " __tg_promote2((__x), (__y))(__y))\n" "\n" "// ilogb\n" "\n" "static int\n" " _TG_ATTRS\n" " __tg_ilogb(float __x) {return ilogbf(__x);}\n" "\n" "static int\n" " _TG_ATTRS\n" " __tg_ilogb(double __x) {return ilogb(__x);}\n" "\n" "static int\n" " _TG_ATTRS\n" " __tg_ilogb(long double __x) {return ilogbl(__x);}\n" "\n" "#undef ilogb\n" "#define ilogb(__x) __tg_ilogb(__tg_promote1((__x))(__x))\n" "\n" "// ldexp\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_ldexp(float __x, int __y) {return ldexpf(__x, __y);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_ldexp(double __x, int __y) {return ldexp(__x, __y);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_ldexp(long double __x, int __y) {return ldexpl(__x, __y);}\n" "\n" "#undef ldexp\n" "#define ldexp(__x, __y) __tg_ldexp(__tg_promote1((__x))(__x), __y)\n" "\n" "// lgamma\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_lgamma(float __x) {return lgammaf(__x);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_lgamma(double __x) {return lgamma(__x);}\n" "\n" "static long 
double\n" " _TG_ATTRS\n" " __tg_lgamma(long double __x) {return lgammal(__x);}\n" "\n" "#undef lgamma\n" "#define lgamma(__x) __tg_lgamma(__tg_promote1((__x))(__x))\n" "\n" "// llrint\n" "\n" "static long long\n" " _TG_ATTRS\n" " __tg_llrint(float __x) {return llrintf(__x);}\n" "\n" "static long long\n" " _TG_ATTRS\n" " __tg_llrint(double __x) {return llrint(__x);}\n" "\n" "static long long\n" " _TG_ATTRS\n" " __tg_llrint(long double __x) {return llrintl(__x);}\n" "\n" "#undef llrint\n" "#define llrint(__x) __tg_llrint(__tg_promote1((__x))(__x))\n" "\n" "// llround\n" "\n" "static long long\n" " _TG_ATTRS\n" " __tg_llround(float __x) {return llroundf(__x);}\n" "\n" "static long long\n" " _TG_ATTRS\n" " __tg_llround(double __x) {return llround(__x);}\n" "\n" "static long long\n" " _TG_ATTRS\n" " __tg_llround(long double __x) {return llroundl(__x);}\n" "\n" "#undef llround\n" "#define llround(__x) __tg_llround(__tg_promote1((__x))(__x))\n" "\n" "// log10\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_log10(float __x) {return log10f(__x);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_log10(double __x) {return log10(__x);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_log10(long double __x) {return log10l(__x);}\n" "\n" "#undef log10\n" "#define log10(__x) __tg_log10(__tg_promote1((__x))(__x))\n" "\n" "// log1p\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_log1p(float __x) {return log1pf(__x);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_log1p(double __x) {return log1p(__x);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_log1p(long double __x) {return log1pl(__x);}\n" "\n" "#undef log1p\n" "#define log1p(__x) __tg_log1p(__tg_promote1((__x))(__x))\n" "\n" "// log2\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_log2(float __x) {return log2f(__x);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_log2(double __x) {return log2(__x);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_log2(long double __x) {return log2l(__x);}\n" "\n" "#undef log2\n" 
"#define log2(__x) __tg_log2(__tg_promote1((__x))(__x))\n" "\n" "// logb\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_logb(float __x) {return logbf(__x);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_logb(double __x) {return logb(__x);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_logb(long double __x) {return logbl(__x);}\n" "\n" "#undef logb\n" "#define logb(__x) __tg_logb(__tg_promote1((__x))(__x))\n" "\n" "// lrint\n" "\n" "static long\n" " _TG_ATTRS\n" " __tg_lrint(float __x) {return lrintf(__x);}\n" "\n" "static long\n" " _TG_ATTRS\n" " __tg_lrint(double __x) {return lrint(__x);}\n" "\n" "static long\n" " _TG_ATTRS\n" " __tg_lrint(long double __x) {return lrintl(__x);}\n" "\n" "#undef lrint\n" "#define lrint(__x) __tg_lrint(__tg_promote1((__x))(__x))\n" "\n" "// lround\n" "\n" "static long\n" " _TG_ATTRS\n" " __tg_lround(float __x) {return lroundf(__x);}\n" "\n" "static long\n" " _TG_ATTRS\n" " __tg_lround(double __x) {return lround(__x);}\n" "\n" "static long\n" " _TG_ATTRS\n" " __tg_lround(long double __x) {return lroundl(__x);}\n" "\n" "#undef lround\n" "#define lround(__x) __tg_lround(__tg_promote1((__x))(__x))\n" "\n" "// nearbyint\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_nearbyint(float __x) {return nearbyintf(__x);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_nearbyint(double __x) {return nearbyint(__x);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_nearbyint(long double __x) {return nearbyintl(__x);}\n" "\n" "#undef nearbyint\n" "#define nearbyint(__x) __tg_nearbyint(__tg_promote1((__x))(__x))\n" "\n" "// nextafter\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_nextafter(float __x, float __y) {return nextafterf(__x, __y);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_nextafter(double __x, double __y) {return nextafter(__x, __y);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_nextafter(long double __x, long double __y) {return nextafterl(__x, __y);}\n" "\n" "#undef nextafter\n" "#define nextafter(__x, __y) 
__tg_nextafter(__tg_promote2((__x), (__y))(__x), \\\n" " __tg_promote2((__x), (__y))(__y))\n" "\n" "// nexttoward\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_nexttoward(float __x, long double __y) {return nexttowardf(__x, __y);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_nexttoward(double __x, long double __y) {return nexttoward(__x, __y);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_nexttoward(long double __x, long double __y) {return nexttowardl(__x, __y);}\n" "\n" "#undef nexttoward\n" "#define nexttoward(__x, __y) __tg_nexttoward(__tg_promote1((__x))(__x), (__y))\n" "\n" "// remainder\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_remainder(float __x, float __y) {return remainderf(__x, __y);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_remainder(double __x, double __y) {return remainder(__x, __y);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_remainder(long double __x, long double __y) {return remainderl(__x, __y);}\n" "\n" "#undef remainder\n" "#define remainder(__x, __y) __tg_remainder(__tg_promote2((__x), (__y))(__x), \\\n" " __tg_promote2((__x), (__y))(__y))\n" "\n" "// remquo\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_remquo(float __x, float __y, int* __z)\n" " {return remquof(__x, __y, __z);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_remquo(double __x, double __y, int* __z)\n" " {return remquo(__x, __y, __z);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_remquo(long double __x,long double __y, int* __z)\n" " {return remquol(__x, __y, __z);}\n" "\n" "#undef remquo\n" "#define remquo(__x, __y, __z) \\\n" " __tg_remquo(__tg_promote2((__x), (__y))(__x), \\\n" " __tg_promote2((__x), (__y))(__y), \\\n" " (__z))\n" "\n" "// rint\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_rint(float __x) {return rintf(__x);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_rint(double __x) {return rint(__x);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_rint(long double __x) {return rintl(__x);}\n" "\n" "#undef rint\n" 
"#define rint(__x) __tg_rint(__tg_promote1((__x))(__x))\n" "\n" "// round\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_round(float __x) {return roundf(__x);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_round(double __x) {return round(__x);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_round(long double __x) {return roundl(__x);}\n" "\n" "#undef round\n" "#define round(__x) __tg_round(__tg_promote1((__x))(__x))\n" "\n" "// scalbn\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_scalbn(float __x, int __y) {return scalbnf(__x, __y);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_scalbn(double __x, int __y) {return scalbn(__x, __y);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_scalbn(long double __x, int __y) {return scalbnl(__x, __y);}\n" "\n" "#undef scalbn\n" "#define scalbn(__x, __y) __tg_scalbn(__tg_promote1((__x))(__x), __y)\n" "\n" "// scalbln\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_scalbln(float __x, long __y) {return scalblnf(__x, __y);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_scalbln(double __x, long __y) {return scalbln(__x, __y);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_scalbln(long double __x, long __y) {return scalblnl(__x, __y);}\n" "\n" "#undef scalbln\n" "#define scalbln(__x, __y) __tg_scalbln(__tg_promote1((__x))(__x), __y)\n" "\n" "// tgamma\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_tgamma(float __x) {return tgammaf(__x);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_tgamma(double __x) {return tgamma(__x);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_tgamma(long double __x) {return tgammal(__x);}\n" "\n" "#undef tgamma\n" "#define tgamma(__x) __tg_tgamma(__tg_promote1((__x))(__x))\n" "\n" "// trunc\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_trunc(float __x) {return truncf(__x);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_trunc(double __x) {return trunc(__x);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_trunc(long double __x) {return truncl(__x);}\n" "\n" "#undef 
trunc\n" "#define trunc(__x) __tg_trunc(__tg_promote1((__x))(__x))\n" "\n" "// carg\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_carg(float __x) {return atan2f(0.F, __x);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_carg(double __x) {return atan2(0., __x);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_carg(long double __x) {return atan2l(0.L, __x);}\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_carg(float _Complex __x) {return cargf(__x);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_carg(double _Complex __x) {return carg(__x);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_carg(long double _Complex __x) {return cargl(__x);}\n" "\n" "#undef carg\n" "#define carg(__x) __tg_carg(__tg_promote1((__x))(__x))\n" "\n" "// cimag\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_cimag(float __x) {return 0;}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_cimag(double __x) {return 0;}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_cimag(long double __x) {return 0;}\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_cimag(float _Complex __x) {return cimagf(__x);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_cimag(double _Complex __x) {return cimag(__x);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_cimag(long double _Complex __x) {return cimagl(__x);}\n" "\n" "#undef cimag\n" "#define cimag(__x) __tg_cimag(__tg_promote1((__x))(__x))\n" "\n" "// conj\n" "\n" "static float _Complex\n" " _TG_ATTRS\n" " __tg_conj(float __x) {return __x;}\n" "\n" "static double _Complex\n" " _TG_ATTRS\n" " __tg_conj(double __x) {return __x;}\n" "\n" "static long double _Complex\n" " _TG_ATTRS\n" " __tg_conj(long double __x) {return __x;}\n" "\n" "static float _Complex\n" " _TG_ATTRS\n" " __tg_conj(float _Complex __x) {return conjf(__x);}\n" "\n" "static double _Complex\n" " _TG_ATTRS\n" " __tg_conj(double _Complex __x) {return conj(__x);}\n" "\n" "static long double _Complex\n" " _TG_ATTRS\n" " __tg_conj(long double _Complex __x) {return conjl(__x);}\n" "\n" 
"#undef conj\n" "#define conj(__x) __tg_conj(__tg_promote1((__x))(__x))\n" "\n" "// cproj\n" "\n" "static float _Complex\n" " _TG_ATTRS\n" " __tg_cproj(float __x) {return cprojf(__x);}\n" "\n" "static double _Complex\n" " _TG_ATTRS\n" " __tg_cproj(double __x) {return cproj(__x);}\n" "\n" "static long double _Complex\n" " _TG_ATTRS\n" " __tg_cproj(long double __x) {return cprojl(__x);}\n" "\n" "static float _Complex\n" " _TG_ATTRS\n" " __tg_cproj(float _Complex __x) {return cprojf(__x);}\n" "\n" "static double _Complex\n" " _TG_ATTRS\n" " __tg_cproj(double _Complex __x) {return cproj(__x);}\n" "\n" "static long double _Complex\n" " _TG_ATTRS\n" " __tg_cproj(long double _Complex __x) {return cprojl(__x);}\n" "\n" "#undef cproj\n" "#define cproj(__x) __tg_cproj(__tg_promote1((__x))(__x))\n" "\n" "// creal\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_creal(float __x) {return __x;}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_creal(double __x) {return __x;}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_creal(long double __x) {return __x;}\n" "\n" "static float\n" " _TG_ATTRS\n" " __tg_creal(float _Complex __x) {return crealf(__x);}\n" "\n" "static double\n" " _TG_ATTRS\n" " __tg_creal(double _Complex __x) {return creal(__x);}\n" "\n" "static long double\n" " _TG_ATTRS\n" " __tg_creal(long double _Complex __x) {return creall(__x);}\n" "\n" "#undef creal\n" "#define creal(__x) __tg_creal(__tg_promote1((__x))(__x))\n" "\n" "#undef _TG_ATTRSp\n" "#undef _TG_ATTRS\n" "\n" "#endif /* __cplusplus */\n" "#endif /* __has_include_next */\n" "#endif /* __CLANG_TGMATH_H */\n" "" } , { "/builtins/tmmintrin.h" , "/*===---- tmmintrin.h - SSSE3 intrinsics -----------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " 
*===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __TMMINTRIN_H\n" "#define __TMMINTRIN_H\n" "\n" "#if !defined(__i386__) && !defined(__x86_64__)\n" "#error \"This header is only meant to be used on x86 and x64 architecture\"\n" "#endif\n" "\n" "#include \n" "\n" "/* Define the default attributes for the functions in this file. */\n" "#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__(\"ssse3\"), __min_vector_width__(64)))\n" "#define __DEFAULT_FN_ATTRS_MMX __attribute__((__always_inline__, __nodebug__, __target__(\"mmx,ssse3\"), __min_vector_width__(64)))\n" "\n" "/// Computes the absolute value of each of the packed 8-bit signed\n" "/// integers in the source operand and stores the 8-bit unsigned integer\n" "/// results in the destination.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c PABSB instruction.\n" "///\n" "/// \\param __a\n" "/// A 64-bit vector of [8 x i8].\n" "/// \\returns A 64-bit integer vector containing the absolute values of the\n" "/// elements in the operand.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX\n" "_mm_abs_pi8(__m64 __a)\n" "{\n" " return (__m64)__builtin_ia32_pabsb((__v8qi)__a);\n" "}\n" "\n" "/// Computes the absolute value of each of the packed 8-bit signed\n" "/// integers in the source operand and stores the 8-bit unsigned integer\n" "/// results in the destination.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPABSB instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [16 x i8].\n" "/// \\returns A 128-bit integer vector containing the absolute values of the\n" "/// elements in the operand.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_abs_epi8(__m128i __a)\n" "{\n" " return (__m128i)__builtin_elementwise_abs((__v16qs)__a);\n" "}\n" "\n" "/// Computes the absolute value of each of the packed 16-bit signed\n" "/// integers in the source 
operand and stores the 16-bit unsigned integer\n" "/// results in the destination.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c PABSW instruction.\n" "///\n" "/// \\param __a\n" "/// A 64-bit vector of [4 x i16].\n" "/// \\returns A 64-bit integer vector containing the absolute values of the\n" "/// elements in the operand.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX\n" "_mm_abs_pi16(__m64 __a)\n" "{\n" " return (__m64)__builtin_ia32_pabsw((__v4hi)__a);\n" "}\n" "\n" "/// Computes the absolute value of each of the packed 16-bit signed\n" "/// integers in the source operand and stores the 16-bit unsigned integer\n" "/// results in the destination.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPABSW instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [8 x i16].\n" "/// \\returns A 128-bit integer vector containing the absolute values of the\n" "/// elements in the operand.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_abs_epi16(__m128i __a)\n" "{\n" " return (__m128i)__builtin_elementwise_abs((__v8hi)__a);\n" "}\n" "\n" "/// Computes the absolute value of each of the packed 32-bit signed\n" "/// integers in the source operand and stores the 32-bit unsigned integer\n" "/// results in the destination.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c PABSD instruction.\n" "///\n" "/// \\param __a\n" "/// A 64-bit vector of [2 x i32].\n" "/// \\returns A 64-bit integer vector containing the absolute values of the\n" "/// elements in the operand.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX\n" "_mm_abs_pi32(__m64 __a)\n" "{\n" " return (__m64)__builtin_ia32_pabsd((__v2si)__a);\n" "}\n" "\n" "/// Computes the absolute value of each of the packed 32-bit signed\n" "/// integers in the source operand and stores the 32-bit unsigned integer\n" "/// results in the destination.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This 
intrinsic corresponds to the \\c VPABSD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x i32].\n" "/// \\returns A 128-bit integer vector containing the absolute values of the\n" "/// elements in the operand.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_abs_epi32(__m128i __a)\n" "{\n" " return (__m128i)__builtin_elementwise_abs((__v4si)__a);\n" "}\n" "\n" "/// Concatenates the two 128-bit integer vector operands, and\n" "/// right-shifts the result by the number of bytes specified in the immediate\n" "/// operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128i _mm_alignr_epi8(__m128i a, __m128i b, const int n);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c PALIGNR instruction.\n" "///\n" "/// \\param a\n" "/// A 128-bit vector of [16 x i8] containing one of the source operands.\n" "/// \\param b\n" "/// A 128-bit vector of [16 x i8] containing one of the source operands.\n" "/// \\param n\n" "/// An immediate operand specifying how many bytes to right-shift the result.\n" "/// \\returns A 128-bit integer vector containing the concatenated right-shifted\n" "/// value.\n" "#define _mm_alignr_epi8(a, b, n) \\\n" " ((__m128i)__builtin_ia32_palignr128((__v16qi)(__m128i)(a), \\\n" " (__v16qi)(__m128i)(b), (n)))\n" "\n" "/// Concatenates the two 64-bit integer vector operands, and right-shifts\n" "/// the result by the number of bytes specified in the immediate operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m64 _mm_alignr_pi8(__m64 a, __m64 b, const int n);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the \\c PALIGNR instruction.\n" "///\n" "/// \\param a\n" "/// A 64-bit vector of [8 x i8] containing one of the source operands.\n" "/// \\param b\n" "/// A 64-bit vector of [8 x i8] containing one of the source operands.\n" "/// \\param n\n" "/// An immediate operand specifying how many bytes to right-shift the result.\n" "/// \\returns A 64-bit 
integer vector containing the concatenated right-shifted\n" "/// value.\n" "#define _mm_alignr_pi8(a, b, n) \\\n" " ((__m64)__builtin_ia32_palignr((__v8qi)(__m64)(a), (__v8qi)(__m64)(b), (n)))\n" "\n" "/// Horizontally adds the adjacent pairs of values contained in 2 packed\n" "/// 128-bit vectors of [8 x i16].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPHADDW instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [8 x i16] containing one of the source operands. The\n" "/// horizontal sums of the values are stored in the lower bits of the\n" "/// destination.\n" "/// \\param __b\n" "/// A 128-bit vector of [8 x i16] containing one of the source operands. The\n" "/// horizontal sums of the values are stored in the upper bits of the\n" "/// destination.\n" "/// \\returns A 128-bit vector of [8 x i16] containing the horizontal sums of\n" "/// both operands.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_hadd_epi16(__m128i __a, __m128i __b)\n" "{\n" " return (__m128i)__builtin_ia32_phaddw128((__v8hi)__a, (__v8hi)__b);\n" "}\n" "\n" "/// Horizontally adds the adjacent pairs of values contained in 2 packed\n" "/// 128-bit vectors of [4 x i32].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPHADDD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x i32] containing one of the source operands. The\n" "/// horizontal sums of the values are stored in the lower bits of the\n" "/// destination.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x i32] containing one of the source operands. 
The\n" "/// horizontal sums of the values are stored in the upper bits of the\n" "/// destination.\n" "/// \\returns A 128-bit vector of [4 x i32] containing the horizontal sums of\n" "/// both operands.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_hadd_epi32(__m128i __a, __m128i __b)\n" "{\n" " return (__m128i)__builtin_ia32_phaddd128((__v4si)__a, (__v4si)__b);\n" "}\n" "\n" "/// Horizontally adds the adjacent pairs of values contained in 2 packed\n" "/// 64-bit vectors of [4 x i16].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c PHADDW instruction.\n" "///\n" "/// \\param __a\n" "/// A 64-bit vector of [4 x i16] containing one of the source operands. The\n" "/// horizontal sums of the values are stored in the lower bits of the\n" "/// destination.\n" "/// \\param __b\n" "/// A 64-bit vector of [4 x i16] containing one of the source operands. The\n" "/// horizontal sums of the values are stored in the upper bits of the\n" "/// destination.\n" "/// \\returns A 64-bit vector of [4 x i16] containing the horizontal sums of both\n" "/// operands.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX\n" "_mm_hadd_pi16(__m64 __a, __m64 __b)\n" "{\n" " return (__m64)__builtin_ia32_phaddw((__v4hi)__a, (__v4hi)__b);\n" "}\n" "\n" "/// Horizontally adds the adjacent pairs of values contained in 2 packed\n" "/// 64-bit vectors of [2 x i32].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c PHADDD instruction.\n" "///\n" "/// \\param __a\n" "/// A 64-bit vector of [2 x i32] containing one of the source operands. The\n" "/// horizontal sums of the values are stored in the lower bits of the\n" "/// destination.\n" "/// \\param __b\n" "/// A 64-bit vector of [2 x i32] containing one of the source operands. 
The\n" "/// horizontal sums of the values are stored in the upper bits of the\n" "/// destination.\n" "/// \\returns A 64-bit vector of [2 x i32] containing the horizontal sums of both\n" "/// operands.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX\n" "_mm_hadd_pi32(__m64 __a, __m64 __b)\n" "{\n" " return (__m64)__builtin_ia32_phaddd((__v2si)__a, (__v2si)__b);\n" "}\n" "\n" "/// Horizontally adds the adjacent pairs of values contained in 2 packed\n" "/// 128-bit vectors of [8 x i16]. Positive sums greater than 0x7FFF are\n" "/// saturated to 0x7FFF. Negative sums less than 0x8000 are saturated to\n" "/// 0x8000.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPHADDSW instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [8 x i16] containing one of the source operands. The\n" "/// horizontal sums of the values are stored in the lower bits of the\n" "/// destination.\n" "/// \\param __b\n" "/// A 128-bit vector of [8 x i16] containing one of the source operands. The\n" "/// horizontal sums of the values are stored in the upper bits of the\n" "/// destination.\n" "/// \\returns A 128-bit vector of [8 x i16] containing the horizontal saturated\n" "/// sums of both operands.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_hadds_epi16(__m128i __a, __m128i __b)\n" "{\n" " return (__m128i)__builtin_ia32_phaddsw128((__v8hi)__a, (__v8hi)__b);\n" "}\n" "\n" "/// Horizontally adds the adjacent pairs of values contained in 2 packed\n" "/// 64-bit vectors of [4 x i16]. Positive sums greater than 0x7FFF are\n" "/// saturated to 0x7FFF. Negative sums less than 0x8000 are saturated to\n" "/// 0x8000.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c PHADDSW instruction.\n" "///\n" "/// \\param __a\n" "/// A 64-bit vector of [4 x i16] containing one of the source operands. 
The\n" "/// horizontal sums of the values are stored in the lower bits of the\n" "/// destination.\n" "/// \\param __b\n" "/// A 64-bit vector of [4 x i16] containing one of the source operands. The\n" "/// horizontal sums of the values are stored in the upper bits of the\n" "/// destination.\n" "/// \\returns A 64-bit vector of [4 x i16] containing the horizontal saturated\n" "/// sums of both operands.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX\n" "_mm_hadds_pi16(__m64 __a, __m64 __b)\n" "{\n" " return (__m64)__builtin_ia32_phaddsw((__v4hi)__a, (__v4hi)__b);\n" "}\n" "\n" "/// Horizontally subtracts the adjacent pairs of values contained in 2\n" "/// packed 128-bit vectors of [8 x i16].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPHSUBW instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [8 x i16] containing one of the source operands. The\n" "/// horizontal differences between the values are stored in the lower bits of\n" "/// the destination.\n" "/// \\param __b\n" "/// A 128-bit vector of [8 x i16] containing one of the source operands. The\n" "/// horizontal differences between the values are stored in the upper bits of\n" "/// the destination.\n" "/// \\returns A 128-bit vector of [8 x i16] containing the horizontal differences\n" "/// of both operands.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_hsub_epi16(__m128i __a, __m128i __b)\n" "{\n" " return (__m128i)__builtin_ia32_phsubw128((__v8hi)__a, (__v8hi)__b);\n" "}\n" "\n" "/// Horizontally subtracts the adjacent pairs of values contained in 2\n" "/// packed 128-bit vectors of [4 x i32].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPHSUBD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x i32] containing one of the source operands. 
The\n" "/// horizontal differences between the values are stored in the lower bits of\n" "/// the destination.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x i32] containing one of the source operands. The\n" "/// horizontal differences between the values are stored in the upper bits of\n" "/// the destination.\n" "/// \\returns A 128-bit vector of [4 x i32] containing the horizontal differences\n" "/// of both operands.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_hsub_epi32(__m128i __a, __m128i __b)\n" "{\n" " return (__m128i)__builtin_ia32_phsubd128((__v4si)__a, (__v4si)__b);\n" "}\n" "\n" "/// Horizontally subtracts the adjacent pairs of values contained in 2\n" "/// packed 64-bit vectors of [4 x i16].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c PHSUBW instruction.\n" "///\n" "/// \\param __a\n" "/// A 64-bit vector of [4 x i16] containing one of the source operands. The\n" "/// horizontal differences between the values are stored in the lower bits of\n" "/// the destination.\n" "/// \\param __b\n" "/// A 64-bit vector of [4 x i16] containing one of the source operands. The\n" "/// horizontal differences between the values are stored in the upper bits of\n" "/// the destination.\n" "/// \\returns A 64-bit vector of [4 x i16] containing the horizontal differences\n" "/// of both operands.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX\n" "_mm_hsub_pi16(__m64 __a, __m64 __b)\n" "{\n" " return (__m64)__builtin_ia32_phsubw((__v4hi)__a, (__v4hi)__b);\n" "}\n" "\n" "/// Horizontally subtracts the adjacent pairs of values contained in 2\n" "/// packed 64-bit vectors of [2 x i32].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c PHSUBD instruction.\n" "///\n" "/// \\param __a\n" "/// A 64-bit vector of [2 x i32] containing one of the source operands. 
The\n" "/// horizontal differences between the values are stored in the lower bits of\n" "/// the destination.\n" "/// \\param __b\n" "/// A 64-bit vector of [2 x i32] containing one of the source operands. The\n" "/// horizontal differences between the values are stored in the upper bits of\n" "/// the destination.\n" "/// \\returns A 64-bit vector of [2 x i32] containing the horizontal differences\n" "/// of both operands.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX\n" "_mm_hsub_pi32(__m64 __a, __m64 __b)\n" "{\n" " return (__m64)__builtin_ia32_phsubd((__v2si)__a, (__v2si)__b);\n" "}\n" "\n" "/// Horizontally subtracts the adjacent pairs of values contained in 2\n" "/// packed 128-bit vectors of [8 x i16]. Positive differences greater than\n" "/// 0x7FFF are saturated to 0x7FFF. Negative differences less than 0x8000 are\n" "/// saturated to 0x8000.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPHSUBSW instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [8 x i16] containing one of the source operands. The\n" "/// horizontal differences between the values are stored in the lower bits of\n" "/// the destination.\n" "/// \\param __b\n" "/// A 128-bit vector of [8 x i16] containing one of the source operands. The\n" "/// horizontal differences between the values are stored in the upper bits of\n" "/// the destination.\n" "/// \\returns A 128-bit vector of [8 x i16] containing the horizontal saturated\n" "/// differences of both operands.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_hsubs_epi16(__m128i __a, __m128i __b)\n" "{\n" " return (__m128i)__builtin_ia32_phsubsw128((__v8hi)__a, (__v8hi)__b);\n" "}\n" "\n" "/// Horizontally subtracts the adjacent pairs of values contained in 2\n" "/// packed 64-bit vectors of [4 x i16]. Positive differences greater than\n" "/// 0x7FFF are saturated to 0x7FFF. 
Negative differences less than 0x8000 are\n" "/// saturated to 0x8000.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c PHSUBSW instruction.\n" "///\n" "/// \\param __a\n" "/// A 64-bit vector of [4 x i16] containing one of the source operands. The\n" "/// horizontal differences between the values are stored in the lower bits of\n" "/// the destination.\n" "/// \\param __b\n" "/// A 64-bit vector of [4 x i16] containing one of the source operands. The\n" "/// horizontal differences between the values are stored in the upper bits of\n" "/// the destination.\n" "/// \\returns A 64-bit vector of [4 x i16] containing the horizontal saturated\n" "/// differences of both operands.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX\n" "_mm_hsubs_pi16(__m64 __a, __m64 __b)\n" "{\n" " return (__m64)__builtin_ia32_phsubsw((__v4hi)__a, (__v4hi)__b);\n" "}\n" "\n" "/// Multiplies corresponding pairs of packed 8-bit unsigned integer\n" "/// values contained in the first source operand and packed 8-bit signed\n" "/// integer values contained in the second source operand, adds pairs of\n" "/// contiguous products with signed saturation, and writes the 16-bit sums to\n" "/// the corresponding bits in the destination.\n" "///\n" "/// For example, bits [7:0] of both operands are multiplied, bits [15:8] of\n" "/// both operands are multiplied, and the sum of both results is written to\n" "/// bits [15:0] of the destination.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPMADDUBSW instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector containing the first source operand.\n" "/// \\param __b\n" "/// A 128-bit integer vector containing the second source operand.\n" "/// \\returns A 128-bit integer vector containing the sums of products of both\n" "/// operands: \\n\n" "/// \\a R0 := (\\a __a0 * \\a __b0) + (\\a __a1 * \\a __b1) \\n\n" "/// \\a R1 := (\\a __a2 * \\a __b2) + (\\a __a3 * \\a __b3) 
\\n\n" "/// \\a R2 := (\\a __a4 * \\a __b4) + (\\a __a5 * \\a __b5) \\n\n" "/// \\a R3 := (\\a __a6 * \\a __b6) + (\\a __a7 * \\a __b7) \\n\n" "/// \\a R4 := (\\a __a8 * \\a __b8) + (\\a __a9 * \\a __b9) \\n\n" "/// \\a R5 := (\\a __a10 * \\a __b10) + (\\a __a11 * \\a __b11) \\n\n" "/// \\a R6 := (\\a __a12 * \\a __b12) + (\\a __a13 * \\a __b13) \\n\n" "/// \\a R7 := (\\a __a14 * \\a __b14) + (\\a __a15 * \\a __b15)\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_maddubs_epi16(__m128i __a, __m128i __b)\n" "{\n" " return (__m128i)__builtin_ia32_pmaddubsw128((__v16qi)__a, (__v16qi)__b);\n" "}\n" "\n" "/// Multiplies corresponding pairs of packed 8-bit unsigned integer\n" "/// values contained in the first source operand and packed 8-bit signed\n" "/// integer values contained in the second source operand, adds pairs of\n" "/// contiguous products with signed saturation, and writes the 16-bit sums to\n" "/// the corresponding bits in the destination.\n" "///\n" "/// For example, bits [7:0] of both operands are multiplied, bits [15:8] of\n" "/// both operands are multiplied, and the sum of both results is written to\n" "/// bits [15:0] of the destination.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c PMADDUBSW instruction.\n" "///\n" "/// \\param __a\n" "/// A 64-bit integer vector containing the first source operand.\n" "/// \\param __b\n" "/// A 64-bit integer vector containing the second source operand.\n" "/// \\returns A 64-bit integer vector containing the sums of products of both\n" "/// operands: \\n\n" "/// \\a R0 := (\\a __a0 * \\a __b0) + (\\a __a1 * \\a __b1) \\n\n" "/// \\a R1 := (\\a __a2 * \\a __b2) + (\\a __a3 * \\a __b3) \\n\n" "/// \\a R2 := (\\a __a4 * \\a __b4) + (\\a __a5 * \\a __b5) \\n\n" "/// \\a R3 := (\\a __a6 * \\a __b6) + (\\a __a7 * \\a __b7)\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX\n" "_mm_maddubs_pi16(__m64 __a, __m64 __b)\n" "{\n" " return 
(__m64)__builtin_ia32_pmaddubsw((__v8qi)__a, (__v8qi)__b);\n" "}\n" "\n" "/// Multiplies packed 16-bit signed integer values, truncates the 32-bit\n" "/// products to the 18 most significant bits by right-shifting, rounds the\n" "/// truncated value by adding 1, and writes bits [16:1] to the destination.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPMULHRSW instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [8 x i16] containing one of the source operands.\n" "/// \\param __b\n" "/// A 128-bit vector of [8 x i16] containing one of the source operands.\n" "/// \\returns A 128-bit vector of [8 x i16] containing the rounded and scaled\n" "/// products of both operands.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_mulhrs_epi16(__m128i __a, __m128i __b)\n" "{\n" " return (__m128i)__builtin_ia32_pmulhrsw128((__v8hi)__a, (__v8hi)__b);\n" "}\n" "\n" "/// Multiplies packed 16-bit signed integer values, truncates the 32-bit\n" "/// products to the 18 most significant bits by right-shifting, rounds the\n" "/// truncated value by adding 1, and writes bits [16:1] to the destination.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c PMULHRSW instruction.\n" "///\n" "/// \\param __a\n" "/// A 64-bit vector of [4 x i16] containing one of the source operands.\n" "/// \\param __b\n" "/// A 64-bit vector of [4 x i16] containing one of the source operands.\n" "/// \\returns A 64-bit vector of [4 x i16] containing the rounded and scaled\n" "/// products of both operands.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX\n" "_mm_mulhrs_pi16(__m64 __a, __m64 __b)\n" "{\n" " return (__m64)__builtin_ia32_pmulhrsw((__v4hi)__a, (__v4hi)__b);\n" "}\n" "\n" "/// Copies the 8-bit integers from a 128-bit integer vector to the\n" "/// destination or clears 8-bit values in the destination, as specified by\n" "/// the second source operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This 
intrinsic corresponds to the \\c VPSHUFB instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector containing the values to be copied.\n" "/// \\param __b\n" "/// A 128-bit integer vector containing control bytes corresponding to\n" "/// positions in the destination:\n" "/// Bit 7: \\n\n" "/// 1: Clear the corresponding byte in the destination. \\n\n" "/// 0: Copy the selected source byte to the corresponding byte in the\n" "/// destination. \\n\n" "/// Bits [6:4] Reserved. \\n\n" "/// Bits [3:0] select the source byte to be copied.\n" "/// \\returns A 128-bit integer vector containing the copied or cleared values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_shuffle_epi8(__m128i __a, __m128i __b)\n" "{\n" " return (__m128i)__builtin_ia32_pshufb128((__v16qi)__a, (__v16qi)__b);\n" "}\n" "\n" "/// Copies the 8-bit integers from a 64-bit integer vector to the\n" "/// destination or clears 8-bit values in the destination, as specified by\n" "/// the second source operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c PSHUFB instruction.\n" "///\n" "/// \\param __a\n" "/// A 64-bit integer vector containing the values to be copied.\n" "/// \\param __b\n" "/// A 64-bit integer vector containing control bytes corresponding to\n" "/// positions in the destination:\n" "/// Bit 7: \\n\n" "/// 1: Clear the corresponding byte in the destination. \\n\n" "/// 0: Copy the selected source byte to the corresponding byte in the\n" "/// destination. 
\\n\n" "/// Bits [3:0] select the source byte to be copied.\n" "/// \\returns A 64-bit integer vector containing the copied or cleared values.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX\n" "_mm_shuffle_pi8(__m64 __a, __m64 __b)\n" "{\n" " return (__m64)__builtin_ia32_pshufb((__v8qi)__a, (__v8qi)__b);\n" "}\n" "\n" "/// For each 8-bit integer in the first source operand, perform one of\n" "/// the following actions as specified by the second source operand.\n" "///\n" "/// If the byte in the second source is negative, calculate the two's\n" "/// complement of the corresponding byte in the first source, and write that\n" "/// value to the destination. If the byte in the second source is positive,\n" "/// copy the corresponding byte from the first source to the destination. If\n" "/// the byte in the second source is zero, clear the corresponding byte in\n" "/// the destination.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSIGNB instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector containing the values to be copied.\n" "/// \\param __b\n" "/// A 128-bit integer vector containing control bytes corresponding to\n" "/// positions in the destination.\n" "/// \\returns A 128-bit integer vector containing the resultant values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_sign_epi8(__m128i __a, __m128i __b)\n" "{\n" " return (__m128i)__builtin_ia32_psignb128((__v16qi)__a, (__v16qi)__b);\n" "}\n" "\n" "/// For each 16-bit integer in the first source operand, perform one of\n" "/// the following actions as specified by the second source operand.\n" "///\n" "/// If the word in the second source is negative, calculate the two's\n" "/// complement of the corresponding word in the first source, and write that\n" "/// value to the destination. If the word in the second source is positive,\n" "/// copy the corresponding word from the first source to the destination. 
If\n" "/// the word in the second source is zero, clear the corresponding word in\n" "/// the destination.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSIGNW instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector containing the values to be copied.\n" "/// \\param __b\n" "/// A 128-bit integer vector containing control words corresponding to\n" "/// positions in the destination.\n" "/// \\returns A 128-bit integer vector containing the resultant values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_sign_epi16(__m128i __a, __m128i __b)\n" "{\n" " return (__m128i)__builtin_ia32_psignw128((__v8hi)__a, (__v8hi)__b);\n" "}\n" "\n" "/// For each 32-bit integer in the first source operand, perform one of\n" "/// the following actions as specified by the second source operand.\n" "///\n" "/// If the doubleword in the second source is negative, calculate the two's\n" "/// complement of the corresponding word in the first source, and write that\n" "/// value to the destination. If the doubleword in the second source is\n" "/// positive, copy the corresponding word from the first source to the\n" "/// destination. 
If the doubleword in the second source is zero, clear the\n" "/// corresponding word in the destination.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c VPSIGND instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit integer vector containing the values to be copied.\n" "/// \\param __b\n" "/// A 128-bit integer vector containing control doublewords corresponding to\n" "/// positions in the destination.\n" "/// \\returns A 128-bit integer vector containing the resultant values.\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_sign_epi32(__m128i __a, __m128i __b)\n" "{\n" " return (__m128i)__builtin_ia32_psignd128((__v4si)__a, (__v4si)__b);\n" "}\n" "\n" "/// For each 8-bit integer in the first source operand, perform one of\n" "/// the following actions as specified by the second source operand.\n" "///\n" "/// If the byte in the second source is negative, calculate the two's\n" "/// complement of the corresponding byte in the first source, and write that\n" "/// value to the destination. If the byte in the second source is positive,\n" "/// copy the corresponding byte from the first source to the destination. 
If\n" "/// the byte in the second source is zero, clear the corresponding byte in\n" "/// the destination.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c PSIGNB instruction.\n" "///\n" "/// \\param __a\n" "/// A 64-bit integer vector containing the values to be copied.\n" "/// \\param __b\n" "/// A 64-bit integer vector containing control bytes corresponding to\n" "/// positions in the destination.\n" "/// \\returns A 64-bit integer vector containing the resultant values.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX\n" "_mm_sign_pi8(__m64 __a, __m64 __b)\n" "{\n" " return (__m64)__builtin_ia32_psignb((__v8qi)__a, (__v8qi)__b);\n" "}\n" "\n" "/// For each 16-bit integer in the first source operand, perform one of\n" "/// the following actions as specified by the second source operand.\n" "///\n" "/// If the word in the second source is negative, calculate the two's\n" "/// complement of the corresponding word in the first source, and write that\n" "/// value to the destination. If the word in the second source is positive,\n" "/// copy the corresponding word from the first source to the destination. 
If\n" "/// the word in the second source is zero, clear the corresponding word in\n" "/// the destination.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c PSIGNW instruction.\n" "///\n" "/// \\param __a\n" "/// A 64-bit integer vector containing the values to be copied.\n" "/// \\param __b\n" "/// A 64-bit integer vector containing control words corresponding to\n" "/// positions in the destination.\n" "/// \\returns A 64-bit integer vector containing the resultant values.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX\n" "_mm_sign_pi16(__m64 __a, __m64 __b)\n" "{\n" " return (__m64)__builtin_ia32_psignw((__v4hi)__a, (__v4hi)__b);\n" "}\n" "\n" "/// For each 32-bit integer in the first source operand, perform one of\n" "/// the following actions as specified by the second source operand.\n" "///\n" "/// If the doubleword in the second source is negative, calculate the two's\n" "/// complement of the corresponding doubleword in the first source, and\n" "/// write that value to the destination. If the doubleword in the second\n" "/// source is positive, copy the corresponding doubleword from the first\n" "/// source to the destination. 
If the doubleword in the second source is\n" "/// zero, clear the corresponding doubleword in the destination.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c PSIGND instruction.\n" "///\n" "/// \\param __a\n" "/// A 64-bit integer vector containing the values to be copied.\n" "/// \\param __b\n" "/// A 64-bit integer vector containing two control doublewords corresponding\n" "/// to positions in the destination.\n" "/// \\returns A 64-bit integer vector containing the resultant values.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX\n" "_mm_sign_pi32(__m64 __a, __m64 __b)\n" "{\n" " return (__m64)__builtin_ia32_psignd((__v2si)__a, (__v2si)__b);\n" "}\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "#undef __DEFAULT_FN_ATTRS_MMX\n" "\n" "#endif /* __TMMINTRIN_H */\n" "" } , { "/builtins/tsxldtrkintrin.h" , "/*===------------- tsxldtrkintrin.h - tsxldtrk intrinsics ------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __TSXLDTRKINTRIN_H\n" "#define __TSXLDTRKINTRIN_H\n" "\n" "/* Define the default attributes for the functions in this file */\n" "#define _DEFAULT_FN_ATTRS \\\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"tsxldtrk\")))\n" "\n" "/// Marks the start of an TSX (RTM) suspend load address tracking region. If\n" "/// this intrinsic is used inside a transactional region, subsequent loads\n" "/// are not added to the read set of the transaction. 
If it's used inside a\n" "/// suspend load address tracking region it will cause transaction abort.\n" "/// If it's used outside of a transactional region it behaves like a NOP.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c XSUSLDTRK instruction.\n" "///\n" "static __inline__ void _DEFAULT_FN_ATTRS\n" "_xsusldtrk (void)\n" "{\n" " __builtin_ia32_xsusldtrk();\n" "}\n" "\n" "/// Marks the end of an TSX (RTM) suspend load address tracking region. If this\n" "/// intrinsic is used inside a suspend load address tracking region it will\n" "/// end the suspend region and all following load addresses will be added to\n" "/// the transaction read set. If it's used inside an active transaction but\n" "/// not in a suspend region it will cause transaction abort. If it's used\n" "/// outside of a transactional region it behaves like a NOP.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c XRESLDTRK instruction.\n" "///\n" "static __inline__ void _DEFAULT_FN_ATTRS\n" "_xresldtrk (void)\n" "{\n" " __builtin_ia32_xresldtrk();\n" "}\n" "\n" "#undef _DEFAULT_FN_ATTRS\n" "\n" "#endif /* __TSXLDTRKINTRIN_H */\n" "" } , { "/builtins/uintrintrin.h" , "/*===------------------ uintrintrin.h - UINTR intrinsics -------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __X86GPRINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __UINTRINTRIN_H\n" "#define __UINTRINTRIN_H\n" "\n" "/* Define the default attributes for the functions in this file */\n" "#define __DEFAULT_FN_ATTRS \\\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"uintr\")))\n" "\n" "#ifdef __x86_64__\n" "\n" 
"struct __uintr_frame\n" "{\n" " unsigned long long rip;\n" " unsigned long long rflags;\n" " unsigned long long rsp;\n" "};\n" "\n" "/// Clears the user interrupt flag (UIF). Its effect takes place immediately: a\n" "/// user interrupt cannot be delivered on the instruction boundary following\n" "/// CLUI. Can be executed only if CR4.UINT = 1, the logical processor is in\n" "/// 64-bit mode, and software is not executing inside an enclave; otherwise,\n" "/// each causes an invalid-opcode exception. Causes a transactional abort if\n" "/// executed inside a transactional region; the abort loads EAX as it would\n" "/// had it been due to an execution of CLI.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the CLUI instruction.\n" "///\n" "/// \\code{.operation}\n" "/// UIF := 0\n" "/// \\endcode\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_clui (void)\n" "{\n" " __builtin_ia32_clui();\n" "}\n" "\n" "/// Sets the user interrupt flag (UIF). Its effect takes place immediately; a\n" "/// user interrupt may be delivered on the instruction boundary following\n" "/// STUI. Can be executed only if CR4.UINT = 1, the logical processor is in\n" "/// 64-bit mode, and software is not executing inside an enclave; otherwise,\n" "/// each causes an invalid-opcode exception. Causes a transactional abort if\n" "/// executed inside a transactional region; the abort loads EAX as it would\n" "/// had it been due to an execution of STI.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the STUI instruction.\n" "///\n" "/// \\code{.operation}\n" "/// UIF := 1\n" "/// \\endcode\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_stui (void)\n" "{\n" " __builtin_ia32_stui();\n" "}\n" "\n" "/// Get the current value of the user interrupt flag (UIF). Can be executed\n" "/// regardless of CPL and inside a transactional region. 
Can be executed only\n" "/// if CR4.UINT = 1, the logical processor is in 64-bit mode, and software is\n" "/// not executing inside an enclave; otherwise, it causes an invalid-opcode\n" "/// exception.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the TESTUI instruction.\n" "///\n" "/// \\returns The current value of the user interrupt flag (UIF).\n" "///\n" "/// \\code{.operation}\n" "/// CF := UIF\n" "/// ZF := 0\n" "/// AF := 0\n" "/// OF := 0\n" "/// PF := 0\n" "/// SF := 0\n" "/// dst := CF\n" "/// \\endcode\n" "static __inline__ unsigned char __DEFAULT_FN_ATTRS\n" "_testui (void)\n" "{\n" " return __builtin_ia32_testui();\n" "}\n" "\n" "/// Send interprocessor user interrupt. Can be executed only if\n" "/// CR4.UINT = IA32_UINT_TT[0] = 1, the logical processor is in 64-bit mode,\n" "/// and software is not executing inside an enclave; otherwise, it causes an\n" "/// invalid-opcode exception. May be executed at any privilege level, all of\n" "/// its memory accesses are performed with supervisor privilege.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the SENDUIPI instruction\n" "///\n" "/// \\param __a\n" "/// Index of user-interrupt target table entry in user-interrupt target\n" "/// table.\n" "///\n" "/// \\code{.operation}\n" "/// IF __a > UITTSZ\n" "/// GP (0)\n" "/// FI\n" "/// tempUITTE := MEM[UITTADDR + (a<<4)]\n" "/// // tempUITTE must be valid, and can't have any reserved bit set\n" "/// IF (tempUITTE.V == 0 OR tempUITTE[7:1] != 0)\n" "/// GP (0)\n" "/// FI\n" "/// tempUPID := MEM[tempUITTE.UPIDADDR] // under lock\n" "/// // tempUPID can't have any reserved bit set\n" "/// IF (tempUPID[15:2] != 0 OR tempUPID[31:24] != 0)\n" "/// GP (0) // release lock\n" "/// FI\n" "/// tempUPID.PIR[tempUITTE.UV] := 1;\n" "/// IF (tempUPID.SN == 0 AND tempUPID.ON == 0)\n" "/// tempUPID.ON := 1\n" "/// sendNotify := 1\n" "/// ELSE\n" "/// sendNotify := 0\n" "/// FI\n" "/// MEM[tempUITTE.UPIDADDR] := tempUPID 
// release lock\n" "/// IF sendNotify == 1\n" "/// IF IA32_APIC_BASE[10] == 1 // local APIC is in x2APIC mode\n" "/// // send ordinary IPI with vector tempUPID.NV to 32-bit physical APIC\n" "/// // ID tempUPID.NDST\n" "/// SendOrdinaryIPI(tempUPID.NV, tempUPID.NDST)\n" "/// ELSE\n" "/// // send ordinary IPI with vector tempUPID.NV to 8-bit physical APIC\n" "/// // ID tempUPID.NDST[15:8]\n" "/// SendOrdinaryIPI(tempUPID.NV, tempUPID.NDST[15:8])\n" "/// FI\n" "/// FI\n" "/// \\endcode\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_senduipi (unsigned long long __a)\n" "{\n" " __builtin_ia32_senduipi(__a);\n" "}\n" "\n" "#endif /* __x86_64__ */\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "\n" "#endif /* __UINTRINTRIN_H */\n" "" } , { "/builtins/unwind.h" , "/*===---- unwind.h - Stack unwinding ----------------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "/* See \"Data Definitions for libgcc_s\" in the Linux Standard Base.*/\n" "\n" "#if defined(__APPLE__) && __has_include_next()\n" "/* Darwin (from 11.x on) provide an unwind.h. If that's available,\n" " * use it. libunwind wraps some of its definitions in #ifdef _GNU_SOURCE,\n" " * so define that around the include.*/\n" "# ifndef _GNU_SOURCE\n" "# define _SHOULD_UNDEFINE_GNU_SOURCE\n" "# define _GNU_SOURCE\n" "# endif\n" "// libunwind's unwind.h reflects the current visibility. However, Mozilla\n" "// builds with -fvisibility=hidden and relies on gcc's unwind.h to reset the\n" "// visibility to default and export its contents. gcc also allows users to\n" "// override its override by #defining HIDE_EXPORTS (but note, this only obeys\n" "// the user's -fvisibility setting; it doesn't hide any exports on its own). 
We\n" "// imitate gcc's header here:\n" "# ifdef HIDE_EXPORTS\n" "# include_next \n" "# else\n" "# pragma GCC visibility push(default)\n" "# include_next \n" "# pragma GCC visibility pop\n" "# endif\n" "# ifdef _SHOULD_UNDEFINE_GNU_SOURCE\n" "# undef _GNU_SOURCE\n" "# undef _SHOULD_UNDEFINE_GNU_SOURCE\n" "# endif\n" "#else\n" "\n" "#ifndef __CLANG_UNWIND_H\n" "#define __CLANG_UNWIND_H\n" "\n" "#include \n" "\n" "#ifdef __cplusplus\n" "extern \"C\" {\n" "#endif\n" "\n" "/* It is a bit strange for a header to play with the visibility of the\n" " symbols it declares, but this matches gcc's behavior and some programs\n" " depend on it */\n" "#ifndef HIDE_EXPORTS\n" "#pragma GCC visibility push(default)\n" "#endif\n" "\n" "typedef uintptr_t _Unwind_Word __attribute__((__mode__(__unwind_word__)));\n" "typedef intptr_t _Unwind_Sword __attribute__((__mode__(__unwind_word__)));\n" "typedef uintptr_t _Unwind_Ptr;\n" "typedef uintptr_t _Unwind_Internal_Ptr;\n" "typedef uint64_t _Unwind_Exception_Class;\n" "\n" "typedef intptr_t _sleb128_t;\n" "typedef uintptr_t _uleb128_t;\n" "\n" "struct _Unwind_Context;\n" "#if defined(__arm__) && !(defined(__USING_SJLJ_EXCEPTIONS__) || \\\n" " defined(__ARM_DWARF_EH__) || defined(__SEH__))\n" "struct _Unwind_Control_Block;\n" "typedef struct _Unwind_Control_Block _Unwind_Control_Block;\n" "#define _Unwind_Exception _Unwind_Control_Block /* Alias */\n" "#else\n" "struct _Unwind_Exception;\n" "typedef struct _Unwind_Exception _Unwind_Exception;\n" "#endif\n" "typedef enum {\n" " _URC_NO_REASON = 0,\n" "#if defined(__arm__) && !defined(__USING_SJLJ_EXCEPTIONS__) && \\\n" " !defined(__ARM_DWARF_EH__) && !defined(__SEH__)\n" " _URC_OK = 0, /* used by ARM EHABI */\n" "#endif\n" " _URC_FOREIGN_EXCEPTION_CAUGHT = 1,\n" "\n" " _URC_FATAL_PHASE2_ERROR = 2,\n" " _URC_FATAL_PHASE1_ERROR = 3,\n" " _URC_NORMAL_STOP = 4,\n" "\n" " _URC_END_OF_STACK = 5,\n" " _URC_HANDLER_FOUND = 6,\n" " _URC_INSTALL_CONTEXT = 7,\n" " _URC_CONTINUE_UNWIND = 8,\n" "#if 
defined(__arm__) && !defined(__USING_SJLJ_EXCEPTIONS__) && \\\n" " !defined(__ARM_DWARF_EH__) && !defined(__SEH__)\n" " _URC_FAILURE = 9 /* used by ARM EHABI */\n" "#endif\n" "} _Unwind_Reason_Code;\n" "\n" "typedef enum {\n" " _UA_SEARCH_PHASE = 1,\n" " _UA_CLEANUP_PHASE = 2,\n" "\n" " _UA_HANDLER_FRAME = 4,\n" " _UA_FORCE_UNWIND = 8,\n" " _UA_END_OF_STACK = 16 /* gcc extension to C++ ABI */\n" "} _Unwind_Action;\n" "\n" "typedef void (*_Unwind_Exception_Cleanup_Fn)(_Unwind_Reason_Code,\n" " _Unwind_Exception *);\n" "\n" "#if defined(__arm__) && !(defined(__USING_SJLJ_EXCEPTIONS__) || \\\n" " defined(__ARM_DWARF_EH__) || defined(__SEH__))\n" "typedef struct _Unwind_Control_Block _Unwind_Control_Block;\n" "typedef uint32_t _Unwind_EHT_Header;\n" "\n" "struct _Unwind_Control_Block {\n" " uint64_t exception_class;\n" " void (*exception_cleanup)(_Unwind_Reason_Code, _Unwind_Control_Block *);\n" " /* unwinder cache (private fields for the unwinder's use) */\n" " struct {\n" " uint32_t reserved1; /* forced unwind stop function, 0 if not forced */\n" " uint32_t reserved2; /* personality routine */\n" " uint32_t reserved3; /* callsite */\n" " uint32_t reserved4; /* forced unwind stop argument */\n" " uint32_t reserved5;\n" " } unwinder_cache;\n" " /* propagation barrier cache (valid after phase 1) */\n" " struct {\n" " uint32_t sp;\n" " uint32_t bitpattern[5];\n" " } barrier_cache;\n" " /* cleanup cache (preserved over cleanup) */\n" " struct {\n" " uint32_t bitpattern[4];\n" " } cleanup_cache;\n" " /* personality cache (for personality's benefit) */\n" " struct {\n" " uint32_t fnstart; /* function start address */\n" " _Unwind_EHT_Header *ehtp; /* pointer to EHT entry header word */\n" " uint32_t additional; /* additional data */\n" " uint32_t reserved1;\n" " } pr_cache;\n" " long long int : 0; /* force alignment of next item to 8-byte boundary */\n" "} __attribute__((__aligned__(8)));\n" "#else\n" "struct _Unwind_Exception {\n" " _Unwind_Exception_Class 
exception_class;\n" " _Unwind_Exception_Cleanup_Fn exception_cleanup;\n" "#if !defined (__USING_SJLJ_EXCEPTIONS__) && defined (__SEH__)\n" " _Unwind_Word private_[6];\n" "#else\n" " _Unwind_Word private_1;\n" " _Unwind_Word private_2;\n" "#endif\n" " /* The Itanium ABI requires that _Unwind_Exception objects are \"double-word\n" " * aligned\". GCC has interpreted this to mean \"use the maximum useful\n" " * alignment for the target\"; so do we. */\n" "} __attribute__((__aligned__));\n" "#endif\n" "\n" "typedef _Unwind_Reason_Code (*_Unwind_Stop_Fn)(int, _Unwind_Action,\n" " _Unwind_Exception_Class,\n" " _Unwind_Exception *,\n" " struct _Unwind_Context *,\n" " void *);\n" "\n" "typedef _Unwind_Reason_Code (*_Unwind_Personality_Fn)(int, _Unwind_Action,\n" " _Unwind_Exception_Class,\n" " _Unwind_Exception *,\n" " struct _Unwind_Context *);\n" "typedef _Unwind_Personality_Fn __personality_routine;\n" "\n" "typedef _Unwind_Reason_Code (*_Unwind_Trace_Fn)(struct _Unwind_Context *,\n" " void *);\n" "\n" "#if defined(__arm__) && !(defined(__USING_SJLJ_EXCEPTIONS__) || \\\n" " defined(__ARM_DWARF_EH__) || defined(__SEH__))\n" "typedef enum {\n" " _UVRSC_CORE = 0, /* integer register */\n" " _UVRSC_VFP = 1, /* vfp */\n" " _UVRSC_WMMXD = 3, /* Intel WMMX data register */\n" " _UVRSC_WMMXC = 4, /* Intel WMMX control register */\n" " _UVRSC_PSEUDO = 5 /* Special purpose pseudo register */\n" "} _Unwind_VRS_RegClass;\n" "\n" "typedef enum {\n" " _UVRSD_UINT32 = 0,\n" " _UVRSD_VFPX = 1,\n" " _UVRSD_UINT64 = 3,\n" " _UVRSD_FLOAT = 4,\n" " _UVRSD_DOUBLE = 5\n" "} _Unwind_VRS_DataRepresentation;\n" "\n" "typedef enum {\n" " _UVRSR_OK = 0,\n" " _UVRSR_NOT_IMPLEMENTED = 1,\n" " _UVRSR_FAILED = 2\n" "} _Unwind_VRS_Result;\n" "\n" "typedef uint32_t _Unwind_State;\n" "#define _US_VIRTUAL_UNWIND_FRAME ((_Unwind_State)0)\n" "#define _US_UNWIND_FRAME_STARTING ((_Unwind_State)1)\n" "#define _US_UNWIND_FRAME_RESUME ((_Unwind_State)2)\n" "#define _US_ACTION_MASK ((_Unwind_State)3)\n" "#define 
_US_FORCE_UNWIND ((_Unwind_State)8)\n" "\n" "_Unwind_VRS_Result _Unwind_VRS_Get(struct _Unwind_Context *__context,\n" " _Unwind_VRS_RegClass __regclass,\n" " uint32_t __regno,\n" " _Unwind_VRS_DataRepresentation __representation,\n" " void *__valuep);\n" "\n" "_Unwind_VRS_Result _Unwind_VRS_Set(struct _Unwind_Context *__context,\n" " _Unwind_VRS_RegClass __regclass,\n" " uint32_t __regno,\n" " _Unwind_VRS_DataRepresentation __representation,\n" " void *__valuep);\n" "\n" "static __inline__\n" "_Unwind_Word _Unwind_GetGR(struct _Unwind_Context *__context, int __index) {\n" " _Unwind_Word __value;\n" " _Unwind_VRS_Get(__context, _UVRSC_CORE, __index, _UVRSD_UINT32, &__value);\n" " return __value;\n" "}\n" "\n" "static __inline__\n" "void _Unwind_SetGR(struct _Unwind_Context *__context, int __index,\n" " _Unwind_Word __value) {\n" " _Unwind_VRS_Set(__context, _UVRSC_CORE, __index, _UVRSD_UINT32, &__value);\n" "}\n" "\n" "static __inline__\n" "_Unwind_Word _Unwind_GetIP(struct _Unwind_Context *__context) {\n" " _Unwind_Word __ip = _Unwind_GetGR(__context, 15);\n" " return __ip & ~(_Unwind_Word)(0x1); /* Remove thumb mode bit. 
*/\n" "}\n" "\n" "static __inline__\n" "void _Unwind_SetIP(struct _Unwind_Context *__context, _Unwind_Word __value) {\n" " _Unwind_Word __thumb_mode_bit = _Unwind_GetGR(__context, 15) & 0x1;\n" " _Unwind_SetGR(__context, 15, __value | __thumb_mode_bit);\n" "}\n" "#else\n" "_Unwind_Word _Unwind_GetGR(struct _Unwind_Context *, int);\n" "void _Unwind_SetGR(struct _Unwind_Context *, int, _Unwind_Word);\n" "\n" "_Unwind_Word _Unwind_GetIP(struct _Unwind_Context *);\n" "void _Unwind_SetIP(struct _Unwind_Context *, _Unwind_Word);\n" "#endif\n" "\n" "\n" "_Unwind_Word _Unwind_GetIPInfo(struct _Unwind_Context *, int *);\n" "\n" "_Unwind_Word _Unwind_GetCFA(struct _Unwind_Context *);\n" "\n" "_Unwind_Word _Unwind_GetBSP(struct _Unwind_Context *);\n" "\n" "void *_Unwind_GetLanguageSpecificData(struct _Unwind_Context *);\n" "\n" "_Unwind_Ptr _Unwind_GetRegionStart(struct _Unwind_Context *);\n" "\n" "/* DWARF EH functions; currently not available on Darwin/ARM */\n" "#if !defined(__APPLE__) || !defined(__arm__)\n" "_Unwind_Reason_Code _Unwind_RaiseException(_Unwind_Exception *);\n" "_Unwind_Reason_Code _Unwind_ForcedUnwind(_Unwind_Exception *, _Unwind_Stop_Fn,\n" " void *);\n" "void _Unwind_DeleteException(_Unwind_Exception *);\n" "void _Unwind_Resume(_Unwind_Exception *);\n" "_Unwind_Reason_Code _Unwind_Resume_or_Rethrow(_Unwind_Exception *);\n" "\n" "#endif\n" "\n" "_Unwind_Reason_Code _Unwind_Backtrace(_Unwind_Trace_Fn, void *);\n" "\n" "/* setjmp(3)/longjmp(3) stuff */\n" "typedef struct SjLj_Function_Context *_Unwind_FunctionContext_t;\n" "\n" "void _Unwind_SjLj_Register(_Unwind_FunctionContext_t);\n" "void _Unwind_SjLj_Unregister(_Unwind_FunctionContext_t);\n" "_Unwind_Reason_Code _Unwind_SjLj_RaiseException(_Unwind_Exception *);\n" "_Unwind_Reason_Code _Unwind_SjLj_ForcedUnwind(_Unwind_Exception *,\n" " _Unwind_Stop_Fn, void *);\n" "void _Unwind_SjLj_Resume(_Unwind_Exception *);\n" "_Unwind_Reason_Code _Unwind_SjLj_Resume_or_Rethrow(_Unwind_Exception *);\n" "\n" "void 
*_Unwind_FindEnclosingFunction(void *);\n" "\n" "#ifdef __APPLE__\n" "\n" "_Unwind_Ptr _Unwind_GetDataRelBase(struct _Unwind_Context *)\n" " __attribute__((__unavailable__));\n" "_Unwind_Ptr _Unwind_GetTextRelBase(struct _Unwind_Context *)\n" " __attribute__((__unavailable__));\n" "\n" "/* Darwin-specific functions */\n" "void __register_frame(const void *);\n" "void __deregister_frame(const void *);\n" "\n" "struct dwarf_eh_bases {\n" " uintptr_t tbase;\n" " uintptr_t dbase;\n" " uintptr_t func;\n" "};\n" "void *_Unwind_Find_FDE(const void *, struct dwarf_eh_bases *);\n" "\n" "void __register_frame_info_bases(const void *, void *, void *, void *)\n" " __attribute__((__unavailable__));\n" "void __register_frame_info(const void *, void *) __attribute__((__unavailable__));\n" "void __register_frame_info_table_bases(const void *, void*, void *, void *)\n" " __attribute__((__unavailable__));\n" "void __register_frame_info_table(const void *, void *)\n" " __attribute__((__unavailable__));\n" "void __register_frame_table(const void *) __attribute__((__unavailable__));\n" "void __deregister_frame_info(const void *) __attribute__((__unavailable__));\n" "void __deregister_frame_info_bases(const void *)__attribute__((__unavailable__));\n" "\n" "#else\n" "\n" "_Unwind_Ptr _Unwind_GetDataRelBase(struct _Unwind_Context *);\n" "_Unwind_Ptr _Unwind_GetTextRelBase(struct _Unwind_Context *);\n" "\n" "#endif\n" "\n" "\n" "#ifndef HIDE_EXPORTS\n" "#pragma GCC visibility pop\n" "#endif\n" "\n" "#ifdef __cplusplus\n" "}\n" "#endif\n" "\n" "#endif /* __CLANG_UNWIND_H */\n" "\n" "#endif\n" "\n" "" } , { "/builtins/vadefs.h" , "/* ===-------- vadefs.h ---------------------------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " 
*===-----------------------------------------------------------------------===\n" " */\n" "\n" "/* Only include this if we are aiming for MSVC compatibility. */\n" "#ifndef _MSC_VER\n" "#include_next \n" "#else\n" "\n" "#ifndef __clang_vadefs_h\n" "#define __clang_vadefs_h\n" "\n" "#include_next \n" "\n" "/* Override macros from vadefs.h with definitions that work with Clang. */\n" "#ifdef _crt_va_start\n" "#undef _crt_va_start\n" "#define _crt_va_start(ap, param) __builtin_va_start(ap, param)\n" "#endif\n" "#ifdef _crt_va_end\n" "#undef _crt_va_end\n" "#define _crt_va_end(ap) __builtin_va_end(ap)\n" "#endif\n" "#ifdef _crt_va_arg\n" "#undef _crt_va_arg\n" "#define _crt_va_arg(ap, type) __builtin_va_arg(ap, type)\n" "#endif\n" "\n" "/* VS 2015 switched to double underscore names, which is an improvement, but now\n" " * we have to intercept those names too.\n" " */\n" "#ifdef __crt_va_start\n" "#undef __crt_va_start\n" "#define __crt_va_start(ap, param) __builtin_va_start(ap, param)\n" "#endif\n" "#ifdef __crt_va_end\n" "#undef __crt_va_end\n" "#define __crt_va_end(ap) __builtin_va_end(ap)\n" "#endif\n" "#ifdef __crt_va_arg\n" "#undef __crt_va_arg\n" "#define __crt_va_arg(ap, type) __builtin_va_arg(ap, type)\n" "#endif\n" "\n" "#endif\n" "#endif\n" "" } , { "/builtins/vaesintrin.h" , "/*===------------------ vaesintrin.h - VAES intrinsics ---------------------===\n" " *\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "#ifndef __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __VAESINTRIN_H\n" "#define __VAESINTRIN_H\n" "\n" "/* Default attributes for YMM forms. 
*/\n" "#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__(\"vaes\"), __min_vector_width__(256)))\n" "\n" "/* Default attributes for ZMM forms. */\n" "#define __DEFAULT_FN_ATTRS_F __attribute__((__always_inline__, __nodebug__, __target__(\"avx512f,vaes\"), __min_vector_width__(512)))\n" "\n" "\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS\n" " _mm256_aesenc_epi128(__m256i __A, __m256i __B)\n" "{\n" " return (__m256i) __builtin_ia32_aesenc256((__v4di) __A,\n" " (__v4di) __B);\n" "}\n" "\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS\n" " _mm256_aesdec_epi128(__m256i __A, __m256i __B)\n" "{\n" " return (__m256i) __builtin_ia32_aesdec256((__v4di) __A,\n" " (__v4di) __B);\n" "}\n" "\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS\n" " _mm256_aesenclast_epi128(__m256i __A, __m256i __B)\n" "{\n" " return (__m256i) __builtin_ia32_aesenclast256((__v4di) __A,\n" " (__v4di) __B);\n" "}\n" "\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS\n" " _mm256_aesdeclast_epi128(__m256i __A, __m256i __B)\n" "{\n" " return (__m256i) __builtin_ia32_aesdeclast256((__v4di) __A,\n" " (__v4di) __B);\n" "}\n" "\n" "#ifdef __AVX512FINTRIN_H\n" "static __inline__ __m512i __DEFAULT_FN_ATTRS_F\n" " _mm512_aesenc_epi128(__m512i __A, __m512i __B)\n" "{\n" " return (__m512i) __builtin_ia32_aesenc512((__v8di) __A,\n" " (__v8di) __B);\n" "}\n" "\n" "static __inline__ __m512i __DEFAULT_FN_ATTRS_F\n" " _mm512_aesdec_epi128(__m512i __A, __m512i __B)\n" "{\n" " return (__m512i) __builtin_ia32_aesdec512((__v8di) __A,\n" " (__v8di) __B);\n" "}\n" "\n" "static __inline__ __m512i __DEFAULT_FN_ATTRS_F\n" " _mm512_aesenclast_epi128(__m512i __A, __m512i __B)\n" "{\n" " return (__m512i) __builtin_ia32_aesenclast512((__v8di) __A,\n" " (__v8di) __B);\n" "}\n" "\n" "static __inline__ __m512i __DEFAULT_FN_ATTRS_F\n" " _mm512_aesdeclast_epi128(__m512i __A, __m512i __B)\n" "{\n" " return (__m512i) __builtin_ia32_aesdeclast512((__v8di) __A,\n" " (__v8di) __B);\n" "}\n" "#endif // 
__AVX512FINTRIN_H\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "#undef __DEFAULT_FN_ATTRS_F\n" "\n" "#endif // __VAESINTRIN_H\n" "" } , { "/builtins/varargs.h" , "/*===---- varargs.h - Variable argument handling -------------------------------------===\n" "*\n" "* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" "* See https://llvm.org/LICENSE.txt for license information.\n" "* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" "*\n" "*===-----------------------------------------------------------------------===\n" "*/\n" "#ifndef __VARARGS_H\n" "#define __VARARGS_H\n" " #error \"Please use instead of \"\n" "#endif\n" "" } , { "/builtins/velintrin.h" , "/*===---- velintrin.h - VEL intrinsics for VE ------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "#ifndef __VEL_INTRIN_H__\n" "#define __VEL_INTRIN_H__\n" "\n" "// Vector registers\n" "typedef double __vr __attribute__((__vector_size__(2048)));\n" "\n" "// Vector mask registers\n" "#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L\n" "// For C99\n" "typedef _Bool __vm __attribute__((ext_vector_type(256)));\n" "typedef _Bool __vm256 __attribute__((ext_vector_type(256)));\n" "typedef _Bool __vm512 __attribute__((ext_vector_type(512)));\n" "#else\n" "#ifdef __cplusplus\n" "// For C++\n" "typedef bool __vm __attribute__((ext_vector_type(256)));\n" "typedef bool __vm256 __attribute__((ext_vector_type(256)));\n" "typedef bool __vm512 __attribute__((ext_vector_type(512)));\n" "#else\n" "#error need C++ or C99 to use vector intrinsics for VE\n" "#endif\n" "#endif\n" "\n" "enum VShuffleCodes {\n" " VE_VSHUFFLE_YUYU = 0,\n" " VE_VSHUFFLE_YUYL = 1,\n" " VE_VSHUFFLE_YUZU = 2,\n" " 
VE_VSHUFFLE_YUZL = 3,\n" " VE_VSHUFFLE_YLYU = 4,\n" " VE_VSHUFFLE_YLYL = 5,\n" " VE_VSHUFFLE_YLZU = 6,\n" " VE_VSHUFFLE_YLZL = 7,\n" " VE_VSHUFFLE_ZUYU = 8,\n" " VE_VSHUFFLE_ZUYL = 9,\n" " VE_VSHUFFLE_ZUZU = 10,\n" " VE_VSHUFFLE_ZUZL = 11,\n" " VE_VSHUFFLE_ZLYU = 12,\n" " VE_VSHUFFLE_ZLYL = 13,\n" " VE_VSHUFFLE_ZLZU = 14,\n" " VE_VSHUFFLE_ZLZL = 15,\n" "};\n" "\n" "// Use generated intrinsic name definitions\n" "#include \n" "\n" "// Use helper functions\n" "#include \n" "\n" "// pack\n" "\n" "#define _vel_pack_f32p __builtin_ve_vl_pack_f32p\n" "#define _vel_pack_f32a __builtin_ve_vl_pack_f32a\n" "\n" "static inline unsigned long int _vel_pack_i32(unsigned int a, unsigned int b) {\n" " return (((unsigned long int)a) << 32) | b;\n" "}\n" "\n" "#define _vel_extract_vm512u(vm) __builtin_ve_vl_extract_vm512u(vm)\n" "#define _vel_extract_vm512l(vm) __builtin_ve_vl_extract_vm512l(vm)\n" "#define _vel_insert_vm512u(vm512, vm) __builtin_ve_vl_insert_vm512u(vm512, vm)\n" "#define _vel_insert_vm512l(vm512, vm) __builtin_ve_vl_insert_vm512l(vm512, vm)\n" "\n" "#endif\n" "" } , { "/builtins/velintrin_approx.h" , "/*===---- velintrin_approx.h - VEL intrinsics helper for VE ----------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "#ifndef __VEL_INTRIN_APPROX_H__\n" "#define __VEL_INTRIN_APPROX_H__\n" "\n" "static inline __vr _vel_approx_vfdivs_vvvl(__vr v0, __vr v1, int l) {\n" " float s0;\n" " __vr v2, v3, v4, v5;\n" " v5 = _vel_vrcps_vvl(v1, l);\n" " s0 = 1.0;\n" " v4 = _vel_vfnmsbs_vsvvl(s0, v1, v5, l);\n" " v3 = _vel_vfmads_vvvvl(v5, v5, v4, l);\n" " v2 = _vel_vfmuls_vvvl(v0, v3, l);\n" " v4 = _vel_vfnmsbs_vvvvl(v0, v2, v1, l);\n" " v2 = _vel_vfmads_vvvvl(v2, v5, v4, l);\n" " v0 = 
_vel_vfnmsbs_vvvvl(v0, v2, v1, l);\n" " v0 = _vel_vfmads_vvvvl(v2, v3, v0, l);\n" " return v0;\n" "}\n" "\n" "static inline __vr _vel_approx_pvfdiv_vvvl(__vr v0, __vr v1, int l) {\n" " float s0;\n" " __vr v2, v3, v4, v5;\n" " v5 = _vel_pvrcp_vvl(v1, l);\n" " s0 = 1.0;\n" " v4 = _vel_pvfnmsb_vsvvl(s0, v1, v5, l);\n" " v3 = _vel_pvfmad_vvvvl(v5, v5, v4, l);\n" " v2 = _vel_pvfmul_vvvl(v0, v3, l);\n" " v4 = _vel_pvfnmsb_vvvvl(v0, v2, v1, l);\n" " v2 = _vel_pvfmad_vvvvl(v2, v5, v4, l);\n" " v0 = _vel_pvfnmsb_vvvvl(v0, v2, v1, l);\n" " v0 = _vel_pvfmad_vvvvl(v2, v3, v0, l);\n" " return v0;\n" "}\n" "\n" "static inline __vr _vel_approx_vfdivs_vsvl(float s0, __vr v0, int l) {\n" " float s1;\n" " __vr v1, v2, v3, v4;\n" " v4 = _vel_vrcps_vvl(v0, l);\n" " s1 = 1.0;\n" " v2 = _vel_vfnmsbs_vsvvl(s1, v0, v4, l);\n" " v2 = _vel_vfmads_vvvvl(v4, v4, v2, l);\n" " v1 = _vel_vfmuls_vsvl(s0, v2, l);\n" " v3 = _vel_vfnmsbs_vsvvl(s0, v1, v0, l);\n" " v1 = _vel_vfmads_vvvvl(v1, v4, v3, l);\n" " v3 = _vel_vfnmsbs_vsvvl(s0, v1, v0, l);\n" " v0 = _vel_vfmads_vvvvl(v1, v2, v3, l);\n" " return v0;\n" "}\n" "\n" "static inline __vr _vel_approx_vfdivs_vvsl(__vr v0, float s0, int l) {\n" " float s1;\n" " __vr v1, v2;\n" " s1 = 1.0f / s0;\n" " v1 = _vel_vfmuls_vsvl(s1, v0, l);\n" " v2 = _vel_vfnmsbs_vvsvl(v0, s0, v1, l);\n" " v0 = _vel_vfmads_vvsvl(v1, s1, v2, l);\n" " return v0;\n" "}\n" "\n" "static inline __vr _vel_approx_vfdivd_vsvl(double s0, __vr v0, int l) {\n" " __vr v1, v2, v3;\n" " v2 = _vel_vrcpd_vvl(v0, l);\n" " double s1 = 1.0;\n" " v3 = _vel_vfnmsbd_vsvvl(s1, v0, v2, l);\n" " v2 = _vel_vfmadd_vvvvl(v2, v2, v3, l);\n" " v1 = _vel_vfnmsbd_vsvvl(s1, v0, v2, l);\n" " v1 = _vel_vfmadd_vvvvl(v2, v2, v1, l);\n" " v1 = _vel_vaddul_vsvl(1, v1, l);\n" " v3 = _vel_vfnmsbd_vsvvl(s1, v0, v1, l);\n" " v3 = _vel_vfmadd_vvvvl(v1, v1, v3, l);\n" " v1 = _vel_vfmuld_vsvl(s0, v3, l);\n" " v0 = _vel_vfnmsbd_vsvvl(s0, v1, v0, l);\n" " v0 = _vel_vfmadd_vvvvl(v1, v3, v0, l);\n" " return v0;\n" "}\n" "\n" 
"static inline __vr _vel_approx_vfsqrtd_vvl(__vr v0, int l) {\n" " double s0, s1;\n" " __vr v1, v2, v3;\n" " v2 = _vel_vrsqrtdnex_vvl(v0, l);\n" " v1 = _vel_vfmuld_vvvl(v0, v2, l);\n" " s0 = 1.0;\n" " s1 = 0.5;\n" " v3 = _vel_vfnmsbd_vsvvl(s0, v1, v2, l);\n" " v3 = _vel_vfmuld_vsvl(s1, v3, l);\n" " v2 = _vel_vfmadd_vvvvl(v2, v2, v3, l);\n" " v1 = _vel_vfmuld_vvvl(v0, v2, l);\n" " v3 = _vel_vfnmsbd_vsvvl(s0, v1, v2, l);\n" " v3 = _vel_vfmuld_vsvl(s1, v3, l);\n" " v0 = _vel_vfmadd_vvvvl(v1, v1, v3, l);\n" " return v0;\n" "}\n" "\n" "static inline __vr _vel_approx_vfsqrts_vvl(__vr v0, int l) {\n" " float s0, s1;\n" " __vr v1, v2, v3;\n" " v0 = _vel_vcvtds_vvl(v0, l);\n" " v2 = _vel_vrsqrtdnex_vvl(v0, l);\n" " v1 = _vel_vfmuld_vvvl(v0, v2, l);\n" " s0 = 1.0;\n" " s1 = 0.5;\n" " v3 = _vel_vfnmsbd_vsvvl(s0, v1, v2, l);\n" " v3 = _vel_vfmuld_vsvl(s1, v3, l);\n" " v2 = _vel_vfmadd_vvvvl(v2, v2, v3, l);\n" " v1 = _vel_vfmuld_vvvl(v0, v2, l);\n" " v3 = _vel_vfnmsbd_vsvvl(s0, v1, v2, l);\n" " v3 = _vel_vfmuld_vsvl(s1, v3, l);\n" " v0 = _vel_vfmadd_vvvvl(v1, v1, v3, l);\n" " v0 = _vel_vcvtsd_vvl(v0, l);\n" " return v0;\n" "}\n" "\n" "#endif\n" "" } , { "/builtins/velintrin_gen.h" , "#define _vel_vld_vssl __builtin_ve_vl_vld_vssl\n" "#define _vel_vld_vssvl __builtin_ve_vl_vld_vssvl\n" "#define _vel_vldnc_vssl __builtin_ve_vl_vldnc_vssl\n" "#define _vel_vldnc_vssvl __builtin_ve_vl_vldnc_vssvl\n" "#define _vel_vldu_vssl __builtin_ve_vl_vldu_vssl\n" "#define _vel_vldu_vssvl __builtin_ve_vl_vldu_vssvl\n" "#define _vel_vldunc_vssl __builtin_ve_vl_vldunc_vssl\n" "#define _vel_vldunc_vssvl __builtin_ve_vl_vldunc_vssvl\n" "#define _vel_vldlsx_vssl __builtin_ve_vl_vldlsx_vssl\n" "#define _vel_vldlsx_vssvl __builtin_ve_vl_vldlsx_vssvl\n" "#define _vel_vldlsxnc_vssl __builtin_ve_vl_vldlsxnc_vssl\n" "#define _vel_vldlsxnc_vssvl __builtin_ve_vl_vldlsxnc_vssvl\n" "#define _vel_vldlzx_vssl __builtin_ve_vl_vldlzx_vssl\n" "#define _vel_vldlzx_vssvl __builtin_ve_vl_vldlzx_vssvl\n" "#define 
_vel_vldlzxnc_vssl __builtin_ve_vl_vldlzxnc_vssl\n" "#define _vel_vldlzxnc_vssvl __builtin_ve_vl_vldlzxnc_vssvl\n" "#define _vel_vld2d_vssl __builtin_ve_vl_vld2d_vssl\n" "#define _vel_vld2d_vssvl __builtin_ve_vl_vld2d_vssvl\n" "#define _vel_vld2dnc_vssl __builtin_ve_vl_vld2dnc_vssl\n" "#define _vel_vld2dnc_vssvl __builtin_ve_vl_vld2dnc_vssvl\n" "#define _vel_vldu2d_vssl __builtin_ve_vl_vldu2d_vssl\n" "#define _vel_vldu2d_vssvl __builtin_ve_vl_vldu2d_vssvl\n" "#define _vel_vldu2dnc_vssl __builtin_ve_vl_vldu2dnc_vssl\n" "#define _vel_vldu2dnc_vssvl __builtin_ve_vl_vldu2dnc_vssvl\n" "#define _vel_vldl2dsx_vssl __builtin_ve_vl_vldl2dsx_vssl\n" "#define _vel_vldl2dsx_vssvl __builtin_ve_vl_vldl2dsx_vssvl\n" "#define _vel_vldl2dsxnc_vssl __builtin_ve_vl_vldl2dsxnc_vssl\n" "#define _vel_vldl2dsxnc_vssvl __builtin_ve_vl_vldl2dsxnc_vssvl\n" "#define _vel_vldl2dzx_vssl __builtin_ve_vl_vldl2dzx_vssl\n" "#define _vel_vldl2dzx_vssvl __builtin_ve_vl_vldl2dzx_vssvl\n" "#define _vel_vldl2dzxnc_vssl __builtin_ve_vl_vldl2dzxnc_vssl\n" "#define _vel_vldl2dzxnc_vssvl __builtin_ve_vl_vldl2dzxnc_vssvl\n" "#define _vel_vst_vssl __builtin_ve_vl_vst_vssl\n" "#define _vel_vst_vssml __builtin_ve_vl_vst_vssml\n" "#define _vel_vstnc_vssl __builtin_ve_vl_vstnc_vssl\n" "#define _vel_vstnc_vssml __builtin_ve_vl_vstnc_vssml\n" "#define _vel_vstot_vssl __builtin_ve_vl_vstot_vssl\n" "#define _vel_vstot_vssml __builtin_ve_vl_vstot_vssml\n" "#define _vel_vstncot_vssl __builtin_ve_vl_vstncot_vssl\n" "#define _vel_vstncot_vssml __builtin_ve_vl_vstncot_vssml\n" "#define _vel_vstu_vssl __builtin_ve_vl_vstu_vssl\n" "#define _vel_vstu_vssml __builtin_ve_vl_vstu_vssml\n" "#define _vel_vstunc_vssl __builtin_ve_vl_vstunc_vssl\n" "#define _vel_vstunc_vssml __builtin_ve_vl_vstunc_vssml\n" "#define _vel_vstuot_vssl __builtin_ve_vl_vstuot_vssl\n" "#define _vel_vstuot_vssml __builtin_ve_vl_vstuot_vssml\n" "#define _vel_vstuncot_vssl __builtin_ve_vl_vstuncot_vssl\n" "#define _vel_vstuncot_vssml 
__builtin_ve_vl_vstuncot_vssml\n" "#define _vel_vstl_vssl __builtin_ve_vl_vstl_vssl\n" "#define _vel_vstl_vssml __builtin_ve_vl_vstl_vssml\n" "#define _vel_vstlnc_vssl __builtin_ve_vl_vstlnc_vssl\n" "#define _vel_vstlnc_vssml __builtin_ve_vl_vstlnc_vssml\n" "#define _vel_vstlot_vssl __builtin_ve_vl_vstlot_vssl\n" "#define _vel_vstlot_vssml __builtin_ve_vl_vstlot_vssml\n" "#define _vel_vstlncot_vssl __builtin_ve_vl_vstlncot_vssl\n" "#define _vel_vstlncot_vssml __builtin_ve_vl_vstlncot_vssml\n" "#define _vel_vst2d_vssl __builtin_ve_vl_vst2d_vssl\n" "#define _vel_vst2d_vssml __builtin_ve_vl_vst2d_vssml\n" "#define _vel_vst2dnc_vssl __builtin_ve_vl_vst2dnc_vssl\n" "#define _vel_vst2dnc_vssml __builtin_ve_vl_vst2dnc_vssml\n" "#define _vel_vst2dot_vssl __builtin_ve_vl_vst2dot_vssl\n" "#define _vel_vst2dot_vssml __builtin_ve_vl_vst2dot_vssml\n" "#define _vel_vst2dncot_vssl __builtin_ve_vl_vst2dncot_vssl\n" "#define _vel_vst2dncot_vssml __builtin_ve_vl_vst2dncot_vssml\n" "#define _vel_vstu2d_vssl __builtin_ve_vl_vstu2d_vssl\n" "#define _vel_vstu2d_vssml __builtin_ve_vl_vstu2d_vssml\n" "#define _vel_vstu2dnc_vssl __builtin_ve_vl_vstu2dnc_vssl\n" "#define _vel_vstu2dnc_vssml __builtin_ve_vl_vstu2dnc_vssml\n" "#define _vel_vstu2dot_vssl __builtin_ve_vl_vstu2dot_vssl\n" "#define _vel_vstu2dot_vssml __builtin_ve_vl_vstu2dot_vssml\n" "#define _vel_vstu2dncot_vssl __builtin_ve_vl_vstu2dncot_vssl\n" "#define _vel_vstu2dncot_vssml __builtin_ve_vl_vstu2dncot_vssml\n" "#define _vel_vstl2d_vssl __builtin_ve_vl_vstl2d_vssl\n" "#define _vel_vstl2d_vssml __builtin_ve_vl_vstl2d_vssml\n" "#define _vel_vstl2dnc_vssl __builtin_ve_vl_vstl2dnc_vssl\n" "#define _vel_vstl2dnc_vssml __builtin_ve_vl_vstl2dnc_vssml\n" "#define _vel_vstl2dot_vssl __builtin_ve_vl_vstl2dot_vssl\n" "#define _vel_vstl2dot_vssml __builtin_ve_vl_vstl2dot_vssml\n" "#define _vel_vstl2dncot_vssl __builtin_ve_vl_vstl2dncot_vssl\n" "#define _vel_vstl2dncot_vssml __builtin_ve_vl_vstl2dncot_vssml\n" "#define _vel_pfchv_ssl 
__builtin_ve_vl_pfchv_ssl\n" "#define _vel_pfchvnc_ssl __builtin_ve_vl_pfchvnc_ssl\n" "#define _vel_lsv_vvss __builtin_ve_vl_lsv_vvss\n" "#define _vel_lvsl_svs __builtin_ve_vl_lvsl_svs\n" "#define _vel_lvsd_svs __builtin_ve_vl_lvsd_svs\n" "#define _vel_lvss_svs __builtin_ve_vl_lvss_svs\n" "#define _vel_lvm_mmss __builtin_ve_vl_lvm_mmss\n" "#define _vel_lvm_MMss __builtin_ve_vl_lvm_MMss\n" "#define _vel_svm_sms __builtin_ve_vl_svm_sms\n" "#define _vel_svm_sMs __builtin_ve_vl_svm_sMs\n" "#define _vel_vbrdd_vsl __builtin_ve_vl_vbrdd_vsl\n" "#define _vel_vbrdd_vsvl __builtin_ve_vl_vbrdd_vsvl\n" "#define _vel_vbrdd_vsmvl __builtin_ve_vl_vbrdd_vsmvl\n" "#define _vel_vbrdl_vsl __builtin_ve_vl_vbrdl_vsl\n" "#define _vel_vbrdl_vsvl __builtin_ve_vl_vbrdl_vsvl\n" "#define _vel_vbrdl_vsmvl __builtin_ve_vl_vbrdl_vsmvl\n" "#define _vel_vbrds_vsl __builtin_ve_vl_vbrds_vsl\n" "#define _vel_vbrds_vsvl __builtin_ve_vl_vbrds_vsvl\n" "#define _vel_vbrds_vsmvl __builtin_ve_vl_vbrds_vsmvl\n" "#define _vel_vbrdw_vsl __builtin_ve_vl_vbrdw_vsl\n" "#define _vel_vbrdw_vsvl __builtin_ve_vl_vbrdw_vsvl\n" "#define _vel_vbrdw_vsmvl __builtin_ve_vl_vbrdw_vsmvl\n" "#define _vel_pvbrd_vsl __builtin_ve_vl_pvbrd_vsl\n" "#define _vel_pvbrd_vsvl __builtin_ve_vl_pvbrd_vsvl\n" "#define _vel_pvbrd_vsMvl __builtin_ve_vl_pvbrd_vsMvl\n" "#define _vel_vmv_vsvl __builtin_ve_vl_vmv_vsvl\n" "#define _vel_vmv_vsvvl __builtin_ve_vl_vmv_vsvvl\n" "#define _vel_vmv_vsvmvl __builtin_ve_vl_vmv_vsvmvl\n" "#define _vel_vaddul_vvvl __builtin_ve_vl_vaddul_vvvl\n" "#define _vel_vaddul_vvvvl __builtin_ve_vl_vaddul_vvvvl\n" "#define _vel_vaddul_vsvl __builtin_ve_vl_vaddul_vsvl\n" "#define _vel_vaddul_vsvvl __builtin_ve_vl_vaddul_vsvvl\n" "#define _vel_vaddul_vvvmvl __builtin_ve_vl_vaddul_vvvmvl\n" "#define _vel_vaddul_vsvmvl __builtin_ve_vl_vaddul_vsvmvl\n" "#define _vel_vadduw_vvvl __builtin_ve_vl_vadduw_vvvl\n" "#define _vel_vadduw_vvvvl __builtin_ve_vl_vadduw_vvvvl\n" "#define _vel_vadduw_vsvl 
__builtin_ve_vl_vadduw_vsvl\n" "#define _vel_vadduw_vsvvl __builtin_ve_vl_vadduw_vsvvl\n" "#define _vel_vadduw_vvvmvl __builtin_ve_vl_vadduw_vvvmvl\n" "#define _vel_vadduw_vsvmvl __builtin_ve_vl_vadduw_vsvmvl\n" "#define _vel_pvaddu_vvvl __builtin_ve_vl_pvaddu_vvvl\n" "#define _vel_pvaddu_vvvvl __builtin_ve_vl_pvaddu_vvvvl\n" "#define _vel_pvaddu_vsvl __builtin_ve_vl_pvaddu_vsvl\n" "#define _vel_pvaddu_vsvvl __builtin_ve_vl_pvaddu_vsvvl\n" "#define _vel_pvaddu_vvvMvl __builtin_ve_vl_pvaddu_vvvMvl\n" "#define _vel_pvaddu_vsvMvl __builtin_ve_vl_pvaddu_vsvMvl\n" "#define _vel_vaddswsx_vvvl __builtin_ve_vl_vaddswsx_vvvl\n" "#define _vel_vaddswsx_vvvvl __builtin_ve_vl_vaddswsx_vvvvl\n" "#define _vel_vaddswsx_vsvl __builtin_ve_vl_vaddswsx_vsvl\n" "#define _vel_vaddswsx_vsvvl __builtin_ve_vl_vaddswsx_vsvvl\n" "#define _vel_vaddswsx_vvvmvl __builtin_ve_vl_vaddswsx_vvvmvl\n" "#define _vel_vaddswsx_vsvmvl __builtin_ve_vl_vaddswsx_vsvmvl\n" "#define _vel_vaddswzx_vvvl __builtin_ve_vl_vaddswzx_vvvl\n" "#define _vel_vaddswzx_vvvvl __builtin_ve_vl_vaddswzx_vvvvl\n" "#define _vel_vaddswzx_vsvl __builtin_ve_vl_vaddswzx_vsvl\n" "#define _vel_vaddswzx_vsvvl __builtin_ve_vl_vaddswzx_vsvvl\n" "#define _vel_vaddswzx_vvvmvl __builtin_ve_vl_vaddswzx_vvvmvl\n" "#define _vel_vaddswzx_vsvmvl __builtin_ve_vl_vaddswzx_vsvmvl\n" "#define _vel_pvadds_vvvl __builtin_ve_vl_pvadds_vvvl\n" "#define _vel_pvadds_vvvvl __builtin_ve_vl_pvadds_vvvvl\n" "#define _vel_pvadds_vsvl __builtin_ve_vl_pvadds_vsvl\n" "#define _vel_pvadds_vsvvl __builtin_ve_vl_pvadds_vsvvl\n" "#define _vel_pvadds_vvvMvl __builtin_ve_vl_pvadds_vvvMvl\n" "#define _vel_pvadds_vsvMvl __builtin_ve_vl_pvadds_vsvMvl\n" "#define _vel_vaddsl_vvvl __builtin_ve_vl_vaddsl_vvvl\n" "#define _vel_vaddsl_vvvvl __builtin_ve_vl_vaddsl_vvvvl\n" "#define _vel_vaddsl_vsvl __builtin_ve_vl_vaddsl_vsvl\n" "#define _vel_vaddsl_vsvvl __builtin_ve_vl_vaddsl_vsvvl\n" "#define _vel_vaddsl_vvvmvl __builtin_ve_vl_vaddsl_vvvmvl\n" "#define _vel_vaddsl_vsvmvl 
__builtin_ve_vl_vaddsl_vsvmvl\n" "#define _vel_vsubul_vvvl __builtin_ve_vl_vsubul_vvvl\n" "#define _vel_vsubul_vvvvl __builtin_ve_vl_vsubul_vvvvl\n" "#define _vel_vsubul_vsvl __builtin_ve_vl_vsubul_vsvl\n" "#define _vel_vsubul_vsvvl __builtin_ve_vl_vsubul_vsvvl\n" "#define _vel_vsubul_vvvmvl __builtin_ve_vl_vsubul_vvvmvl\n" "#define _vel_vsubul_vsvmvl __builtin_ve_vl_vsubul_vsvmvl\n" "#define _vel_vsubuw_vvvl __builtin_ve_vl_vsubuw_vvvl\n" "#define _vel_vsubuw_vvvvl __builtin_ve_vl_vsubuw_vvvvl\n" "#define _vel_vsubuw_vsvl __builtin_ve_vl_vsubuw_vsvl\n" "#define _vel_vsubuw_vsvvl __builtin_ve_vl_vsubuw_vsvvl\n" "#define _vel_vsubuw_vvvmvl __builtin_ve_vl_vsubuw_vvvmvl\n" "#define _vel_vsubuw_vsvmvl __builtin_ve_vl_vsubuw_vsvmvl\n" "#define _vel_pvsubu_vvvl __builtin_ve_vl_pvsubu_vvvl\n" "#define _vel_pvsubu_vvvvl __builtin_ve_vl_pvsubu_vvvvl\n" "#define _vel_pvsubu_vsvl __builtin_ve_vl_pvsubu_vsvl\n" "#define _vel_pvsubu_vsvvl __builtin_ve_vl_pvsubu_vsvvl\n" "#define _vel_pvsubu_vvvMvl __builtin_ve_vl_pvsubu_vvvMvl\n" "#define _vel_pvsubu_vsvMvl __builtin_ve_vl_pvsubu_vsvMvl\n" "#define _vel_vsubswsx_vvvl __builtin_ve_vl_vsubswsx_vvvl\n" "#define _vel_vsubswsx_vvvvl __builtin_ve_vl_vsubswsx_vvvvl\n" "#define _vel_vsubswsx_vsvl __builtin_ve_vl_vsubswsx_vsvl\n" "#define _vel_vsubswsx_vsvvl __builtin_ve_vl_vsubswsx_vsvvl\n" "#define _vel_vsubswsx_vvvmvl __builtin_ve_vl_vsubswsx_vvvmvl\n" "#define _vel_vsubswsx_vsvmvl __builtin_ve_vl_vsubswsx_vsvmvl\n" "#define _vel_vsubswzx_vvvl __builtin_ve_vl_vsubswzx_vvvl\n" "#define _vel_vsubswzx_vvvvl __builtin_ve_vl_vsubswzx_vvvvl\n" "#define _vel_vsubswzx_vsvl __builtin_ve_vl_vsubswzx_vsvl\n" "#define _vel_vsubswzx_vsvvl __builtin_ve_vl_vsubswzx_vsvvl\n" "#define _vel_vsubswzx_vvvmvl __builtin_ve_vl_vsubswzx_vvvmvl\n" "#define _vel_vsubswzx_vsvmvl __builtin_ve_vl_vsubswzx_vsvmvl\n" "#define _vel_pvsubs_vvvl __builtin_ve_vl_pvsubs_vvvl\n" "#define _vel_pvsubs_vvvvl __builtin_ve_vl_pvsubs_vvvvl\n" "#define _vel_pvsubs_vsvl 
__builtin_ve_vl_pvsubs_vsvl\n" "#define _vel_pvsubs_vsvvl __builtin_ve_vl_pvsubs_vsvvl\n" "#define _vel_pvsubs_vvvMvl __builtin_ve_vl_pvsubs_vvvMvl\n" "#define _vel_pvsubs_vsvMvl __builtin_ve_vl_pvsubs_vsvMvl\n" "#define _vel_vsubsl_vvvl __builtin_ve_vl_vsubsl_vvvl\n" "#define _vel_vsubsl_vvvvl __builtin_ve_vl_vsubsl_vvvvl\n" "#define _vel_vsubsl_vsvl __builtin_ve_vl_vsubsl_vsvl\n" "#define _vel_vsubsl_vsvvl __builtin_ve_vl_vsubsl_vsvvl\n" "#define _vel_vsubsl_vvvmvl __builtin_ve_vl_vsubsl_vvvmvl\n" "#define _vel_vsubsl_vsvmvl __builtin_ve_vl_vsubsl_vsvmvl\n" "#define _vel_vmulul_vvvl __builtin_ve_vl_vmulul_vvvl\n" "#define _vel_vmulul_vvvvl __builtin_ve_vl_vmulul_vvvvl\n" "#define _vel_vmulul_vsvl __builtin_ve_vl_vmulul_vsvl\n" "#define _vel_vmulul_vsvvl __builtin_ve_vl_vmulul_vsvvl\n" "#define _vel_vmulul_vvvmvl __builtin_ve_vl_vmulul_vvvmvl\n" "#define _vel_vmulul_vsvmvl __builtin_ve_vl_vmulul_vsvmvl\n" "#define _vel_vmuluw_vvvl __builtin_ve_vl_vmuluw_vvvl\n" "#define _vel_vmuluw_vvvvl __builtin_ve_vl_vmuluw_vvvvl\n" "#define _vel_vmuluw_vsvl __builtin_ve_vl_vmuluw_vsvl\n" "#define _vel_vmuluw_vsvvl __builtin_ve_vl_vmuluw_vsvvl\n" "#define _vel_vmuluw_vvvmvl __builtin_ve_vl_vmuluw_vvvmvl\n" "#define _vel_vmuluw_vsvmvl __builtin_ve_vl_vmuluw_vsvmvl\n" "#define _vel_vmulswsx_vvvl __builtin_ve_vl_vmulswsx_vvvl\n" "#define _vel_vmulswsx_vvvvl __builtin_ve_vl_vmulswsx_vvvvl\n" "#define _vel_vmulswsx_vsvl __builtin_ve_vl_vmulswsx_vsvl\n" "#define _vel_vmulswsx_vsvvl __builtin_ve_vl_vmulswsx_vsvvl\n" "#define _vel_vmulswsx_vvvmvl __builtin_ve_vl_vmulswsx_vvvmvl\n" "#define _vel_vmulswsx_vsvmvl __builtin_ve_vl_vmulswsx_vsvmvl\n" "#define _vel_vmulswzx_vvvl __builtin_ve_vl_vmulswzx_vvvl\n" "#define _vel_vmulswzx_vvvvl __builtin_ve_vl_vmulswzx_vvvvl\n" "#define _vel_vmulswzx_vsvl __builtin_ve_vl_vmulswzx_vsvl\n" "#define _vel_vmulswzx_vsvvl __builtin_ve_vl_vmulswzx_vsvvl\n" "#define _vel_vmulswzx_vvvmvl __builtin_ve_vl_vmulswzx_vvvmvl\n" "#define _vel_vmulswzx_vsvmvl 
__builtin_ve_vl_vmulswzx_vsvmvl\n" "#define _vel_vmulsl_vvvl __builtin_ve_vl_vmulsl_vvvl\n" "#define _vel_vmulsl_vvvvl __builtin_ve_vl_vmulsl_vvvvl\n" "#define _vel_vmulsl_vsvl __builtin_ve_vl_vmulsl_vsvl\n" "#define _vel_vmulsl_vsvvl __builtin_ve_vl_vmulsl_vsvvl\n" "#define _vel_vmulsl_vvvmvl __builtin_ve_vl_vmulsl_vvvmvl\n" "#define _vel_vmulsl_vsvmvl __builtin_ve_vl_vmulsl_vsvmvl\n" "#define _vel_vmulslw_vvvl __builtin_ve_vl_vmulslw_vvvl\n" "#define _vel_vmulslw_vvvvl __builtin_ve_vl_vmulslw_vvvvl\n" "#define _vel_vmulslw_vsvl __builtin_ve_vl_vmulslw_vsvl\n" "#define _vel_vmulslw_vsvvl __builtin_ve_vl_vmulslw_vsvvl\n" "#define _vel_vdivul_vvvl __builtin_ve_vl_vdivul_vvvl\n" "#define _vel_vdivul_vvvvl __builtin_ve_vl_vdivul_vvvvl\n" "#define _vel_vdivul_vsvl __builtin_ve_vl_vdivul_vsvl\n" "#define _vel_vdivul_vsvvl __builtin_ve_vl_vdivul_vsvvl\n" "#define _vel_vdivul_vvvmvl __builtin_ve_vl_vdivul_vvvmvl\n" "#define _vel_vdivul_vsvmvl __builtin_ve_vl_vdivul_vsvmvl\n" "#define _vel_vdivuw_vvvl __builtin_ve_vl_vdivuw_vvvl\n" "#define _vel_vdivuw_vvvvl __builtin_ve_vl_vdivuw_vvvvl\n" "#define _vel_vdivuw_vsvl __builtin_ve_vl_vdivuw_vsvl\n" "#define _vel_vdivuw_vsvvl __builtin_ve_vl_vdivuw_vsvvl\n" "#define _vel_vdivuw_vvvmvl __builtin_ve_vl_vdivuw_vvvmvl\n" "#define _vel_vdivuw_vsvmvl __builtin_ve_vl_vdivuw_vsvmvl\n" "#define _vel_vdivul_vvsl __builtin_ve_vl_vdivul_vvsl\n" "#define _vel_vdivul_vvsvl __builtin_ve_vl_vdivul_vvsvl\n" "#define _vel_vdivul_vvsmvl __builtin_ve_vl_vdivul_vvsmvl\n" "#define _vel_vdivuw_vvsl __builtin_ve_vl_vdivuw_vvsl\n" "#define _vel_vdivuw_vvsvl __builtin_ve_vl_vdivuw_vvsvl\n" "#define _vel_vdivuw_vvsmvl __builtin_ve_vl_vdivuw_vvsmvl\n" "#define _vel_vdivswsx_vvvl __builtin_ve_vl_vdivswsx_vvvl\n" "#define _vel_vdivswsx_vvvvl __builtin_ve_vl_vdivswsx_vvvvl\n" "#define _vel_vdivswsx_vsvl __builtin_ve_vl_vdivswsx_vsvl\n" "#define _vel_vdivswsx_vsvvl __builtin_ve_vl_vdivswsx_vsvvl\n" "#define _vel_vdivswsx_vvvmvl 
__builtin_ve_vl_vdivswsx_vvvmvl\n" "#define _vel_vdivswsx_vsvmvl __builtin_ve_vl_vdivswsx_vsvmvl\n" "#define _vel_vdivswzx_vvvl __builtin_ve_vl_vdivswzx_vvvl\n" "#define _vel_vdivswzx_vvvvl __builtin_ve_vl_vdivswzx_vvvvl\n" "#define _vel_vdivswzx_vsvl __builtin_ve_vl_vdivswzx_vsvl\n" "#define _vel_vdivswzx_vsvvl __builtin_ve_vl_vdivswzx_vsvvl\n" "#define _vel_vdivswzx_vvvmvl __builtin_ve_vl_vdivswzx_vvvmvl\n" "#define _vel_vdivswzx_vsvmvl __builtin_ve_vl_vdivswzx_vsvmvl\n" "#define _vel_vdivswsx_vvsl __builtin_ve_vl_vdivswsx_vvsl\n" "#define _vel_vdivswsx_vvsvl __builtin_ve_vl_vdivswsx_vvsvl\n" "#define _vel_vdivswsx_vvsmvl __builtin_ve_vl_vdivswsx_vvsmvl\n" "#define _vel_vdivswzx_vvsl __builtin_ve_vl_vdivswzx_vvsl\n" "#define _vel_vdivswzx_vvsvl __builtin_ve_vl_vdivswzx_vvsvl\n" "#define _vel_vdivswzx_vvsmvl __builtin_ve_vl_vdivswzx_vvsmvl\n" "#define _vel_vdivsl_vvvl __builtin_ve_vl_vdivsl_vvvl\n" "#define _vel_vdivsl_vvvvl __builtin_ve_vl_vdivsl_vvvvl\n" "#define _vel_vdivsl_vsvl __builtin_ve_vl_vdivsl_vsvl\n" "#define _vel_vdivsl_vsvvl __builtin_ve_vl_vdivsl_vsvvl\n" "#define _vel_vdivsl_vvvmvl __builtin_ve_vl_vdivsl_vvvmvl\n" "#define _vel_vdivsl_vsvmvl __builtin_ve_vl_vdivsl_vsvmvl\n" "#define _vel_vdivsl_vvsl __builtin_ve_vl_vdivsl_vvsl\n" "#define _vel_vdivsl_vvsvl __builtin_ve_vl_vdivsl_vvsvl\n" "#define _vel_vdivsl_vvsmvl __builtin_ve_vl_vdivsl_vvsmvl\n" "#define _vel_vcmpul_vvvl __builtin_ve_vl_vcmpul_vvvl\n" "#define _vel_vcmpul_vvvvl __builtin_ve_vl_vcmpul_vvvvl\n" "#define _vel_vcmpul_vsvl __builtin_ve_vl_vcmpul_vsvl\n" "#define _vel_vcmpul_vsvvl __builtin_ve_vl_vcmpul_vsvvl\n" "#define _vel_vcmpul_vvvmvl __builtin_ve_vl_vcmpul_vvvmvl\n" "#define _vel_vcmpul_vsvmvl __builtin_ve_vl_vcmpul_vsvmvl\n" "#define _vel_vcmpuw_vvvl __builtin_ve_vl_vcmpuw_vvvl\n" "#define _vel_vcmpuw_vvvvl __builtin_ve_vl_vcmpuw_vvvvl\n" "#define _vel_vcmpuw_vsvl __builtin_ve_vl_vcmpuw_vsvl\n" "#define _vel_vcmpuw_vsvvl __builtin_ve_vl_vcmpuw_vsvvl\n" "#define 
_vel_vcmpuw_vvvmvl __builtin_ve_vl_vcmpuw_vvvmvl\n" "#define _vel_vcmpuw_vsvmvl __builtin_ve_vl_vcmpuw_vsvmvl\n" "#define _vel_pvcmpu_vvvl __builtin_ve_vl_pvcmpu_vvvl\n" "#define _vel_pvcmpu_vvvvl __builtin_ve_vl_pvcmpu_vvvvl\n" "#define _vel_pvcmpu_vsvl __builtin_ve_vl_pvcmpu_vsvl\n" "#define _vel_pvcmpu_vsvvl __builtin_ve_vl_pvcmpu_vsvvl\n" "#define _vel_pvcmpu_vvvMvl __builtin_ve_vl_pvcmpu_vvvMvl\n" "#define _vel_pvcmpu_vsvMvl __builtin_ve_vl_pvcmpu_vsvMvl\n" "#define _vel_vcmpswsx_vvvl __builtin_ve_vl_vcmpswsx_vvvl\n" "#define _vel_vcmpswsx_vvvvl __builtin_ve_vl_vcmpswsx_vvvvl\n" "#define _vel_vcmpswsx_vsvl __builtin_ve_vl_vcmpswsx_vsvl\n" "#define _vel_vcmpswsx_vsvvl __builtin_ve_vl_vcmpswsx_vsvvl\n" "#define _vel_vcmpswsx_vvvmvl __builtin_ve_vl_vcmpswsx_vvvmvl\n" "#define _vel_vcmpswsx_vsvmvl __builtin_ve_vl_vcmpswsx_vsvmvl\n" "#define _vel_vcmpswzx_vvvl __builtin_ve_vl_vcmpswzx_vvvl\n" "#define _vel_vcmpswzx_vvvvl __builtin_ve_vl_vcmpswzx_vvvvl\n" "#define _vel_vcmpswzx_vsvl __builtin_ve_vl_vcmpswzx_vsvl\n" "#define _vel_vcmpswzx_vsvvl __builtin_ve_vl_vcmpswzx_vsvvl\n" "#define _vel_vcmpswzx_vvvmvl __builtin_ve_vl_vcmpswzx_vvvmvl\n" "#define _vel_vcmpswzx_vsvmvl __builtin_ve_vl_vcmpswzx_vsvmvl\n" "#define _vel_pvcmps_vvvl __builtin_ve_vl_pvcmps_vvvl\n" "#define _vel_pvcmps_vvvvl __builtin_ve_vl_pvcmps_vvvvl\n" "#define _vel_pvcmps_vsvl __builtin_ve_vl_pvcmps_vsvl\n" "#define _vel_pvcmps_vsvvl __builtin_ve_vl_pvcmps_vsvvl\n" "#define _vel_pvcmps_vvvMvl __builtin_ve_vl_pvcmps_vvvMvl\n" "#define _vel_pvcmps_vsvMvl __builtin_ve_vl_pvcmps_vsvMvl\n" "#define _vel_vcmpsl_vvvl __builtin_ve_vl_vcmpsl_vvvl\n" "#define _vel_vcmpsl_vvvvl __builtin_ve_vl_vcmpsl_vvvvl\n" "#define _vel_vcmpsl_vsvl __builtin_ve_vl_vcmpsl_vsvl\n" "#define _vel_vcmpsl_vsvvl __builtin_ve_vl_vcmpsl_vsvvl\n" "#define _vel_vcmpsl_vvvmvl __builtin_ve_vl_vcmpsl_vvvmvl\n" "#define _vel_vcmpsl_vsvmvl __builtin_ve_vl_vcmpsl_vsvmvl\n" "#define _vel_vmaxswsx_vvvl __builtin_ve_vl_vmaxswsx_vvvl\n" 
"#define _vel_vmaxswsx_vvvvl __builtin_ve_vl_vmaxswsx_vvvvl\n" "#define _vel_vmaxswsx_vsvl __builtin_ve_vl_vmaxswsx_vsvl\n" "#define _vel_vmaxswsx_vsvvl __builtin_ve_vl_vmaxswsx_vsvvl\n" "#define _vel_vmaxswsx_vvvmvl __builtin_ve_vl_vmaxswsx_vvvmvl\n" "#define _vel_vmaxswsx_vsvmvl __builtin_ve_vl_vmaxswsx_vsvmvl\n" "#define _vel_vmaxswzx_vvvl __builtin_ve_vl_vmaxswzx_vvvl\n" "#define _vel_vmaxswzx_vvvvl __builtin_ve_vl_vmaxswzx_vvvvl\n" "#define _vel_vmaxswzx_vsvl __builtin_ve_vl_vmaxswzx_vsvl\n" "#define _vel_vmaxswzx_vsvvl __builtin_ve_vl_vmaxswzx_vsvvl\n" "#define _vel_vmaxswzx_vvvmvl __builtin_ve_vl_vmaxswzx_vvvmvl\n" "#define _vel_vmaxswzx_vsvmvl __builtin_ve_vl_vmaxswzx_vsvmvl\n" "#define _vel_pvmaxs_vvvl __builtin_ve_vl_pvmaxs_vvvl\n" "#define _vel_pvmaxs_vvvvl __builtin_ve_vl_pvmaxs_vvvvl\n" "#define _vel_pvmaxs_vsvl __builtin_ve_vl_pvmaxs_vsvl\n" "#define _vel_pvmaxs_vsvvl __builtin_ve_vl_pvmaxs_vsvvl\n" "#define _vel_pvmaxs_vvvMvl __builtin_ve_vl_pvmaxs_vvvMvl\n" "#define _vel_pvmaxs_vsvMvl __builtin_ve_vl_pvmaxs_vsvMvl\n" "#define _vel_vminswsx_vvvl __builtin_ve_vl_vminswsx_vvvl\n" "#define _vel_vminswsx_vvvvl __builtin_ve_vl_vminswsx_vvvvl\n" "#define _vel_vminswsx_vsvl __builtin_ve_vl_vminswsx_vsvl\n" "#define _vel_vminswsx_vsvvl __builtin_ve_vl_vminswsx_vsvvl\n" "#define _vel_vminswsx_vvvmvl __builtin_ve_vl_vminswsx_vvvmvl\n" "#define _vel_vminswsx_vsvmvl __builtin_ve_vl_vminswsx_vsvmvl\n" "#define _vel_vminswzx_vvvl __builtin_ve_vl_vminswzx_vvvl\n" "#define _vel_vminswzx_vvvvl __builtin_ve_vl_vminswzx_vvvvl\n" "#define _vel_vminswzx_vsvl __builtin_ve_vl_vminswzx_vsvl\n" "#define _vel_vminswzx_vsvvl __builtin_ve_vl_vminswzx_vsvvl\n" "#define _vel_vminswzx_vvvmvl __builtin_ve_vl_vminswzx_vvvmvl\n" "#define _vel_vminswzx_vsvmvl __builtin_ve_vl_vminswzx_vsvmvl\n" "#define _vel_pvmins_vvvl __builtin_ve_vl_pvmins_vvvl\n" "#define _vel_pvmins_vvvvl __builtin_ve_vl_pvmins_vvvvl\n" "#define _vel_pvmins_vsvl __builtin_ve_vl_pvmins_vsvl\n" "#define 
_vel_pvmins_vsvvl __builtin_ve_vl_pvmins_vsvvl\n" "#define _vel_pvmins_vvvMvl __builtin_ve_vl_pvmins_vvvMvl\n" "#define _vel_pvmins_vsvMvl __builtin_ve_vl_pvmins_vsvMvl\n" "#define _vel_vmaxsl_vvvl __builtin_ve_vl_vmaxsl_vvvl\n" "#define _vel_vmaxsl_vvvvl __builtin_ve_vl_vmaxsl_vvvvl\n" "#define _vel_vmaxsl_vsvl __builtin_ve_vl_vmaxsl_vsvl\n" "#define _vel_vmaxsl_vsvvl __builtin_ve_vl_vmaxsl_vsvvl\n" "#define _vel_vmaxsl_vvvmvl __builtin_ve_vl_vmaxsl_vvvmvl\n" "#define _vel_vmaxsl_vsvmvl __builtin_ve_vl_vmaxsl_vsvmvl\n" "#define _vel_vminsl_vvvl __builtin_ve_vl_vminsl_vvvl\n" "#define _vel_vminsl_vvvvl __builtin_ve_vl_vminsl_vvvvl\n" "#define _vel_vminsl_vsvl __builtin_ve_vl_vminsl_vsvl\n" "#define _vel_vminsl_vsvvl __builtin_ve_vl_vminsl_vsvvl\n" "#define _vel_vminsl_vvvmvl __builtin_ve_vl_vminsl_vvvmvl\n" "#define _vel_vminsl_vsvmvl __builtin_ve_vl_vminsl_vsvmvl\n" "#define _vel_vand_vvvl __builtin_ve_vl_vand_vvvl\n" "#define _vel_vand_vvvvl __builtin_ve_vl_vand_vvvvl\n" "#define _vel_vand_vsvl __builtin_ve_vl_vand_vsvl\n" "#define _vel_vand_vsvvl __builtin_ve_vl_vand_vsvvl\n" "#define _vel_vand_vvvmvl __builtin_ve_vl_vand_vvvmvl\n" "#define _vel_vand_vsvmvl __builtin_ve_vl_vand_vsvmvl\n" "#define _vel_pvand_vvvl __builtin_ve_vl_pvand_vvvl\n" "#define _vel_pvand_vvvvl __builtin_ve_vl_pvand_vvvvl\n" "#define _vel_pvand_vsvl __builtin_ve_vl_pvand_vsvl\n" "#define _vel_pvand_vsvvl __builtin_ve_vl_pvand_vsvvl\n" "#define _vel_pvand_vvvMvl __builtin_ve_vl_pvand_vvvMvl\n" "#define _vel_pvand_vsvMvl __builtin_ve_vl_pvand_vsvMvl\n" "#define _vel_vor_vvvl __builtin_ve_vl_vor_vvvl\n" "#define _vel_vor_vvvvl __builtin_ve_vl_vor_vvvvl\n" "#define _vel_vor_vsvl __builtin_ve_vl_vor_vsvl\n" "#define _vel_vor_vsvvl __builtin_ve_vl_vor_vsvvl\n" "#define _vel_vor_vvvmvl __builtin_ve_vl_vor_vvvmvl\n" "#define _vel_vor_vsvmvl __builtin_ve_vl_vor_vsvmvl\n" "#define _vel_pvor_vvvl __builtin_ve_vl_pvor_vvvl\n" "#define _vel_pvor_vvvvl __builtin_ve_vl_pvor_vvvvl\n" "#define 
_vel_pvor_vsvl __builtin_ve_vl_pvor_vsvl\n" "#define _vel_pvor_vsvvl __builtin_ve_vl_pvor_vsvvl\n" "#define _vel_pvor_vvvMvl __builtin_ve_vl_pvor_vvvMvl\n" "#define _vel_pvor_vsvMvl __builtin_ve_vl_pvor_vsvMvl\n" "#define _vel_vxor_vvvl __builtin_ve_vl_vxor_vvvl\n" "#define _vel_vxor_vvvvl __builtin_ve_vl_vxor_vvvvl\n" "#define _vel_vxor_vsvl __builtin_ve_vl_vxor_vsvl\n" "#define _vel_vxor_vsvvl __builtin_ve_vl_vxor_vsvvl\n" "#define _vel_vxor_vvvmvl __builtin_ve_vl_vxor_vvvmvl\n" "#define _vel_vxor_vsvmvl __builtin_ve_vl_vxor_vsvmvl\n" "#define _vel_pvxor_vvvl __builtin_ve_vl_pvxor_vvvl\n" "#define _vel_pvxor_vvvvl __builtin_ve_vl_pvxor_vvvvl\n" "#define _vel_pvxor_vsvl __builtin_ve_vl_pvxor_vsvl\n" "#define _vel_pvxor_vsvvl __builtin_ve_vl_pvxor_vsvvl\n" "#define _vel_pvxor_vvvMvl __builtin_ve_vl_pvxor_vvvMvl\n" "#define _vel_pvxor_vsvMvl __builtin_ve_vl_pvxor_vsvMvl\n" "#define _vel_veqv_vvvl __builtin_ve_vl_veqv_vvvl\n" "#define _vel_veqv_vvvvl __builtin_ve_vl_veqv_vvvvl\n" "#define _vel_veqv_vsvl __builtin_ve_vl_veqv_vsvl\n" "#define _vel_veqv_vsvvl __builtin_ve_vl_veqv_vsvvl\n" "#define _vel_veqv_vvvmvl __builtin_ve_vl_veqv_vvvmvl\n" "#define _vel_veqv_vsvmvl __builtin_ve_vl_veqv_vsvmvl\n" "#define _vel_pveqv_vvvl __builtin_ve_vl_pveqv_vvvl\n" "#define _vel_pveqv_vvvvl __builtin_ve_vl_pveqv_vvvvl\n" "#define _vel_pveqv_vsvl __builtin_ve_vl_pveqv_vsvl\n" "#define _vel_pveqv_vsvvl __builtin_ve_vl_pveqv_vsvvl\n" "#define _vel_pveqv_vvvMvl __builtin_ve_vl_pveqv_vvvMvl\n" "#define _vel_pveqv_vsvMvl __builtin_ve_vl_pveqv_vsvMvl\n" "#define _vel_vldz_vvl __builtin_ve_vl_vldz_vvl\n" "#define _vel_vldz_vvvl __builtin_ve_vl_vldz_vvvl\n" "#define _vel_vldz_vvmvl __builtin_ve_vl_vldz_vvmvl\n" "#define _vel_pvldzlo_vvl __builtin_ve_vl_pvldzlo_vvl\n" "#define _vel_pvldzlo_vvvl __builtin_ve_vl_pvldzlo_vvvl\n" "#define _vel_pvldzlo_vvmvl __builtin_ve_vl_pvldzlo_vvmvl\n" "#define _vel_pvldzup_vvl __builtin_ve_vl_pvldzup_vvl\n" "#define _vel_pvldzup_vvvl 
__builtin_ve_vl_pvldzup_vvvl\n" "#define _vel_pvldzup_vvmvl __builtin_ve_vl_pvldzup_vvmvl\n" "#define _vel_pvldz_vvl __builtin_ve_vl_pvldz_vvl\n" "#define _vel_pvldz_vvvl __builtin_ve_vl_pvldz_vvvl\n" "#define _vel_pvldz_vvMvl __builtin_ve_vl_pvldz_vvMvl\n" "#define _vel_vpcnt_vvl __builtin_ve_vl_vpcnt_vvl\n" "#define _vel_vpcnt_vvvl __builtin_ve_vl_vpcnt_vvvl\n" "#define _vel_vpcnt_vvmvl __builtin_ve_vl_vpcnt_vvmvl\n" "#define _vel_pvpcntlo_vvl __builtin_ve_vl_pvpcntlo_vvl\n" "#define _vel_pvpcntlo_vvvl __builtin_ve_vl_pvpcntlo_vvvl\n" "#define _vel_pvpcntlo_vvmvl __builtin_ve_vl_pvpcntlo_vvmvl\n" "#define _vel_pvpcntup_vvl __builtin_ve_vl_pvpcntup_vvl\n" "#define _vel_pvpcntup_vvvl __builtin_ve_vl_pvpcntup_vvvl\n" "#define _vel_pvpcntup_vvmvl __builtin_ve_vl_pvpcntup_vvmvl\n" "#define _vel_pvpcnt_vvl __builtin_ve_vl_pvpcnt_vvl\n" "#define _vel_pvpcnt_vvvl __builtin_ve_vl_pvpcnt_vvvl\n" "#define _vel_pvpcnt_vvMvl __builtin_ve_vl_pvpcnt_vvMvl\n" "#define _vel_vbrv_vvl __builtin_ve_vl_vbrv_vvl\n" "#define _vel_vbrv_vvvl __builtin_ve_vl_vbrv_vvvl\n" "#define _vel_vbrv_vvmvl __builtin_ve_vl_vbrv_vvmvl\n" "#define _vel_pvbrvlo_vvl __builtin_ve_vl_pvbrvlo_vvl\n" "#define _vel_pvbrvlo_vvvl __builtin_ve_vl_pvbrvlo_vvvl\n" "#define _vel_pvbrvlo_vvmvl __builtin_ve_vl_pvbrvlo_vvmvl\n" "#define _vel_pvbrvup_vvl __builtin_ve_vl_pvbrvup_vvl\n" "#define _vel_pvbrvup_vvvl __builtin_ve_vl_pvbrvup_vvvl\n" "#define _vel_pvbrvup_vvmvl __builtin_ve_vl_pvbrvup_vvmvl\n" "#define _vel_pvbrv_vvl __builtin_ve_vl_pvbrv_vvl\n" "#define _vel_pvbrv_vvvl __builtin_ve_vl_pvbrv_vvvl\n" "#define _vel_pvbrv_vvMvl __builtin_ve_vl_pvbrv_vvMvl\n" "#define _vel_vseq_vl __builtin_ve_vl_vseq_vl\n" "#define _vel_vseq_vvl __builtin_ve_vl_vseq_vvl\n" "#define _vel_pvseqlo_vl __builtin_ve_vl_pvseqlo_vl\n" "#define _vel_pvseqlo_vvl __builtin_ve_vl_pvseqlo_vvl\n" "#define _vel_pvsequp_vl __builtin_ve_vl_pvsequp_vl\n" "#define _vel_pvsequp_vvl __builtin_ve_vl_pvsequp_vvl\n" "#define _vel_pvseq_vl 
__builtin_ve_vl_pvseq_vl\n" "#define _vel_pvseq_vvl __builtin_ve_vl_pvseq_vvl\n" "#define _vel_vsll_vvvl __builtin_ve_vl_vsll_vvvl\n" "#define _vel_vsll_vvvvl __builtin_ve_vl_vsll_vvvvl\n" "#define _vel_vsll_vvsl __builtin_ve_vl_vsll_vvsl\n" "#define _vel_vsll_vvsvl __builtin_ve_vl_vsll_vvsvl\n" "#define _vel_vsll_vvvmvl __builtin_ve_vl_vsll_vvvmvl\n" "#define _vel_vsll_vvsmvl __builtin_ve_vl_vsll_vvsmvl\n" "#define _vel_pvsll_vvvl __builtin_ve_vl_pvsll_vvvl\n" "#define _vel_pvsll_vvvvl __builtin_ve_vl_pvsll_vvvvl\n" "#define _vel_pvsll_vvsl __builtin_ve_vl_pvsll_vvsl\n" "#define _vel_pvsll_vvsvl __builtin_ve_vl_pvsll_vvsvl\n" "#define _vel_pvsll_vvvMvl __builtin_ve_vl_pvsll_vvvMvl\n" "#define _vel_pvsll_vvsMvl __builtin_ve_vl_pvsll_vvsMvl\n" "#define _vel_vsrl_vvvl __builtin_ve_vl_vsrl_vvvl\n" "#define _vel_vsrl_vvvvl __builtin_ve_vl_vsrl_vvvvl\n" "#define _vel_vsrl_vvsl __builtin_ve_vl_vsrl_vvsl\n" "#define _vel_vsrl_vvsvl __builtin_ve_vl_vsrl_vvsvl\n" "#define _vel_vsrl_vvvmvl __builtin_ve_vl_vsrl_vvvmvl\n" "#define _vel_vsrl_vvsmvl __builtin_ve_vl_vsrl_vvsmvl\n" "#define _vel_pvsrl_vvvl __builtin_ve_vl_pvsrl_vvvl\n" "#define _vel_pvsrl_vvvvl __builtin_ve_vl_pvsrl_vvvvl\n" "#define _vel_pvsrl_vvsl __builtin_ve_vl_pvsrl_vvsl\n" "#define _vel_pvsrl_vvsvl __builtin_ve_vl_pvsrl_vvsvl\n" "#define _vel_pvsrl_vvvMvl __builtin_ve_vl_pvsrl_vvvMvl\n" "#define _vel_pvsrl_vvsMvl __builtin_ve_vl_pvsrl_vvsMvl\n" "#define _vel_vslawsx_vvvl __builtin_ve_vl_vslawsx_vvvl\n" "#define _vel_vslawsx_vvvvl __builtin_ve_vl_vslawsx_vvvvl\n" "#define _vel_vslawsx_vvsl __builtin_ve_vl_vslawsx_vvsl\n" "#define _vel_vslawsx_vvsvl __builtin_ve_vl_vslawsx_vvsvl\n" "#define _vel_vslawsx_vvvmvl __builtin_ve_vl_vslawsx_vvvmvl\n" "#define _vel_vslawsx_vvsmvl __builtin_ve_vl_vslawsx_vvsmvl\n" "#define _vel_vslawzx_vvvl __builtin_ve_vl_vslawzx_vvvl\n" "#define _vel_vslawzx_vvvvl __builtin_ve_vl_vslawzx_vvvvl\n" "#define _vel_vslawzx_vvsl __builtin_ve_vl_vslawzx_vvsl\n" "#define _vel_vslawzx_vvsvl 
__builtin_ve_vl_vslawzx_vvsvl\n" "#define _vel_vslawzx_vvvmvl __builtin_ve_vl_vslawzx_vvvmvl\n" "#define _vel_vslawzx_vvsmvl __builtin_ve_vl_vslawzx_vvsmvl\n" "#define _vel_pvsla_vvvl __builtin_ve_vl_pvsla_vvvl\n" "#define _vel_pvsla_vvvvl __builtin_ve_vl_pvsla_vvvvl\n" "#define _vel_pvsla_vvsl __builtin_ve_vl_pvsla_vvsl\n" "#define _vel_pvsla_vvsvl __builtin_ve_vl_pvsla_vvsvl\n" "#define _vel_pvsla_vvvMvl __builtin_ve_vl_pvsla_vvvMvl\n" "#define _vel_pvsla_vvsMvl __builtin_ve_vl_pvsla_vvsMvl\n" "#define _vel_vslal_vvvl __builtin_ve_vl_vslal_vvvl\n" "#define _vel_vslal_vvvvl __builtin_ve_vl_vslal_vvvvl\n" "#define _vel_vslal_vvsl __builtin_ve_vl_vslal_vvsl\n" "#define _vel_vslal_vvsvl __builtin_ve_vl_vslal_vvsvl\n" "#define _vel_vslal_vvvmvl __builtin_ve_vl_vslal_vvvmvl\n" "#define _vel_vslal_vvsmvl __builtin_ve_vl_vslal_vvsmvl\n" "#define _vel_vsrawsx_vvvl __builtin_ve_vl_vsrawsx_vvvl\n" "#define _vel_vsrawsx_vvvvl __builtin_ve_vl_vsrawsx_vvvvl\n" "#define _vel_vsrawsx_vvsl __builtin_ve_vl_vsrawsx_vvsl\n" "#define _vel_vsrawsx_vvsvl __builtin_ve_vl_vsrawsx_vvsvl\n" "#define _vel_vsrawsx_vvvmvl __builtin_ve_vl_vsrawsx_vvvmvl\n" "#define _vel_vsrawsx_vvsmvl __builtin_ve_vl_vsrawsx_vvsmvl\n" "#define _vel_vsrawzx_vvvl __builtin_ve_vl_vsrawzx_vvvl\n" "#define _vel_vsrawzx_vvvvl __builtin_ve_vl_vsrawzx_vvvvl\n" "#define _vel_vsrawzx_vvsl __builtin_ve_vl_vsrawzx_vvsl\n" "#define _vel_vsrawzx_vvsvl __builtin_ve_vl_vsrawzx_vvsvl\n" "#define _vel_vsrawzx_vvvmvl __builtin_ve_vl_vsrawzx_vvvmvl\n" "#define _vel_vsrawzx_vvsmvl __builtin_ve_vl_vsrawzx_vvsmvl\n" "#define _vel_pvsra_vvvl __builtin_ve_vl_pvsra_vvvl\n" "#define _vel_pvsra_vvvvl __builtin_ve_vl_pvsra_vvvvl\n" "#define _vel_pvsra_vvsl __builtin_ve_vl_pvsra_vvsl\n" "#define _vel_pvsra_vvsvl __builtin_ve_vl_pvsra_vvsvl\n" "#define _vel_pvsra_vvvMvl __builtin_ve_vl_pvsra_vvvMvl\n" "#define _vel_pvsra_vvsMvl __builtin_ve_vl_pvsra_vvsMvl\n" "#define _vel_vsral_vvvl __builtin_ve_vl_vsral_vvvl\n" "#define _vel_vsral_vvvvl 
__builtin_ve_vl_vsral_vvvvl\n" "#define _vel_vsral_vvsl __builtin_ve_vl_vsral_vvsl\n" "#define _vel_vsral_vvsvl __builtin_ve_vl_vsral_vvsvl\n" "#define _vel_vsral_vvvmvl __builtin_ve_vl_vsral_vvvmvl\n" "#define _vel_vsral_vvsmvl __builtin_ve_vl_vsral_vvsmvl\n" "#define _vel_vsfa_vvssl __builtin_ve_vl_vsfa_vvssl\n" "#define _vel_vsfa_vvssvl __builtin_ve_vl_vsfa_vvssvl\n" "#define _vel_vsfa_vvssmvl __builtin_ve_vl_vsfa_vvssmvl\n" "#define _vel_vfaddd_vvvl __builtin_ve_vl_vfaddd_vvvl\n" "#define _vel_vfaddd_vvvvl __builtin_ve_vl_vfaddd_vvvvl\n" "#define _vel_vfaddd_vsvl __builtin_ve_vl_vfaddd_vsvl\n" "#define _vel_vfaddd_vsvvl __builtin_ve_vl_vfaddd_vsvvl\n" "#define _vel_vfaddd_vvvmvl __builtin_ve_vl_vfaddd_vvvmvl\n" "#define _vel_vfaddd_vsvmvl __builtin_ve_vl_vfaddd_vsvmvl\n" "#define _vel_vfadds_vvvl __builtin_ve_vl_vfadds_vvvl\n" "#define _vel_vfadds_vvvvl __builtin_ve_vl_vfadds_vvvvl\n" "#define _vel_vfadds_vsvl __builtin_ve_vl_vfadds_vsvl\n" "#define _vel_vfadds_vsvvl __builtin_ve_vl_vfadds_vsvvl\n" "#define _vel_vfadds_vvvmvl __builtin_ve_vl_vfadds_vvvmvl\n" "#define _vel_vfadds_vsvmvl __builtin_ve_vl_vfadds_vsvmvl\n" "#define _vel_pvfadd_vvvl __builtin_ve_vl_pvfadd_vvvl\n" "#define _vel_pvfadd_vvvvl __builtin_ve_vl_pvfadd_vvvvl\n" "#define _vel_pvfadd_vsvl __builtin_ve_vl_pvfadd_vsvl\n" "#define _vel_pvfadd_vsvvl __builtin_ve_vl_pvfadd_vsvvl\n" "#define _vel_pvfadd_vvvMvl __builtin_ve_vl_pvfadd_vvvMvl\n" "#define _vel_pvfadd_vsvMvl __builtin_ve_vl_pvfadd_vsvMvl\n" "#define _vel_vfsubd_vvvl __builtin_ve_vl_vfsubd_vvvl\n" "#define _vel_vfsubd_vvvvl __builtin_ve_vl_vfsubd_vvvvl\n" "#define _vel_vfsubd_vsvl __builtin_ve_vl_vfsubd_vsvl\n" "#define _vel_vfsubd_vsvvl __builtin_ve_vl_vfsubd_vsvvl\n" "#define _vel_vfsubd_vvvmvl __builtin_ve_vl_vfsubd_vvvmvl\n" "#define _vel_vfsubd_vsvmvl __builtin_ve_vl_vfsubd_vsvmvl\n" "#define _vel_vfsubs_vvvl __builtin_ve_vl_vfsubs_vvvl\n" "#define _vel_vfsubs_vvvvl __builtin_ve_vl_vfsubs_vvvvl\n" "#define _vel_vfsubs_vsvl 
__builtin_ve_vl_vfsubs_vsvl\n" "#define _vel_vfsubs_vsvvl __builtin_ve_vl_vfsubs_vsvvl\n" "#define _vel_vfsubs_vvvmvl __builtin_ve_vl_vfsubs_vvvmvl\n" "#define _vel_vfsubs_vsvmvl __builtin_ve_vl_vfsubs_vsvmvl\n" "#define _vel_pvfsub_vvvl __builtin_ve_vl_pvfsub_vvvl\n" "#define _vel_pvfsub_vvvvl __builtin_ve_vl_pvfsub_vvvvl\n" "#define _vel_pvfsub_vsvl __builtin_ve_vl_pvfsub_vsvl\n" "#define _vel_pvfsub_vsvvl __builtin_ve_vl_pvfsub_vsvvl\n" "#define _vel_pvfsub_vvvMvl __builtin_ve_vl_pvfsub_vvvMvl\n" "#define _vel_pvfsub_vsvMvl __builtin_ve_vl_pvfsub_vsvMvl\n" "#define _vel_vfmuld_vvvl __builtin_ve_vl_vfmuld_vvvl\n" "#define _vel_vfmuld_vvvvl __builtin_ve_vl_vfmuld_vvvvl\n" "#define _vel_vfmuld_vsvl __builtin_ve_vl_vfmuld_vsvl\n" "#define _vel_vfmuld_vsvvl __builtin_ve_vl_vfmuld_vsvvl\n" "#define _vel_vfmuld_vvvmvl __builtin_ve_vl_vfmuld_vvvmvl\n" "#define _vel_vfmuld_vsvmvl __builtin_ve_vl_vfmuld_vsvmvl\n" "#define _vel_vfmuls_vvvl __builtin_ve_vl_vfmuls_vvvl\n" "#define _vel_vfmuls_vvvvl __builtin_ve_vl_vfmuls_vvvvl\n" "#define _vel_vfmuls_vsvl __builtin_ve_vl_vfmuls_vsvl\n" "#define _vel_vfmuls_vsvvl __builtin_ve_vl_vfmuls_vsvvl\n" "#define _vel_vfmuls_vvvmvl __builtin_ve_vl_vfmuls_vvvmvl\n" "#define _vel_vfmuls_vsvmvl __builtin_ve_vl_vfmuls_vsvmvl\n" "#define _vel_pvfmul_vvvl __builtin_ve_vl_pvfmul_vvvl\n" "#define _vel_pvfmul_vvvvl __builtin_ve_vl_pvfmul_vvvvl\n" "#define _vel_pvfmul_vsvl __builtin_ve_vl_pvfmul_vsvl\n" "#define _vel_pvfmul_vsvvl __builtin_ve_vl_pvfmul_vsvvl\n" "#define _vel_pvfmul_vvvMvl __builtin_ve_vl_pvfmul_vvvMvl\n" "#define _vel_pvfmul_vsvMvl __builtin_ve_vl_pvfmul_vsvMvl\n" "#define _vel_vfdivd_vvvl __builtin_ve_vl_vfdivd_vvvl\n" "#define _vel_vfdivd_vvvvl __builtin_ve_vl_vfdivd_vvvvl\n" "#define _vel_vfdivd_vsvl __builtin_ve_vl_vfdivd_vsvl\n" "#define _vel_vfdivd_vsvvl __builtin_ve_vl_vfdivd_vsvvl\n" "#define _vel_vfdivd_vvvmvl __builtin_ve_vl_vfdivd_vvvmvl\n" "#define _vel_vfdivd_vsvmvl __builtin_ve_vl_vfdivd_vsvmvl\n" "#define 
_vel_vfdivs_vvvl __builtin_ve_vl_vfdivs_vvvl\n" "#define _vel_vfdivs_vvvvl __builtin_ve_vl_vfdivs_vvvvl\n" "#define _vel_vfdivs_vsvl __builtin_ve_vl_vfdivs_vsvl\n" "#define _vel_vfdivs_vsvvl __builtin_ve_vl_vfdivs_vsvvl\n" "#define _vel_vfdivs_vvvmvl __builtin_ve_vl_vfdivs_vvvmvl\n" "#define _vel_vfdivs_vsvmvl __builtin_ve_vl_vfdivs_vsvmvl\n" "#define _vel_vfsqrtd_vvl __builtin_ve_vl_vfsqrtd_vvl\n" "#define _vel_vfsqrtd_vvvl __builtin_ve_vl_vfsqrtd_vvvl\n" "#define _vel_vfsqrts_vvl __builtin_ve_vl_vfsqrts_vvl\n" "#define _vel_vfsqrts_vvvl __builtin_ve_vl_vfsqrts_vvvl\n" "#define _vel_vfcmpd_vvvl __builtin_ve_vl_vfcmpd_vvvl\n" "#define _vel_vfcmpd_vvvvl __builtin_ve_vl_vfcmpd_vvvvl\n" "#define _vel_vfcmpd_vsvl __builtin_ve_vl_vfcmpd_vsvl\n" "#define _vel_vfcmpd_vsvvl __builtin_ve_vl_vfcmpd_vsvvl\n" "#define _vel_vfcmpd_vvvmvl __builtin_ve_vl_vfcmpd_vvvmvl\n" "#define _vel_vfcmpd_vsvmvl __builtin_ve_vl_vfcmpd_vsvmvl\n" "#define _vel_vfcmps_vvvl __builtin_ve_vl_vfcmps_vvvl\n" "#define _vel_vfcmps_vvvvl __builtin_ve_vl_vfcmps_vvvvl\n" "#define _vel_vfcmps_vsvl __builtin_ve_vl_vfcmps_vsvl\n" "#define _vel_vfcmps_vsvvl __builtin_ve_vl_vfcmps_vsvvl\n" "#define _vel_vfcmps_vvvmvl __builtin_ve_vl_vfcmps_vvvmvl\n" "#define _vel_vfcmps_vsvmvl __builtin_ve_vl_vfcmps_vsvmvl\n" "#define _vel_pvfcmp_vvvl __builtin_ve_vl_pvfcmp_vvvl\n" "#define _vel_pvfcmp_vvvvl __builtin_ve_vl_pvfcmp_vvvvl\n" "#define _vel_pvfcmp_vsvl __builtin_ve_vl_pvfcmp_vsvl\n" "#define _vel_pvfcmp_vsvvl __builtin_ve_vl_pvfcmp_vsvvl\n" "#define _vel_pvfcmp_vvvMvl __builtin_ve_vl_pvfcmp_vvvMvl\n" "#define _vel_pvfcmp_vsvMvl __builtin_ve_vl_pvfcmp_vsvMvl\n" "#define _vel_vfmaxd_vvvl __builtin_ve_vl_vfmaxd_vvvl\n" "#define _vel_vfmaxd_vvvvl __builtin_ve_vl_vfmaxd_vvvvl\n" "#define _vel_vfmaxd_vsvl __builtin_ve_vl_vfmaxd_vsvl\n" "#define _vel_vfmaxd_vsvvl __builtin_ve_vl_vfmaxd_vsvvl\n" "#define _vel_vfmaxd_vvvmvl __builtin_ve_vl_vfmaxd_vvvmvl\n" "#define _vel_vfmaxd_vsvmvl __builtin_ve_vl_vfmaxd_vsvmvl\n" 
"#define _vel_vfmaxs_vvvl __builtin_ve_vl_vfmaxs_vvvl\n" "#define _vel_vfmaxs_vvvvl __builtin_ve_vl_vfmaxs_vvvvl\n" "#define _vel_vfmaxs_vsvl __builtin_ve_vl_vfmaxs_vsvl\n" "#define _vel_vfmaxs_vsvvl __builtin_ve_vl_vfmaxs_vsvvl\n" "#define _vel_vfmaxs_vvvmvl __builtin_ve_vl_vfmaxs_vvvmvl\n" "#define _vel_vfmaxs_vsvmvl __builtin_ve_vl_vfmaxs_vsvmvl\n" "#define _vel_pvfmax_vvvl __builtin_ve_vl_pvfmax_vvvl\n" "#define _vel_pvfmax_vvvvl __builtin_ve_vl_pvfmax_vvvvl\n" "#define _vel_pvfmax_vsvl __builtin_ve_vl_pvfmax_vsvl\n" "#define _vel_pvfmax_vsvvl __builtin_ve_vl_pvfmax_vsvvl\n" "#define _vel_pvfmax_vvvMvl __builtin_ve_vl_pvfmax_vvvMvl\n" "#define _vel_pvfmax_vsvMvl __builtin_ve_vl_pvfmax_vsvMvl\n" "#define _vel_vfmind_vvvl __builtin_ve_vl_vfmind_vvvl\n" "#define _vel_vfmind_vvvvl __builtin_ve_vl_vfmind_vvvvl\n" "#define _vel_vfmind_vsvl __builtin_ve_vl_vfmind_vsvl\n" "#define _vel_vfmind_vsvvl __builtin_ve_vl_vfmind_vsvvl\n" "#define _vel_vfmind_vvvmvl __builtin_ve_vl_vfmind_vvvmvl\n" "#define _vel_vfmind_vsvmvl __builtin_ve_vl_vfmind_vsvmvl\n" "#define _vel_vfmins_vvvl __builtin_ve_vl_vfmins_vvvl\n" "#define _vel_vfmins_vvvvl __builtin_ve_vl_vfmins_vvvvl\n" "#define _vel_vfmins_vsvl __builtin_ve_vl_vfmins_vsvl\n" "#define _vel_vfmins_vsvvl __builtin_ve_vl_vfmins_vsvvl\n" "#define _vel_vfmins_vvvmvl __builtin_ve_vl_vfmins_vvvmvl\n" "#define _vel_vfmins_vsvmvl __builtin_ve_vl_vfmins_vsvmvl\n" "#define _vel_pvfmin_vvvl __builtin_ve_vl_pvfmin_vvvl\n" "#define _vel_pvfmin_vvvvl __builtin_ve_vl_pvfmin_vvvvl\n" "#define _vel_pvfmin_vsvl __builtin_ve_vl_pvfmin_vsvl\n" "#define _vel_pvfmin_vsvvl __builtin_ve_vl_pvfmin_vsvvl\n" "#define _vel_pvfmin_vvvMvl __builtin_ve_vl_pvfmin_vvvMvl\n" "#define _vel_pvfmin_vsvMvl __builtin_ve_vl_pvfmin_vsvMvl\n" "#define _vel_vfmadd_vvvvl __builtin_ve_vl_vfmadd_vvvvl\n" "#define _vel_vfmadd_vvvvvl __builtin_ve_vl_vfmadd_vvvvvl\n" "#define _vel_vfmadd_vsvvl __builtin_ve_vl_vfmadd_vsvvl\n" "#define _vel_vfmadd_vsvvvl 
__builtin_ve_vl_vfmadd_vsvvvl\n" "#define _vel_vfmadd_vvsvl __builtin_ve_vl_vfmadd_vvsvl\n" "#define _vel_vfmadd_vvsvvl __builtin_ve_vl_vfmadd_vvsvvl\n" "#define _vel_vfmadd_vvvvmvl __builtin_ve_vl_vfmadd_vvvvmvl\n" "#define _vel_vfmadd_vsvvmvl __builtin_ve_vl_vfmadd_vsvvmvl\n" "#define _vel_vfmadd_vvsvmvl __builtin_ve_vl_vfmadd_vvsvmvl\n" "#define _vel_vfmads_vvvvl __builtin_ve_vl_vfmads_vvvvl\n" "#define _vel_vfmads_vvvvvl __builtin_ve_vl_vfmads_vvvvvl\n" "#define _vel_vfmads_vsvvl __builtin_ve_vl_vfmads_vsvvl\n" "#define _vel_vfmads_vsvvvl __builtin_ve_vl_vfmads_vsvvvl\n" "#define _vel_vfmads_vvsvl __builtin_ve_vl_vfmads_vvsvl\n" "#define _vel_vfmads_vvsvvl __builtin_ve_vl_vfmads_vvsvvl\n" "#define _vel_vfmads_vvvvmvl __builtin_ve_vl_vfmads_vvvvmvl\n" "#define _vel_vfmads_vsvvmvl __builtin_ve_vl_vfmads_vsvvmvl\n" "#define _vel_vfmads_vvsvmvl __builtin_ve_vl_vfmads_vvsvmvl\n" "#define _vel_pvfmad_vvvvl __builtin_ve_vl_pvfmad_vvvvl\n" "#define _vel_pvfmad_vvvvvl __builtin_ve_vl_pvfmad_vvvvvl\n" "#define _vel_pvfmad_vsvvl __builtin_ve_vl_pvfmad_vsvvl\n" "#define _vel_pvfmad_vsvvvl __builtin_ve_vl_pvfmad_vsvvvl\n" "#define _vel_pvfmad_vvsvl __builtin_ve_vl_pvfmad_vvsvl\n" "#define _vel_pvfmad_vvsvvl __builtin_ve_vl_pvfmad_vvsvvl\n" "#define _vel_pvfmad_vvvvMvl __builtin_ve_vl_pvfmad_vvvvMvl\n" "#define _vel_pvfmad_vsvvMvl __builtin_ve_vl_pvfmad_vsvvMvl\n" "#define _vel_pvfmad_vvsvMvl __builtin_ve_vl_pvfmad_vvsvMvl\n" "#define _vel_vfmsbd_vvvvl __builtin_ve_vl_vfmsbd_vvvvl\n" "#define _vel_vfmsbd_vvvvvl __builtin_ve_vl_vfmsbd_vvvvvl\n" "#define _vel_vfmsbd_vsvvl __builtin_ve_vl_vfmsbd_vsvvl\n" "#define _vel_vfmsbd_vsvvvl __builtin_ve_vl_vfmsbd_vsvvvl\n" "#define _vel_vfmsbd_vvsvl __builtin_ve_vl_vfmsbd_vvsvl\n" "#define _vel_vfmsbd_vvsvvl __builtin_ve_vl_vfmsbd_vvsvvl\n" "#define _vel_vfmsbd_vvvvmvl __builtin_ve_vl_vfmsbd_vvvvmvl\n" "#define _vel_vfmsbd_vsvvmvl __builtin_ve_vl_vfmsbd_vsvvmvl\n" "#define _vel_vfmsbd_vvsvmvl __builtin_ve_vl_vfmsbd_vvsvmvl\n" "#define 
_vel_vfmsbs_vvvvl __builtin_ve_vl_vfmsbs_vvvvl\n" "#define _vel_vfmsbs_vvvvvl __builtin_ve_vl_vfmsbs_vvvvvl\n" "#define _vel_vfmsbs_vsvvl __builtin_ve_vl_vfmsbs_vsvvl\n" "#define _vel_vfmsbs_vsvvvl __builtin_ve_vl_vfmsbs_vsvvvl\n" "#define _vel_vfmsbs_vvsvl __builtin_ve_vl_vfmsbs_vvsvl\n" "#define _vel_vfmsbs_vvsvvl __builtin_ve_vl_vfmsbs_vvsvvl\n" "#define _vel_vfmsbs_vvvvmvl __builtin_ve_vl_vfmsbs_vvvvmvl\n" "#define _vel_vfmsbs_vsvvmvl __builtin_ve_vl_vfmsbs_vsvvmvl\n" "#define _vel_vfmsbs_vvsvmvl __builtin_ve_vl_vfmsbs_vvsvmvl\n" "#define _vel_pvfmsb_vvvvl __builtin_ve_vl_pvfmsb_vvvvl\n" "#define _vel_pvfmsb_vvvvvl __builtin_ve_vl_pvfmsb_vvvvvl\n" "#define _vel_pvfmsb_vsvvl __builtin_ve_vl_pvfmsb_vsvvl\n" "#define _vel_pvfmsb_vsvvvl __builtin_ve_vl_pvfmsb_vsvvvl\n" "#define _vel_pvfmsb_vvsvl __builtin_ve_vl_pvfmsb_vvsvl\n" "#define _vel_pvfmsb_vvsvvl __builtin_ve_vl_pvfmsb_vvsvvl\n" "#define _vel_pvfmsb_vvvvMvl __builtin_ve_vl_pvfmsb_vvvvMvl\n" "#define _vel_pvfmsb_vsvvMvl __builtin_ve_vl_pvfmsb_vsvvMvl\n" "#define _vel_pvfmsb_vvsvMvl __builtin_ve_vl_pvfmsb_vvsvMvl\n" "#define _vel_vfnmadd_vvvvl __builtin_ve_vl_vfnmadd_vvvvl\n" "#define _vel_vfnmadd_vvvvvl __builtin_ve_vl_vfnmadd_vvvvvl\n" "#define _vel_vfnmadd_vsvvl __builtin_ve_vl_vfnmadd_vsvvl\n" "#define _vel_vfnmadd_vsvvvl __builtin_ve_vl_vfnmadd_vsvvvl\n" "#define _vel_vfnmadd_vvsvl __builtin_ve_vl_vfnmadd_vvsvl\n" "#define _vel_vfnmadd_vvsvvl __builtin_ve_vl_vfnmadd_vvsvvl\n" "#define _vel_vfnmadd_vvvvmvl __builtin_ve_vl_vfnmadd_vvvvmvl\n" "#define _vel_vfnmadd_vsvvmvl __builtin_ve_vl_vfnmadd_vsvvmvl\n" "#define _vel_vfnmadd_vvsvmvl __builtin_ve_vl_vfnmadd_vvsvmvl\n" "#define _vel_vfnmads_vvvvl __builtin_ve_vl_vfnmads_vvvvl\n" "#define _vel_vfnmads_vvvvvl __builtin_ve_vl_vfnmads_vvvvvl\n" "#define _vel_vfnmads_vsvvl __builtin_ve_vl_vfnmads_vsvvl\n" "#define _vel_vfnmads_vsvvvl __builtin_ve_vl_vfnmads_vsvvvl\n" "#define _vel_vfnmads_vvsvl __builtin_ve_vl_vfnmads_vvsvl\n" "#define _vel_vfnmads_vvsvvl 
__builtin_ve_vl_vfnmads_vvsvvl\n" "#define _vel_vfnmads_vvvvmvl __builtin_ve_vl_vfnmads_vvvvmvl\n" "#define _vel_vfnmads_vsvvmvl __builtin_ve_vl_vfnmads_vsvvmvl\n" "#define _vel_vfnmads_vvsvmvl __builtin_ve_vl_vfnmads_vvsvmvl\n" "#define _vel_pvfnmad_vvvvl __builtin_ve_vl_pvfnmad_vvvvl\n" "#define _vel_pvfnmad_vvvvvl __builtin_ve_vl_pvfnmad_vvvvvl\n" "#define _vel_pvfnmad_vsvvl __builtin_ve_vl_pvfnmad_vsvvl\n" "#define _vel_pvfnmad_vsvvvl __builtin_ve_vl_pvfnmad_vsvvvl\n" "#define _vel_pvfnmad_vvsvl __builtin_ve_vl_pvfnmad_vvsvl\n" "#define _vel_pvfnmad_vvsvvl __builtin_ve_vl_pvfnmad_vvsvvl\n" "#define _vel_pvfnmad_vvvvMvl __builtin_ve_vl_pvfnmad_vvvvMvl\n" "#define _vel_pvfnmad_vsvvMvl __builtin_ve_vl_pvfnmad_vsvvMvl\n" "#define _vel_pvfnmad_vvsvMvl __builtin_ve_vl_pvfnmad_vvsvMvl\n" "#define _vel_vfnmsbd_vvvvl __builtin_ve_vl_vfnmsbd_vvvvl\n" "#define _vel_vfnmsbd_vvvvvl __builtin_ve_vl_vfnmsbd_vvvvvl\n" "#define _vel_vfnmsbd_vsvvl __builtin_ve_vl_vfnmsbd_vsvvl\n" "#define _vel_vfnmsbd_vsvvvl __builtin_ve_vl_vfnmsbd_vsvvvl\n" "#define _vel_vfnmsbd_vvsvl __builtin_ve_vl_vfnmsbd_vvsvl\n" "#define _vel_vfnmsbd_vvsvvl __builtin_ve_vl_vfnmsbd_vvsvvl\n" "#define _vel_vfnmsbd_vvvvmvl __builtin_ve_vl_vfnmsbd_vvvvmvl\n" "#define _vel_vfnmsbd_vsvvmvl __builtin_ve_vl_vfnmsbd_vsvvmvl\n" "#define _vel_vfnmsbd_vvsvmvl __builtin_ve_vl_vfnmsbd_vvsvmvl\n" "#define _vel_vfnmsbs_vvvvl __builtin_ve_vl_vfnmsbs_vvvvl\n" "#define _vel_vfnmsbs_vvvvvl __builtin_ve_vl_vfnmsbs_vvvvvl\n" "#define _vel_vfnmsbs_vsvvl __builtin_ve_vl_vfnmsbs_vsvvl\n" "#define _vel_vfnmsbs_vsvvvl __builtin_ve_vl_vfnmsbs_vsvvvl\n" "#define _vel_vfnmsbs_vvsvl __builtin_ve_vl_vfnmsbs_vvsvl\n" "#define _vel_vfnmsbs_vvsvvl __builtin_ve_vl_vfnmsbs_vvsvvl\n" "#define _vel_vfnmsbs_vvvvmvl __builtin_ve_vl_vfnmsbs_vvvvmvl\n" "#define _vel_vfnmsbs_vsvvmvl __builtin_ve_vl_vfnmsbs_vsvvmvl\n" "#define _vel_vfnmsbs_vvsvmvl __builtin_ve_vl_vfnmsbs_vvsvmvl\n" "#define _vel_pvfnmsb_vvvvl __builtin_ve_vl_pvfnmsb_vvvvl\n" "#define 
_vel_pvfnmsb_vvvvvl __builtin_ve_vl_pvfnmsb_vvvvvl\n" "#define _vel_pvfnmsb_vsvvl __builtin_ve_vl_pvfnmsb_vsvvl\n" "#define _vel_pvfnmsb_vsvvvl __builtin_ve_vl_pvfnmsb_vsvvvl\n" "#define _vel_pvfnmsb_vvsvl __builtin_ve_vl_pvfnmsb_vvsvl\n" "#define _vel_pvfnmsb_vvsvvl __builtin_ve_vl_pvfnmsb_vvsvvl\n" "#define _vel_pvfnmsb_vvvvMvl __builtin_ve_vl_pvfnmsb_vvvvMvl\n" "#define _vel_pvfnmsb_vsvvMvl __builtin_ve_vl_pvfnmsb_vsvvMvl\n" "#define _vel_pvfnmsb_vvsvMvl __builtin_ve_vl_pvfnmsb_vvsvMvl\n" "#define _vel_vrcpd_vvl __builtin_ve_vl_vrcpd_vvl\n" "#define _vel_vrcpd_vvvl __builtin_ve_vl_vrcpd_vvvl\n" "#define _vel_vrcps_vvl __builtin_ve_vl_vrcps_vvl\n" "#define _vel_vrcps_vvvl __builtin_ve_vl_vrcps_vvvl\n" "#define _vel_pvrcp_vvl __builtin_ve_vl_pvrcp_vvl\n" "#define _vel_pvrcp_vvvl __builtin_ve_vl_pvrcp_vvvl\n" "#define _vel_vrsqrtd_vvl __builtin_ve_vl_vrsqrtd_vvl\n" "#define _vel_vrsqrtd_vvvl __builtin_ve_vl_vrsqrtd_vvvl\n" "#define _vel_vrsqrts_vvl __builtin_ve_vl_vrsqrts_vvl\n" "#define _vel_vrsqrts_vvvl __builtin_ve_vl_vrsqrts_vvvl\n" "#define _vel_pvrsqrt_vvl __builtin_ve_vl_pvrsqrt_vvl\n" "#define _vel_pvrsqrt_vvvl __builtin_ve_vl_pvrsqrt_vvvl\n" "#define _vel_vrsqrtdnex_vvl __builtin_ve_vl_vrsqrtdnex_vvl\n" "#define _vel_vrsqrtdnex_vvvl __builtin_ve_vl_vrsqrtdnex_vvvl\n" "#define _vel_vrsqrtsnex_vvl __builtin_ve_vl_vrsqrtsnex_vvl\n" "#define _vel_vrsqrtsnex_vvvl __builtin_ve_vl_vrsqrtsnex_vvvl\n" "#define _vel_pvrsqrtnex_vvl __builtin_ve_vl_pvrsqrtnex_vvl\n" "#define _vel_pvrsqrtnex_vvvl __builtin_ve_vl_pvrsqrtnex_vvvl\n" "#define _vel_vcvtwdsx_vvl __builtin_ve_vl_vcvtwdsx_vvl\n" "#define _vel_vcvtwdsx_vvvl __builtin_ve_vl_vcvtwdsx_vvvl\n" "#define _vel_vcvtwdsx_vvmvl __builtin_ve_vl_vcvtwdsx_vvmvl\n" "#define _vel_vcvtwdsxrz_vvl __builtin_ve_vl_vcvtwdsxrz_vvl\n" "#define _vel_vcvtwdsxrz_vvvl __builtin_ve_vl_vcvtwdsxrz_vvvl\n" "#define _vel_vcvtwdsxrz_vvmvl __builtin_ve_vl_vcvtwdsxrz_vvmvl\n" "#define _vel_vcvtwdzx_vvl __builtin_ve_vl_vcvtwdzx_vvl\n" "#define 
_vel_vcvtwdzx_vvvl __builtin_ve_vl_vcvtwdzx_vvvl\n" "#define _vel_vcvtwdzx_vvmvl __builtin_ve_vl_vcvtwdzx_vvmvl\n" "#define _vel_vcvtwdzxrz_vvl __builtin_ve_vl_vcvtwdzxrz_vvl\n" "#define _vel_vcvtwdzxrz_vvvl __builtin_ve_vl_vcvtwdzxrz_vvvl\n" "#define _vel_vcvtwdzxrz_vvmvl __builtin_ve_vl_vcvtwdzxrz_vvmvl\n" "#define _vel_vcvtwssx_vvl __builtin_ve_vl_vcvtwssx_vvl\n" "#define _vel_vcvtwssx_vvvl __builtin_ve_vl_vcvtwssx_vvvl\n" "#define _vel_vcvtwssx_vvmvl __builtin_ve_vl_vcvtwssx_vvmvl\n" "#define _vel_vcvtwssxrz_vvl __builtin_ve_vl_vcvtwssxrz_vvl\n" "#define _vel_vcvtwssxrz_vvvl __builtin_ve_vl_vcvtwssxrz_vvvl\n" "#define _vel_vcvtwssxrz_vvmvl __builtin_ve_vl_vcvtwssxrz_vvmvl\n" "#define _vel_vcvtwszx_vvl __builtin_ve_vl_vcvtwszx_vvl\n" "#define _vel_vcvtwszx_vvvl __builtin_ve_vl_vcvtwszx_vvvl\n" "#define _vel_vcvtwszx_vvmvl __builtin_ve_vl_vcvtwszx_vvmvl\n" "#define _vel_vcvtwszxrz_vvl __builtin_ve_vl_vcvtwszxrz_vvl\n" "#define _vel_vcvtwszxrz_vvvl __builtin_ve_vl_vcvtwszxrz_vvvl\n" "#define _vel_vcvtwszxrz_vvmvl __builtin_ve_vl_vcvtwszxrz_vvmvl\n" "#define _vel_pvcvtws_vvl __builtin_ve_vl_pvcvtws_vvl\n" "#define _vel_pvcvtws_vvvl __builtin_ve_vl_pvcvtws_vvvl\n" "#define _vel_pvcvtws_vvMvl __builtin_ve_vl_pvcvtws_vvMvl\n" "#define _vel_pvcvtwsrz_vvl __builtin_ve_vl_pvcvtwsrz_vvl\n" "#define _vel_pvcvtwsrz_vvvl __builtin_ve_vl_pvcvtwsrz_vvvl\n" "#define _vel_pvcvtwsrz_vvMvl __builtin_ve_vl_pvcvtwsrz_vvMvl\n" "#define _vel_vcvtld_vvl __builtin_ve_vl_vcvtld_vvl\n" "#define _vel_vcvtld_vvvl __builtin_ve_vl_vcvtld_vvvl\n" "#define _vel_vcvtld_vvmvl __builtin_ve_vl_vcvtld_vvmvl\n" "#define _vel_vcvtldrz_vvl __builtin_ve_vl_vcvtldrz_vvl\n" "#define _vel_vcvtldrz_vvvl __builtin_ve_vl_vcvtldrz_vvvl\n" "#define _vel_vcvtldrz_vvmvl __builtin_ve_vl_vcvtldrz_vvmvl\n" "#define _vel_vcvtdw_vvl __builtin_ve_vl_vcvtdw_vvl\n" "#define _vel_vcvtdw_vvvl __builtin_ve_vl_vcvtdw_vvvl\n" "#define _vel_vcvtsw_vvl __builtin_ve_vl_vcvtsw_vvl\n" "#define _vel_vcvtsw_vvvl 
__builtin_ve_vl_vcvtsw_vvvl\n" "#define _vel_pvcvtsw_vvl __builtin_ve_vl_pvcvtsw_vvl\n" "#define _vel_pvcvtsw_vvvl __builtin_ve_vl_pvcvtsw_vvvl\n" "#define _vel_vcvtdl_vvl __builtin_ve_vl_vcvtdl_vvl\n" "#define _vel_vcvtdl_vvvl __builtin_ve_vl_vcvtdl_vvvl\n" "#define _vel_vcvtds_vvl __builtin_ve_vl_vcvtds_vvl\n" "#define _vel_vcvtds_vvvl __builtin_ve_vl_vcvtds_vvvl\n" "#define _vel_vcvtsd_vvl __builtin_ve_vl_vcvtsd_vvl\n" "#define _vel_vcvtsd_vvvl __builtin_ve_vl_vcvtsd_vvvl\n" "#define _vel_vmrg_vvvml __builtin_ve_vl_vmrg_vvvml\n" "#define _vel_vmrg_vvvmvl __builtin_ve_vl_vmrg_vvvmvl\n" "#define _vel_vmrg_vsvml __builtin_ve_vl_vmrg_vsvml\n" "#define _vel_vmrg_vsvmvl __builtin_ve_vl_vmrg_vsvmvl\n" "#define _vel_vmrgw_vvvMl __builtin_ve_vl_vmrgw_vvvMl\n" "#define _vel_vmrgw_vvvMvl __builtin_ve_vl_vmrgw_vvvMvl\n" "#define _vel_vmrgw_vsvMl __builtin_ve_vl_vmrgw_vsvMl\n" "#define _vel_vmrgw_vsvMvl __builtin_ve_vl_vmrgw_vsvMvl\n" "#define _vel_vshf_vvvsl __builtin_ve_vl_vshf_vvvsl\n" "#define _vel_vshf_vvvsvl __builtin_ve_vl_vshf_vvvsvl\n" "#define _vel_vcp_vvmvl __builtin_ve_vl_vcp_vvmvl\n" "#define _vel_vex_vvmvl __builtin_ve_vl_vex_vvmvl\n" "#define _vel_vfmklat_ml __builtin_ve_vl_vfmklat_ml\n" "#define _vel_vfmklaf_ml __builtin_ve_vl_vfmklaf_ml\n" "#define _vel_pvfmkat_Ml __builtin_ve_vl_pvfmkat_Ml\n" "#define _vel_pvfmkaf_Ml __builtin_ve_vl_pvfmkaf_Ml\n" "#define _vel_vfmklgt_mvl __builtin_ve_vl_vfmklgt_mvl\n" "#define _vel_vfmklgt_mvml __builtin_ve_vl_vfmklgt_mvml\n" "#define _vel_vfmkllt_mvl __builtin_ve_vl_vfmkllt_mvl\n" "#define _vel_vfmkllt_mvml __builtin_ve_vl_vfmkllt_mvml\n" "#define _vel_vfmklne_mvl __builtin_ve_vl_vfmklne_mvl\n" "#define _vel_vfmklne_mvml __builtin_ve_vl_vfmklne_mvml\n" "#define _vel_vfmkleq_mvl __builtin_ve_vl_vfmkleq_mvl\n" "#define _vel_vfmkleq_mvml __builtin_ve_vl_vfmkleq_mvml\n" "#define _vel_vfmklge_mvl __builtin_ve_vl_vfmklge_mvl\n" "#define _vel_vfmklge_mvml __builtin_ve_vl_vfmklge_mvml\n" "#define _vel_vfmklle_mvl 
__builtin_ve_vl_vfmklle_mvl\n" "#define _vel_vfmklle_mvml __builtin_ve_vl_vfmklle_mvml\n" "#define _vel_vfmklnum_mvl __builtin_ve_vl_vfmklnum_mvl\n" "#define _vel_vfmklnum_mvml __builtin_ve_vl_vfmklnum_mvml\n" "#define _vel_vfmklnan_mvl __builtin_ve_vl_vfmklnan_mvl\n" "#define _vel_vfmklnan_mvml __builtin_ve_vl_vfmklnan_mvml\n" "#define _vel_vfmklgtnan_mvl __builtin_ve_vl_vfmklgtnan_mvl\n" "#define _vel_vfmklgtnan_mvml __builtin_ve_vl_vfmklgtnan_mvml\n" "#define _vel_vfmklltnan_mvl __builtin_ve_vl_vfmklltnan_mvl\n" "#define _vel_vfmklltnan_mvml __builtin_ve_vl_vfmklltnan_mvml\n" "#define _vel_vfmklnenan_mvl __builtin_ve_vl_vfmklnenan_mvl\n" "#define _vel_vfmklnenan_mvml __builtin_ve_vl_vfmklnenan_mvml\n" "#define _vel_vfmkleqnan_mvl __builtin_ve_vl_vfmkleqnan_mvl\n" "#define _vel_vfmkleqnan_mvml __builtin_ve_vl_vfmkleqnan_mvml\n" "#define _vel_vfmklgenan_mvl __builtin_ve_vl_vfmklgenan_mvl\n" "#define _vel_vfmklgenan_mvml __builtin_ve_vl_vfmklgenan_mvml\n" "#define _vel_vfmkllenan_mvl __builtin_ve_vl_vfmkllenan_mvl\n" "#define _vel_vfmkllenan_mvml __builtin_ve_vl_vfmkllenan_mvml\n" "#define _vel_vfmkwgt_mvl __builtin_ve_vl_vfmkwgt_mvl\n" "#define _vel_vfmkwgt_mvml __builtin_ve_vl_vfmkwgt_mvml\n" "#define _vel_vfmkwlt_mvl __builtin_ve_vl_vfmkwlt_mvl\n" "#define _vel_vfmkwlt_mvml __builtin_ve_vl_vfmkwlt_mvml\n" "#define _vel_vfmkwne_mvl __builtin_ve_vl_vfmkwne_mvl\n" "#define _vel_vfmkwne_mvml __builtin_ve_vl_vfmkwne_mvml\n" "#define _vel_vfmkweq_mvl __builtin_ve_vl_vfmkweq_mvl\n" "#define _vel_vfmkweq_mvml __builtin_ve_vl_vfmkweq_mvml\n" "#define _vel_vfmkwge_mvl __builtin_ve_vl_vfmkwge_mvl\n" "#define _vel_vfmkwge_mvml __builtin_ve_vl_vfmkwge_mvml\n" "#define _vel_vfmkwle_mvl __builtin_ve_vl_vfmkwle_mvl\n" "#define _vel_vfmkwle_mvml __builtin_ve_vl_vfmkwle_mvml\n" "#define _vel_vfmkwnum_mvl __builtin_ve_vl_vfmkwnum_mvl\n" "#define _vel_vfmkwnum_mvml __builtin_ve_vl_vfmkwnum_mvml\n" "#define _vel_vfmkwnan_mvl __builtin_ve_vl_vfmkwnan_mvl\n" "#define 
_vel_vfmkwnan_mvml __builtin_ve_vl_vfmkwnan_mvml\n" "#define _vel_vfmkwgtnan_mvl __builtin_ve_vl_vfmkwgtnan_mvl\n" "#define _vel_vfmkwgtnan_mvml __builtin_ve_vl_vfmkwgtnan_mvml\n" "#define _vel_vfmkwltnan_mvl __builtin_ve_vl_vfmkwltnan_mvl\n" "#define _vel_vfmkwltnan_mvml __builtin_ve_vl_vfmkwltnan_mvml\n" "#define _vel_vfmkwnenan_mvl __builtin_ve_vl_vfmkwnenan_mvl\n" "#define _vel_vfmkwnenan_mvml __builtin_ve_vl_vfmkwnenan_mvml\n" "#define _vel_vfmkweqnan_mvl __builtin_ve_vl_vfmkweqnan_mvl\n" "#define _vel_vfmkweqnan_mvml __builtin_ve_vl_vfmkweqnan_mvml\n" "#define _vel_vfmkwgenan_mvl __builtin_ve_vl_vfmkwgenan_mvl\n" "#define _vel_vfmkwgenan_mvml __builtin_ve_vl_vfmkwgenan_mvml\n" "#define _vel_vfmkwlenan_mvl __builtin_ve_vl_vfmkwlenan_mvl\n" "#define _vel_vfmkwlenan_mvml __builtin_ve_vl_vfmkwlenan_mvml\n" "#define _vel_pvfmkwlogt_mvl __builtin_ve_vl_pvfmkwlogt_mvl\n" "#define _vel_pvfmkwupgt_mvl __builtin_ve_vl_pvfmkwupgt_mvl\n" "#define _vel_pvfmkwlogt_mvml __builtin_ve_vl_pvfmkwlogt_mvml\n" "#define _vel_pvfmkwupgt_mvml __builtin_ve_vl_pvfmkwupgt_mvml\n" "#define _vel_pvfmkwlolt_mvl __builtin_ve_vl_pvfmkwlolt_mvl\n" "#define _vel_pvfmkwuplt_mvl __builtin_ve_vl_pvfmkwuplt_mvl\n" "#define _vel_pvfmkwlolt_mvml __builtin_ve_vl_pvfmkwlolt_mvml\n" "#define _vel_pvfmkwuplt_mvml __builtin_ve_vl_pvfmkwuplt_mvml\n" "#define _vel_pvfmkwlone_mvl __builtin_ve_vl_pvfmkwlone_mvl\n" "#define _vel_pvfmkwupne_mvl __builtin_ve_vl_pvfmkwupne_mvl\n" "#define _vel_pvfmkwlone_mvml __builtin_ve_vl_pvfmkwlone_mvml\n" "#define _vel_pvfmkwupne_mvml __builtin_ve_vl_pvfmkwupne_mvml\n" "#define _vel_pvfmkwloeq_mvl __builtin_ve_vl_pvfmkwloeq_mvl\n" "#define _vel_pvfmkwupeq_mvl __builtin_ve_vl_pvfmkwupeq_mvl\n" "#define _vel_pvfmkwloeq_mvml __builtin_ve_vl_pvfmkwloeq_mvml\n" "#define _vel_pvfmkwupeq_mvml __builtin_ve_vl_pvfmkwupeq_mvml\n" "#define _vel_pvfmkwloge_mvl __builtin_ve_vl_pvfmkwloge_mvl\n" "#define _vel_pvfmkwupge_mvl __builtin_ve_vl_pvfmkwupge_mvl\n" "#define _vel_pvfmkwloge_mvml 
__builtin_ve_vl_pvfmkwloge_mvml\n" "#define _vel_pvfmkwupge_mvml __builtin_ve_vl_pvfmkwupge_mvml\n" "#define _vel_pvfmkwlole_mvl __builtin_ve_vl_pvfmkwlole_mvl\n" "#define _vel_pvfmkwuple_mvl __builtin_ve_vl_pvfmkwuple_mvl\n" "#define _vel_pvfmkwlole_mvml __builtin_ve_vl_pvfmkwlole_mvml\n" "#define _vel_pvfmkwuple_mvml __builtin_ve_vl_pvfmkwuple_mvml\n" "#define _vel_pvfmkwlonum_mvl __builtin_ve_vl_pvfmkwlonum_mvl\n" "#define _vel_pvfmkwupnum_mvl __builtin_ve_vl_pvfmkwupnum_mvl\n" "#define _vel_pvfmkwlonum_mvml __builtin_ve_vl_pvfmkwlonum_mvml\n" "#define _vel_pvfmkwupnum_mvml __builtin_ve_vl_pvfmkwupnum_mvml\n" "#define _vel_pvfmkwlonan_mvl __builtin_ve_vl_pvfmkwlonan_mvl\n" "#define _vel_pvfmkwupnan_mvl __builtin_ve_vl_pvfmkwupnan_mvl\n" "#define _vel_pvfmkwlonan_mvml __builtin_ve_vl_pvfmkwlonan_mvml\n" "#define _vel_pvfmkwupnan_mvml __builtin_ve_vl_pvfmkwupnan_mvml\n" "#define _vel_pvfmkwlogtnan_mvl __builtin_ve_vl_pvfmkwlogtnan_mvl\n" "#define _vel_pvfmkwupgtnan_mvl __builtin_ve_vl_pvfmkwupgtnan_mvl\n" "#define _vel_pvfmkwlogtnan_mvml __builtin_ve_vl_pvfmkwlogtnan_mvml\n" "#define _vel_pvfmkwupgtnan_mvml __builtin_ve_vl_pvfmkwupgtnan_mvml\n" "#define _vel_pvfmkwloltnan_mvl __builtin_ve_vl_pvfmkwloltnan_mvl\n" "#define _vel_pvfmkwupltnan_mvl __builtin_ve_vl_pvfmkwupltnan_mvl\n" "#define _vel_pvfmkwloltnan_mvml __builtin_ve_vl_pvfmkwloltnan_mvml\n" "#define _vel_pvfmkwupltnan_mvml __builtin_ve_vl_pvfmkwupltnan_mvml\n" "#define _vel_pvfmkwlonenan_mvl __builtin_ve_vl_pvfmkwlonenan_mvl\n" "#define _vel_pvfmkwupnenan_mvl __builtin_ve_vl_pvfmkwupnenan_mvl\n" "#define _vel_pvfmkwlonenan_mvml __builtin_ve_vl_pvfmkwlonenan_mvml\n" "#define _vel_pvfmkwupnenan_mvml __builtin_ve_vl_pvfmkwupnenan_mvml\n" "#define _vel_pvfmkwloeqnan_mvl __builtin_ve_vl_pvfmkwloeqnan_mvl\n" "#define _vel_pvfmkwupeqnan_mvl __builtin_ve_vl_pvfmkwupeqnan_mvl\n" "#define _vel_pvfmkwloeqnan_mvml __builtin_ve_vl_pvfmkwloeqnan_mvml\n" "#define _vel_pvfmkwupeqnan_mvml 
__builtin_ve_vl_pvfmkwupeqnan_mvml\n" "#define _vel_pvfmkwlogenan_mvl __builtin_ve_vl_pvfmkwlogenan_mvl\n" "#define _vel_pvfmkwupgenan_mvl __builtin_ve_vl_pvfmkwupgenan_mvl\n" "#define _vel_pvfmkwlogenan_mvml __builtin_ve_vl_pvfmkwlogenan_mvml\n" "#define _vel_pvfmkwupgenan_mvml __builtin_ve_vl_pvfmkwupgenan_mvml\n" "#define _vel_pvfmkwlolenan_mvl __builtin_ve_vl_pvfmkwlolenan_mvl\n" "#define _vel_pvfmkwuplenan_mvl __builtin_ve_vl_pvfmkwuplenan_mvl\n" "#define _vel_pvfmkwlolenan_mvml __builtin_ve_vl_pvfmkwlolenan_mvml\n" "#define _vel_pvfmkwuplenan_mvml __builtin_ve_vl_pvfmkwuplenan_mvml\n" "#define _vel_pvfmkwgt_Mvl __builtin_ve_vl_pvfmkwgt_Mvl\n" "#define _vel_pvfmkwgt_MvMl __builtin_ve_vl_pvfmkwgt_MvMl\n" "#define _vel_pvfmkwlt_Mvl __builtin_ve_vl_pvfmkwlt_Mvl\n" "#define _vel_pvfmkwlt_MvMl __builtin_ve_vl_pvfmkwlt_MvMl\n" "#define _vel_pvfmkwne_Mvl __builtin_ve_vl_pvfmkwne_Mvl\n" "#define _vel_pvfmkwne_MvMl __builtin_ve_vl_pvfmkwne_MvMl\n" "#define _vel_pvfmkweq_Mvl __builtin_ve_vl_pvfmkweq_Mvl\n" "#define _vel_pvfmkweq_MvMl __builtin_ve_vl_pvfmkweq_MvMl\n" "#define _vel_pvfmkwge_Mvl __builtin_ve_vl_pvfmkwge_Mvl\n" "#define _vel_pvfmkwge_MvMl __builtin_ve_vl_pvfmkwge_MvMl\n" "#define _vel_pvfmkwle_Mvl __builtin_ve_vl_pvfmkwle_Mvl\n" "#define _vel_pvfmkwle_MvMl __builtin_ve_vl_pvfmkwle_MvMl\n" "#define _vel_pvfmkwnum_Mvl __builtin_ve_vl_pvfmkwnum_Mvl\n" "#define _vel_pvfmkwnum_MvMl __builtin_ve_vl_pvfmkwnum_MvMl\n" "#define _vel_pvfmkwnan_Mvl __builtin_ve_vl_pvfmkwnan_Mvl\n" "#define _vel_pvfmkwnan_MvMl __builtin_ve_vl_pvfmkwnan_MvMl\n" "#define _vel_pvfmkwgtnan_Mvl __builtin_ve_vl_pvfmkwgtnan_Mvl\n" "#define _vel_pvfmkwgtnan_MvMl __builtin_ve_vl_pvfmkwgtnan_MvMl\n" "#define _vel_pvfmkwltnan_Mvl __builtin_ve_vl_pvfmkwltnan_Mvl\n" "#define _vel_pvfmkwltnan_MvMl __builtin_ve_vl_pvfmkwltnan_MvMl\n" "#define _vel_pvfmkwnenan_Mvl __builtin_ve_vl_pvfmkwnenan_Mvl\n" "#define _vel_pvfmkwnenan_MvMl __builtin_ve_vl_pvfmkwnenan_MvMl\n" "#define _vel_pvfmkweqnan_Mvl 
__builtin_ve_vl_pvfmkweqnan_Mvl\n" "#define _vel_pvfmkweqnan_MvMl __builtin_ve_vl_pvfmkweqnan_MvMl\n" "#define _vel_pvfmkwgenan_Mvl __builtin_ve_vl_pvfmkwgenan_Mvl\n" "#define _vel_pvfmkwgenan_MvMl __builtin_ve_vl_pvfmkwgenan_MvMl\n" "#define _vel_pvfmkwlenan_Mvl __builtin_ve_vl_pvfmkwlenan_Mvl\n" "#define _vel_pvfmkwlenan_MvMl __builtin_ve_vl_pvfmkwlenan_MvMl\n" "#define _vel_vfmkdgt_mvl __builtin_ve_vl_vfmkdgt_mvl\n" "#define _vel_vfmkdgt_mvml __builtin_ve_vl_vfmkdgt_mvml\n" "#define _vel_vfmkdlt_mvl __builtin_ve_vl_vfmkdlt_mvl\n" "#define _vel_vfmkdlt_mvml __builtin_ve_vl_vfmkdlt_mvml\n" "#define _vel_vfmkdne_mvl __builtin_ve_vl_vfmkdne_mvl\n" "#define _vel_vfmkdne_mvml __builtin_ve_vl_vfmkdne_mvml\n" "#define _vel_vfmkdeq_mvl __builtin_ve_vl_vfmkdeq_mvl\n" "#define _vel_vfmkdeq_mvml __builtin_ve_vl_vfmkdeq_mvml\n" "#define _vel_vfmkdge_mvl __builtin_ve_vl_vfmkdge_mvl\n" "#define _vel_vfmkdge_mvml __builtin_ve_vl_vfmkdge_mvml\n" "#define _vel_vfmkdle_mvl __builtin_ve_vl_vfmkdle_mvl\n" "#define _vel_vfmkdle_mvml __builtin_ve_vl_vfmkdle_mvml\n" "#define _vel_vfmkdnum_mvl __builtin_ve_vl_vfmkdnum_mvl\n" "#define _vel_vfmkdnum_mvml __builtin_ve_vl_vfmkdnum_mvml\n" "#define _vel_vfmkdnan_mvl __builtin_ve_vl_vfmkdnan_mvl\n" "#define _vel_vfmkdnan_mvml __builtin_ve_vl_vfmkdnan_mvml\n" "#define _vel_vfmkdgtnan_mvl __builtin_ve_vl_vfmkdgtnan_mvl\n" "#define _vel_vfmkdgtnan_mvml __builtin_ve_vl_vfmkdgtnan_mvml\n" "#define _vel_vfmkdltnan_mvl __builtin_ve_vl_vfmkdltnan_mvl\n" "#define _vel_vfmkdltnan_mvml __builtin_ve_vl_vfmkdltnan_mvml\n" "#define _vel_vfmkdnenan_mvl __builtin_ve_vl_vfmkdnenan_mvl\n" "#define _vel_vfmkdnenan_mvml __builtin_ve_vl_vfmkdnenan_mvml\n" "#define _vel_vfmkdeqnan_mvl __builtin_ve_vl_vfmkdeqnan_mvl\n" "#define _vel_vfmkdeqnan_mvml __builtin_ve_vl_vfmkdeqnan_mvml\n" "#define _vel_vfmkdgenan_mvl __builtin_ve_vl_vfmkdgenan_mvl\n" "#define _vel_vfmkdgenan_mvml __builtin_ve_vl_vfmkdgenan_mvml\n" "#define _vel_vfmkdlenan_mvl 
__builtin_ve_vl_vfmkdlenan_mvl\n" "#define _vel_vfmkdlenan_mvml __builtin_ve_vl_vfmkdlenan_mvml\n" "#define _vel_vfmksgt_mvl __builtin_ve_vl_vfmksgt_mvl\n" "#define _vel_vfmksgt_mvml __builtin_ve_vl_vfmksgt_mvml\n" "#define _vel_vfmkslt_mvl __builtin_ve_vl_vfmkslt_mvl\n" "#define _vel_vfmkslt_mvml __builtin_ve_vl_vfmkslt_mvml\n" "#define _vel_vfmksne_mvl __builtin_ve_vl_vfmksne_mvl\n" "#define _vel_vfmksne_mvml __builtin_ve_vl_vfmksne_mvml\n" "#define _vel_vfmkseq_mvl __builtin_ve_vl_vfmkseq_mvl\n" "#define _vel_vfmkseq_mvml __builtin_ve_vl_vfmkseq_mvml\n" "#define _vel_vfmksge_mvl __builtin_ve_vl_vfmksge_mvl\n" "#define _vel_vfmksge_mvml __builtin_ve_vl_vfmksge_mvml\n" "#define _vel_vfmksle_mvl __builtin_ve_vl_vfmksle_mvl\n" "#define _vel_vfmksle_mvml __builtin_ve_vl_vfmksle_mvml\n" "#define _vel_vfmksnum_mvl __builtin_ve_vl_vfmksnum_mvl\n" "#define _vel_vfmksnum_mvml __builtin_ve_vl_vfmksnum_mvml\n" "#define _vel_vfmksnan_mvl __builtin_ve_vl_vfmksnan_mvl\n" "#define _vel_vfmksnan_mvml __builtin_ve_vl_vfmksnan_mvml\n" "#define _vel_vfmksgtnan_mvl __builtin_ve_vl_vfmksgtnan_mvl\n" "#define _vel_vfmksgtnan_mvml __builtin_ve_vl_vfmksgtnan_mvml\n" "#define _vel_vfmksltnan_mvl __builtin_ve_vl_vfmksltnan_mvl\n" "#define _vel_vfmksltnan_mvml __builtin_ve_vl_vfmksltnan_mvml\n" "#define _vel_vfmksnenan_mvl __builtin_ve_vl_vfmksnenan_mvl\n" "#define _vel_vfmksnenan_mvml __builtin_ve_vl_vfmksnenan_mvml\n" "#define _vel_vfmkseqnan_mvl __builtin_ve_vl_vfmkseqnan_mvl\n" "#define _vel_vfmkseqnan_mvml __builtin_ve_vl_vfmkseqnan_mvml\n" "#define _vel_vfmksgenan_mvl __builtin_ve_vl_vfmksgenan_mvl\n" "#define _vel_vfmksgenan_mvml __builtin_ve_vl_vfmksgenan_mvml\n" "#define _vel_vfmkslenan_mvl __builtin_ve_vl_vfmkslenan_mvl\n" "#define _vel_vfmkslenan_mvml __builtin_ve_vl_vfmkslenan_mvml\n" "#define _vel_pvfmkslogt_mvl __builtin_ve_vl_pvfmkslogt_mvl\n" "#define _vel_pvfmksupgt_mvl __builtin_ve_vl_pvfmksupgt_mvl\n" "#define _vel_pvfmkslogt_mvml __builtin_ve_vl_pvfmkslogt_mvml\n" 
"#define _vel_pvfmksupgt_mvml __builtin_ve_vl_pvfmksupgt_mvml\n" "#define _vel_pvfmkslolt_mvl __builtin_ve_vl_pvfmkslolt_mvl\n" "#define _vel_pvfmksuplt_mvl __builtin_ve_vl_pvfmksuplt_mvl\n" "#define _vel_pvfmkslolt_mvml __builtin_ve_vl_pvfmkslolt_mvml\n" "#define _vel_pvfmksuplt_mvml __builtin_ve_vl_pvfmksuplt_mvml\n" "#define _vel_pvfmkslone_mvl __builtin_ve_vl_pvfmkslone_mvl\n" "#define _vel_pvfmksupne_mvl __builtin_ve_vl_pvfmksupne_mvl\n" "#define _vel_pvfmkslone_mvml __builtin_ve_vl_pvfmkslone_mvml\n" "#define _vel_pvfmksupne_mvml __builtin_ve_vl_pvfmksupne_mvml\n" "#define _vel_pvfmksloeq_mvl __builtin_ve_vl_pvfmksloeq_mvl\n" "#define _vel_pvfmksupeq_mvl __builtin_ve_vl_pvfmksupeq_mvl\n" "#define _vel_pvfmksloeq_mvml __builtin_ve_vl_pvfmksloeq_mvml\n" "#define _vel_pvfmksupeq_mvml __builtin_ve_vl_pvfmksupeq_mvml\n" "#define _vel_pvfmksloge_mvl __builtin_ve_vl_pvfmksloge_mvl\n" "#define _vel_pvfmksupge_mvl __builtin_ve_vl_pvfmksupge_mvl\n" "#define _vel_pvfmksloge_mvml __builtin_ve_vl_pvfmksloge_mvml\n" "#define _vel_pvfmksupge_mvml __builtin_ve_vl_pvfmksupge_mvml\n" "#define _vel_pvfmkslole_mvl __builtin_ve_vl_pvfmkslole_mvl\n" "#define _vel_pvfmksuple_mvl __builtin_ve_vl_pvfmksuple_mvl\n" "#define _vel_pvfmkslole_mvml __builtin_ve_vl_pvfmkslole_mvml\n" "#define _vel_pvfmksuple_mvml __builtin_ve_vl_pvfmksuple_mvml\n" "#define _vel_pvfmkslonum_mvl __builtin_ve_vl_pvfmkslonum_mvl\n" "#define _vel_pvfmksupnum_mvl __builtin_ve_vl_pvfmksupnum_mvl\n" "#define _vel_pvfmkslonum_mvml __builtin_ve_vl_pvfmkslonum_mvml\n" "#define _vel_pvfmksupnum_mvml __builtin_ve_vl_pvfmksupnum_mvml\n" "#define _vel_pvfmkslonan_mvl __builtin_ve_vl_pvfmkslonan_mvl\n" "#define _vel_pvfmksupnan_mvl __builtin_ve_vl_pvfmksupnan_mvl\n" "#define _vel_pvfmkslonan_mvml __builtin_ve_vl_pvfmkslonan_mvml\n" "#define _vel_pvfmksupnan_mvml __builtin_ve_vl_pvfmksupnan_mvml\n" "#define _vel_pvfmkslogtnan_mvl __builtin_ve_vl_pvfmkslogtnan_mvl\n" "#define _vel_pvfmksupgtnan_mvl 
__builtin_ve_vl_pvfmksupgtnan_mvl\n" "#define _vel_pvfmkslogtnan_mvml __builtin_ve_vl_pvfmkslogtnan_mvml\n" "#define _vel_pvfmksupgtnan_mvml __builtin_ve_vl_pvfmksupgtnan_mvml\n" "#define _vel_pvfmksloltnan_mvl __builtin_ve_vl_pvfmksloltnan_mvl\n" "#define _vel_pvfmksupltnan_mvl __builtin_ve_vl_pvfmksupltnan_mvl\n" "#define _vel_pvfmksloltnan_mvml __builtin_ve_vl_pvfmksloltnan_mvml\n" "#define _vel_pvfmksupltnan_mvml __builtin_ve_vl_pvfmksupltnan_mvml\n" "#define _vel_pvfmkslonenan_mvl __builtin_ve_vl_pvfmkslonenan_mvl\n" "#define _vel_pvfmksupnenan_mvl __builtin_ve_vl_pvfmksupnenan_mvl\n" "#define _vel_pvfmkslonenan_mvml __builtin_ve_vl_pvfmkslonenan_mvml\n" "#define _vel_pvfmksupnenan_mvml __builtin_ve_vl_pvfmksupnenan_mvml\n" "#define _vel_pvfmksloeqnan_mvl __builtin_ve_vl_pvfmksloeqnan_mvl\n" "#define _vel_pvfmksupeqnan_mvl __builtin_ve_vl_pvfmksupeqnan_mvl\n" "#define _vel_pvfmksloeqnan_mvml __builtin_ve_vl_pvfmksloeqnan_mvml\n" "#define _vel_pvfmksupeqnan_mvml __builtin_ve_vl_pvfmksupeqnan_mvml\n" "#define _vel_pvfmkslogenan_mvl __builtin_ve_vl_pvfmkslogenan_mvl\n" "#define _vel_pvfmksupgenan_mvl __builtin_ve_vl_pvfmksupgenan_mvl\n" "#define _vel_pvfmkslogenan_mvml __builtin_ve_vl_pvfmkslogenan_mvml\n" "#define _vel_pvfmksupgenan_mvml __builtin_ve_vl_pvfmksupgenan_mvml\n" "#define _vel_pvfmkslolenan_mvl __builtin_ve_vl_pvfmkslolenan_mvl\n" "#define _vel_pvfmksuplenan_mvl __builtin_ve_vl_pvfmksuplenan_mvl\n" "#define _vel_pvfmkslolenan_mvml __builtin_ve_vl_pvfmkslolenan_mvml\n" "#define _vel_pvfmksuplenan_mvml __builtin_ve_vl_pvfmksuplenan_mvml\n" "#define _vel_pvfmksgt_Mvl __builtin_ve_vl_pvfmksgt_Mvl\n" "#define _vel_pvfmksgt_MvMl __builtin_ve_vl_pvfmksgt_MvMl\n" "#define _vel_pvfmkslt_Mvl __builtin_ve_vl_pvfmkslt_Mvl\n" "#define _vel_pvfmkslt_MvMl __builtin_ve_vl_pvfmkslt_MvMl\n" "#define _vel_pvfmksne_Mvl __builtin_ve_vl_pvfmksne_Mvl\n" "#define _vel_pvfmksne_MvMl __builtin_ve_vl_pvfmksne_MvMl\n" "#define _vel_pvfmkseq_Mvl __builtin_ve_vl_pvfmkseq_Mvl\n" 
"#define _vel_pvfmkseq_MvMl __builtin_ve_vl_pvfmkseq_MvMl\n" "#define _vel_pvfmksge_Mvl __builtin_ve_vl_pvfmksge_Mvl\n" "#define _vel_pvfmksge_MvMl __builtin_ve_vl_pvfmksge_MvMl\n" "#define _vel_pvfmksle_Mvl __builtin_ve_vl_pvfmksle_Mvl\n" "#define _vel_pvfmksle_MvMl __builtin_ve_vl_pvfmksle_MvMl\n" "#define _vel_pvfmksnum_Mvl __builtin_ve_vl_pvfmksnum_Mvl\n" "#define _vel_pvfmksnum_MvMl __builtin_ve_vl_pvfmksnum_MvMl\n" "#define _vel_pvfmksnan_Mvl __builtin_ve_vl_pvfmksnan_Mvl\n" "#define _vel_pvfmksnan_MvMl __builtin_ve_vl_pvfmksnan_MvMl\n" "#define _vel_pvfmksgtnan_Mvl __builtin_ve_vl_pvfmksgtnan_Mvl\n" "#define _vel_pvfmksgtnan_MvMl __builtin_ve_vl_pvfmksgtnan_MvMl\n" "#define _vel_pvfmksltnan_Mvl __builtin_ve_vl_pvfmksltnan_Mvl\n" "#define _vel_pvfmksltnan_MvMl __builtin_ve_vl_pvfmksltnan_MvMl\n" "#define _vel_pvfmksnenan_Mvl __builtin_ve_vl_pvfmksnenan_Mvl\n" "#define _vel_pvfmksnenan_MvMl __builtin_ve_vl_pvfmksnenan_MvMl\n" "#define _vel_pvfmkseqnan_Mvl __builtin_ve_vl_pvfmkseqnan_Mvl\n" "#define _vel_pvfmkseqnan_MvMl __builtin_ve_vl_pvfmkseqnan_MvMl\n" "#define _vel_pvfmksgenan_Mvl __builtin_ve_vl_pvfmksgenan_Mvl\n" "#define _vel_pvfmksgenan_MvMl __builtin_ve_vl_pvfmksgenan_MvMl\n" "#define _vel_pvfmkslenan_Mvl __builtin_ve_vl_pvfmkslenan_Mvl\n" "#define _vel_pvfmkslenan_MvMl __builtin_ve_vl_pvfmkslenan_MvMl\n" "#define _vel_vsumwsx_vvl __builtin_ve_vl_vsumwsx_vvl\n" "#define _vel_vsumwsx_vvml __builtin_ve_vl_vsumwsx_vvml\n" "#define _vel_vsumwzx_vvl __builtin_ve_vl_vsumwzx_vvl\n" "#define _vel_vsumwzx_vvml __builtin_ve_vl_vsumwzx_vvml\n" "#define _vel_vsuml_vvl __builtin_ve_vl_vsuml_vvl\n" "#define _vel_vsuml_vvml __builtin_ve_vl_vsuml_vvml\n" "#define _vel_vfsumd_vvl __builtin_ve_vl_vfsumd_vvl\n" "#define _vel_vfsumd_vvml __builtin_ve_vl_vfsumd_vvml\n" "#define _vel_vfsums_vvl __builtin_ve_vl_vfsums_vvl\n" "#define _vel_vfsums_vvml __builtin_ve_vl_vfsums_vvml\n" "#define _vel_vrmaxswfstsx_vvl __builtin_ve_vl_vrmaxswfstsx_vvl\n" "#define 
_vel_vrmaxswfstsx_vvvl __builtin_ve_vl_vrmaxswfstsx_vvvl\n" "#define _vel_vrmaxswlstsx_vvl __builtin_ve_vl_vrmaxswlstsx_vvl\n" "#define _vel_vrmaxswlstsx_vvvl __builtin_ve_vl_vrmaxswlstsx_vvvl\n" "#define _vel_vrmaxswfstzx_vvl __builtin_ve_vl_vrmaxswfstzx_vvl\n" "#define _vel_vrmaxswfstzx_vvvl __builtin_ve_vl_vrmaxswfstzx_vvvl\n" "#define _vel_vrmaxswlstzx_vvl __builtin_ve_vl_vrmaxswlstzx_vvl\n" "#define _vel_vrmaxswlstzx_vvvl __builtin_ve_vl_vrmaxswlstzx_vvvl\n" "#define _vel_vrminswfstsx_vvl __builtin_ve_vl_vrminswfstsx_vvl\n" "#define _vel_vrminswfstsx_vvvl __builtin_ve_vl_vrminswfstsx_vvvl\n" "#define _vel_vrminswlstsx_vvl __builtin_ve_vl_vrminswlstsx_vvl\n" "#define _vel_vrminswlstsx_vvvl __builtin_ve_vl_vrminswlstsx_vvvl\n" "#define _vel_vrminswfstzx_vvl __builtin_ve_vl_vrminswfstzx_vvl\n" "#define _vel_vrminswfstzx_vvvl __builtin_ve_vl_vrminswfstzx_vvvl\n" "#define _vel_vrminswlstzx_vvl __builtin_ve_vl_vrminswlstzx_vvl\n" "#define _vel_vrminswlstzx_vvvl __builtin_ve_vl_vrminswlstzx_vvvl\n" "#define _vel_vrmaxslfst_vvl __builtin_ve_vl_vrmaxslfst_vvl\n" "#define _vel_vrmaxslfst_vvvl __builtin_ve_vl_vrmaxslfst_vvvl\n" "#define _vel_vrmaxsllst_vvl __builtin_ve_vl_vrmaxsllst_vvl\n" "#define _vel_vrmaxsllst_vvvl __builtin_ve_vl_vrmaxsllst_vvvl\n" "#define _vel_vrminslfst_vvl __builtin_ve_vl_vrminslfst_vvl\n" "#define _vel_vrminslfst_vvvl __builtin_ve_vl_vrminslfst_vvvl\n" "#define _vel_vrminsllst_vvl __builtin_ve_vl_vrminsllst_vvl\n" "#define _vel_vrminsllst_vvvl __builtin_ve_vl_vrminsllst_vvvl\n" "#define _vel_vfrmaxdfst_vvl __builtin_ve_vl_vfrmaxdfst_vvl\n" "#define _vel_vfrmaxdfst_vvvl __builtin_ve_vl_vfrmaxdfst_vvvl\n" "#define _vel_vfrmaxdlst_vvl __builtin_ve_vl_vfrmaxdlst_vvl\n" "#define _vel_vfrmaxdlst_vvvl __builtin_ve_vl_vfrmaxdlst_vvvl\n" "#define _vel_vfrmaxsfst_vvl __builtin_ve_vl_vfrmaxsfst_vvl\n" "#define _vel_vfrmaxsfst_vvvl __builtin_ve_vl_vfrmaxsfst_vvvl\n" "#define _vel_vfrmaxslst_vvl __builtin_ve_vl_vfrmaxslst_vvl\n" "#define 
_vel_vfrmaxslst_vvvl __builtin_ve_vl_vfrmaxslst_vvvl\n" "#define _vel_vfrmindfst_vvl __builtin_ve_vl_vfrmindfst_vvl\n" "#define _vel_vfrmindfst_vvvl __builtin_ve_vl_vfrmindfst_vvvl\n" "#define _vel_vfrmindlst_vvl __builtin_ve_vl_vfrmindlst_vvl\n" "#define _vel_vfrmindlst_vvvl __builtin_ve_vl_vfrmindlst_vvvl\n" "#define _vel_vfrminsfst_vvl __builtin_ve_vl_vfrminsfst_vvl\n" "#define _vel_vfrminsfst_vvvl __builtin_ve_vl_vfrminsfst_vvvl\n" "#define _vel_vfrminslst_vvl __builtin_ve_vl_vfrminslst_vvl\n" "#define _vel_vfrminslst_vvvl __builtin_ve_vl_vfrminslst_vvvl\n" "#define _vel_vrand_vvl __builtin_ve_vl_vrand_vvl\n" "#define _vel_vrand_vvml __builtin_ve_vl_vrand_vvml\n" "#define _vel_vror_vvl __builtin_ve_vl_vror_vvl\n" "#define _vel_vror_vvml __builtin_ve_vl_vror_vvml\n" "#define _vel_vrxor_vvl __builtin_ve_vl_vrxor_vvl\n" "#define _vel_vrxor_vvml __builtin_ve_vl_vrxor_vvml\n" "#define _vel_vgt_vvssl __builtin_ve_vl_vgt_vvssl\n" "#define _vel_vgt_vvssvl __builtin_ve_vl_vgt_vvssvl\n" "#define _vel_vgt_vvssml __builtin_ve_vl_vgt_vvssml\n" "#define _vel_vgt_vvssmvl __builtin_ve_vl_vgt_vvssmvl\n" "#define _vel_vgtnc_vvssl __builtin_ve_vl_vgtnc_vvssl\n" "#define _vel_vgtnc_vvssvl __builtin_ve_vl_vgtnc_vvssvl\n" "#define _vel_vgtnc_vvssml __builtin_ve_vl_vgtnc_vvssml\n" "#define _vel_vgtnc_vvssmvl __builtin_ve_vl_vgtnc_vvssmvl\n" "#define _vel_vgtu_vvssl __builtin_ve_vl_vgtu_vvssl\n" "#define _vel_vgtu_vvssvl __builtin_ve_vl_vgtu_vvssvl\n" "#define _vel_vgtu_vvssml __builtin_ve_vl_vgtu_vvssml\n" "#define _vel_vgtu_vvssmvl __builtin_ve_vl_vgtu_vvssmvl\n" "#define _vel_vgtunc_vvssl __builtin_ve_vl_vgtunc_vvssl\n" "#define _vel_vgtunc_vvssvl __builtin_ve_vl_vgtunc_vvssvl\n" "#define _vel_vgtunc_vvssml __builtin_ve_vl_vgtunc_vvssml\n" "#define _vel_vgtunc_vvssmvl __builtin_ve_vl_vgtunc_vvssmvl\n" "#define _vel_vgtlsx_vvssl __builtin_ve_vl_vgtlsx_vvssl\n" "#define _vel_vgtlsx_vvssvl __builtin_ve_vl_vgtlsx_vvssvl\n" "#define _vel_vgtlsx_vvssml __builtin_ve_vl_vgtlsx_vvssml\n" 
"#define _vel_vgtlsx_vvssmvl __builtin_ve_vl_vgtlsx_vvssmvl\n" "#define _vel_vgtlsxnc_vvssl __builtin_ve_vl_vgtlsxnc_vvssl\n" "#define _vel_vgtlsxnc_vvssvl __builtin_ve_vl_vgtlsxnc_vvssvl\n" "#define _vel_vgtlsxnc_vvssml __builtin_ve_vl_vgtlsxnc_vvssml\n" "#define _vel_vgtlsxnc_vvssmvl __builtin_ve_vl_vgtlsxnc_vvssmvl\n" "#define _vel_vgtlzx_vvssl __builtin_ve_vl_vgtlzx_vvssl\n" "#define _vel_vgtlzx_vvssvl __builtin_ve_vl_vgtlzx_vvssvl\n" "#define _vel_vgtlzx_vvssml __builtin_ve_vl_vgtlzx_vvssml\n" "#define _vel_vgtlzx_vvssmvl __builtin_ve_vl_vgtlzx_vvssmvl\n" "#define _vel_vgtlzxnc_vvssl __builtin_ve_vl_vgtlzxnc_vvssl\n" "#define _vel_vgtlzxnc_vvssvl __builtin_ve_vl_vgtlzxnc_vvssvl\n" "#define _vel_vgtlzxnc_vvssml __builtin_ve_vl_vgtlzxnc_vvssml\n" "#define _vel_vgtlzxnc_vvssmvl __builtin_ve_vl_vgtlzxnc_vvssmvl\n" "#define _vel_vsc_vvssl __builtin_ve_vl_vsc_vvssl\n" "#define _vel_vsc_vvssml __builtin_ve_vl_vsc_vvssml\n" "#define _vel_vscnc_vvssl __builtin_ve_vl_vscnc_vvssl\n" "#define _vel_vscnc_vvssml __builtin_ve_vl_vscnc_vvssml\n" "#define _vel_vscot_vvssl __builtin_ve_vl_vscot_vvssl\n" "#define _vel_vscot_vvssml __builtin_ve_vl_vscot_vvssml\n" "#define _vel_vscncot_vvssl __builtin_ve_vl_vscncot_vvssl\n" "#define _vel_vscncot_vvssml __builtin_ve_vl_vscncot_vvssml\n" "#define _vel_vscu_vvssl __builtin_ve_vl_vscu_vvssl\n" "#define _vel_vscu_vvssml __builtin_ve_vl_vscu_vvssml\n" "#define _vel_vscunc_vvssl __builtin_ve_vl_vscunc_vvssl\n" "#define _vel_vscunc_vvssml __builtin_ve_vl_vscunc_vvssml\n" "#define _vel_vscuot_vvssl __builtin_ve_vl_vscuot_vvssl\n" "#define _vel_vscuot_vvssml __builtin_ve_vl_vscuot_vvssml\n" "#define _vel_vscuncot_vvssl __builtin_ve_vl_vscuncot_vvssl\n" "#define _vel_vscuncot_vvssml __builtin_ve_vl_vscuncot_vvssml\n" "#define _vel_vscl_vvssl __builtin_ve_vl_vscl_vvssl\n" "#define _vel_vscl_vvssml __builtin_ve_vl_vscl_vvssml\n" "#define _vel_vsclnc_vvssl __builtin_ve_vl_vsclnc_vvssl\n" "#define _vel_vsclnc_vvssml 
__builtin_ve_vl_vsclnc_vvssml\n" "#define _vel_vsclot_vvssl __builtin_ve_vl_vsclot_vvssl\n" "#define _vel_vsclot_vvssml __builtin_ve_vl_vsclot_vvssml\n" "#define _vel_vsclncot_vvssl __builtin_ve_vl_vsclncot_vvssl\n" "#define _vel_vsclncot_vvssml __builtin_ve_vl_vsclncot_vvssml\n" "#define _vel_andm_mmm __builtin_ve_vl_andm_mmm\n" "#define _vel_andm_MMM __builtin_ve_vl_andm_MMM\n" "#define _vel_orm_mmm __builtin_ve_vl_orm_mmm\n" "#define _vel_orm_MMM __builtin_ve_vl_orm_MMM\n" "#define _vel_xorm_mmm __builtin_ve_vl_xorm_mmm\n" "#define _vel_xorm_MMM __builtin_ve_vl_xorm_MMM\n" "#define _vel_eqvm_mmm __builtin_ve_vl_eqvm_mmm\n" "#define _vel_eqvm_MMM __builtin_ve_vl_eqvm_MMM\n" "#define _vel_nndm_mmm __builtin_ve_vl_nndm_mmm\n" "#define _vel_nndm_MMM __builtin_ve_vl_nndm_MMM\n" "#define _vel_negm_mm __builtin_ve_vl_negm_mm\n" "#define _vel_negm_MM __builtin_ve_vl_negm_MM\n" "#define _vel_pcvm_sml __builtin_ve_vl_pcvm_sml\n" "#define _vel_lzvm_sml __builtin_ve_vl_lzvm_sml\n" "#define _vel_tovm_sml __builtin_ve_vl_tovm_sml\n" "#define _vel_lcr_sss __builtin_ve_vl_lcr_sss\n" "#define _vel_scr_sss __builtin_ve_vl_scr_sss\n" "#define _vel_tscr_ssss __builtin_ve_vl_tscr_ssss\n" "#define _vel_fidcr_sss __builtin_ve_vl_fidcr_sss\n" "#define _vel_fencei __builtin_ve_vl_fencei\n" "#define _vel_fencem_s __builtin_ve_vl_fencem_s\n" "#define _vel_fencec_s __builtin_ve_vl_fencec_s\n" "#define _vel_svob __builtin_ve_vl_svob\n" "" } , { "/builtins/vpclmulqdqintrin.h" , "/*===------------ vpclmulqdqintrin.h - VPCLMULQDQ intrinsics ---------------===\n" " *\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "#ifndef __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef 
__VPCLMULQDQINTRIN_H\n" "#define __VPCLMULQDQINTRIN_H\n" "\n" "#define _mm256_clmulepi64_epi128(A, B, I) \\\n" " ((__m256i)__builtin_ia32_pclmulqdq256((__v4di)(__m256i)(A), \\\n" " (__v4di)(__m256i)(B), \\\n" " (char)(I)))\n" "\n" "#ifdef __AVX512FINTRIN_H\n" "#define _mm512_clmulepi64_epi128(A, B, I) \\\n" " ((__m512i)__builtin_ia32_pclmulqdq512((__v8di)(__m512i)(A), \\\n" " (__v8di)(__m512i)(B), \\\n" " (char)(I)))\n" "#endif // __AVX512FINTRIN_H\n" "\n" "#endif /* __VPCLMULQDQINTRIN_H */\n" "\n" "" } , { "/builtins/waitpkgintrin.h" , "/*===----------------------- waitpkgintrin.h - WAITPKG --------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __WAITPKGINTRIN_H\n" "#define __WAITPKGINTRIN_H\n" "\n" "/* Define the default attributes for the functions in this file. 
*/\n" "#define __DEFAULT_FN_ATTRS \\\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"waitpkg\")))\n" "\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_umonitor (void * __address)\n" "{\n" " __builtin_ia32_umonitor (__address);\n" "}\n" "\n" "static __inline__ unsigned char __DEFAULT_FN_ATTRS\n" "_umwait (unsigned int __control, unsigned long long __counter)\n" "{\n" " return __builtin_ia32_umwait (__control,\n" " (unsigned int)(__counter >> 32), (unsigned int)__counter);\n" "}\n" "\n" "static __inline__ unsigned char __DEFAULT_FN_ATTRS\n" "_tpause (unsigned int __control, unsigned long long __counter)\n" "{\n" " return __builtin_ia32_tpause (__control,\n" " (unsigned int)(__counter >> 32), (unsigned int)__counter);\n" "}\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "\n" "#endif /* __WAITPKGINTRIN_H */\n" "" } , { "/builtins/wasm_simd128.h" , "/*===---- wasm_simd128.h - WebAssembly portable SIMD intrinsics ------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __WASM_SIMD128_H\n" "#define __WASM_SIMD128_H\n" "\n" "#include \n" "#include \n" "\n" "// User-facing type\n" "typedef int32_t v128_t __attribute__((__vector_size__(16), __aligned__(16)));\n" "\n" "// Internal types determined by clang builtin definitions\n" "typedef int32_t __v128_u __attribute__((__vector_size__(16), __aligned__(1)));\n" "typedef signed char __i8x16\n" " __attribute__((__vector_size__(16), __aligned__(16)));\n" "typedef unsigned char __u8x16\n" " __attribute__((__vector_size__(16), __aligned__(16)));\n" "typedef short __i16x8 __attribute__((__vector_size__(16), __aligned__(16)));\n" "typedef unsigned short __u16x8\n" " __attribute__((__vector_size__(16), __aligned__(16)));\n" "typedef 
int __i32x4 __attribute__((__vector_size__(16), __aligned__(16)));\n" "typedef unsigned int __u32x4\n" " __attribute__((__vector_size__(16), __aligned__(16)));\n" "typedef long long __i64x2 __attribute__((__vector_size__(16), __aligned__(16)));\n" "typedef unsigned long long __u64x2\n" " __attribute__((__vector_size__(16), __aligned__(16)));\n" "typedef float __f32x4 __attribute__((__vector_size__(16), __aligned__(16)));\n" "typedef double __f64x2 __attribute__((__vector_size__(16), __aligned__(16)));\n" "\n" "typedef signed char __i8x8 __attribute__((__vector_size__(8), __aligned__(8)));\n" "typedef unsigned char __u8x8\n" " __attribute__((__vector_size__(8), __aligned__(8)));\n" "typedef short __i16x4 __attribute__((__vector_size__(8), __aligned__(8)));\n" "typedef unsigned short __u16x4\n" " __attribute__((__vector_size__(8), __aligned__(8)));\n" "typedef int __i32x2 __attribute__((__vector_size__(8), __aligned__(8)));\n" "typedef unsigned int __u32x2\n" " __attribute__((__vector_size__(8), __aligned__(8)));\n" "typedef float __f32x2 __attribute__((__vector_size__(8), __aligned__(8)));\n" "\n" "#define __DEFAULT_FN_ATTRS \\\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"simd128\"), \\\n" " __min_vector_width__(128)))\n" "\n" "#define __REQUIRE_CONSTANT(c) \\\n" " __attribute__((__diagnose_if__(!__builtin_constant_p(c), \\\n" " #c \" must be constant\", \"error\")))\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_load(const void *__mem) {\n" " // UB-free unaligned access copied from xmmintrin.h\n" " struct __wasm_v128_load_struct {\n" " __v128_u __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " return ((const struct __wasm_v128_load_struct *)__mem)->__v;\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_v128_load8_splat(const void *__mem) {\n" " struct __wasm_v128_load8_splat_struct {\n" " uint8_t __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " uint8_t __v = ((const struct 
__wasm_v128_load8_splat_struct *)__mem)->__v;\n" " return (v128_t)(__u8x16){__v, __v, __v, __v, __v, __v, __v, __v,\n" " __v, __v, __v, __v, __v, __v, __v, __v};\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_v128_load16_splat(const void *__mem) {\n" " struct __wasm_v128_load16_splat_struct {\n" " uint16_t __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " uint16_t __v = ((const struct __wasm_v128_load16_splat_struct *)__mem)->__v;\n" " return (v128_t)(__u16x8){__v, __v, __v, __v, __v, __v, __v, __v};\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_v128_load32_splat(const void *__mem) {\n" " struct __wasm_v128_load32_splat_struct {\n" " uint32_t __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " uint32_t __v = ((const struct __wasm_v128_load32_splat_struct *)__mem)->__v;\n" " return (v128_t)(__u32x4){__v, __v, __v, __v};\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_v128_load64_splat(const void *__mem) {\n" " struct __wasm_v128_load64_splat_struct {\n" " uint64_t __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " uint64_t __v = ((const struct __wasm_v128_load64_splat_struct *)__mem)->__v;\n" " return (v128_t)(__u64x2){__v, __v};\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_i16x8_load8x8(const void *__mem) {\n" " struct __wasm_i16x8_load8x8_struct {\n" " __i8x8 __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " __i8x8 __v = ((const struct __wasm_i16x8_load8x8_struct *)__mem)->__v;\n" " return (v128_t) __builtin_convertvector(__v, __i16x8);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_u16x8_load8x8(const void *__mem) {\n" " struct __wasm_u16x8_load8x8_struct {\n" " __u8x8 __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " __u8x8 __v = ((const struct __wasm_u16x8_load8x8_struct *)__mem)->__v;\n" " return (v128_t) __builtin_convertvector(__v, __u16x8);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" 
"wasm_i32x4_load16x4(const void *__mem) {\n" " struct __wasm_i32x4_load16x4_struct {\n" " __i16x4 __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " __i16x4 __v = ((const struct __wasm_i32x4_load16x4_struct *)__mem)->__v;\n" " return (v128_t) __builtin_convertvector(__v, __i32x4);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_u32x4_load16x4(const void *__mem) {\n" " struct __wasm_u32x4_load16x4_struct {\n" " __u16x4 __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " __u16x4 __v = ((const struct __wasm_u32x4_load16x4_struct *)__mem)->__v;\n" " return (v128_t) __builtin_convertvector(__v, __u32x4);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_i64x2_load32x2(const void *__mem) {\n" " struct __wasm_i64x2_load32x2_struct {\n" " __i32x2 __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " __i32x2 __v = ((const struct __wasm_i64x2_load32x2_struct *)__mem)->__v;\n" " return (v128_t) __builtin_convertvector(__v, __i64x2);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_u64x2_load32x2(const void *__mem) {\n" " struct __wasm_u64x2_load32x2_struct {\n" " __u32x2 __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " __u32x2 __v = ((const struct __wasm_u64x2_load32x2_struct *)__mem)->__v;\n" " return (v128_t) __builtin_convertvector(__v, __u64x2);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_v128_load32_zero(const void *__mem) {\n" " struct __wasm_v128_load32_zero_struct {\n" " int32_t __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " int32_t __v = ((const struct __wasm_v128_load32_zero_struct *)__mem)->__v;\n" " return (v128_t)(__i32x4){__v, 0, 0, 0};\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_v128_load64_zero(const void *__mem) {\n" " struct __wasm_v128_load64_zero_struct {\n" " int64_t __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " int64_t __v = ((const struct __wasm_v128_load64_zero_struct 
*)__mem)->__v;\n" " return (v128_t)(__i64x2){__v, 0};\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_load8_lane(\n" " const void *__mem, v128_t __vec, int __i) __REQUIRE_CONSTANT(__i) {\n" " struct __wasm_v128_load8_lane_struct {\n" " int8_t __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " int8_t __v = ((const struct __wasm_v128_load8_lane_struct *)__mem)->__v;\n" " __i8x16 __ret = (__i8x16)__vec;\n" " __ret[__i] = __v;\n" " return (v128_t)__ret;\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_load16_lane(\n" " const void *__mem, v128_t __vec, int __i) __REQUIRE_CONSTANT(__i) {\n" " struct __wasm_v128_load16_lane_struct {\n" " int16_t __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " int16_t __v = ((const struct __wasm_v128_load16_lane_struct *)__mem)->__v;\n" " __i16x8 __ret = (__i16x8)__vec;\n" " __ret[__i] = __v;\n" " return (v128_t)__ret;\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_load32_lane(\n" " const void *__mem, v128_t __vec, int __i) __REQUIRE_CONSTANT(__i) {\n" " struct __wasm_v128_load32_lane_struct {\n" " int32_t __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " int32_t __v = ((const struct __wasm_v128_load32_lane_struct *)__mem)->__v;\n" " __i32x4 __ret = (__i32x4)__vec;\n" " __ret[__i] = __v;\n" " return (v128_t)__ret;\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_load64_lane(\n" " const void *__mem, v128_t __vec, int __i) __REQUIRE_CONSTANT(__i) {\n" " struct __wasm_v128_load64_lane_struct {\n" " int64_t __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " int64_t __v = ((const struct __wasm_v128_load64_lane_struct *)__mem)->__v;\n" " __i64x2 __ret = (__i64x2)__vec;\n" " __ret[__i] = __v;\n" " return (v128_t)__ret;\n" "}\n" "\n" "static __inline__ void __DEFAULT_FN_ATTRS wasm_v128_store(void *__mem,\n" " v128_t __a) {\n" " // UB-free unaligned access copied from xmmintrin.h\n" " struct __wasm_v128_store_struct {\n" " 
__v128_u __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " ((struct __wasm_v128_store_struct *)__mem)->__v = __a;\n" "}\n" "\n" "static __inline__ void __DEFAULT_FN_ATTRS wasm_v128_store8_lane(void *__mem,\n" " v128_t __vec,\n" " int __i)\n" " __REQUIRE_CONSTANT(__i) {\n" " struct __wasm_v128_store8_lane_struct {\n" " int8_t __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " ((struct __wasm_v128_store8_lane_struct *)__mem)->__v = ((__i8x16)__vec)[__i];\n" "}\n" "\n" "static __inline__ void __DEFAULT_FN_ATTRS wasm_v128_store16_lane(void *__mem,\n" " v128_t __vec,\n" " int __i)\n" " __REQUIRE_CONSTANT(__i) {\n" " struct __wasm_v128_store16_lane_struct {\n" " int16_t __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " ((struct __wasm_v128_store16_lane_struct *)__mem)->__v =\n" " ((__i16x8)__vec)[__i];\n" "}\n" "\n" "static __inline__ void __DEFAULT_FN_ATTRS wasm_v128_store32_lane(void *__mem,\n" " v128_t __vec,\n" " int __i)\n" " __REQUIRE_CONSTANT(__i) {\n" " struct __wasm_v128_store32_lane_struct {\n" " int32_t __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " ((struct __wasm_v128_store32_lane_struct *)__mem)->__v =\n" " ((__i32x4)__vec)[__i];\n" "}\n" "\n" "static __inline__ void __DEFAULT_FN_ATTRS wasm_v128_store64_lane(void *__mem,\n" " v128_t __vec,\n" " int __i)\n" " __REQUIRE_CONSTANT(__i) {\n" " struct __wasm_v128_store64_lane_struct {\n" " int64_t __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " ((struct __wasm_v128_store64_lane_struct *)__mem)->__v =\n" " ((__i64x2)__vec)[__i];\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_i8x16_make(int8_t __c0, int8_t __c1, int8_t __c2, int8_t __c3, int8_t __c4,\n" " int8_t __c5, int8_t __c6, int8_t __c7, int8_t __c8, int8_t __c9,\n" " int8_t __c10, int8_t __c11, int8_t __c12, int8_t __c13,\n" " int8_t __c14, int8_t __c15) {\n" " return (v128_t)(__i8x16){__c0, __c1, __c2, __c3, __c4, __c5,\n" " __c6, __c7, __c8, __c9, __c10, __c11,\n" " __c12, 
__c13, __c14, __c15};\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_u8x16_make(uint8_t __c0, uint8_t __c1, uint8_t __c2, uint8_t __c3,\n" " uint8_t __c4, uint8_t __c5, uint8_t __c6, uint8_t __c7,\n" " uint8_t __c8, uint8_t __c9, uint8_t __c10, uint8_t __c11,\n" " uint8_t __c12, uint8_t __c13, uint8_t __c14, uint8_t __c15) {\n" " return (v128_t)(__u8x16){__c0, __c1, __c2, __c3, __c4, __c5,\n" " __c6, __c7, __c8, __c9, __c10, __c11,\n" " __c12, __c13, __c14, __c15};\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_i16x8_make(int16_t __c0, int16_t __c1, int16_t __c2, int16_t __c3,\n" " int16_t __c4, int16_t __c5, int16_t __c6, int16_t __c7) {\n" " return (v128_t)(__i16x8){__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7};\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_u16x8_make(uint16_t __c0, uint16_t __c1, uint16_t __c2, uint16_t __c3,\n" " uint16_t __c4, uint16_t __c5, uint16_t __c6, uint16_t __c7) {\n" " return (v128_t)(__u16x8){__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7};\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_make(int32_t __c0,\n" " int32_t __c1,\n" " int32_t __c2,\n" " int32_t __c3) {\n" " return (v128_t)(__i32x4){__c0, __c1, __c2, __c3};\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_make(uint32_t __c0,\n" " uint32_t __c1,\n" " uint32_t __c2,\n" " uint32_t __c3) {\n" " return (v128_t)(__u32x4){__c0, __c1, __c2, __c3};\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_make(int64_t __c0,\n" " int64_t __c1) {\n" " return (v128_t)(__i64x2){__c0, __c1};\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u64x2_make(uint64_t __c0,\n" " uint64_t __c1) {\n" " return (v128_t)(__u64x2){__c0, __c1};\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_make(float __c0,\n" " float __c1,\n" " float __c2,\n" " float __c3) {\n" " return (v128_t)(__f32x4){__c0, __c1, __c2, __c3};\n" "}\n" "\n" "static __inline__ 
v128_t __DEFAULT_FN_ATTRS wasm_f64x2_make(double __c0,\n" " double __c1) {\n" " return (v128_t)(__f64x2){__c0, __c1};\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_i8x16_const(int8_t __c0, int8_t __c1, int8_t __c2, int8_t __c3,\n" " int8_t __c4, int8_t __c5, int8_t __c6, int8_t __c7,\n" " int8_t __c8, int8_t __c9, int8_t __c10, int8_t __c11,\n" " int8_t __c12, int8_t __c13, int8_t __c14, int8_t __c15)\n" " __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) __REQUIRE_CONSTANT(__c2)\n" " __REQUIRE_CONSTANT(__c3) __REQUIRE_CONSTANT(__c4)\n" " __REQUIRE_CONSTANT(__c5) __REQUIRE_CONSTANT(__c6)\n" " __REQUIRE_CONSTANT(__c7) __REQUIRE_CONSTANT(__c8)\n" " __REQUIRE_CONSTANT(__c9) __REQUIRE_CONSTANT(__c10)\n" " __REQUIRE_CONSTANT(__c11) __REQUIRE_CONSTANT(__c12)\n" " __REQUIRE_CONSTANT(__c13) __REQUIRE_CONSTANT(__c14)\n" " __REQUIRE_CONSTANT(__c15) {\n" " return (v128_t)(__i8x16){__c0, __c1, __c2, __c3, __c4, __c5,\n" " __c6, __c7, __c8, __c9, __c10, __c11,\n" " __c12, __c13, __c14, __c15};\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_u8x16_const(uint8_t __c0, uint8_t __c1, uint8_t __c2, uint8_t __c3,\n" " uint8_t __c4, uint8_t __c5, uint8_t __c6, uint8_t __c7,\n" " uint8_t __c8, uint8_t __c9, uint8_t __c10, uint8_t __c11,\n" " uint8_t __c12, uint8_t __c13, uint8_t __c14, uint8_t __c15)\n" " __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) __REQUIRE_CONSTANT(__c2)\n" " __REQUIRE_CONSTANT(__c3) __REQUIRE_CONSTANT(__c4)\n" " __REQUIRE_CONSTANT(__c5) __REQUIRE_CONSTANT(__c6)\n" " __REQUIRE_CONSTANT(__c7) __REQUIRE_CONSTANT(__c8)\n" " __REQUIRE_CONSTANT(__c9) __REQUIRE_CONSTANT(__c10)\n" " __REQUIRE_CONSTANT(__c11) __REQUIRE_CONSTANT(__c12)\n" " __REQUIRE_CONSTANT(__c13) __REQUIRE_CONSTANT(__c14)\n" " __REQUIRE_CONSTANT(__c15) {\n" " return (v128_t)(__u8x16){__c0, __c1, __c2, __c3, __c4, __c5,\n" " __c6, __c7, __c8, __c9, __c10, __c11,\n" " __c12, __c13, __c14, __c15};\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" 
"wasm_i16x8_const(int16_t __c0, int16_t __c1, int16_t __c2, int16_t __c3,\n" " int16_t __c4, int16_t __c5, int16_t __c6, int16_t __c7)\n" " __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) __REQUIRE_CONSTANT(__c2)\n" " __REQUIRE_CONSTANT(__c3) __REQUIRE_CONSTANT(__c4)\n" " __REQUIRE_CONSTANT(__c5) __REQUIRE_CONSTANT(__c6)\n" " __REQUIRE_CONSTANT(__c7) {\n" " return (v128_t)(__i16x8){__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7};\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_u16x8_const(uint16_t __c0, uint16_t __c1, uint16_t __c2, uint16_t __c3,\n" " uint16_t __c4, uint16_t __c5, uint16_t __c6, uint16_t __c7)\n" " __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) __REQUIRE_CONSTANT(__c2)\n" " __REQUIRE_CONSTANT(__c3) __REQUIRE_CONSTANT(__c4)\n" " __REQUIRE_CONSTANT(__c5) __REQUIRE_CONSTANT(__c6)\n" " __REQUIRE_CONSTANT(__c7) {\n" " return (v128_t)(__u16x8){__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7};\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_i32x4_const(int32_t __c0, int32_t __c1, int32_t __c2, int32_t __c3)\n" " __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) __REQUIRE_CONSTANT(__c2)\n" " __REQUIRE_CONSTANT(__c3) {\n" " return (v128_t)(__i32x4){__c0, __c1, __c2, __c3};\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_u32x4_const(uint32_t __c0, uint32_t __c1, uint32_t __c2, uint32_t __c3)\n" " __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) __REQUIRE_CONSTANT(__c2)\n" " __REQUIRE_CONSTANT(__c3) {\n" " return (v128_t)(__u32x4){__c0, __c1, __c2, __c3};\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_const(int64_t __c0,\n" " int64_t __c1)\n" " __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) {\n" " return (v128_t)(__i64x2){__c0, __c1};\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u64x2_const(uint64_t __c0,\n" " uint64_t __c1)\n" " __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) {\n" " return (v128_t)(__u64x2){__c0, __c1};\n" "}\n" "\n" "static 
__inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_f32x4_const(float __c0, float __c1, float __c2, float __c3)\n" " __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) __REQUIRE_CONSTANT(__c2)\n" " __REQUIRE_CONSTANT(__c3) {\n" " return (v128_t)(__f32x4){__c0, __c1, __c2, __c3};\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_const(double __c0,\n" " double __c1)\n" " __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) {\n" " return (v128_t)(__f64x2){__c0, __c1};\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_const_splat(int8_t __c)\n" " __REQUIRE_CONSTANT(__c) {\n" " return (v128_t)(__i8x16){__c, __c, __c, __c, __c, __c, __c, __c,\n" " __c, __c, __c, __c, __c, __c, __c, __c};\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_const_splat(uint8_t __c)\n" " __REQUIRE_CONSTANT(__c) {\n" " return (v128_t)(__u8x16){__c, __c, __c, __c, __c, __c, __c, __c,\n" " __c, __c, __c, __c, __c, __c, __c, __c};\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_const_splat(int16_t __c)\n" " __REQUIRE_CONSTANT(__c) {\n" " return (v128_t)(__i16x8){__c, __c, __c, __c, __c, __c, __c, __c};\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_const_splat(uint16_t __c)\n" " __REQUIRE_CONSTANT(__c) {\n" " return (v128_t)(__u16x8){__c, __c, __c, __c, __c, __c, __c, __c};\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_const_splat(int32_t __c)\n" " __REQUIRE_CONSTANT(__c) {\n" " return (v128_t)(__i32x4){__c, __c, __c, __c};\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_const_splat(uint32_t __c)\n" " __REQUIRE_CONSTANT(__c) {\n" " return (v128_t)(__u32x4){__c, __c, __c, __c};\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_const_splat(int64_t __c)\n" " __REQUIRE_CONSTANT(__c) {\n" " return (v128_t)(__i64x2){__c, __c};\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u64x2_const_splat(uint64_t __c)\n" " 
__REQUIRE_CONSTANT(__c) {\n" " return (v128_t)(__u64x2){__c, __c};\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_const_splat(float __c)\n" " __REQUIRE_CONSTANT(__c) {\n" " return (v128_t)(__f32x4){__c, __c, __c, __c};\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_const_splat(double __c)\n" " __REQUIRE_CONSTANT(__c) {\n" " return (v128_t)(__f64x2){__c, __c};\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_splat(int8_t __a) {\n" " return (v128_t)(__i8x16){__a, __a, __a, __a, __a, __a, __a, __a,\n" " __a, __a, __a, __a, __a, __a, __a, __a};\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_splat(uint8_t __a) {\n" " return (v128_t)(__u8x16){__a, __a, __a, __a, __a, __a, __a, __a,\n" " __a, __a, __a, __a, __a, __a, __a, __a};\n" "}\n" "\n" "static __inline__ int8_t __DEFAULT_FN_ATTRS wasm_i8x16_extract_lane(v128_t __a,\n" " int __i)\n" " __REQUIRE_CONSTANT(__i) {\n" " return ((__i8x16)__a)[__i];\n" "}\n" "\n" "static __inline__ uint8_t __DEFAULT_FN_ATTRS wasm_u8x16_extract_lane(v128_t __a,\n" " int __i)\n" " __REQUIRE_CONSTANT(__i) {\n" " return ((__u8x16)__a)[__i];\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_replace_lane(v128_t __a,\n" " int __i,\n" " int8_t __b)\n" " __REQUIRE_CONSTANT(__i) {\n" " __i8x16 __v = (__i8x16)__a;\n" " __v[__i] = __b;\n" " return (v128_t)__v;\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_replace_lane(v128_t __a,\n" " int __i,\n" " uint8_t __b)\n" " __REQUIRE_CONSTANT(__i) {\n" " __u8x16 __v = (__u8x16)__a;\n" " __v[__i] = __b;\n" " return (v128_t)__v;\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_splat(int16_t __a) {\n" " return (v128_t)(__i16x8){__a, __a, __a, __a, __a, __a, __a, __a};\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_splat(uint16_t __a) {\n" " return (v128_t)(__u16x8){__a, __a, __a, __a, __a, __a, __a, __a};\n" "}\n" "\n" "static __inline__ 
int16_t __DEFAULT_FN_ATTRS wasm_i16x8_extract_lane(v128_t __a,\n" " int __i)\n" " __REQUIRE_CONSTANT(__i) {\n" " return ((__i16x8)__a)[__i];\n" "}\n" "\n" "static __inline__ uint16_t __DEFAULT_FN_ATTRS\n" "wasm_u16x8_extract_lane(v128_t __a, int __i) __REQUIRE_CONSTANT(__i) {\n" " return ((__u16x8)__a)[__i];\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_replace_lane(v128_t __a,\n" " int __i,\n" " int16_t __b)\n" " __REQUIRE_CONSTANT(__i) {\n" " __i16x8 __v = (__i16x8)__a;\n" " __v[__i] = __b;\n" " return (v128_t)__v;\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_replace_lane(\n" " v128_t __a, int __i, uint16_t __b) __REQUIRE_CONSTANT(__i) {\n" " __u16x8 __v = (__u16x8)__a;\n" " __v[__i] = __b;\n" " return (v128_t)__v;\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_splat(int32_t __a) {\n" " return (v128_t)(__i32x4){__a, __a, __a, __a};\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_splat(uint32_t __a) {\n" " return (v128_t)(__u32x4){__a, __a, __a, __a};\n" "}\n" "\n" "static __inline__ int32_t __DEFAULT_FN_ATTRS wasm_i32x4_extract_lane(v128_t __a,\n" " int __i)\n" " __REQUIRE_CONSTANT(__i) {\n" " return ((__i32x4)__a)[__i];\n" "}\n" "\n" "static __inline__ uint32_t __DEFAULT_FN_ATTRS\n" "wasm_u32x4_extract_lane(v128_t __a, int __i) __REQUIRE_CONSTANT(__i) {\n" " return ((__u32x4)__a)[__i];\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_replace_lane(v128_t __a,\n" " int __i,\n" " int32_t __b)\n" " __REQUIRE_CONSTANT(__i) {\n" " __i32x4 __v = (__i32x4)__a;\n" " __v[__i] = __b;\n" " return (v128_t)__v;\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_replace_lane(\n" " v128_t __a, int __i, uint32_t __b) __REQUIRE_CONSTANT(__i) {\n" " __u32x4 __v = (__u32x4)__a;\n" " __v[__i] = __b;\n" " return (v128_t)__v;\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_splat(int64_t __a) {\n" " return (v128_t)(__i64x2){__a, 
__a};\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u64x2_splat(uint64_t __a) {\n" " return (v128_t)(__u64x2){__a, __a};\n" "}\n" "\n" "static __inline__ int64_t __DEFAULT_FN_ATTRS wasm_i64x2_extract_lane(v128_t __a,\n" " int __i)\n" " __REQUIRE_CONSTANT(__i) {\n" " return ((__i64x2)__a)[__i];\n" "}\n" "\n" "static __inline__ uint64_t __DEFAULT_FN_ATTRS\n" "wasm_u64x2_extract_lane(v128_t __a, int __i) __REQUIRE_CONSTANT(__i) {\n" " return ((__u64x2)__a)[__i];\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_replace_lane(v128_t __a,\n" " int __i,\n" " int64_t __b)\n" " __REQUIRE_CONSTANT(__i) {\n" " __i64x2 __v = (__i64x2)__a;\n" " __v[__i] = __b;\n" " return (v128_t)__v;\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u64x2_replace_lane(\n" " v128_t __a, int __i, uint64_t __b) __REQUIRE_CONSTANT(__i) {\n" " __u64x2 __v = (__u64x2)__a;\n" " __v[__i] = __b;\n" " return (v128_t)__v;\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_splat(float __a) {\n" " return (v128_t)(__f32x4){__a, __a, __a, __a};\n" "}\n" "\n" "static __inline__ float __DEFAULT_FN_ATTRS wasm_f32x4_extract_lane(v128_t __a,\n" " int __i)\n" " __REQUIRE_CONSTANT(__i) {\n" " return ((__f32x4)__a)[__i];\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_replace_lane(v128_t __a,\n" " int __i,\n" " float __b)\n" " __REQUIRE_CONSTANT(__i) {\n" " __f32x4 __v = (__f32x4)__a;\n" " __v[__i] = __b;\n" " return (v128_t)__v;\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_splat(double __a) {\n" " return (v128_t)(__f64x2){__a, __a};\n" "}\n" "\n" "static __inline__ double __DEFAULT_FN_ATTRS wasm_f64x2_extract_lane(v128_t __a,\n" " int __i)\n" " __REQUIRE_CONSTANT(__i) {\n" " return ((__f64x2)__a)[__i];\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_replace_lane(v128_t __a,\n" " int __i,\n" " double __b)\n" " __REQUIRE_CONSTANT(__i) {\n" " __f64x2 __v = (__f64x2)__a;\n" " 
__v[__i] = __b;\n" " return (v128_t)__v;\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_eq(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__i8x16)__a == (__i8x16)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_ne(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__i8x16)__a != (__i8x16)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_lt(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__i8x16)__a < (__i8x16)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_lt(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__u8x16)__a < (__u8x16)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_gt(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__i8x16)__a > (__i8x16)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_gt(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__u8x16)__a > (__u8x16)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_le(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__i8x16)__a <= (__i8x16)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_le(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__u8x16)__a <= (__u8x16)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_ge(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__i8x16)__a >= (__i8x16)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_ge(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__u8x16)__a >= (__u8x16)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_eq(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__i16x8)__a == (__i16x8)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_ne(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__u16x8)__a != (__u16x8)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_lt(v128_t __a,\n" " 
v128_t __b) {\n" " return (v128_t)((__i16x8)__a < (__i16x8)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_lt(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__u16x8)__a < (__u16x8)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_gt(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__i16x8)__a > (__i16x8)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_gt(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__u16x8)__a > (__u16x8)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_le(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__i16x8)__a <= (__i16x8)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_le(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__u16x8)__a <= (__u16x8)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_ge(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__i16x8)__a >= (__i16x8)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_ge(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__u16x8)__a >= (__u16x8)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_eq(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__i32x4)__a == (__i32x4)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_ne(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__i32x4)__a != (__i32x4)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_lt(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__i32x4)__a < (__i32x4)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_lt(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__u32x4)__a < (__u32x4)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_gt(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__i32x4)__a > (__i32x4)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS 
wasm_u32x4_gt(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__u32x4)__a > (__u32x4)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_le(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__i32x4)__a <= (__i32x4)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_le(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__u32x4)__a <= (__u32x4)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_ge(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__i32x4)__a >= (__i32x4)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_ge(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__u32x4)__a >= (__u32x4)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_eq(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__i64x2)__a == (__i64x2)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_ne(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__i64x2)__a != (__i64x2)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_lt(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__i64x2)__a < (__i64x2)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_gt(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__i64x2)__a > (__i64x2)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_le(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__i64x2)__a <= (__i64x2)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_ge(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__i64x2)__a >= (__i64x2)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_eq(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__f32x4)__a == (__f32x4)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_ne(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__f32x4)__a != (__f32x4)__b);\n" "}\n" "\n" "static __inline__ 
v128_t __DEFAULT_FN_ATTRS wasm_f32x4_lt(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__f32x4)__a < (__f32x4)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_gt(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__f32x4)__a > (__f32x4)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_le(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__f32x4)__a <= (__f32x4)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_ge(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__f32x4)__a >= (__f32x4)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_eq(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__f64x2)__a == (__f64x2)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_ne(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__f64x2)__a != (__f64x2)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_lt(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__f64x2)__a < (__f64x2)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_gt(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__f64x2)__a > (__f64x2)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_le(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__f64x2)__a <= (__f64x2)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_ge(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__f64x2)__a >= (__f64x2)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_not(v128_t __a) {\n" " return ~__a;\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_and(v128_t __a,\n" " v128_t __b) {\n" " return __a & __b;\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_or(v128_t __a,\n" " v128_t __b) {\n" " return __a | __b;\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_xor(v128_t __a,\n" " v128_t __b) {\n" " return 
__a ^ __b;\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_andnot(v128_t __a,\n" " v128_t __b) {\n" " return __a & ~__b;\n" "}\n" "\n" "static __inline__ bool __DEFAULT_FN_ATTRS wasm_v128_any_true(v128_t __a) {\n" " return __builtin_wasm_any_true_v128((__i8x16)__a);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_bitselect(v128_t __a,\n" " v128_t __b,\n" " v128_t __mask) {\n" " return (v128_t)__builtin_wasm_bitselect((__i32x4)__a, (__i32x4)__b,\n" " (__i32x4)__mask);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_abs(v128_t __a) {\n" " return (v128_t)__builtin_wasm_abs_i8x16((__i8x16)__a);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_neg(v128_t __a) {\n" " return (v128_t)(-(__u8x16)__a);\n" "}\n" "\n" "static __inline__ bool __DEFAULT_FN_ATTRS wasm_i8x16_all_true(v128_t __a) {\n" " return __builtin_wasm_all_true_i8x16((__i8x16)__a);\n" "}\n" "\n" "static __inline__ uint32_t __DEFAULT_FN_ATTRS wasm_i8x16_bitmask(v128_t __a) {\n" " return __builtin_wasm_bitmask_i8x16((__i8x16)__a);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_popcnt(v128_t __a) {\n" " return (v128_t)__builtin_wasm_popcnt_i8x16((__i8x16)__a);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_shl(v128_t __a,\n" " uint32_t __b) {\n" " return (v128_t)((__i8x16)__a << (__b & 0x7));\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_shr(v128_t __a,\n" " uint32_t __b) {\n" " return (v128_t)((__i8x16)__a >> (__b & 0x7));\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_shr(v128_t __a,\n" " uint32_t __b) {\n" " return (v128_t)((__u8x16)__a >> (__b & 0x7));\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_add(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__u8x16)__a + (__u8x16)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_add_sat(v128_t __a,\n" " v128_t __b) {\n" " return 
(v128_t)__builtin_wasm_add_sat_s_i8x16((__i8x16)__a, (__i8x16)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_add_sat(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)__builtin_wasm_add_sat_u_i8x16((__u8x16)__a, (__u8x16)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_sub(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__u8x16)__a - (__u8x16)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_sub_sat(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)__builtin_wasm_sub_sat_s_i8x16((__i8x16)__a, (__i8x16)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_sub_sat(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)__builtin_wasm_sub_sat_u_i8x16((__u8x16)__a, (__u8x16)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_min(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)__builtin_wasm_min_s_i8x16((__i8x16)__a, (__i8x16)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_min(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)__builtin_wasm_min_u_i8x16((__u8x16)__a, (__u8x16)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_max(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)__builtin_wasm_max_s_i8x16((__i8x16)__a, (__i8x16)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_max(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)__builtin_wasm_max_u_i8x16((__u8x16)__a, (__u8x16)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_avgr(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)__builtin_wasm_avgr_u_i8x16((__u8x16)__a, (__u8x16)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_abs(v128_t __a) {\n" " return (v128_t)__builtin_wasm_abs_i16x8((__i16x8)__a);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_neg(v128_t __a) {\n" " return (v128_t)(-(__u16x8)__a);\n" "}\n" "\n" "static 
__inline__ bool __DEFAULT_FN_ATTRS wasm_i16x8_all_true(v128_t __a) {\n" " return __builtin_wasm_all_true_i16x8((__i16x8)__a);\n" "}\n" "\n" "static __inline__ uint32_t __DEFAULT_FN_ATTRS wasm_i16x8_bitmask(v128_t __a) {\n" " return __builtin_wasm_bitmask_i16x8((__i16x8)__a);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_shl(v128_t __a,\n" " uint32_t __b) {\n" " return (v128_t)((__i16x8)__a << (__b & 0xF));\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_shr(v128_t __a,\n" " uint32_t __b) {\n" " return (v128_t)((__i16x8)__a >> (__b & 0xF));\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_shr(v128_t __a,\n" " uint32_t __b) {\n" " return (v128_t)((__u16x8)__a >> (__b & 0xF));\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_add(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__u16x8)__a + (__u16x8)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_add_sat(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)__builtin_wasm_add_sat_s_i16x8((__i16x8)__a, (__i16x8)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_add_sat(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)__builtin_wasm_add_sat_u_i16x8((__u16x8)__a, (__u16x8)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_sub(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__i16x8)__a - (__i16x8)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_sub_sat(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)__builtin_wasm_sub_sat_s_i16x8((__i16x8)__a, (__i16x8)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_sub_sat(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)__builtin_wasm_sub_sat_u_i16x8((__u16x8)__a, (__u16x8)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_mul(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__u16x8)__a * (__u16x8)__b);\n" "}\n" "\n" "static 
__inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_min(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)__builtin_wasm_min_s_i16x8((__i16x8)__a, (__i16x8)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_min(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)__builtin_wasm_min_u_i16x8((__u16x8)__a, (__u16x8)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_max(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)__builtin_wasm_max_s_i16x8((__i16x8)__a, (__i16x8)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_max(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)__builtin_wasm_max_u_i16x8((__u16x8)__a, (__u16x8)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_avgr(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)__builtin_wasm_avgr_u_i16x8((__u16x8)__a, (__u16x8)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_abs(v128_t __a) {\n" " return (v128_t)__builtin_wasm_abs_i32x4((__i32x4)__a);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_neg(v128_t __a) {\n" " return (v128_t)(-(__u32x4)__a);\n" "}\n" "\n" "static __inline__ bool __DEFAULT_FN_ATTRS wasm_i32x4_all_true(v128_t __a) {\n" " return __builtin_wasm_all_true_i32x4((__i32x4)__a);\n" "}\n" "\n" "static __inline__ uint32_t __DEFAULT_FN_ATTRS wasm_i32x4_bitmask(v128_t __a) {\n" " return __builtin_wasm_bitmask_i32x4((__i32x4)__a);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_shl(v128_t __a,\n" " uint32_t __b) {\n" " return (v128_t)((__i32x4)__a << (__b & 0x1F));\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_shr(v128_t __a,\n" " uint32_t __b) {\n" " return (v128_t)((__i32x4)__a >> (__b & 0x1F));\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_shr(v128_t __a,\n" " uint32_t __b) {\n" " return (v128_t)((__u32x4)__a >> (__b & 0x1F));\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS 
wasm_i32x4_add(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__u32x4)__a + (__u32x4)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_sub(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__u32x4)__a - (__u32x4)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_mul(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__u32x4)__a * (__u32x4)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_min(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)__builtin_wasm_min_s_i32x4((__i32x4)__a, (__i32x4)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_min(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)__builtin_wasm_min_u_i32x4((__u32x4)__a, (__u32x4)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_max(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)__builtin_wasm_max_s_i32x4((__i32x4)__a, (__i32x4)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_max(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)__builtin_wasm_max_u_i32x4((__u32x4)__a, (__u32x4)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_dot_i16x8(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)__builtin_wasm_dot_s_i32x4_i16x8((__i16x8)__a, (__i16x8)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_abs(v128_t __a) {\n" " return (v128_t)__builtin_wasm_abs_i64x2((__i64x2)__a);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_neg(v128_t __a) {\n" " return (v128_t)(-(__u64x2)__a);\n" "}\n" "\n" "static __inline__ bool __DEFAULT_FN_ATTRS wasm_i64x2_all_true(v128_t __a) {\n" " return __builtin_wasm_all_true_i64x2((__i64x2)__a);\n" "}\n" "\n" "static __inline__ uint32_t __DEFAULT_FN_ATTRS wasm_i64x2_bitmask(v128_t __a) {\n" " return __builtin_wasm_bitmask_i64x2((__i64x2)__a);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_shl(v128_t __a,\n" " uint32_t 
__b) {\n" " return (v128_t)((__i64x2)__a << ((int64_t)__b & 0x3F));\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_shr(v128_t __a,\n" " uint32_t __b) {\n" " return (v128_t)((__i64x2)__a >> ((int64_t)__b & 0x3F));\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u64x2_shr(v128_t __a,\n" " uint32_t __b) {\n" " return (v128_t)((__u64x2)__a >> ((int64_t)__b & 0x3F));\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_add(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__u64x2)__a + (__u64x2)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_sub(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__u64x2)__a - (__u64x2)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_mul(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__u64x2)__a * (__u64x2)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_abs(v128_t __a) {\n" " return (v128_t)__builtin_wasm_abs_f32x4((__f32x4)__a);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_neg(v128_t __a) {\n" " return (v128_t)(-(__f32x4)__a);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_sqrt(v128_t __a) {\n" " return (v128_t)__builtin_wasm_sqrt_f32x4((__f32x4)__a);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_ceil(v128_t __a) {\n" " return (v128_t)__builtin_wasm_ceil_f32x4((__f32x4)__a);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_floor(v128_t __a) {\n" " return (v128_t)__builtin_wasm_floor_f32x4((__f32x4)__a);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_trunc(v128_t __a) {\n" " return (v128_t)__builtin_wasm_trunc_f32x4((__f32x4)__a);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_nearest(v128_t __a) {\n" " return (v128_t)__builtin_wasm_nearest_f32x4((__f32x4)__a);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_add(v128_t __a,\n" " 
v128_t __b) {\n" " return (v128_t)((__f32x4)__a + (__f32x4)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_sub(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__f32x4)__a - (__f32x4)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_mul(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__f32x4)__a * (__f32x4)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_div(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__f32x4)__a / (__f32x4)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_min(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)__builtin_wasm_min_f32x4((__f32x4)__a, (__f32x4)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_max(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)__builtin_wasm_max_f32x4((__f32x4)__a, (__f32x4)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_pmin(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)__builtin_wasm_pmin_f32x4((__f32x4)__a, (__f32x4)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_pmax(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)__builtin_wasm_pmax_f32x4((__f32x4)__a, (__f32x4)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_abs(v128_t __a) {\n" " return (v128_t)__builtin_wasm_abs_f64x2((__f64x2)__a);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_neg(v128_t __a) {\n" " return (v128_t)(-(__f64x2)__a);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_sqrt(v128_t __a) {\n" " return (v128_t)__builtin_wasm_sqrt_f64x2((__f64x2)__a);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_ceil(v128_t __a) {\n" " return (v128_t)__builtin_wasm_ceil_f64x2((__f64x2)__a);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_floor(v128_t __a) {\n" " return (v128_t)__builtin_wasm_floor_f64x2((__f64x2)__a);\n" "}\n" "\n" "static 
__inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_trunc(v128_t __a) {\n" " return (v128_t)__builtin_wasm_trunc_f64x2((__f64x2)__a);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_nearest(v128_t __a) {\n" " return (v128_t)__builtin_wasm_nearest_f64x2((__f64x2)__a);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_add(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__f64x2)__a + (__f64x2)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_sub(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__f64x2)__a - (__f64x2)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_mul(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__f64x2)__a * (__f64x2)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_div(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)((__f64x2)__a / (__f64x2)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_min(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)__builtin_wasm_min_f64x2((__f64x2)__a, (__f64x2)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_max(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)__builtin_wasm_max_f64x2((__f64x2)__a, (__f64x2)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_pmin(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)__builtin_wasm_pmin_f64x2((__f64x2)__a, (__f64x2)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_pmax(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)__builtin_wasm_pmax_f64x2((__f64x2)__a, (__f64x2)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_i32x4_trunc_sat_f32x4(v128_t __a) {\n" " return (v128_t)__builtin_wasm_trunc_saturate_s_i32x4_f32x4((__f32x4)__a);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_u32x4_trunc_sat_f32x4(v128_t __a) {\n" " return (v128_t)__builtin_wasm_trunc_saturate_u_i32x4_f32x4((__f32x4)__a);\n" 
"}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_f32x4_convert_i32x4(v128_t __a) {\n" " return (v128_t) __builtin_convertvector((__i32x4)__a, __f32x4);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_f32x4_convert_u32x4(v128_t __a) {\n" " return (v128_t) __builtin_convertvector((__u32x4)__a, __f32x4);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_f64x2_convert_low_i32x4(v128_t __a) {\n" " return (v128_t) __builtin_convertvector((__i32x2){__a[0], __a[1]}, __f64x2);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_f64x2_convert_low_u32x4(v128_t __a) {\n" " return (v128_t) __builtin_convertvector((__u32x2){__a[0], __a[1]}, __f64x2);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_i32x4_trunc_sat_f64x2_zero(v128_t __a) {\n" " return (v128_t)__builtin_wasm_trunc_sat_s_zero_f64x2_i32x4((__f64x2)__a);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_u32x4_trunc_sat_f64x2_zero(v128_t __a) {\n" " return (v128_t)__builtin_wasm_trunc_sat_u_zero_f64x2_i32x4((__f64x2)__a);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_f32x4_demote_f64x2_zero(v128_t __a) {\n" " return (v128_t) __builtin_convertvector(\n" " __builtin_shufflevector((__f64x2)__a, (__f64x2){0, 0}, 0, 1, 2, 3),\n" " __f32x4);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_f64x2_promote_low_f32x4(v128_t __a) {\n" " return (v128_t) __builtin_convertvector(\n" " (__f32x2){((__f32x4)__a)[0], ((__f32x4)__a)[1]}, __f64x2);\n" "}\n" "\n" "#define wasm_i8x16_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, \\\n" " __c7, __c8, __c9, __c10, __c11, __c12, __c13, \\\n" " __c14, __c15) \\\n" " ((v128_t)__builtin_wasm_shuffle_i8x16( \\\n" " (__i8x16)(__a), (__i8x16)(__b), __c0, __c1, __c2, __c3, __c4, __c5, \\\n" " __c6, __c7, __c8, __c9, __c10, __c11, __c12, __c13, __c14, __c15))\n" "\n" "#define wasm_i16x8_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, 
__c6, \\\n" " __c7) \\\n" " ((v128_t)__builtin_wasm_shuffle_i8x16( \\\n" " (__i8x16)(__a), (__i8x16)(__b), (__c0)*2, (__c0)*2 + 1, (__c1)*2, \\\n" " (__c1)*2 + 1, (__c2)*2, (__c2)*2 + 1, (__c3)*2, (__c3)*2 + 1, (__c4)*2, \\\n" " (__c4)*2 + 1, (__c5)*2, (__c5)*2 + 1, (__c6)*2, (__c6)*2 + 1, (__c7)*2, \\\n" " (__c7)*2 + 1))\n" "\n" "#define wasm_i32x4_shuffle(__a, __b, __c0, __c1, __c2, __c3) \\\n" " ((v128_t)__builtin_wasm_shuffle_i8x16( \\\n" " (__i8x16)(__a), (__i8x16)(__b), (__c0)*4, (__c0)*4 + 1, (__c0)*4 + 2, \\\n" " (__c0)*4 + 3, (__c1)*4, (__c1)*4 + 1, (__c1)*4 + 2, (__c1)*4 + 3, \\\n" " (__c2)*4, (__c2)*4 + 1, (__c2)*4 + 2, (__c2)*4 + 3, (__c3)*4, \\\n" " (__c3)*4 + 1, (__c3)*4 + 2, (__c3)*4 + 3))\n" "\n" "#define wasm_i64x2_shuffle(__a, __b, __c0, __c1) \\\n" " ((v128_t)__builtin_wasm_shuffle_i8x16( \\\n" " (__i8x16)(__a), (__i8x16)(__b), (__c0)*8, (__c0)*8 + 1, (__c0)*8 + 2, \\\n" " (__c0)*8 + 3, (__c0)*8 + 4, (__c0)*8 + 5, (__c0)*8 + 6, (__c0)*8 + 7, \\\n" " (__c1)*8, (__c1)*8 + 1, (__c1)*8 + 2, (__c1)*8 + 3, (__c1)*8 + 4, \\\n" " (__c1)*8 + 5, (__c1)*8 + 6, (__c1)*8 + 7))\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_swizzle(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)__builtin_wasm_swizzle_i8x16((__i8x16)__a, (__i8x16)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_i8x16_narrow_i16x8(v128_t __a, v128_t __b) {\n" " return (v128_t)__builtin_wasm_narrow_s_i8x16_i16x8((__i16x8)__a,\n" " (__i16x8)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_u8x16_narrow_i16x8(v128_t __a, v128_t __b) {\n" " return (v128_t)__builtin_wasm_narrow_u_i8x16_i16x8((__i16x8)__a,\n" " (__i16x8)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_i16x8_narrow_i32x4(v128_t __a, v128_t __b) {\n" " return (v128_t)__builtin_wasm_narrow_s_i16x8_i32x4((__i32x4)__a,\n" " (__i32x4)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_u16x8_narrow_i32x4(v128_t __a, v128_t 
__b) {\n" " return (v128_t)__builtin_wasm_narrow_u_i16x8_i32x4((__i32x4)__a,\n" " (__i32x4)__b);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_i16x8_extend_low_i8x16(v128_t __a) {\n" " return (v128_t) __builtin_convertvector(\n" " (__i8x8){((__i8x16)__a)[0], ((__i8x16)__a)[1], ((__i8x16)__a)[2],\n" " ((__i8x16)__a)[3], ((__i8x16)__a)[4], ((__i8x16)__a)[5],\n" " ((__i8x16)__a)[6], ((__i8x16)__a)[7]},\n" " __i16x8);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_i16x8_extend_high_i8x16(v128_t __a) {\n" " return (v128_t) __builtin_convertvector(\n" " (__i8x8){((__i8x16)__a)[8], ((__i8x16)__a)[9], ((__i8x16)__a)[10],\n" " ((__i8x16)__a)[11], ((__i8x16)__a)[12], ((__i8x16)__a)[13],\n" " ((__i8x16)__a)[14], ((__i8x16)__a)[15]},\n" " __i16x8);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_u16x8_extend_low_u8x16(v128_t __a) {\n" " return (v128_t) __builtin_convertvector(\n" " (__u8x8){((__u8x16)__a)[0], ((__u8x16)__a)[1], ((__u8x16)__a)[2],\n" " ((__u8x16)__a)[3], ((__u8x16)__a)[4], ((__u8x16)__a)[5],\n" " ((__u8x16)__a)[6], ((__u8x16)__a)[7]},\n" " __u16x8);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_u16x8_extend_high_u8x16(v128_t __a) {\n" " return (v128_t) __builtin_convertvector(\n" " (__u8x8){((__u8x16)__a)[8], ((__u8x16)__a)[9], ((__u8x16)__a)[10],\n" " ((__u8x16)__a)[11], ((__u8x16)__a)[12], ((__u8x16)__a)[13],\n" " ((__u8x16)__a)[14], ((__u8x16)__a)[15]},\n" " __u16x8);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_i32x4_extend_low_i16x8(v128_t __a) {\n" " return (v128_t) __builtin_convertvector(\n" " (__i16x4){((__i16x8)__a)[0], ((__i16x8)__a)[1], ((__i16x8)__a)[2],\n" " ((__i16x8)__a)[3]},\n" " __i32x4);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_i32x4_extend_high_i16x8(v128_t __a) {\n" " return (v128_t) __builtin_convertvector(\n" " (__i16x4){((__i16x8)__a)[4], ((__i16x8)__a)[5], ((__i16x8)__a)[6],\n" " ((__i16x8)__a)[7]},\n" " 
__i32x4);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_u32x4_extend_low_u16x8(v128_t __a) {\n" " return (v128_t) __builtin_convertvector(\n" " (__u16x4){((__u16x8)__a)[0], ((__u16x8)__a)[1], ((__u16x8)__a)[2],\n" " ((__u16x8)__a)[3]},\n" " __u32x4);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_u32x4_extend_high_u16x8(v128_t __a) {\n" " return (v128_t) __builtin_convertvector(\n" " (__u16x4){((__u16x8)__a)[4], ((__u16x8)__a)[5], ((__u16x8)__a)[6],\n" " ((__u16x8)__a)[7]},\n" " __u32x4);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_i64x2_extend_low_i32x4(v128_t __a) {\n" " return (v128_t) __builtin_convertvector(\n" " (__i32x2){((__i32x4)__a)[0], ((__i32x4)__a)[1]}, __i64x2);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_i64x2_extend_high_i32x4(v128_t __a) {\n" " return (v128_t) __builtin_convertvector(\n" " (__i32x2){((__i32x4)__a)[2], ((__i32x4)__a)[3]}, __i64x2);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_u64x2_extend_low_u32x4(v128_t __a) {\n" " return (v128_t) __builtin_convertvector(\n" " (__u32x2){((__u32x4)__a)[0], ((__u32x4)__a)[1]}, __u64x2);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_u64x2_extend_high_u32x4(v128_t __a) {\n" " return (v128_t) __builtin_convertvector(\n" " (__u32x2){((__u32x4)__a)[2], ((__u32x4)__a)[3]}, __u64x2);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_i16x8_extadd_pairwise_i8x16(v128_t __a) {\n" " return (v128_t)__builtin_wasm_extadd_pairwise_i8x16_s_i16x8((__i8x16)__a);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_u16x8_extadd_pairwise_u8x16(v128_t __a) {\n" " return (v128_t)__builtin_wasm_extadd_pairwise_i8x16_u_i16x8((__u8x16)__a);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_i32x4_extadd_pairwise_i16x8(v128_t __a) {\n" " return (v128_t)__builtin_wasm_extadd_pairwise_i16x8_s_i32x4((__i16x8)__a);\n" "}\n" "\n" "static 
__inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_u32x4_extadd_pairwise_u16x8(v128_t __a) {\n" " return (v128_t)__builtin_wasm_extadd_pairwise_i16x8_u_i32x4((__u16x8)__a);\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_i16x8_extmul_low_i8x16(v128_t __a, v128_t __b) {\n" " return (v128_t)((__i16x8)wasm_i16x8_extend_low_i8x16(__a) *\n" " (__i16x8)wasm_i16x8_extend_low_i8x16(__b));\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_i16x8_extmul_high_i8x16(v128_t __a, v128_t __b) {\n" " return (v128_t)((__i16x8)wasm_i16x8_extend_high_i8x16(__a) *\n" " (__i16x8)wasm_i16x8_extend_high_i8x16(__b));\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_u16x8_extmul_low_u8x16(v128_t __a, v128_t __b) {\n" " return (v128_t)((__u16x8)wasm_u16x8_extend_low_u8x16(__a) *\n" " (__u16x8)wasm_u16x8_extend_low_u8x16(__b));\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_u16x8_extmul_high_u8x16(v128_t __a, v128_t __b) {\n" " return (v128_t)((__u16x8)wasm_u16x8_extend_high_u8x16(__a) *\n" " (__u16x8)wasm_u16x8_extend_high_u8x16(__b));\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_i32x4_extmul_low_i16x8(v128_t __a, v128_t __b) {\n" " return (v128_t)((__i32x4)wasm_i32x4_extend_low_i16x8(__a) *\n" " (__i32x4)wasm_i32x4_extend_low_i16x8(__b));\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_i32x4_extmul_high_i16x8(v128_t __a, v128_t __b) {\n" " return (v128_t)((__i32x4)wasm_i32x4_extend_high_i16x8(__a) *\n" " (__i32x4)wasm_i32x4_extend_high_i16x8(__b));\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_u32x4_extmul_low_u16x8(v128_t __a, v128_t __b) {\n" " return (v128_t)((__u32x4)wasm_u32x4_extend_low_u16x8(__a) *\n" " (__u32x4)wasm_u32x4_extend_low_u16x8(__b));\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_u32x4_extmul_high_u16x8(v128_t __a, v128_t __b) {\n" " return (v128_t)((__u32x4)wasm_u32x4_extend_high_u16x8(__a) *\n" " 
(__u32x4)wasm_u32x4_extend_high_u16x8(__b));\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_i64x2_extmul_low_i32x4(v128_t __a, v128_t __b) {\n" " return (v128_t)((__i64x2)wasm_i64x2_extend_low_i32x4(__a) *\n" " (__i64x2)wasm_i64x2_extend_low_i32x4(__b));\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_i64x2_extmul_high_i32x4(v128_t __a, v128_t __b) {\n" " return (v128_t)((__i64x2)wasm_i64x2_extend_high_i32x4(__a) *\n" " (__i64x2)wasm_i64x2_extend_high_i32x4(__b));\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_u64x2_extmul_low_u32x4(v128_t __a, v128_t __b) {\n" " return (v128_t)((__u64x2)wasm_u64x2_extend_low_u32x4(__a) *\n" " (__u64x2)wasm_u64x2_extend_low_u32x4(__b));\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS\n" "wasm_u64x2_extmul_high_u32x4(v128_t __a, v128_t __b) {\n" " return (v128_t)((__u64x2)wasm_u64x2_extend_high_u32x4(__a) *\n" " (__u64x2)wasm_u64x2_extend_high_u32x4(__b));\n" "}\n" "\n" "static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_q15mulr_sat(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)__builtin_wasm_q15mulr_sat_s_i16x8((__i16x8)__a, (__i16x8)__b);\n" "}\n" "\n" "// Old intrinsic names supported to ease transitioning to the standard names. 
Do\n" "// not use these; they will be removed in the near future.\n" "\n" "#define __DEPRECATED_FN_ATTRS(__replacement) \\\n" " __DEFAULT_FN_ATTRS __attribute__( \\\n" " (deprecated(\"use \" __replacement \" instead\", __replacement)))\n" "\n" "#define __WASM_STR(X) #X\n" "\n" "#ifdef __DEPRECATED\n" "#define __DEPRECATED_WASM_MACRO(__name, __replacement) \\\n" " _Pragma(__WASM_STR(GCC warning( \\\n" " \"'\" __name \"' is deprecated: use '\" __replacement \"' instead\")))\n" "#else\n" "#define __DEPRECATED_WASM_MACRO(__name, __replacement)\n" "#endif\n" "\n" "static __inline__ v128_t __DEPRECATED_FN_ATTRS(\"wasm_v128_load8_splat\")\n" "wasm_v8x16_load_splat(const void *__mem) {\n" " return wasm_v128_load8_splat(__mem);\n" "}\n" "\n" "static __inline__ v128_t __DEPRECATED_FN_ATTRS(\"wasm_v128_load16_splat\")\n" "wasm_v16x8_load_splat(const void *__mem) {\n" " return wasm_v128_load16_splat(__mem);\n" "}\n" "\n" "static __inline__ v128_t __DEPRECATED_FN_ATTRS(\"wasm_v128_load32_splat\")\n" "wasm_v32x4_load_splat(const void *__mem) {\n" " return wasm_v128_load32_splat(__mem);\n" "}\n" "\n" "static __inline__ v128_t __DEPRECATED_FN_ATTRS(\"wasm_v128_load64_splat\")\n" "wasm_v64x2_load_splat(const void *__mem) {\n" " return wasm_v128_load64_splat(__mem);\n" "}\n" "\n" "static __inline__ v128_t __DEPRECATED_FN_ATTRS(\"wasm_i16x8_load8x8\")\n" "wasm_i16x8_load_8x8(const void *__mem) {\n" " return wasm_i16x8_load8x8(__mem);\n" "}\n" "\n" "static __inline__ v128_t __DEPRECATED_FN_ATTRS(\"wasm_u16x8_load8x8\")\n" "wasm_u16x8_load_8x8(const void *__mem) {\n" " return wasm_u16x8_load8x8(__mem);\n" "}\n" "\n" "static __inline__ v128_t __DEPRECATED_FN_ATTRS(\"wasm_i32x4_load16x4\")\n" "wasm_i32x4_load_16x4(const void *__mem) {\n" " return wasm_i32x4_load16x4(__mem);\n" "}\n" "\n" "static __inline__ v128_t __DEPRECATED_FN_ATTRS(\"wasm_u32x4_load16x4\")\n" "wasm_u32x4_load_16x4(const void *__mem) {\n" " return wasm_u32x4_load16x4(__mem);\n" "}\n" "\n" "static __inline__ v128_t 
__DEPRECATED_FN_ATTRS(\"wasm_i64x2_load32x2\")\n" "wasm_i64x2_load_32x2(const void *__mem) {\n" " return wasm_i64x2_load32x2(__mem);\n" "}\n" "\n" "static __inline__ v128_t __DEPRECATED_FN_ATTRS(\"wasm_u64x2_load32x2\")\n" "wasm_u64x2_load_32x2(const void *__mem) {\n" " return wasm_u64x2_load32x2(__mem);\n" "}\n" "\n" "#define wasm_v8x16_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, \\\n" " __c7, __c8, __c9, __c10, __c11, __c12, __c13, \\\n" " __c14, __c15) \\\n" " __DEPRECATED_WASM_MACRO(\"wasm_v8x16_shuffle\", \"wasm_i8x16_shuffle\") \\\n" " wasm_i8x16_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7, \\\n" " __c8, __c9, __c10, __c11, __c12, __c13, __c14, __c15)\n" "\n" "#define wasm_v16x8_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, \\\n" " __c7) \\\n" " __DEPRECATED_WASM_MACRO(\"wasm_v16x8_shuffle\", \"wasm_i16x8_shuffle\") \\\n" " wasm_i16x8_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7)\n" "\n" "#define wasm_v32x4_shuffle(__a, __b, __c0, __c1, __c2, __c3) \\\n" " __DEPRECATED_WASM_MACRO(\"wasm_v32x4_shuffle\", \"wasm_i32x4_shuffle\") \\\n" " wasm_i32x4_shuffle(__a, __b, __c0, __c1, __c2, __c3)\n" "\n" "#define wasm_v64x2_shuffle(__a, __b, __c0, __c1) \\\n" " __DEPRECATED_WASM_MACRO(\"wasm_v64x2_shuffle\", \"wasm_i64x2_shuffle\") \\\n" " wasm_i64x2_shuffle(__a, __b, __c0, __c1)\n" "\n" "// Relaxed SIMD intrinsics\n" "\n" "#define __RELAXED_FN_ATTRS \\\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"relaxed-simd\"), \\\n" " __min_vector_width__(128)))\n" "\n" "static __inline__ v128_t __RELAXED_FN_ATTRS\n" "wasm_f32x4_relaxed_madd(v128_t __a, v128_t __b, v128_t __c) {\n" " return (v128_t)__builtin_wasm_relaxed_madd_f32x4((__f32x4)__a, (__f32x4)__b,\n" " (__f32x4)__c);\n" "}\n" "\n" "static __inline__ v128_t __RELAXED_FN_ATTRS\n" "wasm_f32x4_relaxed_nmadd(v128_t __a, v128_t __b, v128_t __c) {\n" " return (v128_t)__builtin_wasm_relaxed_nmadd_f32x4((__f32x4)__a, (__f32x4)__b,\n" " 
(__f32x4)__c);\n" "}\n" "\n" "static __inline__ v128_t __RELAXED_FN_ATTRS\n" "wasm_f64x2_relaxed_madd(v128_t __a, v128_t __b, v128_t __c) {\n" " return (v128_t)__builtin_wasm_relaxed_madd_f64x2((__f64x2)__a, (__f64x2)__b,\n" " (__f64x2)__c);\n" "}\n" "\n" "static __inline__ v128_t __RELAXED_FN_ATTRS\n" "wasm_f64x2_relaxed_nmadd(v128_t __a, v128_t __b, v128_t __c) {\n" " return (v128_t)__builtin_wasm_relaxed_nmadd_f64x2((__f64x2)__a, (__f64x2)__b,\n" " (__f64x2)__c);\n" "}\n" "\n" "static __inline__ v128_t __RELAXED_FN_ATTRS\n" "wasm_i8x16_relaxed_laneselect(v128_t __a, v128_t __b, v128_t __m) {\n" " return (v128_t)__builtin_wasm_relaxed_laneselect_i8x16(\n" " (__i8x16)__a, (__i8x16)__b, (__i8x16)__m);\n" "}\n" "\n" "static __inline__ v128_t __RELAXED_FN_ATTRS\n" "wasm_i16x8_relaxed_laneselect(v128_t __a, v128_t __b, v128_t __m) {\n" " return (v128_t)__builtin_wasm_relaxed_laneselect_i16x8(\n" " (__i16x8)__a, (__i16x8)__b, (__i16x8)__m);\n" "}\n" "\n" "static __inline__ v128_t __RELAXED_FN_ATTRS\n" "wasm_i32x4_relaxed_laneselect(v128_t __a, v128_t __b, v128_t __m) {\n" " return (v128_t)__builtin_wasm_relaxed_laneselect_i32x4(\n" " (__i32x4)__a, (__i32x4)__b, (__i32x4)__m);\n" "}\n" "\n" "static __inline__ v128_t __RELAXED_FN_ATTRS\n" "wasm_i64x2_relaxed_laneselect(v128_t __a, v128_t __b, v128_t __m) {\n" " return (v128_t)__builtin_wasm_relaxed_laneselect_i64x2(\n" " (__i64x2)__a, (__i64x2)__b, (__i64x2)__m);\n" "}\n" "\n" "static __inline__ v128_t __RELAXED_FN_ATTRS\n" "wasm_i8x16_relaxed_swizzle(v128_t __a, v128_t __s) {\n" " return (v128_t)__builtin_wasm_relaxed_swizzle_i8x16((__i8x16)__a,\n" " (__i8x16)__s);\n" "}\n" "\n" "static __inline__ v128_t __RELAXED_FN_ATTRS wasm_f32x4_relaxed_min(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)__builtin_wasm_relaxed_min_f32x4((__f32x4)__a, (__f32x4)__b);\n" "}\n" "\n" "static __inline__ v128_t __RELAXED_FN_ATTRS wasm_f32x4_relaxed_max(v128_t __a,\n" " v128_t __b) {\n" " return 
(v128_t)__builtin_wasm_relaxed_max_f32x4((__f32x4)__a, (__f32x4)__b);\n" "}\n" "\n" "static __inline__ v128_t __RELAXED_FN_ATTRS wasm_f64x2_relaxed_min(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)__builtin_wasm_relaxed_min_f64x2((__f64x2)__a, (__f64x2)__b);\n" "}\n" "\n" "static __inline__ v128_t __RELAXED_FN_ATTRS wasm_f64x2_relaxed_max(v128_t __a,\n" " v128_t __b) {\n" " return (v128_t)__builtin_wasm_relaxed_max_f64x2((__f64x2)__a, (__f64x2)__b);\n" "}\n" "\n" "static __inline__ v128_t __RELAXED_FN_ATTRS\n" "wasm_i32x4_relaxed_trunc_f32x4(v128_t __a) {\n" " return (v128_t)__builtin_wasm_relaxed_trunc_s_i32x4_f32x4((__f32x4)__a);\n" "}\n" "\n" "static __inline__ v128_t __RELAXED_FN_ATTRS\n" "wasm_u32x4_relaxed_trunc_f32x4(v128_t __a) {\n" " return (v128_t)__builtin_wasm_relaxed_trunc_u_i32x4_f32x4((__f32x4)__a);\n" "}\n" "\n" "static __inline__ v128_t __RELAXED_FN_ATTRS\n" "wasm_i32x4_relaxed_trunc_f64x2_zero(v128_t __a) {\n" " return (v128_t)__builtin_wasm_relaxed_trunc_s_zero_i32x4_f64x2((__f64x2)__a);\n" "}\n" "\n" "static __inline__ v128_t __RELAXED_FN_ATTRS\n" "wasm_u32x4_relaxed_trunc_f64x2_zero(v128_t __a) {\n" " return (v128_t)__builtin_wasm_relaxed_trunc_u_zero_i32x4_f64x2((__f64x2)__a);\n" "}\n" "\n" "static __inline__ v128_t __RELAXED_FN_ATTRS\n" "wasm_i16x8_relaxed_q15mulr(v128_t __a, v128_t __b) {\n" " return (v128_t)__builtin_wasm_relaxed_q15mulr_s_i16x8((__i16x8)__a,\n" " (__i16x8)__b);\n" "}\n" "\n" "static __inline__ v128_t __RELAXED_FN_ATTRS\n" "wasm_i16x8_relaxed_dot_i8x16_i7x16(v128_t __a, v128_t __b) {\n" " return (v128_t)__builtin_wasm_relaxed_dot_i8x16_i7x16_s_i16x8((__i8x16)__a,\n" " (__i8x16)__b);\n" "}\n" "\n" "static __inline__ v128_t __RELAXED_FN_ATTRS\n" "wasm_i32x4_relaxed_dot_i8x16_i7x16_add(v128_t __a, v128_t __b, v128_t __c) {\n" " return (v128_t)__builtin_wasm_relaxed_dot_i8x16_i7x16_add_s_i32x4(\n" " (__i8x16)__a, (__i8x16)__b, (__i32x4)__c);\n" "}\n" "\n" "// Deprecated intrinsics\n" "\n" "static __inline__ v128_t 
__DEPRECATED_FN_ATTRS(\"wasm_i8x16_swizzle\")\n" "wasm_v8x16_swizzle(v128_t __a, v128_t __b) {\n" " return wasm_i8x16_swizzle(__a, __b);\n" "}\n" "\n" "static __inline__ bool __DEPRECATED_FN_ATTRS(\"wasm_v128_any_true\")\n" "wasm_i8x16_any_true(v128_t __a) {\n" " return wasm_v128_any_true(__a);\n" "}\n" "\n" "static __inline__ bool __DEPRECATED_FN_ATTRS(\"wasm_v128_any_true\")\n" "wasm_i16x8_any_true(v128_t __a) {\n" " return wasm_v128_any_true(__a);\n" "}\n" "\n" "static __inline__ bool __DEPRECATED_FN_ATTRS(\"wasm_v128_any_true\")\n" "wasm_i32x4_any_true(v128_t __a) {\n" " return wasm_v128_any_true(__a);\n" "}\n" "\n" "static __inline__ v128_t __DEPRECATED_FN_ATTRS(\"wasm_i8x16_add_sat\")\n" "wasm_i8x16_add_saturate(v128_t __a, v128_t __b) {\n" " return wasm_i8x16_add_sat(__a, __b);\n" "}\n" "\n" "static __inline__ v128_t __DEPRECATED_FN_ATTRS(\"wasm_u8x16_add_sat\")\n" "wasm_u8x16_add_saturate(v128_t __a, v128_t __b) {\n" " return wasm_u8x16_add_sat(__a, __b);\n" "}\n" "\n" "static __inline__ v128_t __DEPRECATED_FN_ATTRS(\"wasm_i8x16_sub_sat\")\n" "wasm_i8x16_sub_saturate(v128_t __a, v128_t __b) {\n" " return wasm_i8x16_sub_sat(__a, __b);\n" "}\n" "\n" "static __inline__ v128_t __DEPRECATED_FN_ATTRS(\"wasm_u8x16_sub_sat\")\n" "wasm_u8x16_sub_saturate(v128_t __a, v128_t __b) {\n" " return wasm_u8x16_sub_sat(__a, __b);\n" "}\n" "\n" "static __inline__ v128_t __DEPRECATED_FN_ATTRS(\"wasm_i16x8_add_sat\")\n" "wasm_i16x8_add_saturate(v128_t __a, v128_t __b) {\n" " return wasm_i16x8_add_sat(__a, __b);\n" "}\n" "\n" "static __inline__ v128_t __DEPRECATED_FN_ATTRS(\"wasm_u16x8_add_sat\")\n" "wasm_u16x8_add_saturate(v128_t __a, v128_t __b) {\n" " return wasm_u16x8_add_sat(__a, __b);\n" "}\n" "\n" "static __inline__ v128_t __DEPRECATED_FN_ATTRS(\"wasm_i16x8_sub_sat\")\n" "wasm_i16x8_sub_saturate(v128_t __a, v128_t __b) {\n" " return wasm_i16x8_sub_sat(__a, __b);\n" "}\n" "\n" "static __inline__ v128_t __DEPRECATED_FN_ATTRS(\"wasm_u16x8_sub_sat\")\n" 
"wasm_u16x8_sub_saturate(v128_t __a, v128_t __b) {\n" " return wasm_u16x8_sub_sat(__a, __b);\n" "}\n" "\n" "static __inline__ v128_t __DEPRECATED_FN_ATTRS(\"wasm_i16x8_extend_low_i8x16\")\n" "wasm_i16x8_widen_low_i8x16(v128_t __a) {\n" " return wasm_i16x8_extend_low_i8x16(__a);\n" "}\n" "\n" "static __inline__ v128_t __DEPRECATED_FN_ATTRS(\"wasm_i16x8_extend_high_i8x16\")\n" "wasm_i16x8_widen_high_i8x16(v128_t __a) {\n" " return wasm_i16x8_extend_high_i8x16(__a);\n" "}\n" "\n" "static __inline__ v128_t __DEPRECATED_FN_ATTRS(\"wasm_u16x8_extend_low_u8x16\")\n" "wasm_i16x8_widen_low_u8x16(v128_t __a) {\n" " return wasm_u16x8_extend_low_u8x16(__a);\n" "}\n" "\n" "static __inline__ v128_t __DEPRECATED_FN_ATTRS(\"wasm_u16x8_extend_high_u8x16\")\n" "wasm_i16x8_widen_high_u8x16(v128_t __a) {\n" " return wasm_u16x8_extend_high_u8x16(__a);\n" "}\n" "\n" "static __inline__ v128_t __DEPRECATED_FN_ATTRS(\"wasm_i32x4_extend_low_i16x8\")\n" "wasm_i32x4_widen_low_i16x8(v128_t __a) {\n" " return wasm_i32x4_extend_low_i16x8(__a);\n" "}\n" "\n" "static __inline__ v128_t __DEPRECATED_FN_ATTRS(\"wasm_i32x4_extend_high_i16x8\")\n" "wasm_i32x4_widen_high_i16x8(v128_t __a) {\n" " return wasm_i32x4_extend_high_i16x8(__a);\n" "}\n" "\n" "static __inline__ v128_t __DEPRECATED_FN_ATTRS(\"wasm_u32x4_extend_low_u16x8\")\n" "wasm_i32x4_widen_low_u16x8(v128_t __a) {\n" " return wasm_u32x4_extend_low_u16x8(__a);\n" "}\n" "\n" "static __inline__ v128_t __DEPRECATED_FN_ATTRS(\"wasm_u32x4_extend_high_u16x8\")\n" "wasm_i32x4_widen_high_u16x8(v128_t __a) {\n" " return wasm_u32x4_extend_high_u16x8(__a);\n" "}\n" "\n" "static __inline__ v128_t __DEPRECATED_FN_ATTRS(\"wasm_i32x4_trunc_sat_f32x4\")\n" "wasm_i32x4_trunc_saturate_f32x4(v128_t __a) {\n" " return wasm_i32x4_trunc_sat_f32x4(__a);\n" "}\n" "\n" "static __inline__ v128_t __DEPRECATED_FN_ATTRS(\"wasm_u32x4_trunc_sat_f32x4\")\n" "wasm_u32x4_trunc_saturate_f32x4(v128_t __a) {\n" " return wasm_u32x4_trunc_sat_f32x4(__a);\n" "}\n" "\n" "// Undefine 
helper macros\n" "#undef __DEFAULT_FN_ATTRS\n" "#undef __DEPRECATED_FN_ATTRS\n" "\n" "#endif // __WASM_SIMD128_H\n" "" } , { "/builtins/wbnoinvdintrin.h" , "/*===-------------- wbnoinvdintrin.h - wbnoinvd intrinsic-------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __WBNOINVDINTRIN_H\n" "#define __WBNOINVDINTRIN_H\n" "\n" "static __inline__ void\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"wbnoinvd\")))\n" "_wbnoinvd (void)\n" "{\n" " __builtin_ia32_wbnoinvd ();\n" "}\n" "\n" "#endif /* __WBNOINVDINTRIN_H */\n" "" } , { "/builtins/wmmintrin.h" , "/*===---- wmmintrin.h - AES intrinsics ------------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __WMMINTRIN_H\n" "#define __WMMINTRIN_H\n" "\n" "#if !defined(__i386__) && !defined(__x86_64__)\n" "#error \"This header is only meant to be used on x86 and x64 architecture\"\n" "#endif\n" "\n" "#include \n" "\n" "#include <__wmmintrin_aes.h>\n" "\n" "#include <__wmmintrin_pclmul.h>\n" "\n" "#endif /* __WMMINTRIN_H */\n" "" } , { "/builtins/x86gprintrin.h" , "/*===--------------- x86gprintrin.h - X86 GPR intrinsics ------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for 
license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __X86GPRINTRIN_H\n" "#define __X86GPRINTRIN_H\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__HRESET__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__UINTR__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__CRC32__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__PRFCHI__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__RAOINT__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__CMPCCXADD__)\n" "#include \n" "#endif\n" "\n" "#if defined(__i386__)\n" "#define __SAVE_GPRBX \"mov {%%ebx, %%eax |eax, ebx};\"\n" "#define __RESTORE_GPRBX \"mov {%%eax, %%ebx |ebx, eax};\"\n" "#define __TMPGPR \"eax\"\n" "#else\n" "// When in 64-bit target, the 32-bit operands generate a 32-bit result,\n" "// zero-extended to a 64-bit result in the destination general-purpose,\n" "// It means \"mov x %ebx\" will clobber the higher 32 bits of rbx, so we\n" "// should preserve the 64-bit register rbx.\n" "#define __SAVE_GPRBX \"mov {%%rbx, %%rax |rax, rbx};\"\n" "#define __RESTORE_GPRBX \"mov {%%rax, %%rbx |rbx, rax};\"\n" "#define __TMPGPR \"rax\"\n" "#endif\n" "\n" "#define __SSC_MARK(__Tag) \\\n" " __asm__ __volatile__( __SAVE_GPRBX \\\n" " \"mov {%0, %%ebx|ebx, %0}; \" \\\n" " \".byte 0x64, 0x67, 0x90; \" \\\n" " __RESTORE_GPRBX \\\n" " ::\"i\"(__Tag) \\\n" " : __TMPGPR );\n" "\n" "#endif /* __X86GPRINTRIN_H */\n" "" } , { "/builtins/x86intrin.h" , 
"/*===---- x86intrin.h - X86 intrinsics -------------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __X86INTRIN_H\n" "#define __X86INTRIN_H\n" "\n" "#include \n" "\n" "#include \n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__3dNOW__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__PRFCHW__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__SSE4A__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__FMA4__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__XOP__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__TBM__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__LWP__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__MWAITX__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__CLZERO__)\n" "#include \n" "#endif\n" "\n" "#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \\\n" " defined(__RDPRU__)\n" "#include \n" "#endif\n" "\n" "#endif /* __X86INTRIN_H */\n" "" } , { "/builtins/xmmintrin.h" , "/*===---- xmmintrin.h - SSE intrinsics -------------------------------------===\n" 
" *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __XMMINTRIN_H\n" "#define __XMMINTRIN_H\n" "\n" "#if !defined(__i386__) && !defined(__x86_64__)\n" "#error \"This header is only meant to be used on x86 and x64 architecture\"\n" "#endif\n" "\n" "#include \n" "\n" "typedef int __v4si __attribute__((__vector_size__(16)));\n" "typedef float __v4sf __attribute__((__vector_size__(16)));\n" "typedef float __m128 __attribute__((__vector_size__(16), __aligned__(16)));\n" "\n" "typedef float __m128_u __attribute__((__vector_size__(16), __aligned__(1)));\n" "\n" "/* Unsigned types */\n" "typedef unsigned int __v4su __attribute__((__vector_size__(16)));\n" "\n" "/* This header should only be included in a hosted environment as it depends on\n" " * a standard library to provide allocation routines. */\n" "#if __STDC_HOSTED__\n" "#include \n" "#endif\n" "\n" "/* Define the default attributes for the functions in this file. 
*/\n" "#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__(\"sse\"), __min_vector_width__(128)))\n" "#define __DEFAULT_FN_ATTRS_MMX __attribute__((__always_inline__, __nodebug__, __target__(\"mmx,sse\"), __min_vector_width__(64)))\n" "\n" "/// Adds the 32-bit float values in the low-order bits of the operands.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VADDSS / ADDSS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float] containing one of the source operands.\n" "/// The lower 32 bits of this operand are used in the calculation.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float] containing one of the source operands.\n" "/// The lower 32 bits of this operand are used in the calculation.\n" "/// \\returns A 128-bit vector of [4 x float] whose lower 32 bits contain the sum\n" "/// of the lower 32 bits of both operands. The upper 96 bits are copied from\n" "/// the upper 96 bits of the first source operand.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_add_ss(__m128 __a, __m128 __b)\n" "{\n" " __a[0] += __b[0];\n" " return __a;\n" "}\n" "\n" "/// Adds two 128-bit vectors of [4 x float], and returns the results of\n" "/// the addition.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VADDPS / ADDPS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float] containing one of the source operands.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float] containing one of the source operands.\n" "/// \\returns A 128-bit vector of [4 x float] containing the sums of both\n" "/// operands.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_add_ps(__m128 __a, __m128 __b)\n" "{\n" " return (__m128)((__v4sf)__a + (__v4sf)__b);\n" "}\n" "\n" "/// Subtracts the 32-bit float value in the low-order bits of the second\n" "/// operand from the corresponding value in the first operand.\n" "///\n" "/// 
\\headerfile \n" "///\n" "/// This intrinsic corresponds to the VSUBSS / SUBSS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float] containing the minuend. The lower 32 bits\n" "/// of this operand are used in the calculation.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float] containing the subtrahend. The lower 32\n" "/// bits of this operand are used in the calculation.\n" "/// \\returns A 128-bit vector of [4 x float] whose lower 32 bits contain the\n" "/// difference of the lower 32 bits of both operands. The upper 96 bits are\n" "/// copied from the upper 96 bits of the first source operand.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_sub_ss(__m128 __a, __m128 __b)\n" "{\n" " __a[0] -= __b[0];\n" " return __a;\n" "}\n" "\n" "/// Subtracts each of the values of the second operand from the first\n" "/// operand, both of which are 128-bit vectors of [4 x float] and returns\n" "/// the results of the subtraction.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VSUBPS / SUBPS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float] containing the minuend.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float] containing the subtrahend.\n" "/// \\returns A 128-bit vector of [4 x float] containing the differences between\n" "/// both operands.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_sub_ps(__m128 __a, __m128 __b)\n" "{\n" " return (__m128)((__v4sf)__a - (__v4sf)__b);\n" "}\n" "\n" "/// Multiplies two 32-bit float values in the low-order bits of the\n" "/// operands.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMULSS / MULSS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float] containing one of the source operands.\n" "/// The lower 32 bits of this operand are used in the calculation.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float] containing one of the source 
operands.\n" "/// The lower 32 bits of this operand are used in the calculation.\n" "/// \\returns A 128-bit vector of [4 x float] containing the product of the lower\n" "/// 32 bits of both operands. The upper 96 bits are copied from the upper 96\n" "/// bits of the first source operand.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_mul_ss(__m128 __a, __m128 __b)\n" "{\n" " __a[0] *= __b[0];\n" " return __a;\n" "}\n" "\n" "/// Multiplies two 128-bit vectors of [4 x float] and returns the\n" "/// results of the multiplication.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMULPS / MULPS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float] containing one of the source operands.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float] containing one of the source operands.\n" "/// \\returns A 128-bit vector of [4 x float] containing the products of both\n" "/// operands.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_mul_ps(__m128 __a, __m128 __b)\n" "{\n" " return (__m128)((__v4sf)__a * (__v4sf)__b);\n" "}\n" "\n" "/// Divides the value in the low-order 32 bits of the first operand by\n" "/// the corresponding value in the second operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VDIVSS / DIVSS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float] containing the dividend. The lower 32\n" "/// bits of this operand are used in the calculation.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float] containing the divisor. The lower 32 bits\n" "/// of this operand are used in the calculation.\n" "/// \\returns A 128-bit vector of [4 x float] containing the quotients of the\n" "/// lower 32 bits of both operands. 
The upper 96 bits are copied from the\n" "/// upper 96 bits of the first source operand.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_div_ss(__m128 __a, __m128 __b)\n" "{\n" " __a[0] /= __b[0];\n" " return __a;\n" "}\n" "\n" "/// Divides two 128-bit vectors of [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VDIVPS / DIVPS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float] containing the dividend.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float] containing the divisor.\n" "/// \\returns A 128-bit vector of [4 x float] containing the quotients of both\n" "/// operands.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_div_ps(__m128 __a, __m128 __b)\n" "{\n" " return (__m128)((__v4sf)__a / (__v4sf)__b);\n" "}\n" "\n" "/// Calculates the square root of the value stored in the low-order bits\n" "/// of a 128-bit vector of [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VSQRTSS / SQRTSS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float]. 
The lower 32 bits of this operand are\n" "/// used in the calculation.\n" "/// \\returns A 128-bit vector of [4 x float] containing the square root of the\n" "/// value in the low-order bits of the operand.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_sqrt_ss(__m128 __a)\n" "{\n" " return (__m128)__builtin_ia32_sqrtss((__v4sf)__a);\n" "}\n" "\n" "/// Calculates the square roots of the values stored in a 128-bit vector\n" "/// of [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VSQRTPS / SQRTPS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float].\n" "/// \\returns A 128-bit vector of [4 x float] containing the square roots of the\n" "/// values in the operand.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_sqrt_ps(__m128 __a)\n" "{\n" " return __builtin_ia32_sqrtps((__v4sf)__a);\n" "}\n" "\n" "/// Calculates the approximate reciprocal of the value stored in the\n" "/// low-order bits of a 128-bit vector of [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VRCPSS / RCPSS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float]. 
The lower 32 bits of this operand are\n" "/// used in the calculation.\n" "/// \\returns A 128-bit vector of [4 x float] containing the approximate\n" "/// reciprocal of the value in the low-order bits of the operand.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_rcp_ss(__m128 __a)\n" "{\n" " return (__m128)__builtin_ia32_rcpss((__v4sf)__a);\n" "}\n" "\n" "/// Calculates the approximate reciprocals of the values stored in a\n" "/// 128-bit vector of [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VRCPPS / RCPPS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float].\n" "/// \\returns A 128-bit vector of [4 x float] containing the approximate\n" "/// reciprocals of the values in the operand.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_rcp_ps(__m128 __a)\n" "{\n" " return (__m128)__builtin_ia32_rcpps((__v4sf)__a);\n" "}\n" "\n" "/// Calculates the approximate reciprocal of the square root of the value\n" "/// stored in the low-order bits of a 128-bit vector of [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VRSQRTSS / RSQRTSS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float]. 
The lower 32 bits of this operand are\n" "/// used in the calculation.\n" "/// \\returns A 128-bit vector of [4 x float] containing the approximate\n" "/// reciprocal of the square root of the value in the low-order bits of the\n" "/// operand.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_rsqrt_ss(__m128 __a)\n" "{\n" " return __builtin_ia32_rsqrtss((__v4sf)__a);\n" "}\n" "\n" "/// Calculates the approximate reciprocals of the square roots of the\n" "/// values stored in a 128-bit vector of [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VRSQRTPS / RSQRTPS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float].\n" "/// \\returns A 128-bit vector of [4 x float] containing the approximate\n" "/// reciprocals of the square roots of the values in the operand.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_rsqrt_ps(__m128 __a)\n" "{\n" " return __builtin_ia32_rsqrtps((__v4sf)__a);\n" "}\n" "\n" "/// Compares two 32-bit float values in the low-order bits of both\n" "/// operands and returns the lesser value in the low-order bits of the\n" "/// vector of [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMINSS / MINSS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float] containing one of the operands. The lower\n" "/// 32 bits of this operand are used in the comparison.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float] containing one of the operands. The lower\n" "/// 32 bits of this operand are used in the comparison.\n" "/// \\returns A 128-bit vector of [4 x float] whose lower 32 bits contain the\n" "/// minimum value between both operands. 
The upper 96 bits are copied from\n" "/// the upper 96 bits of the first source operand.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_min_ss(__m128 __a, __m128 __b)\n" "{\n" " return __builtin_ia32_minss((__v4sf)__a, (__v4sf)__b);\n" "}\n" "\n" "/// Compares two 128-bit vectors of [4 x float] and returns the lesser\n" "/// of each pair of values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMINPS / MINPS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float] containing one of the operands.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float] containing one of the operands.\n" "/// \\returns A 128-bit vector of [4 x float] containing the minimum values\n" "/// between both operands.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_min_ps(__m128 __a, __m128 __b)\n" "{\n" " return __builtin_ia32_minps((__v4sf)__a, (__v4sf)__b);\n" "}\n" "\n" "/// Compares two 32-bit float values in the low-order bits of both\n" "/// operands and returns the greater value in the low-order bits of a 128-bit\n" "/// vector of [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMAXSS / MAXSS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float] containing one of the operands. The lower\n" "/// 32 bits of this operand are used in the comparison.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float] containing one of the operands. The lower\n" "/// 32 bits of this operand are used in the comparison.\n" "/// \\returns A 128-bit vector of [4 x float] whose lower 32 bits contain the\n" "/// maximum value between both operands. 
The upper 96 bits are copied from\n" "/// the upper 96 bits of the first source operand.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_max_ss(__m128 __a, __m128 __b)\n" "{\n" " return __builtin_ia32_maxss((__v4sf)__a, (__v4sf)__b);\n" "}\n" "\n" "/// Compares two 128-bit vectors of [4 x float] and returns the greater\n" "/// of each pair of values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMAXPS / MAXPS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float] containing one of the operands.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float] containing one of the operands.\n" "/// \\returns A 128-bit vector of [4 x float] containing the maximum values\n" "/// between both operands.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_max_ps(__m128 __a, __m128 __b)\n" "{\n" " return __builtin_ia32_maxps((__v4sf)__a, (__v4sf)__b);\n" "}\n" "\n" "/// Performs a bitwise AND of two 128-bit vectors of [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VANDPS / ANDPS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector containing one of the source operands.\n" "/// \\param __b\n" "/// A 128-bit vector containing one of the source operands.\n" "/// \\returns A 128-bit vector of [4 x float] containing the bitwise AND of the\n" "/// values between both operands.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_and_ps(__m128 __a, __m128 __b)\n" "{\n" " return (__m128)((__v4su)__a & (__v4su)__b);\n" "}\n" "\n" "/// Performs a bitwise AND of two 128-bit vectors of [4 x float], using\n" "/// the one's complement of the values contained in the first source\n" "/// operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VANDNPS / ANDNPS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float] containing the first source operand. 
The\n" "/// one's complement of this value is used in the bitwise AND.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float] containing the second source operand.\n" "/// \\returns A 128-bit vector of [4 x float] containing the bitwise AND of the\n" "/// one's complement of the first operand and the values in the second\n" "/// operand.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_andnot_ps(__m128 __a, __m128 __b)\n" "{\n" " return (__m128)(~(__v4su)__a & (__v4su)__b);\n" "}\n" "\n" "/// Performs a bitwise OR of two 128-bit vectors of [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VORPS / ORPS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float] containing one of the source operands.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float] containing one of the source operands.\n" "/// \\returns A 128-bit vector of [4 x float] containing the bitwise OR of the\n" "/// values between both operands.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_or_ps(__m128 __a, __m128 __b)\n" "{\n" " return (__m128)((__v4su)__a | (__v4su)__b);\n" "}\n" "\n" "/// Performs a bitwise exclusive OR of two 128-bit vectors of\n" "/// [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VXORPS / XORPS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float] containing one of the source operands.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float] containing one of the source operands.\n" "/// \\returns A 128-bit vector of [4 x float] containing the bitwise exclusive OR\n" "/// of the values between both operands.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_xor_ps(__m128 __a, __m128 __b)\n" "{\n" " return (__m128)((__v4su)__a ^ (__v4su)__b);\n" "}\n" "\n" "/// Compares two 32-bit float values in the low-order bits of both\n" "/// operands for equality and returns the result of the comparison in the\n" "/// 
low-order bits of a vector [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPEQSS / CMPEQSS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float] containing one of the operands. The lower\n" "/// 32 bits of this operand are used in the comparison.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float] containing one of the operands. The lower\n" "/// 32 bits of this operand are used in the comparison.\n" "/// \\returns A 128-bit vector of [4 x float] containing the comparison results\n" "/// in the low-order bits.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_cmpeq_ss(__m128 __a, __m128 __b)\n" "{\n" " return (__m128)__builtin_ia32_cmpeqss((__v4sf)__a, (__v4sf)__b);\n" "}\n" "\n" "/// Compares each of the corresponding 32-bit float values of the\n" "/// 128-bit vectors of [4 x float] for equality.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPEQPS / CMPEQPS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float].\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float].\n" "/// \\returns A 128-bit vector of [4 x float] containing the comparison results.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_cmpeq_ps(__m128 __a, __m128 __b)\n" "{\n" " return (__m128)__builtin_ia32_cmpeqps((__v4sf)__a, (__v4sf)__b);\n" "}\n" "\n" "/// Compares two 32-bit float values in the low-order bits of both\n" "/// operands to determine if the value in the first operand is less than the\n" "/// corresponding value in the second operand and returns the result of the\n" "/// comparison in the low-order bits of a vector of [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPLTSS / CMPLTSS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float] containing one of the operands. 
The lower\n" "/// 32 bits of this operand are used in the comparison.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float] containing one of the operands. The lower\n" "/// 32 bits of this operand are used in the comparison.\n" "/// \\returns A 128-bit vector of [4 x float] containing the comparison results\n" "/// in the low-order bits.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_cmplt_ss(__m128 __a, __m128 __b)\n" "{\n" " return (__m128)__builtin_ia32_cmpltss((__v4sf)__a, (__v4sf)__b);\n" "}\n" "\n" "/// Compares each of the corresponding 32-bit float values of the\n" "/// 128-bit vectors of [4 x float] to determine if the values in the first\n" "/// operand are less than those in the second operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPLTPS / CMPLTPS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float].\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float].\n" "/// \\returns A 128-bit vector of [4 x float] containing the comparison results.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_cmplt_ps(__m128 __a, __m128 __b)\n" "{\n" " return (__m128)__builtin_ia32_cmpltps((__v4sf)__a, (__v4sf)__b);\n" "}\n" "\n" "/// Compares two 32-bit float values in the low-order bits of both\n" "/// operands to determine if the value in the first operand is less than or\n" "/// equal to the corresponding value in the second operand and returns the\n" "/// result of the comparison in the low-order bits of a vector of\n" "/// [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPLESS / CMPLESS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float] containing one of the operands. The lower\n" "/// 32 bits of this operand are used in the comparison.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float] containing one of the operands. 
The lower\n" "/// 32 bits of this operand are used in the comparison.\n" "/// \\returns A 128-bit vector of [4 x float] containing the comparison results\n" "/// in the low-order bits.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_cmple_ss(__m128 __a, __m128 __b)\n" "{\n" " return (__m128)__builtin_ia32_cmpless((__v4sf)__a, (__v4sf)__b);\n" "}\n" "\n" "/// Compares each of the corresponding 32-bit float values of the\n" "/// 128-bit vectors of [4 x float] to determine if the values in the first\n" "/// operand are less than or equal to those in the second operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPLEPS / CMPLEPS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float].\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float].\n" "/// \\returns A 128-bit vector of [4 x float] containing the comparison results.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_cmple_ps(__m128 __a, __m128 __b)\n" "{\n" " return (__m128)__builtin_ia32_cmpleps((__v4sf)__a, (__v4sf)__b);\n" "}\n" "\n" "/// Compares two 32-bit float values in the low-order bits of both\n" "/// operands to determine if the value in the first operand is greater than\n" "/// the corresponding value in the second operand and returns the result of\n" "/// the comparison in the low-order bits of a vector of [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPLTSS / CMPLTSS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float] containing one of the operands. The lower\n" "/// 32 bits of this operand are used in the comparison.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float] containing one of the operands. 
The lower\n" "/// 32 bits of this operand are used in the comparison.\n" "/// \\returns A 128-bit vector of [4 x float] containing the comparison results\n" "/// in the low-order bits.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_cmpgt_ss(__m128 __a, __m128 __b)\n" "{\n" " return (__m128)__builtin_shufflevector((__v4sf)__a,\n" " (__v4sf)__builtin_ia32_cmpltss((__v4sf)__b, (__v4sf)__a),\n" " 4, 1, 2, 3);\n" "}\n" "\n" "/// Compares each of the corresponding 32-bit float values of the\n" "/// 128-bit vectors of [4 x float] to determine if the values in the first\n" "/// operand are greater than those in the second operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPLTPS / CMPLTPS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float].\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float].\n" "/// \\returns A 128-bit vector of [4 x float] containing the comparison results.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_cmpgt_ps(__m128 __a, __m128 __b)\n" "{\n" " return (__m128)__builtin_ia32_cmpltps((__v4sf)__b, (__v4sf)__a);\n" "}\n" "\n" "/// Compares two 32-bit float values in the low-order bits of both\n" "/// operands to determine if the value in the first operand is greater than\n" "/// or equal to the corresponding value in the second operand and returns\n" "/// the result of the comparison in the low-order bits of a vector of\n" "/// [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPLESS / CMPLESS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float] containing one of the operands. The lower\n" "/// 32 bits of this operand are used in the comparison.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float] containing one of the operands. 
The lower\n" "/// 32 bits of this operand are used in the comparison.\n" "/// \\returns A 128-bit vector of [4 x float] containing the comparison results\n" "/// in the low-order bits.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_cmpge_ss(__m128 __a, __m128 __b)\n" "{\n" " return (__m128)__builtin_shufflevector((__v4sf)__a,\n" " (__v4sf)__builtin_ia32_cmpless((__v4sf)__b, (__v4sf)__a),\n" " 4, 1, 2, 3);\n" "}\n" "\n" "/// Compares each of the corresponding 32-bit float values of the\n" "/// 128-bit vectors of [4 x float] to determine if the values in the first\n" "/// operand are greater than or equal to those in the second operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPLEPS / CMPLEPS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float].\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float].\n" "/// \\returns A 128-bit vector of [4 x float] containing the comparison results.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_cmpge_ps(__m128 __a, __m128 __b)\n" "{\n" " return (__m128)__builtin_ia32_cmpleps((__v4sf)__b, (__v4sf)__a);\n" "}\n" "\n" "/// Compares two 32-bit float values in the low-order bits of both\n" "/// operands for inequality and returns the result of the comparison in the\n" "/// low-order bits of a vector of [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPNEQSS / CMPNEQSS \n" "/// instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float] containing one of the operands. The lower\n" "/// 32 bits of this operand are used in the comparison.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float] containing one of the operands. 
The lower\n" "/// 32 bits of this operand are used in the comparison.\n" "/// \\returns A 128-bit vector of [4 x float] containing the comparison results\n" "/// in the low-order bits.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_cmpneq_ss(__m128 __a, __m128 __b)\n" "{\n" " return (__m128)__builtin_ia32_cmpneqss((__v4sf)__a, (__v4sf)__b);\n" "}\n" "\n" "/// Compares each of the corresponding 32-bit float values of the\n" "/// 128-bit vectors of [4 x float] for inequality.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPNEQPS / CMPNEQPS \n" "/// instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float].\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float].\n" "/// \\returns A 128-bit vector of [4 x float] containing the comparison results.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_cmpneq_ps(__m128 __a, __m128 __b)\n" "{\n" " return (__m128)__builtin_ia32_cmpneqps((__v4sf)__a, (__v4sf)__b);\n" "}\n" "\n" "/// Compares two 32-bit float values in the low-order bits of both\n" "/// operands to determine if the value in the first operand is not less than\n" "/// the corresponding value in the second operand and returns the result of\n" "/// the comparison in the low-order bits of a vector of [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPNLTSS / CMPNLTSS \n" "/// instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float] containing one of the operands. The lower\n" "/// 32 bits of this operand are used in the comparison.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float] containing one of the operands. 
The lower\n" "/// 32 bits of this operand are used in the comparison.\n" "/// \\returns A 128-bit vector of [4 x float] containing the comparison results\n" "/// in the low-order bits.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_cmpnlt_ss(__m128 __a, __m128 __b)\n" "{\n" " return (__m128)__builtin_ia32_cmpnltss((__v4sf)__a, (__v4sf)__b);\n" "}\n" "\n" "/// Compares each of the corresponding 32-bit float values of the\n" "/// 128-bit vectors of [4 x float] to determine if the values in the first\n" "/// operand are not less than those in the second operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPNLTPS / CMPNLTPS \n" "/// instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float].\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float].\n" "/// \\returns A 128-bit vector of [4 x float] containing the comparison results.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_cmpnlt_ps(__m128 __a, __m128 __b)\n" "{\n" " return (__m128)__builtin_ia32_cmpnltps((__v4sf)__a, (__v4sf)__b);\n" "}\n" "\n" "/// Compares two 32-bit float values in the low-order bits of both\n" "/// operands to determine if the value in the first operand is not less than\n" "/// or equal to the corresponding value in the second operand and returns\n" "/// the result of the comparison in the low-order bits of a vector of\n" "/// [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPNLESS / CMPNLESS \n" "/// instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float] containing one of the operands. The lower\n" "/// 32 bits of this operand are used in the comparison.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float] containing one of the operands. 
The lower\n" "/// 32 bits of this operand are used in the comparison.\n" "/// \\returns A 128-bit vector of [4 x float] containing the comparison results\n" "/// in the low-order bits.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_cmpnle_ss(__m128 __a, __m128 __b)\n" "{\n" " return (__m128)__builtin_ia32_cmpnless((__v4sf)__a, (__v4sf)__b);\n" "}\n" "\n" "/// Compares each of the corresponding 32-bit float values of the\n" "/// 128-bit vectors of [4 x float] to determine if the values in the first\n" "/// operand are not less than or equal to those in the second operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPNLEPS / CMPNLEPS \n" "/// instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float].\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float].\n" "/// \\returns A 128-bit vector of [4 x float] containing the comparison results.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_cmpnle_ps(__m128 __a, __m128 __b)\n" "{\n" " return (__m128)__builtin_ia32_cmpnleps((__v4sf)__a, (__v4sf)__b);\n" "}\n" "\n" "/// Compares two 32-bit float values in the low-order bits of both\n" "/// operands to determine if the value in the first operand is not greater\n" "/// than the corresponding value in the second operand and returns the\n" "/// result of the comparison in the low-order bits of a vector of\n" "/// [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPNLTSS / CMPNLTSS \n" "/// instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float] containing one of the operands. The lower\n" "/// 32 bits of this operand are used in the comparison.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float] containing one of the operands. 
The lower\n" "/// 32 bits of this operand are used in the comparison.\n" "/// \\returns A 128-bit vector of [4 x float] containing the comparison results\n" "/// in the low-order bits.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_cmpngt_ss(__m128 __a, __m128 __b)\n" "{\n" " return (__m128)__builtin_shufflevector((__v4sf)__a,\n" " (__v4sf)__builtin_ia32_cmpnltss((__v4sf)__b, (__v4sf)__a),\n" " 4, 1, 2, 3);\n" "}\n" "\n" "/// Compares each of the corresponding 32-bit float values of the\n" "/// 128-bit vectors of [4 x float] to determine if the values in the first\n" "/// operand are not greater than those in the second operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPNLTPS / CMPNLTPS \n" "/// instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float].\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float].\n" "/// \\returns A 128-bit vector of [4 x float] containing the comparison results.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_cmpngt_ps(__m128 __a, __m128 __b)\n" "{\n" " return (__m128)__builtin_ia32_cmpnltps((__v4sf)__b, (__v4sf)__a);\n" "}\n" "\n" "/// Compares two 32-bit float values in the low-order bits of both\n" "/// operands to determine if the value in the first operand is not greater\n" "/// than or equal to the corresponding value in the second operand and\n" "/// returns the result of the comparison in the low-order bits of a vector\n" "/// of [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPNLESS / CMPNLESS \n" "/// instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float] containing one of the operands. The lower\n" "/// 32 bits of this operand are used in the comparison.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float] containing one of the operands. 
The lower\n" "/// 32 bits of this operand are used in the comparison.\n" "/// \\returns A 128-bit vector of [4 x float] containing the comparison results\n" "/// in the low-order bits.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_cmpnge_ss(__m128 __a, __m128 __b)\n" "{\n" " return (__m128)__builtin_shufflevector((__v4sf)__a,\n" " (__v4sf)__builtin_ia32_cmpnless((__v4sf)__b, (__v4sf)__a),\n" " 4, 1, 2, 3);\n" "}\n" "\n" "/// Compares each of the corresponding 32-bit float values of the\n" "/// 128-bit vectors of [4 x float] to determine if the values in the first\n" "/// operand are not greater than or equal to those in the second operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPNLEPS / CMPNLEPS \n" "/// instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float].\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float].\n" "/// \\returns A 128-bit vector of [4 x float] containing the comparison results.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_cmpnge_ps(__m128 __a, __m128 __b)\n" "{\n" " return (__m128)__builtin_ia32_cmpnleps((__v4sf)__b, (__v4sf)__a);\n" "}\n" "\n" "/// Compares two 32-bit float values in the low-order bits of both\n" "/// operands to determine if the value in the first operand is ordered with\n" "/// respect to the corresponding value in the second operand and returns the\n" "/// result of the comparison in the low-order bits of a vector of\n" "/// [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPORDSS / CMPORDSS \n" "/// instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float] containing one of the operands. The lower\n" "/// 32 bits of this operand are used in the comparison.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float] containing one of the operands. 
The lower\n" "/// 32 bits of this operand are used in the comparison.\n" "/// \\returns A 128-bit vector of [4 x float] containing the comparison results\n" "/// in the low-order bits.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_cmpord_ss(__m128 __a, __m128 __b)\n" "{\n" " return (__m128)__builtin_ia32_cmpordss((__v4sf)__a, (__v4sf)__b);\n" "}\n" "\n" "/// Compares each of the corresponding 32-bit float values of the\n" "/// 128-bit vectors of [4 x float] to determine if the values in the first\n" "/// operand are ordered with respect to those in the second operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPORDPS / CMPORDPS \n" "/// instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float].\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float].\n" "/// \\returns A 128-bit vector of [4 x float] containing the comparison results.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_cmpord_ps(__m128 __a, __m128 __b)\n" "{\n" " return (__m128)__builtin_ia32_cmpordps((__v4sf)__a, (__v4sf)__b);\n" "}\n" "\n" "/// Compares two 32-bit float values in the low-order bits of both\n" "/// operands to determine if the value in the first operand is unordered\n" "/// with respect to the corresponding value in the second operand and\n" "/// returns the result of the comparison in the low-order bits of a vector\n" "/// of [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPUNORDSS / CMPUNORDSS \n" "/// instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float] containing one of the operands. The lower\n" "/// 32 bits of this operand are used in the comparison.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float] containing one of the operands. 
The lower\n" "/// 32 bits of this operand are used in the comparison.\n" "/// \\returns A 128-bit vector of [4 x float] containing the comparison results\n" "/// in the low-order bits.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_cmpunord_ss(__m128 __a, __m128 __b)\n" "{\n" " return (__m128)__builtin_ia32_cmpunordss((__v4sf)__a, (__v4sf)__b);\n" "}\n" "\n" "/// Compares each of the corresponding 32-bit float values of the\n" "/// 128-bit vectors of [4 x float] to determine if the values in the first\n" "/// operand are unordered with respect to those in the second operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCMPUNORDPS / CMPUNORDPS \n" "/// instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float].\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float].\n" "/// \\returns A 128-bit vector of [4 x float] containing the comparison results.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_cmpunord_ps(__m128 __a, __m128 __b)\n" "{\n" " return (__m128)__builtin_ia32_cmpunordps((__v4sf)__a, (__v4sf)__b);\n" "}\n" "\n" "/// Compares two 32-bit float values in the low-order bits of both\n" "/// operands for equality and returns the result of the comparison.\n" "///\n" "/// If either of the two lower 32-bit values is NaN, 0 is returned.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCOMISS / COMISS \n" "/// instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are\n" "/// used in the comparison.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are\n" "/// used in the comparison.\n" "/// \\returns An integer containing the comparison results. 
If either of the\n" "/// two lower 32-bit values is NaN, 0 is returned.\n" "static __inline__ int __DEFAULT_FN_ATTRS\n" "_mm_comieq_ss(__m128 __a, __m128 __b)\n" "{\n" " return __builtin_ia32_comieq((__v4sf)__a, (__v4sf)__b);\n" "}\n" "\n" "/// Compares two 32-bit float values in the low-order bits of both\n" "/// operands to determine if the first operand is less than the second\n" "/// operand and returns the result of the comparison.\n" "///\n" "/// If either of the two lower 32-bit values is NaN, 0 is returned.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCOMISS / COMISS \n" "/// instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are\n" "/// used in the comparison.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are\n" "/// used in the comparison.\n" "/// \\returns An integer containing the comparison results. If either of the two\n" "/// lower 32-bit values is NaN, 0 is returned.\n" "static __inline__ int __DEFAULT_FN_ATTRS\n" "_mm_comilt_ss(__m128 __a, __m128 __b)\n" "{\n" " return __builtin_ia32_comilt((__v4sf)__a, (__v4sf)__b);\n" "}\n" "\n" "/// Compares two 32-bit float values in the low-order bits of both\n" "/// operands to determine if the first operand is less than or equal to the\n" "/// second operand and returns the result of the comparison.\n" "///\n" "/// If either of the two lower 32-bit values is NaN, 0 is returned.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCOMISS / COMISS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are\n" "/// used in the comparison.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are\n" "/// used in the comparison.\n" "/// \\returns An integer containing the comparison results. 
If either of the two\n" "/// lower 32-bit values is NaN, 0 is returned.\n" "static __inline__ int __DEFAULT_FN_ATTRS\n" "_mm_comile_ss(__m128 __a, __m128 __b)\n" "{\n" " return __builtin_ia32_comile((__v4sf)__a, (__v4sf)__b);\n" "}\n" "\n" "/// Compares two 32-bit float values in the low-order bits of both\n" "/// operands to determine if the first operand is greater than the second\n" "/// operand and returns the result of the comparison.\n" "///\n" "/// If either of the two lower 32-bit values is NaN, 0 is returned.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCOMISS / COMISS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are\n" "/// used in the comparison.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are\n" "/// used in the comparison.\n" "/// \\returns An integer containing the comparison results. If either of the\n" "/// two lower 32-bit values is NaN, 0 is returned.\n" "static __inline__ int __DEFAULT_FN_ATTRS\n" "_mm_comigt_ss(__m128 __a, __m128 __b)\n" "{\n" " return __builtin_ia32_comigt((__v4sf)__a, (__v4sf)__b);\n" "}\n" "\n" "/// Compares two 32-bit float values in the low-order bits of both\n" "/// operands to determine if the first operand is greater than or equal to\n" "/// the second operand and returns the result of the comparison.\n" "///\n" "/// If either of the two lower 32-bit values is NaN, 0 is returned.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCOMISS / COMISS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are\n" "/// used in the comparison.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are\n" "/// used in the comparison.\n" "/// \\returns An integer containing the comparison results. 
If either of the two\n" "/// lower 32-bit values is NaN, 0 is returned.\n" "static __inline__ int __DEFAULT_FN_ATTRS\n" "_mm_comige_ss(__m128 __a, __m128 __b)\n" "{\n" " return __builtin_ia32_comige((__v4sf)__a, (__v4sf)__b);\n" "}\n" "\n" "/// Compares two 32-bit float values in the low-order bits of both\n" "/// operands to determine if the first operand is not equal to the second\n" "/// operand and returns the result of the comparison.\n" "///\n" "/// If either of the two lower 32-bit values is NaN, 1 is returned.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCOMISS / COMISS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are\n" "/// used in the comparison.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are\n" "/// used in the comparison.\n" "/// \\returns An integer containing the comparison results. If either of the\n" "/// two lower 32-bit values is NaN, 1 is returned.\n" "static __inline__ int __DEFAULT_FN_ATTRS\n" "_mm_comineq_ss(__m128 __a, __m128 __b)\n" "{\n" " return __builtin_ia32_comineq((__v4sf)__a, (__v4sf)__b);\n" "}\n" "\n" "/// Performs an unordered comparison of two 32-bit float values using\n" "/// the low-order bits of both operands to determine equality and returns\n" "/// the result of the comparison.\n" "///\n" "/// If either of the two lower 32-bit values is NaN, 0 is returned.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VUCOMISS / UCOMISS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are\n" "/// used in the comparison.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are\n" "/// used in the comparison.\n" "/// \\returns An integer containing the comparison results. 
If either of the two\n" "/// lower 32-bit values is NaN, 0 is returned.\n" "static __inline__ int __DEFAULT_FN_ATTRS\n" "_mm_ucomieq_ss(__m128 __a, __m128 __b)\n" "{\n" " return __builtin_ia32_ucomieq((__v4sf)__a, (__v4sf)__b);\n" "}\n" "\n" "/// Performs an unordered comparison of two 32-bit float values using\n" "/// the low-order bits of both operands to determine if the first operand is\n" "/// less than the second operand and returns the result of the comparison.\n" "///\n" "/// If either of the two lower 32-bit values is NaN, 0 is returned.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VUCOMISS / UCOMISS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are\n" "/// used in the comparison.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are\n" "/// used in the comparison.\n" "/// \\returns An integer containing the comparison results. If either of the two\n" "/// lower 32-bit values is NaN, 0 is returned.\n" "static __inline__ int __DEFAULT_FN_ATTRS\n" "_mm_ucomilt_ss(__m128 __a, __m128 __b)\n" "{\n" " return __builtin_ia32_ucomilt((__v4sf)__a, (__v4sf)__b);\n" "}\n" "\n" "/// Performs an unordered comparison of two 32-bit float values using\n" "/// the low-order bits of both operands to determine if the first operand is\n" "/// less than or equal to the second operand and returns the result of the\n" "/// comparison.\n" "///\n" "/// If either of the two lower 32-bit values is NaN, 0 is returned.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VUCOMISS / UCOMISS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are\n" "/// used in the comparison.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float]. 
The lower 32 bits of this operand are\n" "/// used in the comparison.\n" "/// \\returns An integer containing the comparison results. If either of the two\n" "/// lower 32-bit values is NaN, 0 is returned.\n" "static __inline__ int __DEFAULT_FN_ATTRS\n" "_mm_ucomile_ss(__m128 __a, __m128 __b)\n" "{\n" " return __builtin_ia32_ucomile((__v4sf)__a, (__v4sf)__b);\n" "}\n" "\n" "/// Performs an unordered comparison of two 32-bit float values using\n" "/// the low-order bits of both operands to determine if the first operand is\n" "/// greater than the second operand and returns the result of the\n" "/// comparison.\n" "///\n" "/// If either of the two lower 32-bit values is NaN, 0 is returned.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VUCOMISS / UCOMISS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are\n" "/// used in the comparison.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are\n" "/// used in the comparison.\n" "/// \\returns An integer containing the comparison results. If either of the two\n" "/// lower 32-bit values is NaN, 0 is returned.\n" "static __inline__ int __DEFAULT_FN_ATTRS\n" "_mm_ucomigt_ss(__m128 __a, __m128 __b)\n" "{\n" " return __builtin_ia32_ucomigt((__v4sf)__a, (__v4sf)__b);\n" "}\n" "\n" "/// Performs an unordered comparison of two 32-bit float values using\n" "/// the low-order bits of both operands to determine if the first operand is\n" "/// greater than or equal to the second operand and returns the result of\n" "/// the comparison.\n" "///\n" "/// If either of the two lower 32-bit values is NaN, 0 is returned.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VUCOMISS / UCOMISS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float]. 
The lower 32 bits of this operand are\n" "/// used in the comparison.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are\n" "/// used in the comparison.\n" "/// \\returns An integer containing the comparison results. If either of the two\n" "/// lower 32-bit values is NaN, 0 is returned.\n" "static __inline__ int __DEFAULT_FN_ATTRS\n" "_mm_ucomige_ss(__m128 __a, __m128 __b)\n" "{\n" " return __builtin_ia32_ucomige((__v4sf)__a, (__v4sf)__b);\n" "}\n" "\n" "/// Performs an unordered comparison of two 32-bit float values using\n" "/// the low-order bits of both operands to determine inequality and returns\n" "/// the result of the comparison.\n" "///\n" "/// If either of the two lower 32-bit values is NaN, 1 is returned.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VUCOMISS / UCOMISS instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are\n" "/// used in the comparison.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are\n" "/// used in the comparison.\n" "/// \\returns An integer containing the comparison results. If either of the two\n" "/// lower 32-bit values is NaN, 1 is returned.\n" "static __inline__ int __DEFAULT_FN_ATTRS\n" "_mm_ucomineq_ss(__m128 __a, __m128 __b)\n" "{\n" " return __builtin_ia32_ucomineq((__v4sf)__a, (__v4sf)__b);\n" "}\n" "\n" "/// Converts a float value contained in the lower 32 bits of a vector of\n" "/// [4 x float] into a 32-bit integer.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCVTSS2SI / CVTSS2SI \n" "/// instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float]. 
The lower 32 bits of this operand are\n" "/// used in the conversion.\n" "/// \\returns A 32-bit integer containing the converted value.\n" "static __inline__ int __DEFAULT_FN_ATTRS\n" "_mm_cvtss_si32(__m128 __a)\n" "{\n" " return __builtin_ia32_cvtss2si((__v4sf)__a);\n" "}\n" "\n" "/// Converts a float value contained in the lower 32 bits of a vector of\n" "/// [4 x float] into a 32-bit integer.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCVTSS2SI / CVTSS2SI \n" "/// instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are\n" "/// used in the conversion.\n" "/// \\returns A 32-bit integer containing the converted value.\n" "static __inline__ int __DEFAULT_FN_ATTRS\n" "_mm_cvt_ss2si(__m128 __a)\n" "{\n" " return _mm_cvtss_si32(__a);\n" "}\n" "\n" "#ifdef __x86_64__\n" "\n" "/// Converts a float value contained in the lower 32 bits of a vector of\n" "/// [4 x float] into a 64-bit integer.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCVTSS2SI / CVTSS2SI \n" "/// instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float]. 
The lower 32 bits of this operand are\n" "/// used in the conversion.\n" "/// \\returns A 64-bit integer containing the converted value.\n" "static __inline__ long long __DEFAULT_FN_ATTRS\n" "_mm_cvtss_si64(__m128 __a)\n" "{\n" " return __builtin_ia32_cvtss2si64((__v4sf)__a);\n" "}\n" "\n" "#endif\n" "\n" "/// Converts two low-order float values in a 128-bit vector of\n" "/// [4 x float] into a 64-bit vector of [2 x i32].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the CVTPS2PI instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float].\n" "/// \\returns A 64-bit integer vector containing the converted values.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX\n" "_mm_cvtps_pi32(__m128 __a)\n" "{\n" " return (__m64)__builtin_ia32_cvtps2pi((__v4sf)__a);\n" "}\n" "\n" "/// Converts two low-order float values in a 128-bit vector of\n" "/// [4 x float] into a 64-bit vector of [2 x i32].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the CVTPS2PI instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float].\n" "/// \\returns A 64-bit integer vector containing the converted values.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX\n" "_mm_cvt_ps2pi(__m128 __a)\n" "{\n" " return _mm_cvtps_pi32(__a);\n" "}\n" "\n" "/// Converts a float value contained in the lower 32 bits of a vector of\n" "/// [4 x float] into a 32-bit integer, truncating the result when it is\n" "/// inexact.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCVTTSS2SI / CVTTSS2SI \n" "/// instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float]. 
The lower 32 bits of this operand are\n" "/// used in the conversion.\n" "/// \\returns A 32-bit integer containing the converted value.\n" "static __inline__ int __DEFAULT_FN_ATTRS\n" "_mm_cvttss_si32(__m128 __a)\n" "{\n" " return __builtin_ia32_cvttss2si((__v4sf)__a);\n" "}\n" "\n" "/// Converts a float value contained in the lower 32 bits of a vector of\n" "/// [4 x float] into a 32-bit integer, truncating the result when it is\n" "/// inexact.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCVTTSS2SI / CVTTSS2SI \n" "/// instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are\n" "/// used in the conversion.\n" "/// \\returns A 32-bit integer containing the converted value.\n" "static __inline__ int __DEFAULT_FN_ATTRS\n" "_mm_cvtt_ss2si(__m128 __a)\n" "{\n" " return _mm_cvttss_si32(__a);\n" "}\n" "\n" "#ifdef __x86_64__\n" "/// Converts a float value contained in the lower 32 bits of a vector of\n" "/// [4 x float] into a 64-bit integer, truncating the result when it is\n" "/// inexact.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCVTTSS2SI / CVTTSS2SI \n" "/// instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float]. 
The lower 32 bits of this operand are\n" "/// used in the conversion.\n" "/// \\returns A 64-bit integer containing the converted value.\n" "static __inline__ long long __DEFAULT_FN_ATTRS\n" "_mm_cvttss_si64(__m128 __a)\n" "{\n" " return __builtin_ia32_cvttss2si64((__v4sf)__a);\n" "}\n" "#endif\n" "\n" "/// Converts two low-order float values in a 128-bit vector of\n" "/// [4 x float] into a 64-bit vector of [2 x i32], truncating the result\n" "/// when it is inexact.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the CVTTPS2PI / VTTPS2PI \n" "/// instructions.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float].\n" "/// \\returns A 64-bit integer vector containing the converted values.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX\n" "_mm_cvttps_pi32(__m128 __a)\n" "{\n" " return (__m64)__builtin_ia32_cvttps2pi((__v4sf)__a);\n" "}\n" "\n" "/// Converts two low-order float values in a 128-bit vector of [4 x\n" "/// float] into a 64-bit vector of [2 x i32], truncating the result when it\n" "/// is inexact.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the CVTTPS2PI instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float].\n" "/// \\returns A 64-bit integer vector containing the converted values.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX\n" "_mm_cvtt_ps2pi(__m128 __a)\n" "{\n" " return _mm_cvttps_pi32(__a);\n" "}\n" "\n" "/// Converts a 32-bit signed integer value into a floating point value\n" "/// and writes it to the lower 32 bits of the destination. 
The remaining\n" "/// higher order elements of the destination vector are copied from the\n" "/// corresponding elements in the first operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCVTSI2SS / CVTSI2SS instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float].\n" "/// \\param __b\n" "/// A 32-bit signed integer operand containing the value to be converted.\n" "/// \\returns A 128-bit vector of [4 x float] whose lower 32 bits contain the\n" "/// converted value of the second operand. The upper 96 bits are copied from\n" "/// the upper 96 bits of the first operand.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_cvtsi32_ss(__m128 __a, int __b)\n" "{\n" " __a[0] = __b;\n" " return __a;\n" "}\n" "\n" "/// Converts a 32-bit signed integer value into a floating point value\n" "/// and writes it to the lower 32 bits of the destination. The remaining\n" "/// higher order elements of the destination are copied from the\n" "/// corresponding elements in the first operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCVTSI2SS / CVTSI2SS instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float].\n" "/// \\param __b\n" "/// A 32-bit signed integer operand containing the value to be converted.\n" "/// \\returns A 128-bit vector of [4 x float] whose lower 32 bits contain the\n" "/// converted value of the second operand. The upper 96 bits are copied from\n" "/// the upper 96 bits of the first operand.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_cvt_si2ss(__m128 __a, int __b)\n" "{\n" " return _mm_cvtsi32_ss(__a, __b);\n" "}\n" "\n" "#ifdef __x86_64__\n" "\n" "/// Converts a 64-bit signed integer value into a floating point value\n" "/// and writes it to the lower 32 bits of the destination. 
The remaining\n" "/// higher order elements of the destination are copied from the\n" "/// corresponding elements in the first operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VCVTSI2SS / CVTSI2SS instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float].\n" "/// \\param __b\n" "/// A 64-bit signed integer operand containing the value to be converted.\n" "/// \\returns A 128-bit vector of [4 x float] whose lower 32 bits contain the\n" "/// converted value of the second operand. The upper 96 bits are copied from\n" "/// the upper 96 bits of the first operand.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_cvtsi64_ss(__m128 __a, long long __b)\n" "{\n" " __a[0] = __b;\n" " return __a;\n" "}\n" "\n" "#endif\n" "\n" "/// Converts two elements of a 64-bit vector of [2 x i32] into two\n" "/// floating point values and writes them to the lower 64-bits of the\n" "/// destination. The remaining higher order elements of the destination are\n" "/// copied from the corresponding elements in the first operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the CVTPI2PS instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float].\n" "/// \\param __b\n" "/// A 64-bit vector of [2 x i32]. The elements in this vector are converted\n" "/// and written to the corresponding low-order elements in the destination.\n" "/// \\returns A 128-bit vector of [4 x float] whose lower 64 bits contain the\n" "/// converted value of the second operand. The upper 64 bits are copied from\n" "/// the upper 64 bits of the first operand.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX\n" "_mm_cvtpi32_ps(__m128 __a, __m64 __b)\n" "{\n" " return __builtin_ia32_cvtpi2ps((__v4sf)__a, (__v2si)__b);\n" "}\n" "\n" "/// Converts two elements of a 64-bit vector of [2 x i32] into two\n" "/// floating point values and writes them to the lower 64-bits of the\n" "/// destination. 
The remaining higher order elements of the destination are\n" "/// copied from the corresponding elements in the first operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the CVTPI2PS instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float].\n" "/// \\param __b\n" "/// A 64-bit vector of [2 x i32]. The elements in this vector are converted\n" "/// and written to the corresponding low-order elements in the destination.\n" "/// \\returns A 128-bit vector of [4 x float] whose lower 64 bits contain the\n" "/// converted value from the second operand. The upper 64 bits are copied\n" "/// from the upper 64 bits of the first operand.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX\n" "_mm_cvt_pi2ps(__m128 __a, __m64 __b)\n" "{\n" " return _mm_cvtpi32_ps(__a, __b);\n" "}\n" "\n" "/// Extracts a float value contained in the lower 32 bits of a vector of\n" "/// [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic has no corresponding instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are\n" "/// used in the extraction.\n" "/// \\returns A 32-bit float containing the extracted value.\n" "static __inline__ float __DEFAULT_FN_ATTRS\n" "_mm_cvtss_f32(__m128 __a)\n" "{\n" " return __a[0];\n" "}\n" "\n" "/// Loads two packed float values from the address \\a __p into the\n" "/// high-order bits of a 128-bit vector of [4 x float]. The low-order bits\n" "/// are copied from the low-order bits of the first operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVHPD / MOVHPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float]. Bits [63:0] are written to bits [63:0]\n" "/// of the destination.\n" "/// \\param __p\n" "/// A pointer to two packed float values. 
Bits [63:0] are written to bits\n" "/// [127:64] of the destination.\n" "/// \\returns A 128-bit vector of [4 x float] containing the moved values.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_loadh_pi(__m128 __a, const __m64 *__p)\n" "{\n" " typedef float __mm_loadh_pi_v2f32 __attribute__((__vector_size__(8)));\n" " struct __mm_loadh_pi_struct {\n" " __mm_loadh_pi_v2f32 __u;\n" " } __attribute__((__packed__, __may_alias__));\n" " __mm_loadh_pi_v2f32 __b = ((const struct __mm_loadh_pi_struct*)__p)->__u;\n" " __m128 __bb = __builtin_shufflevector(__b, __b, 0, 1, 0, 1);\n" " return __builtin_shufflevector(__a, __bb, 0, 1, 4, 5);\n" "}\n" "\n" "/// Loads two packed float values from the address \\a __p into the\n" "/// low-order bits of a 128-bit vector of [4 x float]. The high-order bits\n" "/// are copied from the high-order bits of the first operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVLPD / MOVLPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float]. Bits [127:64] are written to bits\n" "/// [127:64] of the destination.\n" "/// \\param __p\n" "/// A pointer to two packed float values. Bits [63:0] are written to bits\n" "/// [63:0] of the destination.\n" "/// \\returns A 128-bit vector of [4 x float] containing the moved values.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_loadl_pi(__m128 __a, const __m64 *__p)\n" "{\n" " typedef float __mm_loadl_pi_v2f32 __attribute__((__vector_size__(8)));\n" " struct __mm_loadl_pi_struct {\n" " __mm_loadl_pi_v2f32 __u;\n" " } __attribute__((__packed__, __may_alias__));\n" " __mm_loadl_pi_v2f32 __b = ((const struct __mm_loadl_pi_struct*)__p)->__u;\n" " __m128 __bb = __builtin_shufflevector(__b, __b, 0, 1, 0, 1);\n" " return __builtin_shufflevector(__a, __bb, 4, 5, 2, 3);\n" "}\n" "\n" "/// Constructs a 128-bit floating-point vector of [4 x float]. 
The lower\n" "/// 32 bits of the vector are initialized with the single-precision\n" "/// floating-point value loaded from a specified memory location. The upper\n" "/// 96 bits are set to zero.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVSS / MOVSS instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a 32-bit memory location containing a single-precision\n" "/// floating-point value.\n" "/// \\returns An initialized 128-bit floating-point vector of [4 x float]. The\n" "/// lower 32 bits contain the value loaded from the memory location. The\n" "/// upper 96 bits are set to zero.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_load_ss(const float *__p)\n" "{\n" " struct __mm_load_ss_struct {\n" " float __u;\n" " } __attribute__((__packed__, __may_alias__));\n" " float __u = ((const struct __mm_load_ss_struct*)__p)->__u;\n" " return __extension__ (__m128){ __u, 0, 0, 0 };\n" "}\n" "\n" "/// Loads a 32-bit float value and duplicates it to all four vector\n" "/// elements of a 128-bit vector of [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VBROADCASTSS / MOVSS + shuffling \n" "/// instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a float value to be loaded and duplicated.\n" "/// \\returns A 128-bit vector of [4 x float] containing the loaded and\n" "/// duplicated values.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_load1_ps(const float *__p)\n" "{\n" " struct __mm_load1_ps_struct {\n" " float __u;\n" " } __attribute__((__packed__, __may_alias__));\n" " float __u = ((const struct __mm_load1_ps_struct*)__p)->__u;\n" " return __extension__ (__m128){ __u, __u, __u, __u };\n" "}\n" "\n" "#define _mm_load_ps1(p) _mm_load1_ps(p)\n" "\n" "/// Loads a 128-bit floating-point vector of [4 x float] from an aligned\n" "/// memory location.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVAPS / MOVAPS instruction.\n" 
"///\n" "/// \\param __p\n" "/// A pointer to a 128-bit memory location. The address of the memory\n" "/// location has to be 128-bit aligned.\n" "/// \\returns A 128-bit vector of [4 x float] containing the loaded values.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_load_ps(const float *__p)\n" "{\n" " return *(const __m128*)__p;\n" "}\n" "\n" "/// Loads a 128-bit floating-point vector of [4 x float] from an\n" "/// unaligned memory location.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVUPS / MOVUPS instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a 128-bit memory location. The address of the memory\n" "/// location does not have to be aligned.\n" "/// \\returns A 128-bit vector of [4 x float] containing the loaded values.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_loadu_ps(const float *__p)\n" "{\n" " struct __loadu_ps {\n" " __m128_u __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " return ((const struct __loadu_ps*)__p)->__v;\n" "}\n" "\n" "/// Loads four packed float values, in reverse order, from an aligned\n" "/// memory location to 32-bit elements in a 128-bit vector of [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVAPS / MOVAPS + shuffling \n" "/// instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a 128-bit memory location. 
The address of the memory\n" "/// location has to be 128-bit aligned.\n" "/// \\returns A 128-bit vector of [4 x float] containing the moved values, loaded\n" "/// in reverse order.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_loadr_ps(const float *__p)\n" "{\n" " __m128 __a = _mm_load_ps(__p);\n" " return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 3, 2, 1, 0);\n" "}\n" "\n" "/// Create a 128-bit vector of [4 x float] with undefined values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic has no corresponding instruction.\n" "///\n" "/// \\returns A 128-bit vector of [4 x float] containing undefined values.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_undefined_ps(void)\n" "{\n" " return (__m128)__builtin_ia32_undef128();\n" "}\n" "\n" "/// Constructs a 128-bit floating-point vector of [4 x float]. The lower\n" "/// 32 bits of the vector are initialized with the specified single-precision\n" "/// floating-point value. The upper 96 bits are set to zero.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVSS / MOVSS instruction.\n" "///\n" "/// \\param __w\n" "/// A single-precision floating-point value used to initialize the lower 32\n" "/// bits of the result.\n" "/// \\returns An initialized 128-bit floating-point vector of [4 x float]. The\n" "/// lower 32 bits contain the value provided in the source operand. 
The\n" "/// upper 96 bits are set to zero.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_set_ss(float __w)\n" "{\n" " return __extension__ (__m128){ __w, 0, 0, 0 };\n" "}\n" "\n" "/// Constructs a 128-bit floating-point vector of [4 x float], with each\n" "/// of the four single-precision floating-point vector elements set to the\n" "/// specified single-precision floating-point value.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPERMILPS / PERMILPS instruction.\n" "///\n" "/// \\param __w\n" "/// A single-precision floating-point value used to initialize each vector\n" "/// element of the result.\n" "/// \\returns An initialized 128-bit floating-point vector of [4 x float].\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_set1_ps(float __w)\n" "{\n" " return __extension__ (__m128){ __w, __w, __w, __w };\n" "}\n" "\n" "/* Microsoft specific. */\n" "/// Constructs a 128-bit floating-point vector of [4 x float], with each\n" "/// of the four single-precision floating-point vector elements set to the\n" "/// specified single-precision floating-point value.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPERMILPS / PERMILPS instruction.\n" "///\n" "/// \\param __w\n" "/// A single-precision floating-point value used to initialize each vector\n" "/// element of the result.\n" "/// \\returns An initialized 128-bit floating-point vector of [4 x float].\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_set_ps1(float __w)\n" "{\n" " return _mm_set1_ps(__w);\n" "}\n" "\n" "/// Constructs a 128-bit floating-point vector of [4 x float]\n" "/// initialized with the specified single-precision floating-point values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic is a utility function and does not correspond to a specific\n" "/// instruction.\n" "///\n" "/// \\param __z\n" "/// A single-precision floating-point value used to initialize bits [127:96]\n" "/// of the result.\n" 
"/// \\param __y\n" "/// A single-precision floating-point value used to initialize bits [95:64]\n" "/// of the result.\n" "/// \\param __x\n" "/// A single-precision floating-point value used to initialize bits [63:32]\n" "/// of the result.\n" "/// \\param __w\n" "/// A single-precision floating-point value used to initialize bits [31:0]\n" "/// of the result.\n" "/// \\returns An initialized 128-bit floating-point vector of [4 x float].\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_set_ps(float __z, float __y, float __x, float __w)\n" "{\n" " return __extension__ (__m128){ __w, __x, __y, __z };\n" "}\n" "\n" "/// Constructs a 128-bit floating-point vector of [4 x float],\n" "/// initialized in reverse order with the specified 32-bit single-precision\n" "/// float-point values.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic is a utility function and does not correspond to a specific\n" "/// instruction.\n" "///\n" "/// \\param __z\n" "/// A single-precision floating-point value used to initialize bits [31:0]\n" "/// of the result.\n" "/// \\param __y\n" "/// A single-precision floating-point value used to initialize bits [63:32]\n" "/// of the result.\n" "/// \\param __x\n" "/// A single-precision floating-point value used to initialize bits [95:64]\n" "/// of the result.\n" "/// \\param __w\n" "/// A single-precision floating-point value used to initialize bits [127:96]\n" "/// of the result.\n" "/// \\returns An initialized 128-bit floating-point vector of [4 x float].\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_setr_ps(float __z, float __y, float __x, float __w)\n" "{\n" " return __extension__ (__m128){ __z, __y, __x, __w };\n" "}\n" "\n" "/// Constructs a 128-bit floating-point vector of [4 x float] initialized\n" "/// to zero.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VXORPS / XORPS instruction.\n" "///\n" "/// \\returns An initialized 128-bit floating-point vector of [4 x float] 
with\n" "/// all elements set to zero.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_setzero_ps(void)\n" "{\n" " return __extension__ (__m128){ 0.0f, 0.0f, 0.0f, 0.0f };\n" "}\n" "\n" "/// Stores the upper 64 bits of a 128-bit vector of [4 x float] to a\n" "/// memory location.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VPEXTRQ / PEXTRQ instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a 64-bit memory location.\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float] containing the values to be stored.\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_mm_storeh_pi(__m64 *__p, __m128 __a)\n" "{\n" " typedef float __mm_storeh_pi_v2f32 __attribute__((__vector_size__(8)));\n" " struct __mm_storeh_pi_struct {\n" " __mm_storeh_pi_v2f32 __u;\n" " } __attribute__((__packed__, __may_alias__));\n" " ((struct __mm_storeh_pi_struct*)__p)->__u = __builtin_shufflevector(__a, __a, 2, 3);\n" "}\n" "\n" "/// Stores the lower 64 bits of a 128-bit vector of [4 x float] to a\n" "/// memory location.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVLPS / MOVLPS instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a memory location that will receive the float values.\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float] containing the values to be stored.\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_mm_storel_pi(__m64 *__p, __m128 __a)\n" "{\n" " typedef float __mm_storeh_pi_v2f32 __attribute__((__vector_size__(8)));\n" " struct __mm_storeh_pi_struct {\n" " __mm_storeh_pi_v2f32 __u;\n" " } __attribute__((__packed__, __may_alias__));\n" " ((struct __mm_storeh_pi_struct*)__p)->__u = __builtin_shufflevector(__a, __a, 0, 1);\n" "}\n" "\n" "/// Stores the lower 32 bits of a 128-bit vector of [4 x float] to a\n" "/// memory location.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVSS / MOVSS instruction.\n" "///\n" "/// \\param __p\n" "/// 
A pointer to a 32-bit memory location.\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float] containing the value to be stored.\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_mm_store_ss(float *__p, __m128 __a)\n" "{\n" " struct __mm_store_ss_struct {\n" " float __u;\n" " } __attribute__((__packed__, __may_alias__));\n" " ((struct __mm_store_ss_struct*)__p)->__u = __a[0];\n" "}\n" "\n" "/// Stores a 128-bit vector of [4 x float] to an unaligned memory\n" "/// location.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVUPS / MOVUPS instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a 128-bit memory location. The address of the memory\n" "/// location does not have to be aligned.\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float] containing the values to be stored.\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_mm_storeu_ps(float *__p, __m128 __a)\n" "{\n" " struct __storeu_ps {\n" " __m128_u __v;\n" " } __attribute__((__packed__, __may_alias__));\n" " ((struct __storeu_ps*)__p)->__v = __a;\n" "}\n" "\n" "/// Stores a 128-bit vector of [4 x float] into an aligned memory\n" "/// location.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVAPS / MOVAPS instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a 128-bit memory location. 
The address of the memory\n" "/// location has to be 16-byte aligned.\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float] containing the values to be stored.\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_mm_store_ps(float *__p, __m128 __a)\n" "{\n" " *(__m128*)__p = __a;\n" "}\n" "\n" "/// Stores the lower 32 bits of a 128-bit vector of [4 x float] into\n" "/// four contiguous elements in an aligned memory location.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to VMOVAPS / MOVAPS + shuffling \n" "/// instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a 128-bit memory location.\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float] whose lower 32 bits are stored to each\n" "/// of the four contiguous elements pointed by \\a __p.\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_mm_store1_ps(float *__p, __m128 __a)\n" "{\n" " __a = __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 0, 0, 0);\n" " _mm_store_ps(__p, __a);\n" "}\n" "\n" "/// Stores the lower 32 bits of a 128-bit vector of [4 x float] into\n" "/// four contiguous elements in an aligned memory location.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to VMOVAPS / MOVAPS + shuffling \n" "/// instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a 128-bit memory location.\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float] whose lower 32 bits are stored to each\n" "/// of the four contiguous elements pointed by \\a __p.\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_mm_store_ps1(float *__p, __m128 __a)\n" "{\n" " _mm_store1_ps(__p, __a);\n" "}\n" "\n" "/// Stores float values from a 128-bit vector of [4 x float] to an\n" "/// aligned memory location in reverse order.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVAPS / MOVAPS + shuffling \n" "/// instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a 128-bit memory location. 
The address of the memory\n" "/// location has to be 128-bit aligned.\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float] containing the values to be stored.\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_mm_storer_ps(float *__p, __m128 __a)\n" "{\n" " __a = __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 3, 2, 1, 0);\n" " _mm_store_ps(__p, __a);\n" "}\n" "\n" "#define _MM_HINT_ET0 7\n" "#define _MM_HINT_ET1 6\n" "#define _MM_HINT_T0 3\n" "#define _MM_HINT_T1 2\n" "#define _MM_HINT_T2 1\n" "#define _MM_HINT_NTA 0\n" "\n" "#ifndef _MSC_VER\n" "/* FIXME: We have to #define this because \"sel\" must be a constant integer, and\n" " Sema doesn't do any form of constant propagation yet. */\n" "\n" "/// Loads one cache line of data from the specified address to a location\n" "/// closer to the processor.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// void _mm_prefetch(const void *a, const int sel);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the PREFETCHNTA instruction.\n" "///\n" "/// \\param a\n" "/// A pointer to a memory location containing a cache line of data.\n" "/// \\param sel\n" "/// A predefined integer constant specifying the type of prefetch\n" "/// operation: \\n\n" "/// _MM_HINT_NTA: Move data using the non-temporal access (NTA) hint. The\n" "/// PREFETCHNTA instruction will be generated. \\n\n" "/// _MM_HINT_T0: Move data using the T0 hint. The PREFETCHT0 instruction will\n" "/// be generated. \\n\n" "/// _MM_HINT_T1: Move data using the T1 hint. The PREFETCHT1 instruction will\n" "/// be generated. \\n\n" "/// _MM_HINT_T2: Move data using the T2 hint. The PREFETCHT2 instruction will\n" "/// be generated.\n" "#define _mm_prefetch(a, sel) (__builtin_prefetch((const void *)(a), \\\n" " ((sel) >> 2) & 1, (sel) & 0x3))\n" "#endif\n" "\n" "/// Stores a 64-bit integer in the specified aligned memory location. 
To\n" "/// minimize caching, the data is flagged as non-temporal (unlikely to be\n" "/// used again soon).\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the MOVNTQ instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to an aligned memory location used to store the register value.\n" "/// \\param __a\n" "/// A 64-bit integer containing the value to be stored.\n" "static __inline__ void __DEFAULT_FN_ATTRS_MMX\n" "_mm_stream_pi(__m64 *__p, __m64 __a)\n" "{\n" " __builtin_ia32_movntq(__p, __a);\n" "}\n" "\n" "/// Moves packed float values from a 128-bit vector of [4 x float] to a\n" "/// 128-bit aligned memory location. To minimize caching, the data is flagged\n" "/// as non-temporal (unlikely to be used again soon).\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVNTPS / MOVNTPS instruction.\n" "///\n" "/// \\param __p\n" "/// A pointer to a 128-bit aligned memory location that will receive the\n" "/// single-precision floating-point values.\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float] containing the values to be moved.\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_mm_stream_ps(float *__p, __m128 __a)\n" "{\n" " __builtin_nontemporal_store((__v4sf)__a, (__v4sf*)__p);\n" "}\n" "\n" "#if defined(__cplusplus)\n" "extern \"C\" {\n" "#endif\n" "\n" "/// Forces strong memory ordering (serialization) between store\n" "/// instructions preceding this instruction and store instructions following\n" "/// this instruction, ensuring the system completes all previous stores\n" "/// before executing subsequent stores.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the SFENCE instruction.\n" "///\n" "void _mm_sfence(void);\n" "\n" "#if defined(__cplusplus)\n" "} // extern \"C\"\n" "#endif\n" "\n" "/// Extracts 16-bit element from a 64-bit vector of [4 x i16] and\n" "/// returns it, as specified by the immediate integer operand.\n" "///\n" "/// \\headerfile \n" 
"///\n" "/// \\code\n" "/// int _mm_extract_pi16(__m64 a, int n);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VPEXTRW / PEXTRW instruction.\n" "///\n" "/// \\param a\n" "/// A 64-bit vector of [4 x i16].\n" "/// \\param n\n" "/// An immediate integer operand that determines which bits are extracted: \\n\n" "/// 0: Bits [15:0] are copied to the destination. \\n\n" "/// 1: Bits [31:16] are copied to the destination. \\n\n" "/// 2: Bits [47:32] are copied to the destination. \\n\n" "/// 3: Bits [63:48] are copied to the destination.\n" "/// \\returns A 16-bit integer containing the extracted 16 bits of packed data.\n" "#define _mm_extract_pi16(a, n) \\\n" " ((int)__builtin_ia32_vec_ext_v4hi((__v4hi)a, (int)n))\n" "\n" "/// Copies data from the 64-bit vector of [4 x i16] to the destination,\n" "/// and inserts the lower 16-bits of an integer operand at the 16-bit offset\n" "/// specified by the immediate operand \\a n.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m64 _mm_insert_pi16(__m64 a, int d, int n);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the PINSRW instruction.\n" "///\n" "/// \\param a\n" "/// A 64-bit vector of [4 x i16].\n" "/// \\param d\n" "/// An integer. The lower 16-bit value from this operand is written to the\n" "/// destination at the offset specified by operand \\a n.\n" "/// \\param n\n" "/// An immediate integer operant that determines which the bits to be used\n" "/// in the destination. \\n\n" "/// 0: Bits [15:0] are copied to the destination. \\n\n" "/// 1: Bits [31:16] are copied to the destination. \\n\n" "/// 2: Bits [47:32] are copied to the destination. \\n\n" "/// 3: Bits [63:48] are copied to the destination. 
\\n\n" "/// The remaining bits in the destination are copied from the corresponding\n" "/// bits in operand \\a a.\n" "/// \\returns A 64-bit integer vector containing the copied packed data from the\n" "/// operands.\n" "#define _mm_insert_pi16(a, d, n) \\\n" " ((__m64)__builtin_ia32_vec_set_v4hi((__v4hi)a, (int)d, (int)n))\n" "\n" "/// Compares each of the corresponding packed 16-bit integer values of\n" "/// the 64-bit integer vectors, and writes the greater value to the\n" "/// corresponding bits in the destination.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PMAXSW instruction.\n" "///\n" "/// \\param __a\n" "/// A 64-bit integer vector containing one of the source operands.\n" "/// \\param __b\n" "/// A 64-bit integer vector containing one of the source operands.\n" "/// \\returns A 64-bit integer vector containing the comparison results.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX\n" "_mm_max_pi16(__m64 __a, __m64 __b)\n" "{\n" " return (__m64)__builtin_ia32_pmaxsw((__v4hi)__a, (__v4hi)__b);\n" "}\n" "\n" "/// Compares each of the corresponding packed 8-bit unsigned integer\n" "/// values of the 64-bit integer vectors, and writes the greater value to the\n" "/// corresponding bits in the destination.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PMAXUB instruction.\n" "///\n" "/// \\param __a\n" "/// A 64-bit integer vector containing one of the source operands.\n" "/// \\param __b\n" "/// A 64-bit integer vector containing one of the source operands.\n" "/// \\returns A 64-bit integer vector containing the comparison results.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX\n" "_mm_max_pu8(__m64 __a, __m64 __b)\n" "{\n" " return (__m64)__builtin_ia32_pmaxub((__v8qi)__a, (__v8qi)__b);\n" "}\n" "\n" "/// Compares each of the corresponding packed 16-bit integer values of\n" "/// the 64-bit integer vectors, and writes the lesser value to the\n" "/// corresponding bits in the 
destination.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PMINSW instruction.\n" "///\n" "/// \\param __a\n" "/// A 64-bit integer vector containing one of the source operands.\n" "/// \\param __b\n" "/// A 64-bit integer vector containing one of the source operands.\n" "/// \\returns A 64-bit integer vector containing the comparison results.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX\n" "_mm_min_pi16(__m64 __a, __m64 __b)\n" "{\n" " return (__m64)__builtin_ia32_pminsw((__v4hi)__a, (__v4hi)__b);\n" "}\n" "\n" "/// Compares each of the corresponding packed 8-bit unsigned integer\n" "/// values of the 64-bit integer vectors, and writes the lesser value to the\n" "/// corresponding bits in the destination.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PMINUB instruction.\n" "///\n" "/// \\param __a\n" "/// A 64-bit integer vector containing one of the source operands.\n" "/// \\param __b\n" "/// A 64-bit integer vector containing one of the source operands.\n" "/// \\returns A 64-bit integer vector containing the comparison results.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX\n" "_mm_min_pu8(__m64 __a, __m64 __b)\n" "{\n" " return (__m64)__builtin_ia32_pminub((__v8qi)__a, (__v8qi)__b);\n" "}\n" "\n" "/// Takes the most significant bit from each 8-bit element in a 64-bit\n" "/// integer vector to create an 8-bit mask value. 
Zero-extends the value to\n" "/// 32-bit integer and writes it to the destination.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PMOVMSKB instruction.\n" "///\n" "/// \\param __a\n" "/// A 64-bit integer vector containing the values with bits to be extracted.\n" "/// \\returns The most significant bit from each 8-bit element in \\a __a,\n" "/// written to bits [7:0].\n" "static __inline__ int __DEFAULT_FN_ATTRS_MMX\n" "_mm_movemask_pi8(__m64 __a)\n" "{\n" " return __builtin_ia32_pmovmskb((__v8qi)__a);\n" "}\n" "\n" "/// Multiplies packed 16-bit unsigned integer values and writes the\n" "/// high-order 16 bits of each 32-bit product to the corresponding bits in\n" "/// the destination.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PMULHUW instruction.\n" "///\n" "/// \\param __a\n" "/// A 64-bit integer vector containing one of the source operands.\n" "/// \\param __b\n" "/// A 64-bit integer vector containing one of the source operands.\n" "/// \\returns A 64-bit integer vector containing the products of both operands.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX\n" "_mm_mulhi_pu16(__m64 __a, __m64 __b)\n" "{\n" " return (__m64)__builtin_ia32_pmulhuw((__v4hi)__a, (__v4hi)__b);\n" "}\n" "\n" "/// Shuffles the 4 16-bit integers from a 64-bit integer vector to the\n" "/// destination, as specified by the immediate value operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m64 _mm_shuffle_pi16(__m64 a, const int n);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the PSHUFW instruction.\n" "///\n" "/// \\param a\n" "/// A 64-bit integer vector containing the values to be shuffled.\n" "/// \\param n\n" "/// An immediate value containing an 8-bit value specifying which elements to\n" "/// copy from \\a a. 
The destinations within the 64-bit destination are\n" "/// assigned values as follows: \\n\n" "/// Bits [1:0] are used to assign values to bits [15:0] in the\n" "/// destination. \\n\n" "/// Bits [3:2] are used to assign values to bits [31:16] in the\n" "/// destination. \\n\n" "/// Bits [5:4] are used to assign values to bits [47:32] in the\n" "/// destination. \\n\n" "/// Bits [7:6] are used to assign values to bits [63:48] in the\n" "/// destination. \\n\n" "/// Bit value assignments: \\n\n" "/// 00: assigned from bits [15:0] of \\a a. \\n\n" "/// 01: assigned from bits [31:16] of \\a a. \\n\n" "/// 10: assigned from bits [47:32] of \\a a. \\n\n" "/// 11: assigned from bits [63:48] of \\a a. \\n\n" "/// Note: To generate a mask, you can use the \\c _MM_SHUFFLE macro.\n" "/// _MM_SHUFFLE(b6, b4, b2, b0) can create an 8-bit mask of the form\n" "/// [b6, b4, b2, b0].\n" "/// \\returns A 64-bit integer vector containing the shuffled values.\n" "#define _mm_shuffle_pi16(a, n) \\\n" " ((__m64)__builtin_ia32_pshufw((__v4hi)(__m64)(a), (n)))\n" "\n" "/// Conditionally copies the values from each 8-bit element in the first\n" "/// 64-bit integer vector operand to the specified memory location, as\n" "/// specified by the most significant bit in the corresponding element in the\n" "/// second 64-bit integer vector operand.\n" "///\n" "/// To minimize caching, the data is flagged as non-temporal\n" "/// (unlikely to be used again soon).\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the MASKMOVQ instruction.\n" "///\n" "/// \\param __d\n" "/// A 64-bit integer vector containing the values with elements to be copied.\n" "/// \\param __n\n" "/// A 64-bit integer vector operand. The most significant bit from each 8-bit\n" "/// element determines whether the corresponding element in operand \\a __d\n" "/// is copied. 
If the most significant bit of a given element is 1, the\n" "/// corresponding element in operand \\a __d is copied.\n" "/// \\param __p\n" "/// A pointer to a 64-bit memory location that will receive the conditionally\n" "/// copied integer values. The address of the memory location does not have\n" "/// to be aligned.\n" "static __inline__ void __DEFAULT_FN_ATTRS_MMX\n" "_mm_maskmove_si64(__m64 __d, __m64 __n, char *__p)\n" "{\n" " __builtin_ia32_maskmovq((__v8qi)__d, (__v8qi)__n, __p);\n" "}\n" "\n" "/// Computes the rounded averages of the packed unsigned 8-bit integer\n" "/// values and writes the averages to the corresponding bits in the\n" "/// destination.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PAVGB instruction.\n" "///\n" "/// \\param __a\n" "/// A 64-bit integer vector containing one of the source operands.\n" "/// \\param __b\n" "/// A 64-bit integer vector containing one of the source operands.\n" "/// \\returns A 64-bit integer vector containing the averages of both operands.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX\n" "_mm_avg_pu8(__m64 __a, __m64 __b)\n" "{\n" " return (__m64)__builtin_ia32_pavgb((__v8qi)__a, (__v8qi)__b);\n" "}\n" "\n" "/// Computes the rounded averages of the packed unsigned 16-bit integer\n" "/// values and writes the averages to the corresponding bits in the\n" "/// destination.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PAVGW instruction.\n" "///\n" "/// \\param __a\n" "/// A 64-bit integer vector containing one of the source operands.\n" "/// \\param __b\n" "/// A 64-bit integer vector containing one of the source operands.\n" "/// \\returns A 64-bit integer vector containing the averages of both operands.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX\n" "_mm_avg_pu16(__m64 __a, __m64 __b)\n" "{\n" " return (__m64)__builtin_ia32_pavgw((__v4hi)__a, (__v4hi)__b);\n" "}\n" "\n" "/// Subtracts the corresponding 8-bit unsigned integer 
values of the two\n" "/// 64-bit vector operands and computes the absolute value for each of the\n" "/// difference. Then sum of the 8 absolute differences is written to the\n" "/// bits [15:0] of the destination; the remaining bits [63:16] are cleared.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the PSADBW instruction.\n" "///\n" "/// \\param __a\n" "/// A 64-bit integer vector containing one of the source operands.\n" "/// \\param __b\n" "/// A 64-bit integer vector containing one of the source operands.\n" "/// \\returns A 64-bit integer vector whose lower 16 bits contain the sums of the\n" "/// sets of absolute differences between both operands. The upper bits are\n" "/// cleared.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX\n" "_mm_sad_pu8(__m64 __a, __m64 __b)\n" "{\n" " return (__m64)__builtin_ia32_psadbw((__v8qi)__a, (__v8qi)__b);\n" "}\n" "\n" "#if defined(__cplusplus)\n" "extern \"C\" {\n" "#endif\n" "\n" "/// Returns the contents of the MXCSR register as a 32-bit unsigned\n" "/// integer value.\n" "///\n" "/// There are several groups of macros associated with this\n" "/// intrinsic, including:\n" "///
 <ul>\n" "///
 <li>\n" "/// For checking exception states: _MM_EXCEPT_INVALID, _MM_EXCEPT_DIV_ZERO,\n" "/// _MM_EXCEPT_DENORM, _MM_EXCEPT_OVERFLOW, _MM_EXCEPT_UNDERFLOW,\n" "/// _MM_EXCEPT_INEXACT. There is a convenience wrapper\n" "/// _MM_GET_EXCEPTION_STATE().\n" "///
 </li>\n" "///
 <li>\n" "/// For checking exception masks: _MM_MASK_UNDERFLOW, _MM_MASK_OVERFLOW,\n" "/// _MM_MASK_INVALID, _MM_MASK_DENORM, _MM_MASK_DIV_ZERO, _MM_MASK_INEXACT.\n" "/// There is a convenience wrapper _MM_GET_EXCEPTION_MASK().\n" "///
 </li>\n" "///
 <li>\n" "/// For checking rounding modes: _MM_ROUND_NEAREST, _MM_ROUND_DOWN,\n" "/// _MM_ROUND_UP, _MM_ROUND_TOWARD_ZERO. There is a convenience wrapper\n" "/// _MM_GET_ROUNDING_MODE().\n" "///
 </li>\n" "///
 <li>\n" "/// For checking flush-to-zero mode: _MM_FLUSH_ZERO_ON, _MM_FLUSH_ZERO_OFF.\n" "/// There is a convenience wrapper _MM_GET_FLUSH_ZERO_MODE().\n" "///
 </li>\n" "///
 <li>\n" "/// For checking denormals-are-zero mode: _MM_DENORMALS_ZERO_ON,\n" "/// _MM_DENORMALS_ZERO_OFF. There is a convenience wrapper\n" "/// _MM_GET_DENORMALS_ZERO_MODE().\n" "///
 </li>\n" "///
 </ul>\n" "///\n" "/// For example, the following expression checks if an overflow exception has\n" "/// occurred:\n" "/// \\code\n" "/// ( _mm_getcsr() & _MM_EXCEPT_OVERFLOW )\n" "/// \\endcode\n" "///\n" "/// The following expression gets the current rounding mode:\n" "/// \\code\n" "/// _MM_GET_ROUNDING_MODE()\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VSTMXCSR / STMXCSR instruction.\n" "///\n" "/// \\returns A 32-bit unsigned integer containing the contents of the MXCSR\n" "/// register.\n" "unsigned int _mm_getcsr(void);\n" "\n" "/// Sets the MXCSR register with the 32-bit unsigned integer value.\n" "///\n" "/// There are several groups of macros associated with this intrinsic,\n" "/// including:\n" "///
 <ul>\n" "///
 <li>\n" "/// For setting exception states: _MM_EXCEPT_INVALID, _MM_EXCEPT_DIV_ZERO,\n" "/// _MM_EXCEPT_DENORM, _MM_EXCEPT_OVERFLOW, _MM_EXCEPT_UNDERFLOW,\n" "/// _MM_EXCEPT_INEXACT. There is a convenience wrapper\n" "/// _MM_SET_EXCEPTION_STATE(x) where x is one of these macros.\n" "///
 </li>\n" "///
 <li>\n" "/// For setting exception masks: _MM_MASK_UNDERFLOW, _MM_MASK_OVERFLOW,\n" "/// _MM_MASK_INVALID, _MM_MASK_DENORM, _MM_MASK_DIV_ZERO, _MM_MASK_INEXACT.\n" "/// There is a convenience wrapper _MM_SET_EXCEPTION_MASK(x) where x is one\n" "/// of these macros.\n" "///
 </li>\n" "///
 <li>\n" "/// For setting rounding modes: _MM_ROUND_NEAREST, _MM_ROUND_DOWN,\n" "/// _MM_ROUND_UP, _MM_ROUND_TOWARD_ZERO. There is a convenience wrapper\n" "/// _MM_SET_ROUNDING_MODE(x) where x is one of these macros.\n" "///
 </li>\n" "///
 <li>\n" "/// For setting flush-to-zero mode: _MM_FLUSH_ZERO_ON, _MM_FLUSH_ZERO_OFF.\n" "/// There is a convenience wrapper _MM_SET_FLUSH_ZERO_MODE(x) where x is\n" "/// one of these macros.\n" "///
 </li>\n" "///
 <li>\n" "/// For setting denormals-are-zero mode: _MM_DENORMALS_ZERO_ON,\n" "/// _MM_DENORMALS_ZERO_OFF. There is a convenience wrapper\n" "/// _MM_SET_DENORMALS_ZERO_MODE(x) where x is one of these macros.\n" "///
 </li>\n" "///
 </ul>\n" "///\n" "/// For example, the following expression causes subsequent floating-point\n" "/// operations to round up:\n" "/// _mm_setcsr(_mm_getcsr() | _MM_ROUND_UP)\n" "///\n" "/// The following example sets the DAZ and FTZ flags:\n" "/// \\code\n" "/// void setFlags() {\n" "/// _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);\n" "/// _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);\n" "/// }\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VLDMXCSR / LDMXCSR instruction.\n" "///\n" "/// \\param __i\n" "/// A 32-bit unsigned integer value to be written to the MXCSR register.\n" "void _mm_setcsr(unsigned int __i);\n" "\n" "#if defined(__cplusplus)\n" "} // extern \"C\"\n" "#endif\n" "\n" "/// Selects 4 float values from the 128-bit operands of [4 x float], as\n" "/// specified by the immediate value operand.\n" "///\n" "/// \\headerfile \n" "///\n" "/// \\code\n" "/// __m128 _mm_shuffle_ps(__m128 a, __m128 b, const int mask);\n" "/// \\endcode\n" "///\n" "/// This intrinsic corresponds to the VSHUFPS / SHUFPS instruction.\n" "///\n" "/// \\param a\n" "/// A 128-bit vector of [4 x float].\n" "/// \\param b\n" "/// A 128-bit vector of [4 x float].\n" "/// \\param mask\n" "/// An immediate value containing an 8-bit value specifying which elements to\n" "/// copy from \\a a and \\a b. \\n\n" "/// Bits [3:0] specify the values copied from operand \\a a. \\n\n" "/// Bits [7:4] specify the values copied from operand \\a b. \\n\n" "/// The destinations within the 128-bit destination are assigned values as\n" "/// follows: \\n\n" "/// Bits [1:0] are used to assign values to bits [31:0] in the\n" "/// destination. \\n\n" "/// Bits [3:2] are used to assign values to bits [63:32] in the\n" "/// destination. \\n\n" "/// Bits [5:4] are used to assign values to bits [95:64] in the\n" "/// destination. \\n\n" "/// Bits [7:6] are used to assign values to bits [127:96] in the\n" "/// destination. 
\\n\n" "/// Bit value assignments: \\n\n" "/// 00: Bits [31:0] copied from the specified operand. \\n\n" "/// 01: Bits [63:32] copied from the specified operand. \\n\n" "/// 10: Bits [95:64] copied from the specified operand. \\n\n" "/// 11: Bits [127:96] copied from the specified operand. \\n\n" "/// Note: To generate a mask, you can use the \\c _MM_SHUFFLE macro.\n" "/// _MM_SHUFFLE(b6, b4, b2, b0) can create an 8-bit mask of the form\n" "/// [b6, b4, b2, b0].\n" "/// \\returns A 128-bit vector of [4 x float] containing the shuffled values.\n" "#define _mm_shuffle_ps(a, b, mask) \\\n" " ((__m128)__builtin_ia32_shufps((__v4sf)(__m128)(a), (__v4sf)(__m128)(b), \\\n" " (int)(mask)))\n" "\n" "/// Unpacks the high-order (index 2,3) values from two 128-bit vectors of\n" "/// [4 x float] and interleaves them into a 128-bit vector of [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VUNPCKHPS / UNPCKHPS instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float]. \\n\n" "/// Bits [95:64] are written to bits [31:0] of the destination. \\n\n" "/// Bits [127:96] are written to bits [95:64] of the destination.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float].\n" "/// Bits [95:64] are written to bits [63:32] of the destination. \\n\n" "/// Bits [127:96] are written to bits [127:96] of the destination.\n" "/// \\returns A 128-bit vector of [4 x float] containing the interleaved values.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_unpackhi_ps(__m128 __a, __m128 __b)\n" "{\n" " return __builtin_shufflevector((__v4sf)__a, (__v4sf)__b, 2, 6, 3, 7);\n" "}\n" "\n" "/// Unpacks the low-order (index 0,1) values from two 128-bit vectors of\n" "/// [4 x float] and interleaves them into a 128-bit vector of [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VUNPCKLPS / UNPCKLPS instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit vector of [4 x float]. 
\\n\n" "/// Bits [31:0] are written to bits [31:0] of the destination. \\n\n" "/// Bits [63:32] are written to bits [95:64] of the destination.\n" "/// \\param __b\n" "/// A 128-bit vector of [4 x float]. \\n\n" "/// Bits [31:0] are written to bits [63:32] of the destination. \\n\n" "/// Bits [63:32] are written to bits [127:96] of the destination.\n" "/// \\returns A 128-bit vector of [4 x float] containing the interleaved values.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_unpacklo_ps(__m128 __a, __m128 __b)\n" "{\n" " return __builtin_shufflevector((__v4sf)__a, (__v4sf)__b, 0, 4, 1, 5);\n" "}\n" "\n" "/// Constructs a 128-bit floating-point vector of [4 x float]. The lower\n" "/// 32 bits are set to the lower 32 bits of the second parameter. The upper\n" "/// 96 bits are set to the upper 96 bits of the first parameter.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VBLENDPS / BLENDPS / MOVSS \n" "/// instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit floating-point vector of [4 x float]. The upper 96 bits are\n" "/// written to the upper 96 bits of the result.\n" "/// \\param __b\n" "/// A 128-bit floating-point vector of [4 x float]. The lower 32 bits are\n" "/// written to the lower 32 bits of the result.\n" "/// \\returns A 128-bit floating-point vector of [4 x float].\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_move_ss(__m128 __a, __m128 __b)\n" "{\n" " __a[0] = __b[0];\n" " return __a;\n" "}\n" "\n" "/// Constructs a 128-bit floating-point vector of [4 x float]. The lower\n" "/// 64 bits are set to the upper 64 bits of the second parameter. The upper\n" "/// 64 bits are set to the upper 64 bits of the first parameter.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VUNPCKHPD / UNPCKHPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit floating-point vector of [4 x float]. 
The upper 64 bits are\n" "/// written to the upper 64 bits of the result.\n" "/// \\param __b\n" "/// A 128-bit floating-point vector of [4 x float]. The upper 64 bits are\n" "/// written to the lower 64 bits of the result.\n" "/// \\returns A 128-bit floating-point vector of [4 x float].\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_movehl_ps(__m128 __a, __m128 __b)\n" "{\n" " return __builtin_shufflevector((__v4sf)__a, (__v4sf)__b, 6, 7, 2, 3);\n" "}\n" "\n" "/// Constructs a 128-bit floating-point vector of [4 x float]. The lower\n" "/// 64 bits are set to the lower 64 bits of the first parameter. The upper\n" "/// 64 bits are set to the lower 64 bits of the second parameter.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VUNPCKLPD / UNPCKLPD instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit floating-point vector of [4 x float]. The lower 64 bits are\n" "/// written to the lower 64 bits of the result.\n" "/// \\param __b\n" "/// A 128-bit floating-point vector of [4 x float]. The lower 64 bits are\n" "/// written to the upper 64 bits of the result.\n" "/// \\returns A 128-bit floating-point vector of [4 x float].\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_movelh_ps(__m128 __a, __m128 __b)\n" "{\n" " return __builtin_shufflevector((__v4sf)__a, (__v4sf)__b, 0, 1, 4, 5);\n" "}\n" "\n" "/// Converts a 64-bit vector of [4 x i16] into a 128-bit vector of [4 x\n" "/// float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the CVTPI2PS + COMPOSITE instruction.\n" "///\n" "/// \\param __a\n" "/// A 64-bit vector of [4 x i16]. 
The elements of the destination are copied\n" "/// from the corresponding elements in this operand.\n" "/// \\returns A 128-bit vector of [4 x float] containing the copied and converted\n" "/// values from the operand.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX\n" "_mm_cvtpi16_ps(__m64 __a)\n" "{\n" " __m64 __b, __c;\n" " __m128 __r;\n" "\n" " __b = _mm_setzero_si64();\n" " __b = _mm_cmpgt_pi16(__b, __a);\n" " __c = _mm_unpackhi_pi16(__a, __b);\n" " __r = _mm_setzero_ps();\n" " __r = _mm_cvtpi32_ps(__r, __c);\n" " __r = _mm_movelh_ps(__r, __r);\n" " __c = _mm_unpacklo_pi16(__a, __b);\n" " __r = _mm_cvtpi32_ps(__r, __c);\n" "\n" " return __r;\n" "}\n" "\n" "/// Converts a 64-bit vector of 16-bit unsigned integer values into a\n" "/// 128-bit vector of [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the CVTPI2PS + COMPOSITE instruction.\n" "///\n" "/// \\param __a\n" "/// A 64-bit vector of 16-bit unsigned integer values. The elements of the\n" "/// destination are copied from the corresponding elements in this operand.\n" "/// \\returns A 128-bit vector of [4 x float] containing the copied and converted\n" "/// values from the operand.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX\n" "_mm_cvtpu16_ps(__m64 __a)\n" "{\n" " __m64 __b, __c;\n" " __m128 __r;\n" "\n" " __b = _mm_setzero_si64();\n" " __c = _mm_unpackhi_pi16(__a, __b);\n" " __r = _mm_setzero_ps();\n" " __r = _mm_cvtpi32_ps(__r, __c);\n" " __r = _mm_movelh_ps(__r, __r);\n" " __c = _mm_unpacklo_pi16(__a, __b);\n" " __r = _mm_cvtpi32_ps(__r, __c);\n" "\n" " return __r;\n" "}\n" "\n" "/// Converts the lower four 8-bit values from a 64-bit vector of [8 x i8]\n" "/// into a 128-bit vector of [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the CVTPI2PS + COMPOSITE instruction.\n" "///\n" "/// \\param __a\n" "/// A 64-bit vector of [8 x i8]. 
The elements of the destination are copied\n" "/// from the corresponding lower 4 elements in this operand.\n" "/// \\returns A 128-bit vector of [4 x float] containing the copied and converted\n" "/// values from the operand.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX\n" "_mm_cvtpi8_ps(__m64 __a)\n" "{\n" " __m64 __b;\n" "\n" " __b = _mm_setzero_si64();\n" " __b = _mm_cmpgt_pi8(__b, __a);\n" " __b = _mm_unpacklo_pi8(__a, __b);\n" "\n" " return _mm_cvtpi16_ps(__b);\n" "}\n" "\n" "/// Converts the lower four unsigned 8-bit integer values from a 64-bit\n" "/// vector of [8 x u8] into a 128-bit vector of [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the CVTPI2PS + COMPOSITE instruction.\n" "///\n" "/// \\param __a\n" "/// A 64-bit vector of unsigned 8-bit integer values. The elements of the\n" "/// destination are copied from the corresponding lower 4 elements in this\n" "/// operand.\n" "/// \\returns A 128-bit vector of [4 x float] containing the copied and converted\n" "/// values from the source operand.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX\n" "_mm_cvtpu8_ps(__m64 __a)\n" "{\n" " __m64 __b;\n" "\n" " __b = _mm_setzero_si64();\n" " __b = _mm_unpacklo_pi8(__a, __b);\n" "\n" " return _mm_cvtpi16_ps(__b);\n" "}\n" "\n" "/// Converts the two 32-bit signed integer values from each 64-bit vector\n" "/// operand of [2 x i32] into a 128-bit vector of [4 x float].\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the CVTPI2PS + COMPOSITE instruction.\n" "///\n" "/// \\param __a\n" "/// A 64-bit vector of [2 x i32]. The lower elements of the destination are\n" "/// copied from the elements in this operand.\n" "/// \\param __b\n" "/// A 64-bit vector of [2 x i32]. 
The upper elements of the destination are\n" "/// copied from the elements in this operand.\n" "/// \\returns A 128-bit vector of [4 x float] whose lower 64 bits contain the\n" "/// copied and converted values from the first operand. The upper 64 bits\n" "/// contain the copied and converted values from the second operand.\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX\n" "_mm_cvtpi32x2_ps(__m64 __a, __m64 __b)\n" "{\n" " __m128 __c;\n" "\n" " __c = _mm_setzero_ps();\n" " __c = _mm_cvtpi32_ps(__c, __b);\n" " __c = _mm_movelh_ps(__c, __c);\n" "\n" " return _mm_cvtpi32_ps(__c, __a);\n" "}\n" "\n" "/// Converts each single-precision floating-point element of a 128-bit\n" "/// floating-point vector of [4 x float] into a 16-bit signed integer, and\n" "/// packs the results into a 64-bit integer vector of [4 x i16].\n" "///\n" "/// If the floating-point element is NaN or infinity, or if the\n" "/// floating-point element is greater than 0x7FFFFFFF or less than -0x8000,\n" "/// it is converted to 0x8000. Otherwise if the floating-point element is\n" "/// greater than 0x7FFF, it is converted to 0x7FFF.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the CVTPS2PI + COMPOSITE instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit floating-point vector of [4 x float].\n" "/// \\returns A 64-bit integer vector of [4 x i16] containing the converted\n" "/// values.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX\n" "_mm_cvtps_pi16(__m128 __a)\n" "{\n" " __m64 __b, __c;\n" "\n" " __b = _mm_cvtps_pi32(__a);\n" " __a = _mm_movehl_ps(__a, __a);\n" " __c = _mm_cvtps_pi32(__a);\n" "\n" " return _mm_packs_pi32(__b, __c);\n" "}\n" "\n" "/// Converts each single-precision floating-point element of a 128-bit\n" "/// floating-point vector of [4 x float] into an 8-bit signed integer, and\n" "/// packs the results into the lower 32 bits of a 64-bit integer vector of\n" "/// [8 x i8]. 
The upper 32 bits of the vector are set to 0.\n" "///\n" "/// If the floating-point element is NaN or infinity, or if the\n" "/// floating-point element is greater than 0x7FFFFFFF or less than -0x80, it\n" "/// is converted to 0x80. Otherwise if the floating-point element is greater\n" "/// than 0x7F, it is converted to 0x7F.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the CVTPS2PI + COMPOSITE instruction.\n" "///\n" "/// \\param __a\n" "/// 128-bit floating-point vector of [4 x float].\n" "/// \\returns A 64-bit integer vector of [8 x i8]. The lower 32 bits contain the\n" "/// converted values and the uppper 32 bits are set to zero.\n" "static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX\n" "_mm_cvtps_pi8(__m128 __a)\n" "{\n" " __m64 __b, __c;\n" "\n" " __b = _mm_cvtps_pi16(__a);\n" " __c = _mm_setzero_si64();\n" "\n" " return _mm_packs_pi16(__b, __c);\n" "}\n" "\n" "/// Extracts the sign bits from each single-precision floating-point\n" "/// element of a 128-bit floating-point vector of [4 x float] and returns the\n" "/// sign bits in bits [0:3] of the result. Bits [31:4] of the result are set\n" "/// to zero.\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the VMOVMSKPS / MOVMSKPS instruction.\n" "///\n" "/// \\param __a\n" "/// A 128-bit floating-point vector of [4 x float].\n" "/// \\returns A 32-bit integer value. Bits [3:0] contain the sign bits from each\n" "/// single-precision floating-point element of the parameter. 
Bits [31:4] are\n" "/// set to zero.\n" "static __inline__ int __DEFAULT_FN_ATTRS\n" "_mm_movemask_ps(__m128 __a)\n" "{\n" " return __builtin_ia32_movmskps((__v4sf)__a);\n" "}\n" "\n" "\n" "#define _MM_ALIGN16 __attribute__((aligned(16)))\n" "\n" "#define _MM_SHUFFLE(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2) | (w))\n" "\n" "#define _MM_EXCEPT_INVALID (0x0001U)\n" "#define _MM_EXCEPT_DENORM (0x0002U)\n" "#define _MM_EXCEPT_DIV_ZERO (0x0004U)\n" "#define _MM_EXCEPT_OVERFLOW (0x0008U)\n" "#define _MM_EXCEPT_UNDERFLOW (0x0010U)\n" "#define _MM_EXCEPT_INEXACT (0x0020U)\n" "#define _MM_EXCEPT_MASK (0x003fU)\n" "\n" "#define _MM_MASK_INVALID (0x0080U)\n" "#define _MM_MASK_DENORM (0x0100U)\n" "#define _MM_MASK_DIV_ZERO (0x0200U)\n" "#define _MM_MASK_OVERFLOW (0x0400U)\n" "#define _MM_MASK_UNDERFLOW (0x0800U)\n" "#define _MM_MASK_INEXACT (0x1000U)\n" "#define _MM_MASK_MASK (0x1f80U)\n" "\n" "#define _MM_ROUND_NEAREST (0x0000U)\n" "#define _MM_ROUND_DOWN (0x2000U)\n" "#define _MM_ROUND_UP (0x4000U)\n" "#define _MM_ROUND_TOWARD_ZERO (0x6000U)\n" "#define _MM_ROUND_MASK (0x6000U)\n" "\n" "#define _MM_FLUSH_ZERO_MASK (0x8000U)\n" "#define _MM_FLUSH_ZERO_ON (0x8000U)\n" "#define _MM_FLUSH_ZERO_OFF (0x0000U)\n" "\n" "#define _MM_GET_EXCEPTION_MASK() (_mm_getcsr() & _MM_MASK_MASK)\n" "#define _MM_GET_EXCEPTION_STATE() (_mm_getcsr() & _MM_EXCEPT_MASK)\n" "#define _MM_GET_FLUSH_ZERO_MODE() (_mm_getcsr() & _MM_FLUSH_ZERO_MASK)\n" "#define _MM_GET_ROUNDING_MODE() (_mm_getcsr() & _MM_ROUND_MASK)\n" "\n" "#define _MM_SET_EXCEPTION_MASK(x) (_mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | (x)))\n" "#define _MM_SET_EXCEPTION_STATE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | (x)))\n" "#define _MM_SET_FLUSH_ZERO_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | (x)))\n" "#define _MM_SET_ROUNDING_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | (x)))\n" "\n" "#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \\\n" "do { \\\n" " __m128 tmp3, tmp2, tmp1, tmp0; \\\n" 
" tmp0 = _mm_unpacklo_ps((row0), (row1)); \\\n" " tmp2 = _mm_unpacklo_ps((row2), (row3)); \\\n" " tmp1 = _mm_unpackhi_ps((row0), (row1)); \\\n" " tmp3 = _mm_unpackhi_ps((row2), (row3)); \\\n" " (row0) = _mm_movelh_ps(tmp0, tmp2); \\\n" " (row1) = _mm_movehl_ps(tmp2, tmp0); \\\n" " (row2) = _mm_movelh_ps(tmp1, tmp3); \\\n" " (row3) = _mm_movehl_ps(tmp3, tmp1); \\\n" "} while (0)\n" "\n" "/* Aliases for compatibility. */\n" "#define _m_pextrw _mm_extract_pi16\n" "#define _m_pinsrw _mm_insert_pi16\n" "#define _m_pmaxsw _mm_max_pi16\n" "#define _m_pmaxub _mm_max_pu8\n" "#define _m_pminsw _mm_min_pi16\n" "#define _m_pminub _mm_min_pu8\n" "#define _m_pmovmskb _mm_movemask_pi8\n" "#define _m_pmulhuw _mm_mulhi_pu16\n" "#define _m_pshufw _mm_shuffle_pi16\n" "#define _m_maskmovq _mm_maskmove_si64\n" "#define _m_pavgb _mm_avg_pu8\n" "#define _m_pavgw _mm_avg_pu16\n" "#define _m_psadbw _mm_sad_pu8\n" "#define _m_ _mm_\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "#undef __DEFAULT_FN_ATTRS_MMX\n" "\n" "/* Ugly hack for backwards-compatibility (compatible with gcc) */\n" "#if defined(__SSE2__) && !__building_module(_Builtin_intrinsics)\n" "#include \n" "#endif\n" "\n" "#endif /* __XMMINTRIN_H */\n" "" } , { "/builtins/xopintrin.h" , "/*===---- xopintrin.h - XOP intrinsics -------------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __X86INTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __XOPINTRIN_H\n" "#define __XOPINTRIN_H\n" "\n" "#include \n" "\n" "/* Define the default attributes for the functions in this file. 
*/\n" "#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__(\"xop\"), __min_vector_width__(128)))\n" "#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__(\"xop\"), __min_vector_width__(256)))\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_maccs_epi16(__m128i __A, __m128i __B, __m128i __C)\n" "{\n" " return (__m128i)__builtin_ia32_vpmacssww((__v8hi)__A, (__v8hi)__B, (__v8hi)__C);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_macc_epi16(__m128i __A, __m128i __B, __m128i __C)\n" "{\n" " return (__m128i)__builtin_ia32_vpmacsww((__v8hi)__A, (__v8hi)__B, (__v8hi)__C);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_maccsd_epi16(__m128i __A, __m128i __B, __m128i __C)\n" "{\n" " return (__m128i)__builtin_ia32_vpmacsswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_maccd_epi16(__m128i __A, __m128i __B, __m128i __C)\n" "{\n" " return (__m128i)__builtin_ia32_vpmacswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_maccs_epi32(__m128i __A, __m128i __B, __m128i __C)\n" "{\n" " return (__m128i)__builtin_ia32_vpmacssdd((__v4si)__A, (__v4si)__B, (__v4si)__C);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_macc_epi32(__m128i __A, __m128i __B, __m128i __C)\n" "{\n" " return (__m128i)__builtin_ia32_vpmacsdd((__v4si)__A, (__v4si)__B, (__v4si)__C);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_maccslo_epi32(__m128i __A, __m128i __B, __m128i __C)\n" "{\n" " return (__m128i)__builtin_ia32_vpmacssdql((__v4si)__A, (__v4si)__B, (__v2di)__C);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_macclo_epi32(__m128i __A, __m128i __B, __m128i __C)\n" "{\n" " return (__m128i)__builtin_ia32_vpmacsdql((__v4si)__A, (__v4si)__B, (__v2di)__C);\n" "}\n" "\n" "static __inline__ __m128i 
__DEFAULT_FN_ATTRS\n" "_mm_maccshi_epi32(__m128i __A, __m128i __B, __m128i __C)\n" "{\n" " return (__m128i)__builtin_ia32_vpmacssdqh((__v4si)__A, (__v4si)__B, (__v2di)__C);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_macchi_epi32(__m128i __A, __m128i __B, __m128i __C)\n" "{\n" " return (__m128i)__builtin_ia32_vpmacsdqh((__v4si)__A, (__v4si)__B, (__v2di)__C);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_maddsd_epi16(__m128i __A, __m128i __B, __m128i __C)\n" "{\n" " return (__m128i)__builtin_ia32_vpmadcsswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_maddd_epi16(__m128i __A, __m128i __B, __m128i __C)\n" "{\n" " return (__m128i)__builtin_ia32_vpmadcswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_haddw_epi8(__m128i __A)\n" "{\n" " return (__m128i)__builtin_ia32_vphaddbw((__v16qi)__A);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_haddd_epi8(__m128i __A)\n" "{\n" " return (__m128i)__builtin_ia32_vphaddbd((__v16qi)__A);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_haddq_epi8(__m128i __A)\n" "{\n" " return (__m128i)__builtin_ia32_vphaddbq((__v16qi)__A);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_haddd_epi16(__m128i __A)\n" "{\n" " return (__m128i)__builtin_ia32_vphaddwd((__v8hi)__A);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_haddq_epi16(__m128i __A)\n" "{\n" " return (__m128i)__builtin_ia32_vphaddwq((__v8hi)__A);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_haddq_epi32(__m128i __A)\n" "{\n" " return (__m128i)__builtin_ia32_vphadddq((__v4si)__A);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_haddw_epu8(__m128i __A)\n" "{\n" " return (__m128i)__builtin_ia32_vphaddubw((__v16qi)__A);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" 
"_mm_haddd_epu8(__m128i __A)\n" "{\n" " return (__m128i)__builtin_ia32_vphaddubd((__v16qi)__A);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_haddq_epu8(__m128i __A)\n" "{\n" " return (__m128i)__builtin_ia32_vphaddubq((__v16qi)__A);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_haddd_epu16(__m128i __A)\n" "{\n" " return (__m128i)__builtin_ia32_vphadduwd((__v8hi)__A);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_haddq_epu16(__m128i __A)\n" "{\n" " return (__m128i)__builtin_ia32_vphadduwq((__v8hi)__A);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_haddq_epu32(__m128i __A)\n" "{\n" " return (__m128i)__builtin_ia32_vphaddudq((__v4si)__A);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_hsubw_epi8(__m128i __A)\n" "{\n" " return (__m128i)__builtin_ia32_vphsubbw((__v16qi)__A);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_hsubd_epi16(__m128i __A)\n" "{\n" " return (__m128i)__builtin_ia32_vphsubwd((__v8hi)__A);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_hsubq_epi32(__m128i __A)\n" "{\n" " return (__m128i)__builtin_ia32_vphsubdq((__v4si)__A);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_cmov_si128(__m128i __A, __m128i __B, __m128i __C)\n" "{\n" " return (__m128i)(((__v2du)__A & (__v2du)__C) | ((__v2du)__B & ~(__v2du)__C));\n" "}\n" "\n" "static __inline__ __m256i __DEFAULT_FN_ATTRS256\n" "_mm256_cmov_si256(__m256i __A, __m256i __B, __m256i __C)\n" "{\n" " return (__m256i)(((__v4du)__A & (__v4du)__C) | ((__v4du)__B & ~(__v4du)__C));\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_perm_epi8(__m128i __A, __m128i __B, __m128i __C)\n" "{\n" " return (__m128i)__builtin_ia32_vpperm((__v16qi)__A, (__v16qi)__B, (__v16qi)__C);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_rot_epi8(__m128i __A, __m128i __B)\n" "{\n" " return 
(__m128i)__builtin_ia32_vprotb((__v16qi)__A, (__v16qi)__B);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_rot_epi16(__m128i __A, __m128i __B)\n" "{\n" " return (__m128i)__builtin_ia32_vprotw((__v8hi)__A, (__v8hi)__B);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_rot_epi32(__m128i __A, __m128i __B)\n" "{\n" " return (__m128i)__builtin_ia32_vprotd((__v4si)__A, (__v4si)__B);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_rot_epi64(__m128i __A, __m128i __B)\n" "{\n" " return (__m128i)__builtin_ia32_vprotq((__v2di)__A, (__v2di)__B);\n" "}\n" "\n" "#define _mm_roti_epi8(A, N) \\\n" " ((__m128i)__builtin_ia32_vprotbi((__v16qi)(__m128i)(A), (N)))\n" "\n" "#define _mm_roti_epi16(A, N) \\\n" " ((__m128i)__builtin_ia32_vprotwi((__v8hi)(__m128i)(A), (N)))\n" "\n" "#define _mm_roti_epi32(A, N) \\\n" " ((__m128i)__builtin_ia32_vprotdi((__v4si)(__m128i)(A), (N)))\n" "\n" "#define _mm_roti_epi64(A, N) \\\n" " ((__m128i)__builtin_ia32_vprotqi((__v2di)(__m128i)(A), (N)))\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_shl_epi8(__m128i __A, __m128i __B)\n" "{\n" " return (__m128i)__builtin_ia32_vpshlb((__v16qi)__A, (__v16qi)__B);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_shl_epi16(__m128i __A, __m128i __B)\n" "{\n" " return (__m128i)__builtin_ia32_vpshlw((__v8hi)__A, (__v8hi)__B);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_shl_epi32(__m128i __A, __m128i __B)\n" "{\n" " return (__m128i)__builtin_ia32_vpshld((__v4si)__A, (__v4si)__B);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_shl_epi64(__m128i __A, __m128i __B)\n" "{\n" " return (__m128i)__builtin_ia32_vpshlq((__v2di)__A, (__v2di)__B);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_sha_epi8(__m128i __A, __m128i __B)\n" "{\n" " return (__m128i)__builtin_ia32_vpshab((__v16qi)__A, (__v16qi)__B);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" 
"_mm_sha_epi16(__m128i __A, __m128i __B)\n" "{\n" " return (__m128i)__builtin_ia32_vpshaw((__v8hi)__A, (__v8hi)__B);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_sha_epi32(__m128i __A, __m128i __B)\n" "{\n" " return (__m128i)__builtin_ia32_vpshad((__v4si)__A, (__v4si)__B);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_sha_epi64(__m128i __A, __m128i __B)\n" "{\n" " return (__m128i)__builtin_ia32_vpshaq((__v2di)__A, (__v2di)__B);\n" "}\n" "\n" "#define _mm_com_epu8(A, B, N) \\\n" " ((__m128i)__builtin_ia32_vpcomub((__v16qi)(__m128i)(A), \\\n" " (__v16qi)(__m128i)(B), (N)))\n" "\n" "#define _mm_com_epu16(A, B, N) \\\n" " ((__m128i)__builtin_ia32_vpcomuw((__v8hi)(__m128i)(A), \\\n" " (__v8hi)(__m128i)(B), (N)))\n" "\n" "#define _mm_com_epu32(A, B, N) \\\n" " ((__m128i)__builtin_ia32_vpcomud((__v4si)(__m128i)(A), \\\n" " (__v4si)(__m128i)(B), (N)))\n" "\n" "#define _mm_com_epu64(A, B, N) \\\n" " ((__m128i)__builtin_ia32_vpcomuq((__v2di)(__m128i)(A), \\\n" " (__v2di)(__m128i)(B), (N)))\n" "\n" "#define _mm_com_epi8(A, B, N) \\\n" " ((__m128i)__builtin_ia32_vpcomb((__v16qi)(__m128i)(A), \\\n" " (__v16qi)(__m128i)(B), (N)))\n" "\n" "#define _mm_com_epi16(A, B, N) \\\n" " ((__m128i)__builtin_ia32_vpcomw((__v8hi)(__m128i)(A), \\\n" " (__v8hi)(__m128i)(B), (N)))\n" "\n" "#define _mm_com_epi32(A, B, N) \\\n" " ((__m128i)__builtin_ia32_vpcomd((__v4si)(__m128i)(A), \\\n" " (__v4si)(__m128i)(B), (N)))\n" "\n" "#define _mm_com_epi64(A, B, N) \\\n" " ((__m128i)__builtin_ia32_vpcomq((__v2di)(__m128i)(A), \\\n" " (__v2di)(__m128i)(B), (N)))\n" "\n" "#define _MM_PCOMCTRL_LT 0\n" "#define _MM_PCOMCTRL_LE 1\n" "#define _MM_PCOMCTRL_GT 2\n" "#define _MM_PCOMCTRL_GE 3\n" "#define _MM_PCOMCTRL_EQ 4\n" "#define _MM_PCOMCTRL_NEQ 5\n" "#define _MM_PCOMCTRL_FALSE 6\n" "#define _MM_PCOMCTRL_TRUE 7\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comlt_epu8(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epu8(__A, __B, 
_MM_PCOMCTRL_LT);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comle_epu8(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_LE);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comgt_epu8(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_GT);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comge_epu8(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_GE);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comeq_epu8(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_EQ);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comneq_epu8(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_NEQ);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comfalse_epu8(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_FALSE);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comtrue_epu8(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_TRUE);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comlt_epu16(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_LT);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comle_epu16(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_LE);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comgt_epu16(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_GT);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comge_epu16(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_GE);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comeq_epu16(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epu16(__A, __B, 
_MM_PCOMCTRL_EQ);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comneq_epu16(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_NEQ);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comfalse_epu16(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_FALSE);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comtrue_epu16(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_TRUE);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comlt_epu32(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_LT);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comle_epu32(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_LE);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comgt_epu32(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_GT);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comge_epu32(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_GE);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comeq_epu32(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_EQ);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comneq_epu32(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_NEQ);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comfalse_epu32(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_FALSE);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comtrue_epu32(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_TRUE);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comlt_epu64(__m128i __A, __m128i __B)\n" "{\n" " return 
_mm_com_epu64(__A, __B, _MM_PCOMCTRL_LT);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comle_epu64(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_LE);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comgt_epu64(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_GT);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comge_epu64(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_GE);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comeq_epu64(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_EQ);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comneq_epu64(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_NEQ);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comfalse_epu64(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_FALSE);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comtrue_epu64(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_TRUE);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comlt_epi8(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_LT);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comle_epi8(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_LE);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comgt_epi8(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_GT);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comge_epi8(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_GE);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comeq_epi8(__m128i __A, __m128i __B)\n" "{\n" " return 
_mm_com_epi8(__A, __B, _MM_PCOMCTRL_EQ);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comneq_epi8(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_NEQ);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comfalse_epi8(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_FALSE);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comtrue_epi8(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_TRUE);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comlt_epi16(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_LT);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comle_epi16(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_LE);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comgt_epi16(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_GT);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comge_epi16(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_GE);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comeq_epi16(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_EQ);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comneq_epi16(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_NEQ);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comfalse_epi16(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_FALSE);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comtrue_epi16(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_TRUE);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comlt_epi32(__m128i __A, __m128i __B)\n" "{\n" 
" return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_LT);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comle_epi32(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_LE);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comgt_epi32(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_GT);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comge_epi32(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_GE);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comeq_epi32(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_EQ);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comneq_epi32(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_NEQ);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comfalse_epi32(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_FALSE);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comtrue_epi32(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_TRUE);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comlt_epi64(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_LT);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comle_epi64(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_LE);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comgt_epi64(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_GT);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comge_epi64(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_GE);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comeq_epi64(__m128i __A, __m128i __B)\n" 
"{\n" " return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_EQ);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comneq_epi64(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_NEQ);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comfalse_epi64(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_FALSE);\n" "}\n" "\n" "static __inline__ __m128i __DEFAULT_FN_ATTRS\n" "_mm_comtrue_epi64(__m128i __A, __m128i __B)\n" "{\n" " return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_TRUE);\n" "}\n" "\n" "#define _mm_permute2_pd(X, Y, C, I) \\\n" " ((__m128d)__builtin_ia32_vpermil2pd((__v2df)(__m128d)(X), \\\n" " (__v2df)(__m128d)(Y), \\\n" " (__v2di)(__m128i)(C), (I)))\n" "\n" "#define _mm256_permute2_pd(X, Y, C, I) \\\n" " ((__m256d)__builtin_ia32_vpermil2pd256((__v4df)(__m256d)(X), \\\n" " (__v4df)(__m256d)(Y), \\\n" " (__v4di)(__m256i)(C), (I)))\n" "\n" "#define _mm_permute2_ps(X, Y, C, I) \\\n" " ((__m128)__builtin_ia32_vpermil2ps((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), \\\n" " (__v4si)(__m128i)(C), (I)))\n" "\n" "#define _mm256_permute2_ps(X, Y, C, I) \\\n" " ((__m256)__builtin_ia32_vpermil2ps256((__v8sf)(__m256)(X), \\\n" " (__v8sf)(__m256)(Y), \\\n" " (__v8si)(__m256i)(C), (I)))\n" "\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_frcz_ss(__m128 __A)\n" "{\n" " return (__m128)__builtin_ia32_vfrczss((__v4sf)__A);\n" "}\n" "\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS\n" "_mm_frcz_sd(__m128d __A)\n" "{\n" " return (__m128d)__builtin_ia32_vfrczsd((__v2df)__A);\n" "}\n" "\n" "static __inline__ __m128 __DEFAULT_FN_ATTRS\n" "_mm_frcz_ps(__m128 __A)\n" "{\n" " return (__m128)__builtin_ia32_vfrczps((__v4sf)__A);\n" "}\n" "\n" "static __inline__ __m128d __DEFAULT_FN_ATTRS\n" "_mm_frcz_pd(__m128d __A)\n" "{\n" " return (__m128d)__builtin_ia32_vfrczpd((__v2df)__A);\n" "}\n" "\n" "static __inline__ __m256 __DEFAULT_FN_ATTRS256\n" "_mm256_frcz_ps(__m256 __A)\n" "{\n" " return 
(__m256)__builtin_ia32_vfrczps256((__v8sf)__A);\n" "}\n" "\n" "static __inline__ __m256d __DEFAULT_FN_ATTRS256\n" "_mm256_frcz_pd(__m256d __A)\n" "{\n" " return (__m256d)__builtin_ia32_vfrczpd256((__v4df)__A);\n" "}\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "#undef __DEFAULT_FN_ATTRS256\n" "\n" "#endif /* __XOPINTRIN_H */\n" "" } , { "/builtins/xsavecintrin.h" , "/*===---- xsavecintrin.h - XSAVEC intrinsic --------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __XSAVECINTRIN_H\n" "#define __XSAVECINTRIN_H\n" "\n" "/* Define the default attributes for the functions in this file. */\n" "#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__(\"xsavec\")))\n" "\n" "/// Performs a full or partial save of processor state to the memory at\n" "/// \\a __p. 
The exact state saved depends on the 64-bit mask \\a __m and\n" "/// processor control register \\c XCR0.\n" "///\n" "/// \\code{.operation}\n" "/// mask[62:0] := __m[62:0] AND XCR0[62:0]\n" "/// FOR i := 0 TO 62\n" "/// IF mask[i] == 1\n" "/// CASE (i) OF\n" "/// 0: save X87 FPU state\n" "/// 1: save SSE state\n" "/// DEFAULT: __p.Ext_Save_Area[i] := ProcessorState[i]\n" "/// FI\n" "/// ENDFOR\n" "/// __p.Header.XSTATE_BV[62:0] := INIT_FUNCTION(mask[62:0])\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c XSAVEC instruction.\n" "///\n" "/// \\param __p\n" "/// Pointer to the save area; must be 64-byte aligned.\n" "/// \\param __m\n" "/// A 64-bit mask indicating what state should be saved.\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_xsavec(void *__p, unsigned long long __m) {\n" " __builtin_ia32_xsavec(__p, __m);\n" "}\n" "\n" "#ifdef __x86_64__\n" "/// Performs a full or partial save of processor state to the memory at\n" "/// \\a __p. 
The exact state saved depends on the 64-bit mask \\a __m and\n" "/// processor control register \\c XCR0.\n" "///\n" "/// \\code{.operation}\n" "/// mask[62:0] := __m[62:0] AND XCR0[62:0]\n" "/// FOR i := 0 TO 62\n" "/// IF mask[i] == 1\n" "/// CASE (i) OF\n" "/// 0: save X87 FPU state\n" "/// 1: save SSE state\n" "/// DEFAULT: __p.Ext_Save_Area[i] := ProcessorState[i]\n" "/// FI\n" "/// ENDFOR\n" "/// __p.Header.XSTATE_BV[62:0] := INIT_FUNCTION(mask[62:0])\n" "/// \\endcode\n" "///\n" "/// \\headerfile \n" "///\n" "/// This intrinsic corresponds to the \\c XSAVEC64 instruction.\n" "///\n" "/// \\param __p\n" "/// Pointer to the save area; must be 64-byte aligned.\n" "/// \\param __m\n" "/// A 64-bit mask indicating what state should be saved.\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_xsavec64(void *__p, unsigned long long __m) {\n" " __builtin_ia32_xsavec64(__p, __m);\n" "}\n" "#endif\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "\n" "#endif\n" "" } , { "/builtins/xsaveintrin.h" , "/*===---- xsaveintrin.h - XSAVE intrinsic ----------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __XSAVEINTRIN_H\n" "#define __XSAVEINTRIN_H\n" "\n" "#ifdef _MSC_VER\n" "#define _XCR_XFEATURE_ENABLED_MASK 0\n" "#endif\n" "\n" "/* Define the default attributes for the functions in this file. 
*/\n" "#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__(\"xsave\")))\n" "\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_xsave(void *__p, unsigned long long __m) {\n" " __builtin_ia32_xsave(__p, __m);\n" "}\n" "\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_xrstor(void *__p, unsigned long long __m) {\n" " __builtin_ia32_xrstor(__p, __m);\n" "}\n" "\n" "#ifndef _MSC_VER\n" "#define _xgetbv(A) __builtin_ia32_xgetbv((long long)(A))\n" "#define _xsetbv(A, B) __builtin_ia32_xsetbv((unsigned int)(A), (unsigned long long)(B))\n" "#else\n" "#ifdef __cplusplus\n" "extern \"C\" {\n" "#endif\n" "unsigned __int64 __cdecl _xgetbv(unsigned int);\n" "void __cdecl _xsetbv(unsigned int, unsigned __int64);\n" "#ifdef __cplusplus\n" "}\n" "#endif\n" "#endif /* _MSC_VER */\n" "\n" "#ifdef __x86_64__\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_xsave64(void *__p, unsigned long long __m) {\n" " __builtin_ia32_xsave64(__p, __m);\n" "}\n" "\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_xrstor64(void *__p, unsigned long long __m) {\n" " __builtin_ia32_xrstor64(__p, __m);\n" "}\n" "\n" "#endif\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "\n" "#endif\n" "" } , { "/builtins/xsaveoptintrin.h" , "/*===---- xsaveoptintrin.h - XSAVEOPT intrinsic ----------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __XSAVEOPTINTRIN_H\n" "#define __XSAVEOPTINTRIN_H\n" "\n" "/* Define the default attributes for the functions in this file. 
*/\n" "#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__(\"xsaveopt\")))\n" "\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_xsaveopt(void *__p, unsigned long long __m) {\n" " __builtin_ia32_xsaveopt(__p, __m);\n" "}\n" "\n" "#ifdef __x86_64__\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_xsaveopt64(void *__p, unsigned long long __m) {\n" " __builtin_ia32_xsaveopt64(__p, __m);\n" "}\n" "#endif\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "\n" "#endif\n" "" } , { "/builtins/xsavesintrin.h" , "/*===---- xsavesintrin.h - XSAVES intrinsic --------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __XSAVESINTRIN_H\n" "#define __XSAVESINTRIN_H\n" "\n" "/* Define the default attributes for the functions in this file. 
*/\n" "#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__(\"xsaves\")))\n" "\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_xsaves(void *__p, unsigned long long __m) {\n" " __builtin_ia32_xsaves(__p, __m);\n" "}\n" "\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_xrstors(void *__p, unsigned long long __m) {\n" " __builtin_ia32_xrstors(__p, __m);\n" "}\n" "\n" "#ifdef __x86_64__\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_xrstors64(void *__p, unsigned long long __m) {\n" " __builtin_ia32_xrstors64(__p, __m);\n" "}\n" "\n" "static __inline__ void __DEFAULT_FN_ATTRS\n" "_xsaves64(void *__p, unsigned long long __m) {\n" " __builtin_ia32_xsaves64(__p, __m);\n" "}\n" "#endif\n" "\n" "#undef __DEFAULT_FN_ATTRS\n" "\n" "#endif\n" "" } , { "/builtins/xtestintrin.h" , "/*===---- xtestintrin.h - XTEST intrinsic ----------------------------------===\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n" " * See https://llvm.org/LICENSE.txt for license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------------===\n" " */\n" "\n" "#ifndef __IMMINTRIN_H\n" "#error \"Never use directly; include instead.\"\n" "#endif\n" "\n" "#ifndef __XTESTINTRIN_H\n" "#define __XTESTINTRIN_H\n" "\n" "/* xtest returns non-zero if the instruction is executed within an RTM or active\n" " * HLE region. */\n" "/* FIXME: This can be an either or for RTM/HLE. Deal with this when HLE is\n" " * supported. */\n" "static __inline__ int\n" " __attribute__((__always_inline__, __nodebug__, __target__(\"rtm\")))\n" " _xtest(void) {\n" " return __builtin_ia32_xtest();\n" "}\n" "\n" "#endif\n" "" } , {} };