diff --git a/ChangeLog.md b/ChangeLog.md
index 9f23d262d4f00..d5d6f6e7b89c2 100644
--- a/ChangeLog.md
+++ b/ChangeLog.md
@@ -18,11 +18,17 @@ to browse the changes between the tags.
 
 See docs/process.md for more on how version tagging works.
 
-4.0.1 (in development)
+4.0.2 (in development)
 ----------------------
+- Added support for compiling AVX2 intrinsics; 256-bit wide intrinsics are
+  emulated on top of the 128-bit Wasm SIMD instruction set (#23035). Pass
+  `-msimd128 -mavx2` to enable targeting AVX2.
 - The system JS libraries in `src/` were renamed from `library_foo.js` to
   `lib/libfoo.js`. They are still included via the same `-lfoo.js` flag so
   this should not be a user-visible change. (#23348)
+
+4.0.1 - 01/17/25
+----------------
 - The minimum version of node required to run emscripten was bumped from
   v16.20 to v18. Version 4.0 was mistakenly shipped with a change that
   required v20, but that was reverted. (#23410)
diff --git a/emcc.py b/emcc.py
index 54f83646d0618..99fbc105313ee 100644
--- a/emcc.py
+++ b/emcc.py
@@ -76,7 +76,7 @@
   'fetchSettings'
 ]
 
-SIMD_INTEL_FEATURE_TOWER = ['-msse', '-msse2', '-msse3', '-mssse3', '-msse4.1', '-msse4.2', '-msse4', '-mavx']
+SIMD_INTEL_FEATURE_TOWER = ['-msse', '-msse2', '-msse3', '-mssse3', '-msse4.1', '-msse4.2', '-msse4', '-mavx', '-mavx2']
 SIMD_NEON_FLAGS = ['-mfpu=neon']
 LINK_ONLY_FLAGS = {
     '--bind', '--closure', '--cpuprofiler', '--embed-file',
@@ -474,6 +474,9 @@ def array_contains_any_of(hay, needles):
   if array_contains_any_of(user_args, SIMD_INTEL_FEATURE_TOWER[7:]):
     cflags += ['-D__AVX__=1']
 
+  if array_contains_any_of(user_args, SIMD_INTEL_FEATURE_TOWER[8:]):
+    cflags += ['-D__AVX2__=1']
+
   if array_contains_any_of(user_args, SIMD_NEON_FLAGS):
     cflags += ['-D__ARM_NEON__=1']
 
@@ -738,11 +741,11 @@ def phase_parse_arguments(state):
 
 def separate_linker_flags(state, newargs):
-  """Process argument list separating out intput files, compiler flags
+  """Process argument list separating out input files, compiler flags
   and linker flags.
 
   - Linker flags are stored in state.link_flags
-  - Input files and compiler-only flags are return as two separate lists.
+  - Input files and compiler-only flags are returned as two separate lists.
 
   Both linker flags and input files are stored as pairs of (i, entry) where
   `i` is the original index in the command line arguments. This allows the two
diff --git a/emscripten-version.txt b/emscripten-version.txt
index 75cf6d84ba5b5..bebafbbb3a990 100644
--- a/emscripten-version.txt
+++ b/emscripten-version.txt
@@ -1 +1 @@
-4.0.1-git
+4.0.2-git
diff --git a/eslint.config.mjs b/eslint.config.mjs
index d5386faa4a930..f05b3d29f9c2d 100644
--- a/eslint.config.mjs
+++ b/eslint.config.mjs
@@ -54,7 +54,6 @@ export default [{
       'src/settings_internal.js',
       'src/growableHeap.js',
       'src/emrun_prejs.js',
-      'src/arrayUtils.js',
       'src/deterministic.js',
       'src/base64Decode.js',
       'src/proxyWorker.js',
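/*
 * Editor's illustrative sketch, not part of this patch: what the ChangeLog
 * entry and the emcc.py change above enable. Building with
 * `emcc -msimd128 -mavx2 test.c` defines __AVX2__, after which AVX2
 * intrinsics compile down to Wasm SIMD. The file name and the values used
 * here are hypothetical.
 */
#include <immintrin.h>
#include <stdio.h>

int main(void) {
#ifdef __AVX2__ /* defined by emcc when -mavx2 is passed */
  __m256i a = _mm256_set1_epi32(2);
  __m256i b = _mm256_set1_epi32(3);
  __m256i sum = _mm256_add_epi32(a, b); /* emulated as two 128-bit adds */
  int out[8];
  _mm256_storeu_si256((__m256i*)out, sum);
  printf("%d\n", out[0]); /* prints 5 */
#endif
  return 0;
}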
diff --git a/site/source/docs/porting/connecting_cpp_and_javascript/embind.rst b/site/source/docs/porting/connecting_cpp_and_javascript/embind.rst
index 2a6e693bd5f55..79b821f88a2d4 100644
--- a/site/source/docs/porting/connecting_cpp_and_javascript/embind.rst
+++ b/site/source/docs/porting/connecting_cpp_and_javascript/embind.rst
@@ -860,7 +860,7 @@ Class properties can be defined several ways as seen below.
     class_<Person>("Person")
       .constructor<>()
       // Bind directly to a class member with automatically generated getters/setters using a
-      // reference return policy so the object does not need to be deleted JS.
+      // reference return policy so the object does not need to be deleted from JS.
       .property("location", &Person::location, return_value_policy::reference())
       // Same as above, but this will return a copy and the object must be deleted or it will
       // leak!
diff --git a/site/source/docs/porting/simd.rst b/site/source/docs/porting/simd.rst
index 5259830e1059e..6409c0811b024 100644
--- a/site/source/docs/porting/simd.rst
+++ b/site/source/docs/porting/simd.rst
@@ -12,7 +12,7 @@ Emscripten supports the `WebAssembly SIMD
 1. Enable LLVM/Clang SIMD autovectorizer to automatically target WebAssembly SIMD, without requiring changes to C/C++ source code.
 2. Write SIMD code using the GCC/Clang SIMD Vector Extensions (``__attribute__((vector_size(16)))``)
 3. Write SIMD code using the WebAssembly SIMD intrinsics (``#include <wasm_simd128.h>``)
-4. Compile existing SIMD code that uses the x86 SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2 or AVX intrinsics (``#include <*mmintrin.h>``)
+4. Compile existing SIMD code that uses the x86 SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2, AVX or AVX2 intrinsics (``#include <*mmintrin.h>``)
 5. Compile existing SIMD code that uses the ARM NEON intrinsics (``#include <arm_neon.h>``)
 
 These techniques can be freely combined in a single program.
@@ -153,6 +153,7 @@ Emscripten supports compiling existing codebases that use x86 SSE instructions b
 * **SSE4.1**: pass ``-msse4.1`` and ``#include <smmintrin.h>``. Use ``#ifdef __SSE4_1__`` to gate code.
 * **SSE4.2**: pass ``-msse4.2`` and ``#include <nmmintrin.h>``. Use ``#ifdef __SSE4_2__`` to gate code.
 * **AVX**: pass ``-mavx`` and ``#include <immintrin.h>``. Use ``#ifdef __AVX__`` to gate code.
+* **AVX2**: pass ``-mavx2`` and ``#include <immintrin.h>``. Use ``#ifdef __AVX2__`` to gate code.
 
 Currently the SSE1, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2, AVX and AVX2 instruction
 sets are supported. Each of these instruction sets adds on top of the previous
 ones, so e.g. when targeting SSE3, the instruction sets SSE1 and SSE2 are also
 available.
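/*
 * Editor's illustrative sketch, not part of this patch: the emulation pattern
 * used throughout the new avx2intrin.h header added further down. A 256-bit
 * vector is represented as a pair of 128-bit Wasm SIMD vectors (the v0/v1
 * fields seen in the header), and each 256-bit operation is performed as two
 * 128-bit operations. The struct and function names here are hypothetical
 * stand-ins, not the header's actual types.
 */
#include <wasm_simd128.h>

typedef struct {
  v128_t v0; /* low 128 bits */
  v128_t v1; /* high 128 bits */
} m256_pair;

static inline m256_pair add_epi32_256(m256_pair a, m256_pair b) {
  m256_pair ret;
  ret.v0 = wasm_i32x4_add(a.v0, b.v0); /* low half */
  ret.v1 = wasm_i32x4_add(a.v1, b.v1); /* high half */
  return ret;
}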
@@ -1145,6 +1146,90 @@ The following table highlights the availability and expected performance of diff
 Only the 128-bit wide instructions from the AVX instruction set are listed. The
 256-bit wide AVX instructions are emulated by two 128-bit wide instructions.
 
+The following table highlights the availability and expected performance of
+different AVX2 intrinsics. Refer to `Intel Intrinsics Guide on AVX2
+<https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#techs=AVX2>`_.
+
+.. list-table:: x86 AVX2 intrinsics available via #include <immintrin.h> and -mavx2
+   :widths: 20 30
+   :header-rows: 1
+
+   * - Intrinsic name
+     - WebAssembly SIMD support
+   * - _mm_broadcastss_ps
+     - 💡 emulated with a general shuffle
+   * - _mm_broadcastsd_pd
+     - 💡 emulated with a general shuffle
+   * - _mm_blend_epi32
+     - 💡 emulated with a general shuffle
+   * - _mm_broadcastb_epi8
+     - 💡 emulated with a general shuffle
+   * - _mm_broadcastw_epi16
+     - 💡 emulated with a general shuffle
+   * - _mm_broadcastd_epi32
+     - 💡 emulated with a general shuffle
+   * - _mm_broadcastq_epi64
+     - 💡 emulated with a general shuffle
+   * - _mm256_permutevar8x32_epi32
+     - ❌ scalarized
+   * - _mm256_permute4x64_pd
+     - 💡 emulated with two general shuffles
+   * - _mm256_permutevar8x32_ps
+     - ❌ scalarized
+   * - _mm256_permute4x64_epi64
+     - 💡 emulated with two general shuffles
+   * - _mm_maskload_epi32
+     - ❌ scalarized
+   * - _mm_maskload_epi64
+     - ❌ scalarized
+   * - _mm_maskstore_epi32
+     - ❌ scalarized
+   * - _mm_maskstore_epi64
+     - ❌ scalarized
+   * - _mm_sllv_epi32
+     - ❌ scalarized
+   * - _mm_sllv_epi64
+     - ❌ scalarized
+   * - _mm_srav_epi32
+     - ❌ scalarized
+   * - _mm_srlv_epi32
+     - ❌ scalarized
+   * - _mm_srlv_epi64
+     - ❌ scalarized
+   * - _mm_mask_i32gather_pd
+     - ❌ scalarized
+   * - _mm_mask_i64gather_pd
+     - ❌ scalarized
+   * - _mm_mask_i32gather_ps
+     - ❌ scalarized
+   * - _mm_mask_i64gather_ps
+     - ❌ scalarized
+   * - _mm_mask_i32gather_epi32
+     - ❌ scalarized
+   * - _mm_mask_i64gather_epi32
+     - ❌ scalarized
+   * - _mm_mask_i32gather_epi64
+     - ❌ scalarized
+   * - _mm_mask_i64gather_epi64
+     - ❌ scalarized
+   * - _mm_i32gather_pd
+     - ❌ scalarized
+   * - _mm_i64gather_pd
+     - ❌ scalarized
+   * - _mm_i32gather_ps
+     - ❌ scalarized
+   * - _mm_i64gather_ps
+     - ❌ scalarized
+   * - _mm_i32gather_epi32
+     - ❌ scalarized
+   * - _mm_i64gather_epi32
+     - ❌ scalarized
+   * - _mm_i32gather_epi64
+     - ❌ scalarized
+   * - _mm_i64gather_epi64
+     - ❌ scalarized
+
+All of the 128-bit wide instructions from the AVX2 instruction set are listed.
+Only a small part of the 256-bit AVX2 instruction set is listed; most of the
+256-bit wide AVX2 instructions are emulated by two 128-bit wide instructions.
 
 ======================================================
 Compiling SIMD code targeting ARM NEON instruction set
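/*
 * Editor's illustrative sketch, not part of this patch: why every gather in
 * the table above is marked "scalarized". Wasm SIMD has no gather
 * instruction, so the new header performs one scalar load per lane. Per x86
 * semantics, _mm_i32gather_epi32(base_addr, vindex, scale) behaves like this
 * per-lane loop, where scale is a byte multiplier of 1, 2, 4 or 8.
 */
#include <stdint.h>

static inline void i32gather_epi32_scalar(int32_t out[4],
                                          const int32_t* base_addr,
                                          const int32_t vindex[4], int scale) {
  for (int i = 0; i < 4; i++) {
    /* each lane loads from base_addr + vindex[i] * scale, a byte offset */
    out[i] = *(const int32_t*)((const uint8_t*)base_addr +
                               (int64_t)vindex[i] * (uint32_t)scale);
  }
}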
diff --git a/src/arrayUtils.js b/src/arrayUtils.js
deleted file mode 100644
index a5f7449078f7d..0000000000000
--- a/src/arrayUtils.js
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * @license
- * Copyright 2017 The Emscripten Authors
- * SPDX-License-Identifier: MIT
- */
-
-/** @type {function(string, boolean=, number=)} */
-function intArrayFromString(stringy, dontAddNull, length) {
-  var len = length > 0 ? length : lengthBytesUTF8(stringy)+1;
-  var u8array = new Array(len);
-  var numBytesWritten = stringToUTF8Array(stringy, u8array, 0, u8array.length);
-  if (dontAddNull) u8array.length = numBytesWritten;
-  return u8array;
-}
-
-function intArrayToString(array) {
-  var ret = [];
-  for (var i = 0; i < array.length; i++) {
-    var chr = array[i];
-    if (chr > 0xFF) {
-#if ASSERTIONS
-      assert(false, `Character code ${chr} (${String.fromCharCode(chr)}) at offset ${i} not in 0x00-0xFF.`);
-#endif
-      chr &= 0xFF;
-    }
-    ret.push(String.fromCharCode(chr));
-  }
-  return ret.join('');
-}
diff --git a/src/lib/libstrings.js b/src/lib/libstrings.js
index e5a0c03b0b32c..893723cacb9c3 100644
--- a/src/lib/libstrings.js
+++ b/src/lib/libstrings.js
@@ -4,8 +4,6 @@
  * SPDX-License-Identifier: MIT
  */
 
-#include "arrayUtils.js"
-
 addToLibrary({
   // TextDecoder constructor defaults to UTF-8
 #if TEXTDECODER == 2
@@ -256,8 +254,28 @@ addToLibrary({
   $intArrayFromString__docs: '/** @type {function(string, boolean=, number=)} */',
   $intArrayFromString__deps: ['$lengthBytesUTF8', '$stringToUTF8Array'],
-  $intArrayFromString: intArrayFromString,
-  $intArrayToString: intArrayToString,
+  $intArrayFromString: (stringy, dontAddNull, length) => {
+    var len = length > 0 ? length : lengthBytesUTF8(stringy)+1;
+    var u8array = new Array(len);
+    var numBytesWritten = stringToUTF8Array(stringy, u8array, 0, u8array.length);
+    if (dontAddNull) u8array.length = numBytesWritten;
+    return u8array;
+  },
+
+  $intArrayToString: (array) => {
+    var ret = [];
+    for (var i = 0; i < array.length; i++) {
+      var chr = array[i];
+      if (chr > 0xFF) {
+      #if ASSERTIONS
+        assert(false, `Character code ${chr} (${String.fromCharCode(chr)}) at offset ${i} not in 0x00-0xFF.`);
+      #endif
+        chr &= 0xFF;
+      }
+      ret.push(String.fromCharCode(chr));
+    }
+    return ret.join('');
+  },
 
   // Given a pointer 'ptr' to a null-terminated ASCII-encoded string in the
   // emscripten HEAP, returns a copy of that string as a Javascript String
diff --git a/src/preamble.js b/src/preamble.js
index 2a422c49008ea..fb62ee4b609ae 100644
--- a/src/preamble.js
+++ b/src/preamble.js
@@ -225,9 +225,6 @@ function initRuntime() {
 function preMain() {
 #if STACK_OVERFLOW_CHECK
   checkStackCookie();
-#endif
-#if PTHREADS
-  if (ENVIRONMENT_IS_PTHREAD) return; // PThreads reuse the runtime from the main thread.
 #endif
   <<< ATMAINS >>>
   callRuntimeCallbacks(__ATMAIN__);
@@ -630,11 +627,7 @@ function getBinarySync(file) {
 async function getWasmBinary(binaryFile) {
 #if !SINGLE_FILE
   // If we don't have the binary yet, load it asynchronously using readAsync.
-  if (!wasmBinary
-#if SUPPORT_BASE64_EMBEDDING
-      || isDataURI(binaryFile)
-#endif
-     ) {
+  if (!wasmBinary) {
     // Fetch the binary using readAsync
     try {
       var response = await readAsync(binaryFile);
diff --git a/src/shell.js b/src/shell.js
index 149fd65adbe88..6629250620d6e 100644
--- a/src/shell.js
+++ b/src/shell.js
@@ -122,7 +122,7 @@ if (ENVIRONMENT_IS_NODE) {
 #endif // ENVIRONMENT_MAY_BE_NODE
 
 #if WASM_WORKERS
-var ENVIRONMENT_IS_WASM_WORKER = Module['$ww'];
+var ENVIRONMENT_IS_WASM_WORKER = !!Module['$ww'];
 #endif
 
 // --pre-jses are emitted after the Module integration code, so that they can
diff --git a/src/shell_minimal.js b/src/shell_minimal.js
index 4ddf6b3b0303c..60c7089602559 100644
--- a/src/shell_minimal.js
+++ b/src/shell_minimal.js
@@ -70,7 +70,7 @@ var ENVIRONMENT_IS_WEB = !ENVIRONMENT_IS_NODE;
 #endif // ASSERTIONS || PTHREADS
 
 #if WASM_WORKERS
-var ENVIRONMENT_IS_WASM_WORKER = Module['$ww'];
+var ENVIRONMENT_IS_WASM_WORKER = !!Module['$ww'];
 #endif
 
 #if ASSERTIONS && ENVIRONMENT_MAY_BE_NODE && ENVIRONMENT_MAY_BE_SHELL
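/*
 * Editor's illustrative sketch, not part of this patch: the per-lane variable
 * shifts (_mm_sllv_epi32 and friends) are scalarized in the header below for
 * the same reason as the gathers. Note the x86 semantics the header
 * reproduces: each lane shifts by its own count, and a count of 32 or more
 * zeroes the lane rather than wrapping modulo 32.
 */
#include <stdint.h>

static inline void sllv_epi32_scalar(uint32_t out[4], const uint32_t a[4],
                                     const uint32_t count[4]) {
  for (int i = 0; i < 4; i++) {
    out[i] = count[i] < 32 ? a[i] << count[i] : 0; /* no modulo-32 wrap */
  }
}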
diff --git a/system/include/compat/avx2intrin.h b/system/include/compat/avx2intrin.h
new file mode 100644
index 0000000000000..072a5f74c902c
--- /dev/null
+++ b/system/include/compat/avx2intrin.h
@@ -0,0 +1,1798 @@
+/*
+ * Copyright 2024 The Emscripten Authors. All rights reserved.
+ * Emscripten is available under two separate licenses, the MIT license and the
+ * University of Illinois/NCSA Open Source License. Both these licenses can be
+ * found in the LICENSE file.
+ */
+
+#ifndef __emscripten_immintrin_h__
+#error "Never use <avx2intrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __emscripten_avx2intrin_h__
+#define __emscripten_avx2intrin_h__
+
+#ifndef __AVX2__
+#error "AVX2 instruction set not enabled"
+#endif
+
+#define _mm256_mpsadbw_epu8(__A, __B, __imm)                                   \
+  __extension__({                                                              \
+    __m256i __a = (__A);                                                       \
+    __m256i __b = (__B);                                                       \
+    _mm256_set_m128i(_mm_mpsadbw_epu8(__a.v1, __b.v1, (__imm) >> 3),           \
+                     _mm_mpsadbw_epu8(__a.v0, __b.v0, (__imm)));               \
+  })
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_abs_epi8(__m256i __a) {
+  __m256i ret;
+  ret.v0 = _mm_abs_epi8(__a.v0);
+  ret.v1 = _mm_abs_epi8(__a.v1);
+  return ret;
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_abs_epi16(__m256i __a) {
+  __m256i ret;
+  ret.v0 = _mm_abs_epi16(__a.v0);
+  ret.v1 = _mm_abs_epi16(__a.v1);
+  return ret;
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_abs_epi32(__m256i __a) {
+  __m256i ret;
+  ret.v0 = _mm_abs_epi32(__a.v0);
+  ret.v1 = _mm_abs_epi32(__a.v1);
+  return ret;
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_packs_epi16(__m256i __a, __m256i __b) {
+  __m256i ret;
+  ret.v0 = _mm_packs_epi16(__a.v0, __b.v0);
+  ret.v1 = _mm_packs_epi16(__a.v1, __b.v1);
+  return ret;
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_packs_epi32(__m256i __a, __m256i __b) {
+  __m256i ret;
+  ret.v0 = _mm_packs_epi32(__a.v0, __b.v0);
+  ret.v1 = _mm_packs_epi32(__a.v1, __b.v1);
+  return ret;
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_packus_epi16(__m256i __a, __m256i __b) {
+  __m256i ret;
+  ret.v0 = _mm_packus_epi16(__a.v0, __b.v0);
+  ret.v1 = _mm_packus_epi16(__a.v1, __b.v1);
+  return ret;
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_packus_epi32(__m256i __a, __m256i __b) {
+  __m256i ret;
+  ret.v0 = _mm_packus_epi32(__a.v0,
__b.v0); + ret.v1 = _mm_packus_epi32(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_add_epi8(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_add_epi8(__a.v0, __b.v0); + ret.v1 = _mm_add_epi8(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_add_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_add_epi16(__a.v0, __b.v0); + ret.v1 = _mm_add_epi16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_add_epi32(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_add_epi32(__a.v0, __b.v0); + ret.v1 = _mm_add_epi32(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_add_epi64(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_add_epi64(__a.v0, __b.v0); + ret.v1 = _mm_add_epi64(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_adds_epi8(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_adds_epi8(__a.v0, __b.v0); + ret.v1 = _mm_adds_epi8(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_adds_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_adds_epi16(__a.v0, __b.v0); + ret.v1 = _mm_adds_epi16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_adds_epu8(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_adds_epu8(__a.v0, __b.v0); + ret.v1 = _mm_adds_epu8(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_adds_epu16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_adds_epu16(__a.v0, __b.v0); + ret.v1 = _mm_adds_epu16(__a.v1, __b.v1); + return ret; +} + +#define _mm256_alignr_epi8(__A, __B, __imm) \ + __extension__({ \ + __m256i __a = (__A); \ + __m256i __b = (__B); \ + _mm256_set_m128i(_mm_alignr_epi8(__a.v1, __b.v1, (__imm)), \ + _mm_alignr_epi8(__a.v0, __b.v0, (__imm))); \ + }) + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_and_si256(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_and_si128(__a.v0, __b.v0); + ret.v1 = _mm_and_si128(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_andnot_si256(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_andnot_si128(__a.v0, __b.v0); + ret.v1 = _mm_andnot_si128(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_avg_epu8(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_avg_epu8(__a.v0, __b.v0); + ret.v1 = _mm_avg_epu8(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_avg_epu16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_avg_epu16(__a.v0, __b.v0); + ret.v1 = _mm_avg_epu16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_blendv_epi8(__m256i __a, __m256i __b, __m256i __mask) { + __m256i ret; + ret.v0 = _mm_blendv_epi8(__a.v0, __b.v0, __mask.v0); + ret.v1 = _mm_blendv_epi8(__a.v1, __b.v1, __mask.v1); + return ret; +} + +#define _mm256_blend_epi16(__A, __B, __imm) \ + __extension__({ \ + __m256i __a = 
(__A); \ + __m256i __b = (__B); \ + _mm256_set_m128i(_mm_blend_epi16(__a.v1, __b.v1, (__imm)), \ + _mm_blend_epi16(__a.v0, __b.v0, (__imm))); \ + }) + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpeq_epi8(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_cmpeq_epi8(__a.v0, __b.v0); + ret.v1 = _mm_cmpeq_epi8(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpeq_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_cmpeq_epi16(__a.v0, __b.v0); + ret.v1 = _mm_cmpeq_epi16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpeq_epi32(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_cmpeq_epi32(__a.v0, __b.v0); + ret.v1 = _mm_cmpeq_epi32(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpeq_epi64(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_cmpeq_epi64(__a.v0, __b.v0); + ret.v1 = _mm_cmpeq_epi64(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpgt_epi8(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_cmpgt_epi8(__a.v0, __b.v0); + ret.v1 = _mm_cmpgt_epi8(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpgt_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_cmpgt_epi16(__a.v0, __b.v0); + ret.v1 = _mm_cmpgt_epi16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpgt_epi32(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_cmpgt_epi32(__a.v0, __b.v0); + ret.v1 = _mm_cmpgt_epi32(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpgt_epi64(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_cmpgt_epi64(__a.v0, __b.v0); + ret.v1 = _mm_cmpgt_epi64(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_hadd_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_hadd_epi16(__a.v0, __b.v0); + ret.v1 = _mm_hadd_epi16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_hadd_epi32(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_hadd_epi32(__a.v0, __b.v0); + ret.v1 = _mm_hadd_epi32(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_hadds_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_hadds_epi16(__a.v0, __b.v0); + ret.v1 = _mm_hadds_epi16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_hsub_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_hsub_epi16(__a.v0, __b.v0); + ret.v1 = _mm_hsub_epi16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_hsub_epi32(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_hsub_epi32(__a.v0, __b.v0); + ret.v1 = _mm_hsub_epi32(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_hsubs_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_hsubs_epi16(__a.v0, __b.v0); + ret.v1 = _mm_hsubs_epi16(__a.v1, __b.v1); + return 
ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_maddubs_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_maddubs_epi16(__a.v0, __b.v0); + ret.v1 = _mm_maddubs_epi16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_madd_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_madd_epi16(__a.v0, __b.v0); + ret.v1 = _mm_madd_epi16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_max_epi8(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_max_epi8(__a.v0, __b.v0); + ret.v1 = _mm_max_epi8(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_max_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_max_epi16(__a.v0, __b.v0); + ret.v1 = _mm_max_epi16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_max_epi32(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_max_epi32(__a.v0, __b.v0); + ret.v1 = _mm_max_epi32(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_max_epu8(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_max_epu8(__a.v0, __b.v0); + ret.v1 = _mm_max_epu8(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_max_epu16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_max_epu16(__a.v0, __b.v0); + ret.v1 = _mm_max_epu16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_max_epu32(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_max_epu32(__a.v0, __b.v0); + ret.v1 = _mm_max_epu32(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_min_epi8(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_min_epi8(__a.v0, __b.v0); + ret.v1 = _mm_min_epi8(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_min_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_min_epi16(__a.v0, __b.v0); + ret.v1 = _mm_min_epi16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_min_epi32(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_min_epi32(__a.v0, __b.v0); + ret.v1 = _mm_min_epi32(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_min_epu8(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_min_epu8(__a.v0, __b.v0); + ret.v1 = _mm_min_epu8(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_min_epu16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_min_epu16(__a.v0, __b.v0); + ret.v1 = _mm_min_epu16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_min_epu32(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_min_epu32(__a.v0, __b.v0); + ret.v1 = _mm_min_epu32(__a.v1, __b.v1); + return ret; +} + +static __inline__ int __attribute__((__always_inline__, __nodebug__)) +_mm256_movemask_epi8(__m256i __a) { + return (_mm_movemask_epi8(__a.v1) << 16) | _mm_movemask_epi8(__a.v0); +} + +static 
__inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_cvtepi8_epi16(__m128i __a) { + __m256i ret; + ret.v0 = _mm_cvtepi8_epi16(__a); + ret.v1 = _mm_cvtepi8_epi16(_mm_shuffle_epi32(__a, 0x4E)); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_cvtepi8_epi32(__m128i __a) { + __m256i ret; + ret.v0 = _mm_cvtepi8_epi32(__a); + ret.v1 = _mm_cvtepi8_epi32(_mm_shuffle_epi32(__a, 0xE1)); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_cvtepi8_epi64(__m128i __a) { + __m256i ret; + ret.v0 = _mm_cvtepi8_epi64(__a); + ret.v1 = _mm_cvtepi8_epi64(_mm_srli_epi32(__a, 16)); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_cvtepi16_epi32(__m128i __a) { + __m256i ret; + ret.v0 = _mm_cvtepi16_epi32(__a); + ret.v1 = _mm_cvtepi16_epi32(_mm_shuffle_epi32(__a, 0x4E)); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_cvtepi16_epi64(__m128i __a) { + __m256i ret; + ret.v0 = _mm_cvtepi16_epi64(__a); + ret.v1 = _mm_cvtepi16_epi64(_mm_shuffle_epi32(__a, 0xE1)); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_cvtepi32_epi64(__m128i __a) { + __m256i ret; + ret.v0 = _mm_cvtepi32_epi64(__a); + ret.v1 = _mm_cvtepi32_epi64(_mm_shuffle_epi32(__a, 0x4E)); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_cvtepu8_epi16(__m128i __a) { + __m256i ret; + ret.v0 = _mm_cvtepu8_epi16(__a); + ret.v1 = _mm_cvtepu8_epi16(_mm_shuffle_epi32(__a, 0x4E)); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_cvtepu8_epi32(__m128i __a) { + __m256i ret; + ret.v0 = _mm_cvtepu8_epi32(__a); + ret.v1 = _mm_cvtepu8_epi32(_mm_shuffle_epi32(__a, 0xE1)); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_cvtepu8_epi64(__m128i __a) { + __m256i ret; + ret.v0 = _mm_cvtepu8_epi64(__a); + ret.v1 = _mm_cvtepu8_epi64(_mm_srli_epi32(__a, 16)); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_cvtepu16_epi32(__m128i __a) { + __m256i ret; + ret.v0 = _mm_cvtepu16_epi32(__a); + ret.v1 = _mm_cvtepu16_epi32(_mm_shuffle_epi32(__a, 0x4E)); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_cvtepu16_epi64(__m128i __a) { + __m256i ret; + ret.v0 = _mm_cvtepu16_epi64(__a); + ret.v1 = _mm_cvtepu16_epi64(_mm_shuffle_epi32(__a, 0xE1)); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_cvtepu32_epi64(__m128i __a) { + __m256i ret; + ret.v0 = _mm_cvtepu32_epi64(__a); + ret.v1 = _mm_cvtepu32_epi64(_mm_shuffle_epi32(__a, 0x4E)); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_mul_epi32(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_mul_epi32(__a.v0, __b.v0); + ret.v1 = _mm_mul_epi32(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_mulhrs_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_mulhrs_epi16(__a.v0, __b.v0); + ret.v1 = _mm_mulhrs_epi16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_mulhi_epu16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_mulhi_epu16(__a.v0, 
__b.v0); + ret.v1 = _mm_mulhi_epu16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_mulhi_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_mulhi_epi16(__a.v0, __b.v0); + ret.v1 = _mm_mulhi_epi16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_mullo_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_mullo_epi16(__a.v0, __b.v0); + ret.v1 = _mm_mullo_epi16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_mullo_epi32(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_mullo_epi32(__a.v0, __b.v0); + ret.v1 = _mm_mullo_epi32(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_mul_epu32(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_mul_epu32(__a.v0, __b.v0); + ret.v1 = _mm_mul_epu32(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_or_si256(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_or_si128(__a.v0, __b.v0); + ret.v1 = _mm_or_si128(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_sad_epu8(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_sad_epu8(__a.v0, __b.v0); + ret.v1 = _mm_sad_epu8(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_shuffle_epi8(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_shuffle_epi8(__a.v0, __b.v0); + ret.v1 = _mm_shuffle_epi8(__a.v1, __b.v1); + return ret; +} + +#define _mm256_shuffle_epi32(__A, __imm) \ + __extension__({ \ + __m256i __a = (__A); \ + _mm256_set_m128i(_mm_shuffle_epi32(__a.v1, (__imm)), \ + _mm_shuffle_epi32(__a.v0, (__imm))); \ + }) + +#define _mm256_shufflehi_epi16(__A, __imm) \ + __extension__({ \ + __m256i __a = (__A); \ + _mm256_set_m128i(_mm_shufflehi_epi16(__a.v1, (__imm)), \ + _mm_shufflehi_epi16(__a.v0, (__imm))); \ + }) + +#define _mm256_shufflelo_epi16(__A, __imm) \ + __extension__({ \ + __m256i __a = (__A); \ + _mm256_set_m128i(_mm_shufflelo_epi16(__a.v1, (__imm)), \ + _mm_shufflelo_epi16(__a.v0, (__imm))); \ + }) + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_sign_epi8(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_sign_epi8(__a.v0, __b.v0); + ret.v1 = _mm_sign_epi8(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_sign_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_sign_epi16(__a.v0, __b.v0); + ret.v1 = _mm_sign_epi16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_sign_epi32(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_sign_epi32(__a.v0, __b.v0); + ret.v1 = _mm_sign_epi32(__a.v1, __b.v1); + return ret; +} + +#define _mm256_slli_si256(__A, __imm) \ + __extension__({ \ + __m256i __a = (__A); \ + _mm256_set_m128i(_mm_slli_si128(__a.v1, (__imm)), \ + _mm_slli_si128(__a.v0, (__imm))); \ + }) + +#define _mm256_bslli_epi128(__A, __imm) _mm256_slli_si256(__A, __imm) + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_slli_epi16(__m256i __a, int __count) { + __m256i ret; + ret.v0 = _mm_slli_epi16(__a.v0, __count); + ret.v1 = 
_mm_slli_epi16(__a.v1, __count); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_sll_epi16(__m256i __a, __m128i __count) { + __m256i ret; + ret.v0 = _mm_sll_epi16(__a.v0, __count); + ret.v1 = _mm_sll_epi16(__a.v1, __count); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_slli_epi32(__m256i __a, int __count) { + __m256i ret; + ret.v0 = _mm_slli_epi32(__a.v0, __count); + ret.v1 = _mm_slli_epi32(__a.v1, __count); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_sll_epi32(__m256i __a, __m128i __count) { + __m256i ret; + ret.v0 = _mm_sll_epi32(__a.v0, __count); + ret.v1 = _mm_sll_epi32(__a.v1, __count); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_slli_epi64(__m256i __a, int __count) { + __m256i ret; + ret.v0 = _mm_slli_epi64(__a.v0, __count); + ret.v1 = _mm_slli_epi64(__a.v1, __count); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_sll_epi64(__m256i __a, __m128i __count) { + __m256i ret; + ret.v0 = _mm_sll_epi64(__a.v0, __count); + ret.v1 = _mm_sll_epi64(__a.v1, __count); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_srai_epi16(__m256i __a, int __count) { + __m256i ret; + ret.v0 = _mm_srai_epi16(__a.v0, __count); + ret.v1 = _mm_srai_epi16(__a.v1, __count); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_sra_epi16(__m256i __a, __m128i __count) { + __m256i ret; + ret.v0 = _mm_sra_epi16(__a.v0, __count); + ret.v1 = _mm_sra_epi16(__a.v1, __count); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_srai_epi32(__m256i __a, int __count) { + __m256i ret; + ret.v0 = _mm_srai_epi32(__a.v0, __count); + ret.v1 = _mm_srai_epi32(__a.v1, __count); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_sra_epi32(__m256i __a, __m128i __count) { + __m256i ret; + ret.v0 = _mm_sra_epi32(__a.v0, __count); + ret.v1 = _mm_sra_epi32(__a.v1, __count); + return ret; +} + +#define _mm256_srli_si256(__A, __imm) \ + __extension__({ \ + __m256i __a = (__A); \ + _mm256_set_m128i(_mm_srli_si128(__a.v1, (__imm)), \ + _mm_srli_si128(__a.v0, (__imm))); \ + }) + +#define _mm256_bsrli_epi128(a, imm) _mm256_srli_si256(a, imm) + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_srli_epi16(__m256i __a, int __count) { + __m256i ret; + ret.v0 = _mm_srli_epi16(__a.v0, __count); + ret.v1 = _mm_srli_epi16(__a.v1, __count); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_srl_epi16(__m256i __a, __m128i __count) { + __m256i ret; + ret.v0 = _mm_srl_epi16(__a.v0, __count); + ret.v1 = _mm_srl_epi16(__a.v1, __count); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_srli_epi32(__m256i __a, int __count) { + __m256i ret; + ret.v0 = _mm_srli_epi32(__a.v0, __count); + ret.v1 = _mm_srli_epi32(__a.v1, __count); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_srl_epi32(__m256i __a, __m128i __count) { + __m256i ret; + ret.v0 = _mm_srl_epi32(__a.v0, __count); + ret.v1 = _mm_srl_epi32(__a.v1, __count); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, 
__nodebug__)) +_mm256_srli_epi64(__m256i __a, int __count) { + __m256i ret; + ret.v0 = _mm_srli_epi64(__a.v0, __count); + ret.v1 = _mm_srli_epi64(__a.v1, __count); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_srl_epi64(__m256i __a, __m128i __count) { + __m256i ret; + ret.v0 = _mm_srl_epi64(__a.v0, __count); + ret.v1 = _mm_srl_epi64(__a.v1, __count); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_sub_epi8(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_sub_epi8(__a.v0, __b.v0); + ret.v1 = _mm_sub_epi8(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_sub_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_sub_epi16(__a.v0, __b.v0); + ret.v1 = _mm_sub_epi16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_sub_epi32(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_sub_epi32(__a.v0, __b.v0); + ret.v1 = _mm_sub_epi32(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_sub_epi64(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_sub_epi64(__a.v0, __b.v0); + ret.v1 = _mm_sub_epi64(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_subs_epi8(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_subs_epi8(__a.v0, __b.v0); + ret.v1 = _mm_subs_epi8(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_subs_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_subs_epi16(__a.v0, __b.v0); + ret.v1 = _mm_subs_epi16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_subs_epu8(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_subs_epu8(__a.v0, __b.v0); + ret.v1 = _mm_subs_epu8(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_subs_epu16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_subs_epu16(__a.v0, __b.v0); + ret.v1 = _mm_subs_epu16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_unpackhi_epi8(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_unpackhi_epi8(__a.v0, __b.v0); + ret.v1 = _mm_unpackhi_epi8(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_unpackhi_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_unpackhi_epi16(__a.v0, __b.v0); + ret.v1 = _mm_unpackhi_epi16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_unpackhi_epi32(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_unpackhi_epi32(__a.v0, __b.v0); + ret.v1 = _mm_unpackhi_epi32(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_unpackhi_epi64(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_unpackhi_epi64(__a.v0, __b.v0); + ret.v1 = _mm_unpackhi_epi64(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_unpacklo_epi8(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_unpacklo_epi8(__a.v0, __b.v0); + ret.v1 = 
_mm_unpacklo_epi8(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_unpacklo_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_unpacklo_epi16(__a.v0, __b.v0); + ret.v1 = _mm_unpacklo_epi16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_unpacklo_epi32(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_unpacklo_epi32(__a.v0, __b.v0); + ret.v1 = _mm_unpacklo_epi32(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_unpacklo_epi64(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_unpacklo_epi64(__a.v0, __b.v0); + ret.v1 = _mm_unpacklo_epi64(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_xor_si256(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_xor_si128(__a.v0, __b.v0); + ret.v1 = _mm_xor_si128(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_stream_load_si256(const void* __V) { + __m256i ret; + ret.v0 = _mm_stream_load_si128((const __m128i*)__V); + ret.v1 = _mm_stream_load_si128((const __m128i*)(((const uint8_t*)__V) + 16)); + return ret; +} + +static __inline__ __m128 __attribute__((__always_inline__, __nodebug__)) +_mm_broadcastss_ps(__m128 __a) { + return (__m128)wasm_i32x4_shuffle(__a, __a, 0, 0, 0, 0); +} + +static __inline__ __m128d __attribute__((__always_inline__, __nodebug__)) +_mm_broadcastsd_pd(__m128d __a) { + return (__m128d)wasm_i64x2_shuffle(__a, __a, 0, 0); +} + +static __inline__ __m256 __attribute__((__always_inline__, __nodebug__)) +_mm256_broadcastss_ps(__m128 __a) { + __m256 ret; + ret.v1 = ret.v0 = _mm_broadcastss_ps(__a); + return ret; +} + +static __inline__ __m256d __attribute__((__always_inline__, __nodebug__)) +_mm256_broadcastsd_pd(__m128d __a) { + __m256d ret; + ret.v1 = ret.v0 = _mm_broadcastsd_pd(__a); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_broadcastsi128_si256(__m128i __a) { + __m256i ret; + ret.v1 = ret.v0 = __a; + return ret; +} + +#define _mm_broadcastsi128_si256(X) _mm256_broadcastsi128_si256(X) + +#define _mm_blend_epi32(__a, __b, __imm8) \ + __extension__({ \ + (__m128i) __builtin_shufflevector((__i32x4)(__m128i)(__a), \ + (__i32x4)(__m128i)(__b), \ + (((__imm8) & 0x01) ? 4 : 0), \ + (((__imm8) & 0x02) ? 5 : 1), \ + (((__imm8) & 0x04) ? 6 : 2), \ + (((__imm8) & 0x08) ? 
7 : 3)); \ + }) + +#define _mm256_blend_epi32(__A, __B, __imm) \ + __extension__({ \ + __m256i __a = (__A); \ + __m256i __b = (__B); \ + _mm256_set_m128i(_mm_blend_epi32(__a.v1, __b.v1, (__imm) >> 4), \ + _mm_blend_epi32(__a.v0, __b.v0, (__imm))); \ + }) + +static __inline__ __m128i __attribute__((__always_inline__, __nodebug__)) +_mm_broadcastb_epi8(__m128i __a) { + return (__m128i)wasm_i8x16_shuffle( + __a, __a, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); +} + +static __inline__ __m128i __attribute__((__always_inline__, __nodebug__)) +_mm_broadcastw_epi16(__m128i __a) { + return (__m128i)wasm_i16x8_shuffle(__a, __a, 0, 0, 0, 0, 0, 0, 0, 0); +} + +static __inline__ __m128i __attribute__((__always_inline__, __nodebug__)) +_mm_broadcastd_epi32(__m128i __a) { + return (__m128i)wasm_i32x4_shuffle(__a, __a, 0, 0, 0, 0); +} + +static __inline__ __m128i __attribute__((__always_inline__, __nodebug__)) +_mm_broadcastq_epi64(__m128i __a) { + return (__m128i)wasm_i64x2_shuffle(__a, __a, 0, 0); +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_broadcastb_epi8(__m128i __a) { + __m256i ret; + ret.v1 = ret.v0 = _mm_broadcastb_epi8(__a); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_broadcastw_epi16(__m128i __a) { + __m256i ret; + ret.v1 = ret.v0 = _mm_broadcastw_epi16(__a); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_broadcastd_epi32(__m128i __a) { + __m256i ret; + ret.v1 = ret.v0 = _mm_broadcastd_epi32(__a); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_broadcastq_epi64(__m128i __a) { + __m256i ret; + ret.v1 = ret.v0 = _mm_broadcastq_epi64(__a); + return ret; +} + +static __inline__ __m256i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_permutevar8x32_epi32(__m256i __a, __m256i __b) { + __m256i ret; + int index[8]; + int lane[8]; + for (int i = 0; i < 4; i++) { + index[i] = ((__i32x4)__b.v0)[i] & 7; + index[i + 4] = ((__i32x4)__b.v1)[i] & 7; + } + + for (int j = 0; j < 8; j++) { + lane[j] = index[j] < 4 ? ((__i32x4)(__a.v0))[index[j]] + : ((__i32x4)(__a.v1))[index[j] - 4]; + } + + ret.v0 = (__m128i)wasm_i32x4_make(lane[0], lane[1], lane[2], lane[3]); + ret.v1 = (__m128i)wasm_i32x4_make(lane[4], lane[5], lane[6], lane[7]); + return ret; +} + +#define _mm256_permute4x64_pd(__A, __imm) \ + __extension__({ \ + __m256d __a = (__A); \ + _mm256_set_m128d( \ + (__m128d)wasm_i64x2_shuffle( \ + __a.v0, __a.v1, (((__imm) >> 4) & 3), (((__imm) >> 6) & 3)), \ + (__m128d)wasm_i64x2_shuffle( \ + __a.v0, __a.v1, ((__imm) & 3), (((__imm) >> 2) & 3))); \ + }) + +static __inline__ __m256 + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_permutevar8x32_ps(__m256 __a, __m256i __b) { + __m256 ret; + int index[8]; + float lane[8]; + for (int i = 0; i < 4; i++) { + index[i] = ((__i32x4)__b.v0)[i] & 7; + index[i + 4] = ((__i32x4)__b.v1)[i] & 7; + } + for (int j = 0; j < 8; j++) { + lane[j] = index[j] < 4 ? 
((__f32x4)(__a.v0))[index[j]] + : ((__f32x4)(__a.v1))[index[j] - 4]; + } + ret.v0 = (__m128)wasm_f32x4_make(lane[0], lane[1], lane[2], lane[3]); + ret.v1 = (__m128)wasm_f32x4_make(lane[4], lane[5], lane[6], lane[7]); + return ret; +} + +#define _mm256_permute4x64_epi64(__A, __imm) \ + __extension__({ \ + __m256i __a = (__A); \ + _mm256_set_m128i( \ + wasm_i64x2_shuffle( \ + __a.v0, __a.v1, (((__imm) >> 4) & 3), (((__imm) >> 6) & 3)), \ + wasm_i64x2_shuffle( \ + __a.v0, __a.v1, ((__imm) & 3), (((__imm) >> 2) & 3))); \ + }) + +static __inline__ __m256i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_permute2x128_si256(__m256i __a, __m256i __b, const int imm8) { + __m256i ret; + ret.v0 = __avx_select4i(__a, __b, imm8); + ret.v1 = __avx_select4i(__a, __b, imm8 >> 4); + return ret; +} + +static __inline__ __m128i __attribute__((__always_inline__, __nodebug__)) +_mm256_extracti128_si256(__m256i __a, const int imm8) { + if (imm8 & 0x1) { + return __a.v1; + } else { + return __a.v0; + } +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_inserti128_si256(__m256i __a, __m128i __b, const int imm8) { + __m256i ret = __a; + if (imm8 & 0x1) { + ret.v1 = __b; + } else { + ret.v0 = __b; + } + return ret; +} + +static __inline__ __m128i __attribute__((__always_inline__, __nodebug__)) +_mm_maskload_epi32(int32_t const* __p, __m128i __m) { + int32_t lane[4]; + for (size_t i = 0; i < 4; i++) { + uint32_t mask = ((__i32x4)__m)[i]; + lane[i] = ((mask >> 31) & 0x1) ? __p[i] : 0; + } + return (__m128i)wasm_i32x4_make(lane[0], lane[1], lane[2], lane[3]); +} + +static __inline__ __m128i __attribute__((__always_inline__, __nodebug__)) +_mm_maskload_epi64(int64_t const* __p, __m128i __m) { + int64_t lane[2]; + for (size_t i = 0; i < 2; i++) { + uint64_t mask = ((__i64x2)__m)[i]; + lane[i] = ((mask >> 63) & 0x1) ? 
__p[i] : 0; + } + return (__m128i)wasm_i64x2_make(lane[0], lane[1]); +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_maskload_epi32(int const* __p, __m256i __m) { + __m256i ret; + ret.v0 = _mm_maskload_epi32(__p, __m.v0); + ret.v1 = _mm_maskload_epi32(((int32_t*)__p) + 4, __m.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_maskload_epi64(long long const* __p, __m256i __m) { + __m256i ret; + ret.v0 = _mm_maskload_epi64(__p, __m.v0); + ret.v1 = _mm_maskload_epi64(((int64_t*)__p) + 2, __m.v1); + return ret; +} + +static __inline__ void + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_maskstore_epi32(int* __p, __m128i __m, __m128i __a) { + if ((wasm_i32x4_extract_lane(__m, 0) & 0x80000000ull) != 0) + __p[0] = wasm_i32x4_extract_lane((v128_t)__a, 0); + if ((wasm_i32x4_extract_lane(__m, 1) & 0x80000000ull) != 0) + __p[1] = wasm_i32x4_extract_lane((v128_t)__a, 1); + if ((wasm_i32x4_extract_lane(__m, 2) & 0x80000000ull) != 0) + __p[2] = wasm_i32x4_extract_lane((v128_t)__a, 2); + if ((wasm_i32x4_extract_lane(__m, 3) & 0x80000000ull) != 0) + __p[3] = wasm_i32x4_extract_lane((v128_t)__a, 3); +} + +static __inline__ void + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_maskstore_epi64(long long* __p, __m128i __m, __m128i __a) { + if ((wasm_i64x2_extract_lane(__m, 0) & 0x8000000000000000ull) != 0) + __p[0] = wasm_i64x2_extract_lane((v128_t)__a, 0); + if ((wasm_i64x2_extract_lane(__m, 1) & 0x8000000000000000ull) != 0) + __p[1] = wasm_i64x2_extract_lane((v128_t)__a, 1); +} + +static __inline__ void __attribute__((__always_inline__, __nodebug__)) +_mm256_maskstore_epi32(int* __p, __m256i __m, __m256i __a) { + _mm_maskstore_epi32(__p, __m.v0, __a.v0); + _mm_maskstore_epi32(((int32_t*)__p) + 4, __m.v1, __a.v1); +} + +static __inline__ void __attribute__((__always_inline__, __nodebug__)) +_mm256_maskstore_epi64(long long* __p, __m256i __m, __m256i __a) { + _mm_maskstore_epi64(__p, __m.v0, __a.v0); + _mm_maskstore_epi64(((int64_t*)__p) + 2, __m.v1, __a.v1); +} + +static __inline__ __m128i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_sllv_epi32(__m128i __a, __m128i __count) { + int32_t lane[4]; + for (size_t i = 0; i < 4; i++) { + uint32_t shift = ((__u32x4)__count)[i]; + lane[i] = shift < 32 ? ((__u32x4)__a)[i] << shift : 0; + } + return (__m128i)wasm_i32x4_make(lane[0], lane[1], lane[2], lane[3]); +} + +static __inline__ __m256i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_sllv_epi32(__m256i __a, __m256i __count) { + __m256i ret; + ret.v0 = _mm_sllv_epi32(__a.v0, __count.v0); + ret.v1 = _mm_sllv_epi32(__a.v1, __count.v1); + return ret; +} + +static __inline__ __m128i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_sllv_epi64(__m128i __a, __m128i __count) { + + int64_t lane[2]; + for (size_t i = 0; i < 2; i++) { + uint64_t shift = (uint64_t)((__u64x2)__count)[i]; + lane[i] = shift < 64 ? 
((__u64x2)__a)[i] << shift : 0; + } + return (__m128i)wasm_i64x2_make(lane[0], lane[1]); +} + +static __inline__ __m256i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_sllv_epi64(__m256i __a, __m256i __count) { + __m256i ret; + ret.v0 = _mm_sllv_epi64(__a.v0, __count.v0); + ret.v1 = _mm_sllv_epi64(__a.v1, __count.v1); + return ret; +} + +static __inline__ __m128i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_srav_epi32(__m128i __a, __m128i __count) { + int32_t lane[4]; + for (size_t i = 0; i < 4; i++) { + uint32_t shift = ((__u32x4)__count)[i]; + shift = shift < 31 ? shift : 31; + lane[i] = ((__i32x4)__a)[i] >> shift; + } + return (__m128i)wasm_i32x4_make(lane[0], lane[1], lane[2], lane[3]); +} + +static __inline__ __m256i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_srav_epi32(__m256i __a, __m256i __count) { + __m256i ret; + ret.v0 = _mm_srav_epi32(__a.v0, __count.v0); + ret.v1 = _mm_srav_epi32(__a.v1, __count.v1); + return ret; +} + +static __inline__ __m128i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_srlv_epi32(__m128i __a, __m128i __count) { + int32_t lane[4]; + for (size_t i = 0; i < 4; i++) { + uint32_t shift = ((__u32x4)__count)[i]; + lane[i] = shift < 32 ? ((__u32x4)__a)[i] >> shift : 0; + } + return (__m128i)wasm_i32x4_make(lane[0], lane[1], lane[2], lane[3]); +} + +static __inline__ __m256i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_srlv_epi32(__m256i __a, __m256i __count) { + __m256i ret; + ret.v0 = _mm_srlv_epi32(__a.v0, __count.v0); + ret.v1 = _mm_srlv_epi32(__a.v1, __count.v1); + return ret; +} + +static __inline__ __m128i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_srlv_epi64(__m128i __a, __m128i __count) { + int64_t lane[2]; + for (size_t i = 0; i < 2; i++) { + uint64_t shift = ((__u64x2)__count)[i]; + lane[i] = shift < 64 ? 
((__u64x2)__a)[i] >> shift : 0; + } + return (__m128i)wasm_i64x2_make(lane[0], lane[1]); +} + +static __inline__ __m256i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_srlv_epi64(__m256i __a, __m256i __count) { + __m256i ret; + ret.v0 = _mm_srlv_epi64(__a.v0, __count.v0); + ret.v1 = _mm_srlv_epi64(__a.v1, __count.v1); + return ret; +} + +static __inline__ __m128d + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_mask_i32gather_pd(__m128d src, + const double* base_addr, + __m128i vindex, + __m128d mask, + const int scale) { + double lane[2]; + for (size_t i = 0; i < 2; i++) { + if ((((__i64x2)mask)[i] >> 63) & 0x1) { + double* addr = + (double*)((uint8_t*)base_addr + (int64_t)(((__i32x4)vindex)[i]) * + (uint64_t)((uint32_t)scale)); + lane[i] = *addr; + } else { + lane[i] = ((__f64x2)src)[i]; + } + } + return (__m128d)wasm_f64x2_make(lane[0], lane[1]); +} + +static __inline__ __m256d + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_mask_i32gather_pd(__m256d src, + const double* base_addr, + __m128i vindex, + __m256d mask, + const int scale) { + __m256d ret; + ret.v0 = _mm_mask_i32gather_pd(src.v0, base_addr, vindex, mask.v0, scale); + __m128i vindex1 = (__m128i)wasm_i32x4_shuffle(vindex, vindex, 2, 3, 0, 1); + ret.v1 = _mm_mask_i32gather_pd(src.v1, base_addr, vindex1, mask.v1, scale); + return ret; +} + +static __inline__ __m128d + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_mask_i64gather_pd(__m128d src, + const double* base_addr, + __m128i vindex, + __m128d mask, + const int scale) { + double lane[2]; + for (size_t i = 0; i < 2; i++) { + if ((((__i64x2)mask)[i] >> 63) & 0x1) { + double* addr = + (double*)((uint8_t*)base_addr + + ((__i64x2)vindex)[i] * (uint64_t)((uint32_t)scale)); + lane[i] = *addr; + } else { + lane[i] = ((__f64x2)src)[i]; + } + } + return (__m128d)wasm_f64x2_make(lane[0], lane[1]); +} + +static __inline__ __m256d + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_mask_i64gather_pd(__m256d src, + const double* base_addr, + __m256i vindex, + __m256d mask, + const int scale) { + __m256d ret; + ret.v0 = _mm_mask_i64gather_pd(src.v0, base_addr, vindex.v0, mask.v0, scale); + ret.v1 = _mm_mask_i64gather_pd(src.v1, base_addr, vindex.v1, mask.v1, scale); + return ret; +} + +static __inline__ __m128 + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_mask_i32gather_ps(__m128 src, + const float* base_addr, + __m128i vindex, + __m128 mask, + const int scale) { + float lane[4]; + for (size_t i = 0; i < 4; i++) { + if ((((__i32x4)mask)[i] >> 31) & 0x1) { + float* addr = + (float*)((uint8_t*)base_addr + + (int64_t)(((__i32x4)vindex)[i]) * (uint64_t)((uint32_t)scale)); + lane[i] = *addr; + } else { + lane[i] = ((__f32x4)src)[i]; + } + } + return (__m128)wasm_f32x4_make(lane[0], lane[1], lane[2], lane[3]); +} + +static __inline__ __m256 + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_mask_i32gather_ps(__m256 src, + const float* base_addr, + __m256i vindex, + __m256 mask, + const int scale) { + __m256 ret; + ret.v0 = _mm_mask_i32gather_ps(src.v0, base_addr, vindex.v0, mask.v0, scale); + ret.v1 = _mm_mask_i32gather_ps(src.v1, base_addr, vindex.v1, mask.v1, scale); + return ret; +} + +static __inline__ __m128 + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_mask_i64gather_ps(__m128 src, + const float* base_addr, + __m128i vindex, + __m128 mask, + const int scale) { + float lane[2]; + for (size_t i = 0; i < 
2; i++) { + if ((((__i32x4)mask)[i] >> 31) & 0x1) { + float* addr = + (float*)((uint8_t*)base_addr + + ((__i64x2)vindex)[i] * (uint64_t)((uint32_t)scale)); + lane[i] = *addr; + } else { + lane[i] = ((__f32x4)src)[i]; + } + } + return (__m128)wasm_f32x4_make(lane[0], lane[1], 0, 0); +} + +static __inline__ __m128 + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_mask_i64gather_ps(__m128 src, + const float* base_addr, + __m256i vindex, + __m128 mask, + const int scale) { + float lane[4]; + __m128i current_vindex; + for (size_t i = 0; i < 4; i++) { + current_vindex = i < 2 ? vindex.v0 : vindex.v1; + if ((((__i32x4)mask)[i] >> 31) & 0x1) { + float* addr = + (float*)((uint8_t*)base_addr + ((__i64x2)current_vindex)[i & 1] * + (uint64_t)((uint32_t)scale)); + lane[i] = *addr; + } else { + lane[i] = ((__f32x4)src)[i]; + } + } + return (__m128)wasm_f32x4_make(lane[0], lane[1], lane[2], lane[3]); +} + +static __inline__ __m128i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_mask_i32gather_epi32(__m128i src, + const int* base_addr, + __m128i vindex, + __m128i mask, + const int scale) { + int32_t lane[4]; + for (size_t i = 0; i < 4; i++) { + if ((((__i32x4)mask)[i] >> 31) & 0x1) { + int32_t* addr = + (int32_t*)((uint8_t*)base_addr + (int64_t)(((__i32x4)vindex)[i]) * + (uint64_t)((uint32_t)scale)); + lane[i] = *addr; + } else { + lane[i] = ((__i32x4)src)[i]; + } + } + return (__m128i)wasm_i32x4_make(lane[0], lane[1], lane[2], lane[3]); +} + +static __inline__ __m256i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_mask_i32gather_epi32(__m256i src, + const int* base_addr, + __m256i vindex, + __m256i mask, + const int scale) { + __m256i ret; + ret.v0 = + _mm_mask_i32gather_epi32(src.v0, base_addr, vindex.v0, mask.v0, scale); + ret.v1 = + _mm_mask_i32gather_epi32(src.v1, base_addr, vindex.v1, mask.v1, scale); + return ret; +} + +static __inline__ __m128i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_mask_i64gather_epi32(__m128i src, + const int* base_addr, + __m128i vindex, + __m128i mask, + const int scale) { + int32_t lane[2]; + for (size_t i = 0; i < 2; i++) { + if ((((__i32x4)mask)[i] >> 31) & 0x1) { + int32_t* addr = + (int32_t*)((uint8_t*)base_addr + + ((__i64x2)vindex)[i] * (uint64_t)((uint32_t)scale)); + lane[i] = *addr; + } else { + lane[i] = ((__i32x4)src)[i]; + } + } + return (__m128i)wasm_i32x4_make(lane[0], lane[1], 0, 0); +} + +static __inline__ __m128i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_mask_i64gather_epi32(__m128i src, + const int* base_addr, + __m256i vindex, + __m128i mask, + const int scale) { + int32_t lane[4]; + __m128i current_vindex; + for (size_t i = 0; i < 4; i++) { + current_vindex = i < 2 ? 
vindex.v0 : vindex.v1; + if ((((__i32x4)mask)[i] >> 31) & 0x1) { + int32_t* addr = + (int32_t*)((uint8_t*)base_addr + ((__i64x2)current_vindex)[i & 1] * + (uint64_t)((uint32_t)scale)); + lane[i] = *addr; + } else { + lane[i] = ((__i32x4)src)[i]; + } + } + return (__m128i)wasm_i32x4_make(lane[0], lane[1], lane[2], lane[3]); +} + +static __inline__ __m128i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_mask_i32gather_epi64(__m128i src, + const long long* base_addr, + __m128i vindex, + __m128i mask, + const int scale) { + int64_t lane[2]; + for (size_t i = 0; i < 2; i++) { + if ((((__i64x2)mask)[i] >> 63) & 0x1) { + int64_t* addr = + (int64_t*)((uint8_t*)base_addr + (int64_t)(((__i32x4)vindex)[i]) * + (uint64_t)((uint32_t)scale)); + lane[i] = *addr; + } else { + lane[i] = ((__i64x2)src)[i]; + } + } + return (__m128i)wasm_i64x2_make(lane[0], lane[1]); +} + +static __inline__ __m256i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_mask_i32gather_epi64(__m256i src, + const long long* base_addr, + __m128i vindex, + __m256i mask, + const int scale) { + __m256i ret; + ret.v0 = _mm_mask_i32gather_epi64(src.v0, base_addr, vindex, mask.v0, scale); + __m128i vindex1 = (__m128i)wasm_i32x4_shuffle(vindex, vindex, 2, 3, 0, 1); + ret.v1 = _mm_mask_i32gather_epi64(src.v1, base_addr, vindex1, mask.v1, scale); + return ret; +} + +static __inline__ __m128i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_mask_i64gather_epi64(__m128i src, + const long long* base_addr, + __m128i vindex, + __m128i mask, + const int scale) { + int64_t lane[2]; + for (size_t i = 0; i < 2; i++) { + if ((((__i64x2)mask)[i] >> 63) & 0x1) { + int64_t* addr = + (int64_t*)((uint8_t*)base_addr + + ((__i64x2)vindex)[i] * (uint64_t)((uint32_t)scale)); + lane[i] = *addr; + } else { + lane[i] = ((__i64x2)src)[i]; + } + } + return (__m128i)wasm_i64x2_make(lane[0], lane[1]); +} + +static __inline__ __m256i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_mask_i64gather_epi64(__m256i src, + const long long* base_addr, + __m256i vindex, + __m256i mask, + const int scale) { + __m256i ret; + ret.v0 = + _mm_mask_i64gather_epi64(src.v0, base_addr, vindex.v0, mask.v0, scale); + ret.v1 = + _mm_mask_i64gather_epi64(src.v1, base_addr, vindex.v1, mask.v1, scale); + return ret; +} + +static __inline__ __m128d + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_i32gather_pd(const double* base_addr, __m128i vindex, const int scale) { + double* lane[2]; + for (size_t i = 0; i < 2; i++) { + lane[i] = (double*)((uint8_t*)base_addr + (int64_t)(((__i32x4)vindex)[i]) * + (uint64_t)((uint32_t)scale)); + } + return (__m128d)wasm_f64x2_make(*lane[0], *lane[1]); +} + +static __inline__ __m256d + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_i32gather_pd(const double* base_addr, + __m128i vindex, + const int scale) { + __m256d ret; + double* lane[4]; + for (size_t i = 0; i < 4; i++) { + lane[i] = (double*)((uint8_t*)base_addr + (int64_t)(((__i32x4)vindex)[i]) * + (uint64_t)((uint32_t)scale)); + } + ret.v0 = (__m128d)wasm_f64x2_make(*lane[0], *lane[1]); + ret.v1 = (__m128d)wasm_f64x2_make(*lane[2], *lane[3]); + return ret; +} + +static __inline__ __m128d + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_i64gather_pd(const double* base_addr, __m128i vindex, const int scale) { + double* lane[2]; + for (size_t i = 0; i < 2; i++) { + lane[i] = (double*)((uint8_t*)base_addr + + ((__i64x2)vindex)[i] * 
(uint64_t)((uint32_t)scale)); + } + return (__m128d)wasm_f64x2_make(*lane[0], *lane[1]); +} + +static __inline__ __m256d + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_i64gather_pd(const double* base_addr, + __m256i vindex, + const int scale) { + __m256d ret; + ret.v0 = _mm_i64gather_pd(base_addr, vindex.v0, scale); + ret.v1 = _mm_i64gather_pd(base_addr, vindex.v1, scale); + return ret; +} + +static __inline__ __m128 + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_i32gather_ps(const float* base_addr, __m128i vindex, const int scale) { + float* lane[4]; + for (size_t i = 0; i < 4; i++) { + lane[i] = (float*)((uint8_t*)base_addr + (int64_t)(((__i32x4)vindex)[i]) * + (uint64_t)((uint32_t)scale)); + } + return (__m128)wasm_f32x4_make(*lane[0], *lane[1], *lane[2], *lane[3]); +} + +static __inline__ __m256 + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_i32gather_ps(const float* base_addr, __m256i vindex, const int scale) { + __m256 ret; + ret.v0 = _mm_i32gather_ps(base_addr, vindex.v0, scale); + ret.v1 = _mm_i32gather_ps(base_addr, vindex.v1, scale); + return ret; +} + +static __inline__ __m128 + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_i64gather_ps(const float* base_addr, __m128i vindex, const int scale) { + float* lane[2]; + for (size_t i = 0; i < 2; i++) { + lane[i] = (float*)((uint8_t*)base_addr + + ((__i64x2)vindex)[i] * (uint64_t)((uint32_t)scale)); + } + return (__m128)wasm_f32x4_make(*lane[0], *lane[1], 0, 0); +} + +static __inline__ __m128 + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_i64gather_ps(const float* base_addr, __m256i vindex, const int scale) { + float* lane[4]; + __m128i current_vindex; + for (size_t i = 0; i < 4; i++) { + current_vindex = i < 2 ? vindex.v0 : vindex.v1; + lane[i] = (float*)((uint8_t*)base_addr + ((__i64x2)current_vindex)[i & 1] * + (uint64_t)((uint32_t)scale)); + } + return (__m128)wasm_f32x4_make(*lane[0], *lane[1], *lane[2], *lane[3]); +} + +static __inline__ __m128i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_i32gather_epi32(const int* base_addr, __m128i vindex, const int scale) { + int32_t* lane[4]; + for (size_t i = 0; i < 4; i++) { + lane[i] = (int32_t*)((uint8_t*)base_addr + (int64_t)(((__i32x4)vindex)[i]) * + (uint64_t)((uint32_t)scale)); + } + return (__m128i)wasm_i32x4_make(*lane[0], *lane[1], *lane[2], *lane[3]); +} + +static __inline__ __m256i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_i32gather_epi32(const int* base_addr, + __m256i vindex, + const int scale) { + __m256i ret; + ret.v0 = _mm_i32gather_epi32(base_addr, vindex.v0, scale); + ret.v1 = _mm_i32gather_epi32(base_addr, vindex.v1, scale); + return ret; +} + +static __inline__ __m128i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_i64gather_epi32(const int* base_addr, __m128i vindex, const int scale) { + int32_t* lane[2]; + for (size_t i = 0; i < 2; i++) { + lane[i] = (int32_t*)((uint8_t*)base_addr + + ((__i64x2)vindex)[i] * (uint64_t)((uint32_t)scale)); + } + return (__m128i)wasm_i32x4_make(*lane[0], *lane[1], 0, 0); +} + +static __inline__ __m128i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_i64gather_epi32(const int* base_addr, + __m256i vindex, + const int scale) { + int32_t* lane[4]; + __m128i current_vindex; + for (size_t i = 0; i < 4; i++) { + current_vindex = i < 2 ? 
vindex.v0 : vindex.v1; + lane[i] = + (int32_t*)((uint8_t*)base_addr + ((__i64x2)current_vindex)[i & 1] * + (uint64_t)((uint32_t)scale)); + } + return (__m128i)wasm_i32x4_make(*lane[0], *lane[1], *lane[2], *lane[3]); +} + +static __inline__ __m128i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_i32gather_epi64(const long long* base_addr, + __m128i vindex, + const int scale) { + int64_t* lane[2]; + for (size_t i = 0; i < 2; i++) { + lane[i] = (int64_t*)((uint8_t*)base_addr + (int64_t)(((__i32x4)vindex)[i]) * + (uint64_t)((uint32_t)scale)); + } + return (__m128i)wasm_i64x2_make(*lane[0], *lane[1]); +} + +static __inline__ __m256i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_i32gather_epi64(const long long* base_addr, + __m128i vindex, + const int scale) { + + __m256i ret; + int64_t* lane[4]; + for (size_t i = 0; i < 4; i++) { + lane[i] = (int64_t*)((uint8_t*)base_addr + (int64_t)(((__i32x4)vindex)[i]) * + (uint64_t)((uint32_t)scale)); + } + ret.v0 = (__m128i)wasm_i64x2_make(*lane[0], *lane[1]); + ret.v1 = (__m128i)wasm_i64x2_make(*lane[2], *lane[3]); + return ret; +} + +static __inline__ __m128i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_i64gather_epi64(const long long* base_addr, + __m128i vindex, + const int scale) { + int64_t* lane[2]; + for (size_t i = 0; i < 2; i++) { + lane[i] = (int64_t*)((uint8_t*)base_addr + + ((__i64x2)vindex)[i] * (uint64_t)((uint32_t)scale)); + } + return (__m128i)wasm_i64x2_make(*lane[0], *lane[1]); +} + +static __inline__ __m256i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_i64gather_epi64(const long long* base_addr, + __m256i vindex, + const int scale) { + __m256i ret; + ret.v0 = _mm_i64gather_epi64(base_addr, vindex.v0, scale); + ret.v1 = _mm_i64gather_epi64(base_addr, vindex.v1, scale); + return ret; +} + +#endif /* __emscripten_avx2intrin_h__ */
diff --git a/system/include/compat/avxintrin.h b/system/include/compat/avxintrin.h index 45171ebbe5e6e..1dace2dfbdc84 100644 --- a/system/include/compat/avxintrin.h +++ b/system/include/compat/avxintrin.h @@ -4,6 +4,11 @@ * University of Illinois/NCSA Open Source License. Both these licenses can be * found in the LICENSE file. */ + +#ifndef __emscripten_immintrin_h__ +#error "Never use <avxintrin.h> directly; include <immintrin.h> instead."
+#endif + #ifndef __emscripten_avxintrin_h__ #define __emscripten_avxintrin_h__ @@ -11,13 +16,6 @@ #error "AVX instruction set not enabled" #endif -#include <xmmintrin.h> -#include <emmintrin.h> -#include <pmmintrin.h> -#include <tmmintrin.h> -#include <smmintrin.h> -#include <nmmintrin.h> - typedef struct { __m128d v0; __m128d v1;
diff --git a/system/include/compat/immintrin.h b/system/include/compat/immintrin.h index e78b8d1e846ca..c0ef3e73e528a 100644 --- a/system/include/compat/immintrin.h +++ b/system/include/compat/immintrin.h @@ -7,32 +7,36 @@ #ifndef __emscripten_immintrin_h__ #define __emscripten_immintrin_h__ -#ifdef __AVX__ -#include <avxintrin.h> +#ifdef __SSE__ +#include <xmmintrin.h> #endif -#ifdef __SSE4_2__ -#include <nmmintrin.h> +#ifdef __SSE2__ +#include <emmintrin.h> #endif -#ifdef __SSE4_1__ -#include <smmintrin.h> +#ifdef __SSE3__ +#include <pmmintrin.h> #endif #ifdef __SSSE3__ #include <tmmintrin.h> #endif -#ifdef __SSE3__ -#include <pmmintrin.h> +#ifdef __SSE4_1__ +#include <smmintrin.h> #endif -#ifdef __SSE2__ -#include <emmintrin.h> +#ifdef __SSE4_2__ +#include <nmmintrin.h> #endif -#ifdef __SSE__ -#include <xmmintrin.h> +#ifdef __AVX__ +#include <avxintrin.h> +#endif + +#ifdef __AVX2__ +#include <avx2intrin.h> #endif #endif /* __emscripten_immintrin_h__ */
diff --git a/test/code_size/hello_wasm_worker_wasm.js b/test/code_size/hello_wasm_worker_wasm.js index 4e7446cc3dfa6..1697ea1218036 100644 --- a/test/code_size/hello_wasm_worker_wasm.js +++ b/test/code_size/hello_wasm_worker_wasm.js @@ -1,4 +1,4 @@ -var b = Module, c = b.$ww, e = b.mem || new WebAssembly.Memory({ +var b = Module, c = !!b.$ww, e = b.mem || new WebAssembly.Memory({ initial: 256, maximum: 256, shared: !0
diff --git a/test/code_size/hello_wasm_worker_wasm.json b/test/code_size/hello_wasm_worker_wasm.json index 70feb27fdb040..cbbfa72fecbbb 100644 --- a/test/code_size/hello_wasm_worker_wasm.json +++ b/test/code_size/hello_wasm_worker_wasm.json @@ -1,12 +1,12 @@ { "a.html": 618, "a.html.gz": 384, - "a.js": 665, - "a.js.gz": 455, + "a.js": 667, + "a.js.gz": 457, "a.ww.js": 115, "a.ww.js.gz": 127, "a.wasm": 1881, "a.wasm.gz": 1068, - "total": 3279, - "total_gz": 2034 + "total": 3281, + "total_gz": 2036 }
diff --git a/test/other/codesize/test_codesize_minimal_pthreads.gzsize b/test/other/codesize/test_codesize_minimal_pthreads.gzsize index b52fc8a6e1cae..5255d8eda08aa 100644 --- a/test/other/codesize/test_codesize_minimal_pthreads.gzsize +++ b/test/other/codesize/test_codesize_minimal_pthreads.gzsize @@ -1 +1 @@ -4186 +4184
diff --git a/test/other/codesize/test_codesize_minimal_pthreads.jssize b/test/other/codesize/test_codesize_minimal_pthreads.jssize index 7a53a34d4eeef..546493ea4cf42 100644 --- a/test/other/codesize/test_codesize_minimal_pthreads.jssize +++ b/test/other/codesize/test_codesize_minimal_pthreads.jssize @@ -1 +1 @@ -8728 +8725
diff --git a/test/other/test_unoptimized_code_size.js.size b/test/other/test_unoptimized_code_size.js.size index a9abb660fbe9c..3922a28d5c755 100644 --- a/test/other/test_unoptimized_code_size.js.size +++ b/test/other/test_unoptimized_code_size.js.size @@ -1 +1 @@ -52739 +52732
diff --git a/test/other/test_unoptimized_code_size_no_asserts.js.size b/test/other/test_unoptimized_code_size_no_asserts.js.size index 918c9c91947b0..77da3da6997d3 100644 --- a/test/other/test_unoptimized_code_size_no_asserts.js.size +++ b/test/other/test_unoptimized_code_size_no_asserts.js.size @@ -1 +1 @@ -28487 +28480
diff --git a/test/other/test_unoptimized_code_size_strict.js.size b/test/other/test_unoptimized_code_size_strict.js.size index 8a03cbf8f27f0..1f670c115679b 100644 --- a/test/other/test_unoptimized_code_size_strict.js.size +++ b/test/other/test_unoptimized_code_size_strict.js.size @@ -1 +1 @@ -51522 +51515
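With the reordering above, <immintrin.h> is the single supported entry point for the x86 intrinsics tower: each sub-header is pulled in only when its feature macro is defined, and including avxintrin.h or avx2intrin.h directly now triggers an #error. A minimal sketch of how user code picks this up (a hypothetical example, not part of the patch; it assumes compilation with `emcc -msimd128 -mavx2` so that __AVX2__ is defined):

  #include <immintrin.h>
  #include <stdio.h>

  int main(void) {
  #ifdef __AVX2__
    /* A 256-bit add; on Wasm each __m256i is emulated as two 128-bit halves. */
    __m256i sum = _mm256_add_epi32(_mm256_set1_epi32(2), _mm256_set1_epi32(3));
    int out[8];
    _mm256_storeu_si256((__m256i*)out, sum);

    /* A gather; the emulation performs per-lane scalar loads, with `scale`
       acting as a byte multiplier on each index. */
    int table[8] = {10, 11, 12, 13, 14, 15, 16, 17};
    __m128i idx = _mm_set_epi32(6, 4, 2, 0); /* lanes 0..3 hold 0, 2, 4, 6 */
    __m128i g = _mm_i32gather_epi32(table, idx, 4);
    int gout[4];
    _mm_storeu_si128((__m128i*)gout, g);

    printf("sum lane 0 = %d; gathered %d %d %d %d\n",
           out[0], gout[0], gout[1], gout[2], gout[3]);
  #endif
    return 0;
  }

With these inputs the sketch would print sum lane 0 = 5 and the gathered values 10 12 14 16.

diff --git a/test/sse/test_avx2.cpp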
b/test/sse/test_avx2.cpp new file mode 100644 index 0000000000000..c84161e31270d --- /dev/null +++ b/test/sse/test_avx2.cpp @@ -0,0 +1,354 @@ +/* + * Copyright 2024 The Emscripten Authors. All rights reserved. + * Emscripten is available under two separate licenses, the MIT license and the + * University of Illinois/NCSA Open Source License. Both these licenses can be + * found in the LICENSE file. + */ +// This file uses AVX2 by calling different functions with different interesting +// inputs and prints the results. Use a diff tool to compare the results between +// platforms. + +// immintrin.h must be included before test_sse.h +// clang-format off +#include <immintrin.h> +#include "test_sse.h" +// clang-format on + +bool testNaNBits = true; + +float* interesting_floats = get_interesting_floats(); +int numInterestingFloats = + sizeof(interesting_floats_) / sizeof(interesting_floats_[0]); +uint32_t* interesting_ints = get_interesting_ints(); +int numInterestingInts = + sizeof(interesting_ints_) / sizeof(interesting_ints_[0]); +double* interesting_doubles = get_interesting_doubles(); +int numInterestingDoubles = + sizeof(interesting_doubles_) / sizeof(interesting_doubles_[0]); + +void test_arithmetic(void) { + Ret_M256i_M256i(__m256i, _mm256_add_epi8); + Ret_M256i_M256i(__m256i, _mm256_add_epi16); + Ret_M256i_M256i(__m256i, _mm256_add_epi32); + Ret_M256i_M256i(__m256i, _mm256_add_epi64); + + Ret_M256i_M256i(__m256i, _mm256_adds_epi8); + Ret_M256i_M256i(__m256i, _mm256_adds_epi16); + Ret_M256i_M256i(__m256i, _mm256_adds_epu8); + Ret_M256i_M256i(__m256i, _mm256_adds_epu16); + + Ret_M256i_M256i(__m256i, _mm256_hadd_epi16); + Ret_M256i_M256i(__m256i, _mm256_hadd_epi32); + Ret_M256i_M256i(__m256i, _mm256_hadds_epi16); + + Ret_M256i_M256i(__m256i, _mm256_hsub_epi16); + Ret_M256i_M256i(__m256i, _mm256_hsub_epi32); + Ret_M256i_M256i(__m256i, _mm256_hsubs_epi16); + + Ret_M256i_M256i(__m256i, _mm256_maddubs_epi16); + Ret_M256i_M256i(__m256i, _mm256_madd_epi16); + + Ret_M256i_M256i(__m256i, _mm256_mul_epi32); + Ret_M256i_M256i(__m256i, _mm256_mulhrs_epi16); + Ret_M256i_M256i(__m256i, _mm256_mulhi_epu16); + Ret_M256i_M256i(__m256i, _mm256_mulhi_epi16); + Ret_M256i_M256i(__m256i, _mm256_mullo_epi16); + Ret_M256i_M256i(__m256i, _mm256_mullo_epi32); + Ret_M256i_M256i(__m256i, _mm256_mul_epu32); + + Ret_M256i_M256i(__m256i, _mm256_sad_epu8); + + Ret_M256i_M256i(__m256i, _mm256_sign_epi8); + Ret_M256i_M256i(__m256i, _mm256_sign_epi16); + Ret_M256i_M256i(__m256i, _mm256_sign_epi32); + + Ret_M256i_M256i(__m256i, _mm256_sub_epi8); + Ret_M256i_M256i(__m256i, _mm256_sub_epi16); + Ret_M256i_M256i(__m256i, _mm256_sub_epi32); + Ret_M256i_M256i(__m256i, _mm256_sub_epi64); + + Ret_M256i_M256i(__m256i, _mm256_subs_epi8); + Ret_M256i_M256i(__m256i, _mm256_subs_epi16); + Ret_M256i_M256i(__m256i, _mm256_subs_epu8); + Ret_M256i_M256i(__m256i, _mm256_subs_epu16); +} +
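Each Ret_* invocation here is a printf-driven exhaustive loop over the shared tables of interesting inputs. As a simplified sketch of the idea (modeled on the AVX2 helper macros added to test_sse.h further down, not the literal expansion), Ret_M256i_M256i(__m256i, _mm256_add_epi8) behaves roughly like:

  for (int i = 0; i < numInterestingInts / 4; ++i)
    for (int j = 0; j < numInterestingInts / 4; ++j) {
      /* Build two 256-bit operands by mirroring 128-bit interesting values. */
      __m128i tmp = (__m128i)E1_Int(interesting_ints, i * 4, numInterestingInts);
      __m256i m1 = _mm256_set_m128i(tmp, tmp);
      tmp = (__m128i)E2_Int(interesting_ints, j * 4, numInterestingInts);
      __m256i m2 = _mm256_set_m128i(tmp, tmp);
      __m256i ret = _mm256_add_epi8(m1, m2); /* the intrinsic under test */
      char str[256], str2[256], str3[256];
      tostr(&m1, str);
      tostr(&m2, str2);
      tostr(&ret, str3);
      /* One output line per input pair, diffed against a native x86 run. */
      printf("%s(%s, %s) = %s\n", "_mm256_add_epi8", str, str2, str3);
    }

+void test_special_math(void) { + Ret_M256i(__m256i, _mm256_abs_epi8); + Ret_M256i(__m256i, _mm256_abs_epi16); + Ret_M256i(__m256i, _mm256_abs_epi32); + + Ret_M256i_M256i(__m256i, _mm256_max_epi8); + Ret_M256i_M256i(__m256i, _mm256_max_epi16); + Ret_M256i_M256i(__m256i, _mm256_max_epi32); + + Ret_M256i_M256i(__m256i, _mm256_max_epu8); + Ret_M256i_M256i(__m256i, _mm256_max_epu16); + Ret_M256i_M256i(__m256i, _mm256_max_epu32); + + Ret_M256i_M256i(__m256i, _mm256_min_epi8); + Ret_M256i_M256i(__m256i, _mm256_min_epi16); + Ret_M256i_M256i(__m256i, _mm256_min_epi32); + + Ret_M256i_M256i(__m256i, _mm256_min_epu8); + Ret_M256i_M256i(__m256i, _mm256_min_epu16); +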
Ret_M256i_M256i(__m256i, _mm256_min_epu32); +} + +void test_logical(void) { + Ret_M256i_M256i(__m256i, _mm256_and_si256); + Ret_M256i_M256i(__m256i, _mm256_andnot_si256); + Ret_M256i_M256i(__m256i, _mm256_or_si256); + Ret_M256i_M256i(__m256i, _mm256_xor_si256); +} + +// Split test_swizzle to reduce memory consumption +void test_swizzle1(void) { + Ret_M256i_M256i_M256i(__m256i, _mm256_blendv_epi8); + Ret_M256i_M256i_Tint(__m256i, _mm256_blend_epi16); + + Ret_M256i_M256i(__m256i, _mm256_shuffle_epi8); + Ret_M256i_Tint(__m256i, _mm256_shuffle_epi32); + Ret_M256i_Tint(__m256i, _mm256_shufflehi_epi16); + Ret_M256i_Tint(__m256i, _mm256_shufflelo_epi16); +} + +void test_swizzle2(void) { + Ret_M256i_M256i(__m256i, _mm256_unpackhi_epi8); + Ret_M256i_M256i(__m256i, _mm256_unpackhi_epi16); + Ret_M256i_M256i(__m256i, _mm256_unpackhi_epi32); + Ret_M256i_M256i(__m256i, _mm256_unpackhi_epi64); + + Ret_M256i_M256i(__m256i, _mm256_unpacklo_epi8); + Ret_M256i_M256i(__m256i, _mm256_unpacklo_epi16); + Ret_M256i_M256i(__m256i, _mm256_unpacklo_epi32); + Ret_M256i_M256i(__m256i, _mm256_unpacklo_epi64); + + Ret_M128(__m128, _mm_broadcastss_ps); + Ret_M128d(__m128d, _mm_broadcastsd_pd); + Ret_M128(__m256, _mm256_broadcastss_ps); + Ret_M128d(__m256d, _mm256_broadcastsd_pd); + Ret_M128i(__m256i, _mm256_broadcastsi128_si256); + Ret_M128i(__m256i, _mm_broadcastsi128_si256); + + Ret_M128i_M128i_Tint(__m128i, _mm_blend_epi32); + Ret_M256i_M256i_Tint(__m256i, _mm256_blend_epi32); + + Ret_M128i(__m128i, _mm_broadcastb_epi8); + Ret_M128i(__m128i, _mm_broadcastw_epi16); + Ret_M128i(__m128i, _mm_broadcastd_epi32); + Ret_M128i(__m128i, _mm_broadcastq_epi64); + Ret_M128i(__m256i, _mm256_broadcastb_epi8); + Ret_M128i(__m256i, _mm256_broadcastw_epi16); + Ret_M128i(__m256i, _mm256_broadcastd_epi32); + Ret_M128i(__m256i, _mm256_broadcastq_epi64); + + Ret_M256i_M256i(__m256i, _mm256_permutevar8x32_epi32); + Ret_M256_M256i(__m256, _mm256_permutevar8x32_ps); + Ret_M256i_Tint(__m256i, _mm256_permute4x64_epi64); +} + +void test_swizzle3(void) { + Ret_M256d_Tint(__m256d, _mm256_permute4x64_pd); + Ret_M256i_M256i_Tint(__m256i, _mm256_permute2x128_si256); + + Ret_M256i_Tint(__m128i, _mm256_extracti128_si256); + Ret_M256i_M128i_Tint(__m256i, _mm256_inserti128_si256); +} + +void test_swizzle(void) { + test_swizzle1(); + test_swizzle2(); + test_swizzle3(); +} + +void test_convert(void) { + Ret_M128i(__m256i, _mm256_cvtepi8_epi16); + Ret_M128i(__m256i, _mm256_cvtepi8_epi32); + Ret_M128i(__m256i, _mm256_cvtepi8_epi64); + Ret_M128i(__m256i, _mm256_cvtepi16_epi32); + Ret_M128i(__m256i, _mm256_cvtepi16_epi64); + Ret_M128i(__m256i, _mm256_cvtepi32_epi64); + + Ret_M128i(__m256i, _mm256_cvtepu8_epi16); + Ret_M128i(__m256i, _mm256_cvtepu8_epi32); + Ret_M128i(__m256i, _mm256_cvtepu8_epi64); + Ret_M128i(__m256i, _mm256_cvtepu16_epi32); + Ret_M128i(__m256i, _mm256_cvtepu16_epi64); + Ret_M128i(__m256i, _mm256_cvtepu32_epi64); +} + +void test_compare(void) { + Ret_M256i_M256i(__m256i, _mm256_cmpeq_epi8); + Ret_M256i_M256i(__m256i, _mm256_cmpeq_epi16); + Ret_M256i_M256i(__m256i, _mm256_cmpeq_epi32); + Ret_M256i_M256i(__m256i, _mm256_cmpeq_epi64); + + Ret_M256i_M256i(__m256i, _mm256_cmpgt_epi8); + Ret_M256i_M256i(__m256i, _mm256_cmpgt_epi16); + Ret_M256i_M256i(__m256i, _mm256_cmpgt_epi32); + Ret_M256i_M256i(__m256i, _mm256_cmpgt_epi64); +} + +void test_misc(void) { + Ret_M256i_M256i_Tint(__m256i, _mm256_mpsadbw_epu8); + + Ret_M256i_M256i(__m256i, _mm256_packs_epi16); + Ret_M256i_M256i(__m256i, _mm256_packs_epi32); + Ret_M256i_M256i(__m256i, 
_mm256_packus_epi16); + Ret_M256i_M256i(__m256i, _mm256_packus_epi32); + + Ret_M256i_M256i_Tint(__m256i, _mm256_alignr_epi8); + + Ret_M256i(int, _mm256_movemask_epi8); +} + +// Split test_load to reduce memory consumption +void test_load1(void) { + Ret_IntPtr(__m256i, _mm256_stream_load_si256, __m256i*, 8, 8); + + Ret_IntPtr_M128i(__m128i, _mm_maskload_epi32, int32_t*, 4, 4); + Ret_IntPtr_M256i(__m256i, _mm256_maskload_epi32, int32_t*, 8, 8); + Ret_IntPtr_M128i(__m128i, _mm_maskload_epi64, long long const*, 4, 4); + Ret_IntPtr_M256i(__m256i, _mm256_maskload_epi64, long long const*, 8, 8); + + Ret_M128d_DoublePtr_I32x4_M128d_Tint_body(__m128d, _mm_mask_i32gather_pd, 8); + Ret_M256d_DoublePtr_I32x4_M256d_Tint_body( + __m256d, _mm256_mask_i32gather_pd, 8); + Ret_M128d_DoublePtr_I64x2_M128d_Tint_body(__m128d, _mm_mask_i64gather_pd, 8); + Ret_M256d_DoublePtr_I64x4_M256d_Tint_body( + __m256d, _mm256_mask_i64gather_pd, 8); + + Ret_M128_FloatPtr_I32x4_M128_Tint_body(__m128, _mm_mask_i32gather_ps, 4); + Ret_M256_FloatPtr_I32x8_M256_Tint_body(__m256, _mm256_mask_i32gather_ps, 4); + Ret_M128_FloatPtr_I64x2_M128_Tint_body(__m128, _mm_mask_i64gather_ps, 4); + Ret_M128_FloatPtr_I64x4_M128_Tint_body(__m128, _mm256_mask_i64gather_ps, 4); + + Ret_M128i_Int32Ptr_I32x4_M128i_Tint_body( + __m128i, _mm_mask_i32gather_epi32, 4); + Ret_M256i_Int32Ptr_I32x8_M256i_Tint_body( + __m256i, _mm256_mask_i32gather_epi32, 4); + Ret_M128i_Int32Ptr_I64x2_M128i_Tint_body( + __m128i, _mm_mask_i64gather_epi32, 4); + Ret_M128i_Int32Ptr_I64x4_M128i_Tint_body( + __m128i, _mm256_mask_i64gather_epi32, 4); + + Ret_M128i_Int64Ptr_I32x4_M128i_Tint_body( + __m128i, _mm_mask_i32gather_epi64, 8); + Ret_M256i_Int64Ptr_I32x4_M256i_Tint_body( + __m256i, _mm256_mask_i32gather_epi64, 8); + Ret_M128i_Int64Ptr_I64x2_M128i_Tint_body( + __m128i, _mm_mask_i64gather_epi64, 8); + Ret_M256i_Int64Ptr_I64x4_M256i_Tint_body( + __m256i, _mm256_mask_i64gather_epi64, 8); +} + +void test_load2(void) { + Ret_DoublePtr_I32x4_Tint_body(__m128d, _mm_i32gather_pd, 8); + Ret_DoublePtr_I32x4_Tint_body(__m256d, _mm256_i32gather_pd, 8); + Ret_DoublePtr_I64x2_Tint_body(__m128d, _mm_i64gather_pd, 8); + Ret_DoublePtr_I64x4_Tint_body(__m256d, _mm256_i64gather_pd, 8); + + Ret_FloatPtr_I32x4_Tint_body(__m128, _mm_i32gather_ps, 4); + Ret_FloatPtr_I32x8_Tint_body(__m256, _mm256_i32gather_ps, 4); + Ret_FloatPtr_I64x2_Tint_body(__m128, _mm_i64gather_ps, 4); + Ret_FloatPtr_I64x4_Tint_body(__m128, _mm256_i64gather_ps, 4); + + Ret_IntPtr_I32x4_Tint_body(__m128i, _mm_i32gather_epi32, 4); + Ret_IntPtr_I32x8_Tint_body(__m256i, _mm256_i32gather_epi32, 4); + Ret_IntPtr_I64x2_Tint_body(__m128i, _mm_i64gather_epi32, 4); + Ret_IntPtr_I64x4_Tint_body(__m128i, _mm256_i64gather_epi32, 4); + + Ret_Int64Ptr_I32x4_Tint_body(__m128i, _mm_i32gather_epi64, 8); + Ret_Int64Ptr_I32x4_Tint_body(__m256i, _mm256_i32gather_epi64, 8); + Ret_Int64Ptr_I64x2_Tint_body(__m128i, _mm_i64gather_epi64, 8); + Ret_Int64Ptr_I64x4_Tint_body(__m256i, _mm256_i64gather_epi64, 8); +} + +void test_load(void) { + test_load1(); + test_load2(); +} + +void test_store(void) { + void_OutIntPtr_M128i_M128i(_mm_maskstore_epi32, int*, 16, 4); + void_OutIntPtr_M256i_M256i(_mm256_maskstore_epi32, int*, 32, 4); + void_OutIntPtr_M128i_M128i(_mm_maskstore_epi64, long long*, 16, 8); + void_OutIntPtr_M256i_M256i(_mm256_maskstore_epi64, long long*, 16, 8); +} + +void test_statisticsa(void) { + Ret_M256i_M256i(__m256i, _mm256_avg_epu16); + Ret_M256i_M256i(__m256i, _mm256_avg_epu8); +} + +// Split test_shift to reduce memory consumption 
+void test_shift1(void) { + Ret_M256i_Tint(__m256i, _mm256_slli_si256); + Ret_M256i_Tint(__m256i, _mm256_bslli_epi128); + + Ret_M256i_Tint(__m256i, _mm256_slli_epi16); + Ret_M256i_Tint(__m256i, _mm256_slli_epi32); +} + +void test_shift2(void) { + Ret_M256i_Tint(__m256i, _mm256_slli_epi64); + Ret_M256i_M128i(__m256i, _mm256_sll_epi16); + Ret_M256i_M128i(__m256i, _mm256_sll_epi32); + Ret_M256i_M128i(__m256i, _mm256_sll_epi64); + + Ret_M256i_Tint(__m256i, _mm256_srai_epi16); + Ret_M256i_Tint(__m256i, _mm256_srai_epi32); + Ret_M256i_M128i(__m256i, _mm256_sra_epi16); + Ret_M256i_M128i(__m256i, _mm256_sra_epi32); + + Ret_M256i_Tint(__m256i, _mm256_srli_si256); +} + +void test_shift3(void) { + Ret_M256i_Tint(__m256i, _mm256_bsrli_epi128); + + Ret_M256i_Tint(__m256i, _mm256_srli_epi16); + Ret_M256i_Tint(__m256i, _mm256_srli_epi32); + Ret_M256i_Tint(__m256i, _mm256_srli_epi64); + Ret_M256i_M128i(__m256i, _mm256_srl_epi16); + Ret_M256i_M128i(__m256i, _mm256_srl_epi32); + Ret_M256i_M128i(__m256i, _mm256_srl_epi64); + + Ret_M128i_M128i(__m128i, _mm_sllv_epi32); + Ret_M256i_M256i(__m256i, _mm256_sllv_epi32); + Ret_M128i_M128i(__m128i, _mm_sllv_epi64); + Ret_M256i_M256i(__m256i, _mm256_sllv_epi64); + + Ret_M128i_M128i(__m128i, _mm_srav_epi32); + Ret_M256i_M256i(__m256i, _mm256_srav_epi32); + + Ret_M128i_M128i(__m128i, _mm_srlv_epi32); + Ret_M256i_M256i(__m256i, _mm256_srlv_epi32); + Ret_M128i_M128i(__m128i, _mm_srlv_epi64); + Ret_M256i_M256i(__m256i, _mm256_srlv_epi64); +} + +void test_shift(void) { + test_shift1(); + test_shift2(); + test_shift3(); +} + +int main() { + assert(numInterestingFloats % 8 == 0); + assert(numInterestingInts % 8 == 0); + assert(numInterestingDoubles % 4 == 0); + + test_arithmetic(); + test_special_math(); + test_logical(); + test_swizzle(); + test_convert(); + test_compare(); + test_misc(); + test_load(); + test_store(); + test_statisticsa(); + test_shift(); +} diff --git a/test/sse/test_sse.h b/test/sse/test_sse.h index f1660862a4fd1..e8a1ff608d199 100644 --- a/test/sse/test_sse.h +++ b/test/sse/test_sse.h @@ -967,16 +967,20 @@ double *getTempOutDoubleStore(int alignmentBytes) { return (double*)getTempOutFl printf("%s(%s) = %s\n", #func, str, str2); \ } -#define Ret_FloatPtr_M128i(Ret_type, func, numElemsAccessed, inc) \ - for(int i = 0; i+numElemsAccessed <= numInterestingFloats; i += inc) \ - for(int j = 0; j < numInterestingInts / 4; ++j) \ - { \ - float *ptr = interesting_floats + i; \ - __m128i m1 = (__m128i)E1_Int(interesting_ints, j*4, numInterestingInts); \ - Ret_type ret = func(ptr, m1); \ - char str[256]; tostr(ptr, numElemsAccessed, str); \ - char str2[256]; tostr(&ret, str2); \ - printf("%s(%s) = %s\n", #func, str, str2); \ +#define Ret_FloatPtr_M128i(Ret_type, func, numElemsAccessed, inc) \ + for (int i = 0; i + numElemsAccessed <= numInterestingFloats; i += inc) \ + for (int j = 0; j < numInterestingInts / 4; ++j) { \ + float* ptr = interesting_floats + i; \ + __m128i m1 = \ + (__m128i)E1_Int(interesting_ints, j * 4, numInterestingInts); \ + Ret_type ret = func(ptr, m1); \ + char str[256]; \ + tostr(ptr, numElemsAccessed, str); \ + char str2[256]; \ + tostr(&m1, str2); \ + char str3[256]; \ + tostr(&ret, str3); \ + printf("%s(%s, %s) = %s\n", #func, str, str2, str3); \ } #define Ret_Float4(Ret_type, func, inc) \ @@ -1559,8 +1563,10 @@ void tostr_approx(__m256* m, char* outstr, bool approximate) { char str[256]; \ tostr(ptr, numElemsAccessed, str); \ char str2[256]; \ - tostr(&ret, str2); \ - printf("%s(%s) = %s\n", #func, str, str2); \ + tostr(&m1, str2); \ + 
char str3[256]; \ + tostr(&ret, str3); \ + printf("%s(%s, %s) = %s\n", #func, str, str2, str3); \ } #define Ret_FloatPtr_M256i(Ret_type, func, numElemsAccessed, inc) \ @@ -2157,3 +2163,867 @@ void tostr_approx(__m256* m, char* outstr, bool approximate) { } #endif + +#ifdef __AVX2__ + +#define Ret_M256i_M256i_M256i(Ret_type, func) \ + for (int i = 0; i < numInterestingInts / 4; ++i) \ + for (int k = 0; k < 4; ++k) \ + for (int j = 0; j < numInterestingInts / 4; ++j) \ + for (int l = 0; l < numInterestingInts / 4; ++l) { \ + __m128i tmp = \ + (__m128i)E1_Int(interesting_ints, i * 4 + k, numInterestingInts); \ + __m256i m1 = _mm256_set_m128i(tmp, tmp); \ + tmp = (__m128i)E2_Int(interesting_ints, j * 4, numInterestingInts); \ + __m256i m2 = _mm256_set_m128i(tmp, tmp); \ + tmp = (__m128i)E1_Int(interesting_ints, l * 4, numInterestingInts); \ + __m256i m3 = _mm256_set_m128i(tmp, tmp); \ + Ret_type ret = func(m1, m2, m3); \ + char str[256]; \ + tostr(&m1, str); \ + char str2[256]; \ + tostr(&m2, str2); \ + char str3[256]; \ + tostr(&m3, str3); \ + char str4[256]; \ + tostr(&ret, str4); \ + printf("%s(%s, %s, %s) = %s\n", #func, str, str2, str3, str4); \ + } + +#define Ret_M256i_M128i(Ret_type, func) \ + for (int i = 0; i < numInterestingInts / 4; ++i) \ + for (int k = 0; k < 4; ++k) \ + for (int j = 0; j < numInterestingInts / 4; ++j) { \ + __m128i tmp = \ + (__m128i)E1_Int(interesting_ints, i * 4 + k, numInterestingInts); \ + __m256i m1 = _mm256_set_m128i(tmp, tmp); \ + __m128i m2 = \ + (__m128i)E2_Int(interesting_ints, j * 4, numInterestingInts); \ + Ret_type ret = func(m1, m2); \ + char str[256]; \ + tostr(&m1, str); \ + char str2[256]; \ + tostr(&m2, str2); \ + char str3[256]; \ + tostr(&ret, str3); \ + printf("%s(%s, %s) = %s\n", #func, str, str2, str3); \ + } + +#define Ret_IntPtr_M128i(Ret_type, func, Ptr_type, numElemsAccessed, inc) \ + for (int i = 0; i + numElemsAccessed <= numInterestingInts; i += inc) \ + for (int j = 0; j < numInterestingInts / 4; ++j) { \ + uint32_t* ptr = interesting_ints + i; \ + __m128i m1 = \ + (__m128i)E1_Int(interesting_ints, j * 4, numInterestingInts); \ + Ret_type ret = func((Ptr_type)ptr, m1); \ + char str[256]; \ + tostr((int*)ptr, numElemsAccessed, str); \ + char str2[256]; \ + tostr(&m1, str2); \ + char str3[256]; \ + tostr(&ret, str3); \ + printf("%s(%s, %s) = %s\n", #func, str, str2, str3); \ + } + +#define Ret_IntPtr_M256i(Ret_type, func, Ptr_type, numElemsAccessed, inc) \ + for (int i = 0; i + numElemsAccessed <= numInterestingInts; i += inc) \ + for (int j = 0; j < numInterestingInts / 4; ++j) { \ + uint32_t* ptr = interesting_ints + i; \ + __m128i tmp = \ + (__m128i)E1_Int(interesting_ints, j * 4, numInterestingInts); \ + __m256i m1 = _mm256_set_m128i(tmp, tmp); \ + Ret_type ret = func((Ptr_type)ptr, m1); \ + char str[256]; \ + tostr((int*)ptr, numElemsAccessed, str); \ + char str2[256]; \ + tostr(&m1, str2); \ + char str3[256]; \ + tostr(&ret, str3); \ + printf("%s(%s, %s) = %s\n", #func, str, str2, str3); \ + } + +#define void_OutIntPtr_M128i_M128i( \ + func, Ptr_type, numBytesWritten, alignmentBytes) \ + for (int i = 0; i < numInterestingInts / 4; ++i) \ + for (int j = 0; j < numInterestingInts / 4; ++j) \ + for (int offset = 0; offset < numBytesWritten; offset += alignmentBytes) \ + for (int k = 0; k < 4; ++k) { \ + uintptr_t base = (uintptr_t)getTempOutIntStore(16); \ + __m128i m1 = \ + (__m128i)E1_Int(interesting_ints, j * 4, numInterestingInts); \ + __m128i m2 = \ + (__m128i)E1_Int(interesting_ints, i * 4 + k, numInterestingInts); \ + 
align1_int* out = (align1_int*)(base + offset); \ + func((Ptr_type)out, m1, m2); \ + char str[256]; \ + tostr(&m1, str); \ + char str2[256]; \ + tostr(&m2, str2); \ + char str3[256]; \ + tostr(out, (numBytesWritten + sizeof(int) - 1) / sizeof(int), str3); \ + printf( \ + "%s(p:align=%d, %s, %s) = %s\n", #func, offset, str, str2, str3); \ + } + +#define void_OutIntPtr_M256i_M256i( \ + func, Ptr_type, numBytesWritten, alignmentBytes) \ + for (int i = 0; i < numInterestingInts / 4; ++i) \ + for (int j = 0; j < numInterestingInts / 4; ++j) \ + for (int offset = 0; offset < numBytesWritten; offset += alignmentBytes) \ + for (int k = 0; k < 4; ++k) { \ + uintptr_t base = (uintptr_t)getTempOutIntStore(16); \ + __m128i tmp = \ + (__m128i)E1_Int(interesting_ints, j * 4, numInterestingInts); \ + __m256i m1 = _mm256_set_m128i(tmp, tmp); \ + tmp = \ + (__m128i)E1_Int(interesting_ints, i * 4 + k, numInterestingInts); \ + __m256i m2 = _mm256_set_m128i(tmp, tmp); \ + align1_int* out = (align1_int*)(base + offset); \ + func((Ptr_type)out, m1, m2); \ + char str[256]; \ + tostr(&m1, str); \ + char str2[256]; \ + tostr(&m2, str2); \ + char str3[256]; \ + tostr(out, (numBytesWritten + sizeof(int) - 1) / sizeof(int), str3); \ + printf( \ + "%s(p:align=%d, %s, %s) = %s\n", #func, offset, str, str2, str3); \ + } + +// Generate random 32x4 index +__m128i GenRandom32BitIndex(int i, int n, int prime) { + return _mm_set_epi32((i * prime) % n, + ((i + 1) * prime) % n, + ((i + 2) * prime) % n, + ((i + 3) * prime) % n); +} + +// Generate random 64x2 index +__m128i GenRandom64BitIndex(int i, int n, int prime) { + return _mm_set_epi64x((i * prime) % n, ((i + 3) * prime) % n); +} + +#define Ret_DoublePtr_I32x4_Tint_body(Ret_type, func, Tint) \ + for (int j = 0; j < 4096; ++j) { \ + double* ptr = interesting_doubles; \ + __m128i m1 = GenRandom32BitIndex(j, numInterestingDoubles, 1787); \ + Ret_type ret = func(ptr, m1, Tint); \ + char str[256]; \ + tostr(ptr, 4, str); \ + char str2[256]; \ + tostr(&m1, str2); \ + char str3[256]; \ + tostr(&ret, str3); \ + printf("%s(%s, %s, %d) = %s\n", #func, str, str2, Tint, str3); \ + } + +#define Ret_DoublePtr_I64x2_Tint_body(Ret_type, func, Tint) \ + for (int j = 0; j < 4096; ++j) { \ + double* ptr = interesting_doubles; \ + __m128i m1 = GenRandom64BitIndex(j, numInterestingDoubles, 1787); \ + Ret_type ret = func(ptr, m1, Tint); \ + char str[256]; \ + tostr(ptr, 4, str); \ + char str2[256]; \ + tostr(&m1, str2); \ + char str3[256]; \ + tostr(&ret, str3); \ + printf("%s(%s, %s, %d) = %s\n", #func, str, str2, Tint, str3); \ + } + +#define Ret_DoublePtr_I64x4_Tint_body(Ret_type, func, Tint) \ + for (int j = 0; j < 4096; ++j) { \ + double* ptr = interesting_doubles; \ + __m128i tmp = GenRandom64BitIndex(j, numInterestingDoubles, 1787); \ + __m256i m1 = _mm256_set_m128i(tmp, tmp); \ + Ret_type ret = func(ptr, m1, Tint); \ + char str[256]; \ + tostr(ptr, 4, str); \ + char str2[256]; \ + tostr(&m1, str2); \ + char str3[256]; \ + tostr(&ret, str3); \ + printf("%s(%s, %s, %d) = %s\n", #func, str, str2, Tint, str3); \ + } + +#define Ret_FloatPtr_I32x4_Tint_body(Ret_type, func, Tint) \ + for (int j = 0; j < 4096; ++j) { \ + float* ptr = interesting_floats; \ + __m128i m1 = GenRandom32BitIndex(j, numInterestingFloats, 1787); \ + Ret_type ret = func(ptr, m1, Tint); \ + char str[256]; \ + tostr(ptr, 4, str); \ + char str2[256]; \ + tostr(&m1, str2); \ + char str3[256]; \ + tostr(&ret, str3); \ + printf("%s(%s, %s, %d) = %s\n", #func, str, str2, Tint, str3); \ + } + +#define 
Ret_FloatPtr_I32x8_Tint_body(Ret_type, func, Tint) \ + for (int j = 0; j < 4096; ++j) { \ + float* ptr = interesting_floats; \ + __m128i tmp = GenRandom32BitIndex(j, numInterestingFloats, 1787); \ + __m256i m1 = _mm256_set_m128i(tmp, tmp); \ + Ret_type ret = func(ptr, m1, Tint); \ + char str[256]; \ + tostr(ptr, 4, str); \ + char str2[256]; \ + tostr(&m1, str2); \ + char str3[256]; \ + tostr(&ret, str3); \ + printf("%s(%s, %s, %d) = %s\n", #func, str, str2, Tint, str3); \ + } + +#define Ret_FloatPtr_I64x2_Tint_body(Ret_type, func, Tint) \ + for (int j = 0; j < 4096; ++j) { \ + float* ptr = interesting_floats; \ + __m128i m1 = GenRandom64BitIndex(j, numInterestingFloats, 1787); \ + Ret_type ret = func(ptr, m1, Tint); \ + char str[256]; \ + tostr(ptr, 4, str); \ + char str2[256]; \ + tostr(&m1, str2); \ + char str3[256]; \ + tostr(&ret, str3); \ + printf("%s(%s, %s, %d) = %s\n", #func, str, str2, Tint, str3); \ + } + +#define Ret_FloatPtr_I64x4_Tint_body(Ret_type, func, Tint) \ + for (int j = 0; j < 4096; ++j) { \ + float* ptr = interesting_floats; \ + __m128i tmp = GenRandom64BitIndex(j, numInterestingFloats, 1787); \ + __m256i m1 = _mm256_set_m128i(tmp, tmp); \ + Ret_type ret = func(ptr, m1, Tint); \ + char str[256]; \ + tostr(ptr, 4, str); \ + char str2[256]; \ + tostr(&m1, str2); \ + char str3[256]; \ + tostr(&ret, str3); \ + printf("%s(%s, %s, %d) = %s\n", #func, str, str2, Tint, str3); \ + } + +#define Ret_IntPtr_I32x4_Tint_body(Ret_type, func, Tint) \ + for (int j = 0; j < 4096; ++j) { \ + int32_t* ptr = (int32_t*)interesting_ints; \ + __m128i m1 = GenRandom32BitIndex(j, numInterestingInts, 1787); \ + Ret_type ret = func(ptr, m1, Tint); \ + char str[256]; \ + tostr(ptr, 4, str); \ + char str2[256]; \ + tostr(&m1, str2); \ + char str3[256]; \ + tostr(&ret, str3); \ + printf("%s(%s, %s, %d) = %s\n", #func, str, str2, Tint, str3); \ + } + +#define Ret_IntPtr_I32x8_Tint_body(Ret_type, func, Tint) \ + for (int j = 0; j < 4096; ++j) { \ + int32_t* ptr = (int32_t*)interesting_ints; \ + __m128i tmp = GenRandom32BitIndex(j, numInterestingInts, 1787); \ + __m256i m1 = _mm256_set_m128i(tmp, tmp); \ + Ret_type ret = func(ptr, m1, Tint); \ + char str[256]; \ + tostr(ptr, 4, str); \ + char str2[256]; \ + tostr(&m1, str2); \ + char str3[256]; \ + tostr(&ret, str3); \ + printf("%s(%s, %s, %d) = %s\n", #func, str, str2, Tint, str3); \ + } + +#define Ret_IntPtr_I64x2_Tint_body(Ret_type, func, Tint) \ + for (int j = 0; j < 4096; ++j) { \ + int32_t* ptr = (int32_t*)interesting_ints; \ + __m128i m1 = GenRandom64BitIndex(j, numInterestingInts, 1787); \ + Ret_type ret = func(ptr, m1, Tint); \ + char str[256]; \ + tostr(ptr, 4, str); \ + char str2[256]; \ + tostr(&m1, str2); \ + char str3[256]; \ + tostr(&ret, str3); \ + printf("%s(%s, %s, %d) = %s\n", #func, str, str2, Tint, str3); \ + } + +#define Ret_IntPtr_I64x4_Tint_body(Ret_type, func, Tint) \ + for (int j = 0; j < 4096; ++j) { \ + int32_t* ptr = (int*)interesting_ints; \ + __m128i tmp = GenRandom64BitIndex(j, numInterestingInts, 1787); \ + __m256i m1 = _mm256_set_m128i(tmp, tmp); \ + Ret_type ret = func(ptr, m1, Tint); \ + char str[256]; \ + tostr(ptr, 4, str); \ + char str2[256]; \ + tostr(&m1, str2); \ + char str3[256]; \ + tostr(&ret, str3); \ + printf("%s(%s, %s, %d) = %s\n", #func, str, str2, Tint, str3); \ + } + +#define Ret_Int64Ptr_I32x4_Tint_body(Ret_type, func, Tint) \ + for (int j = 0; j < 4096; ++j) { \ + int64_t* ptr = (int64_t*)interesting_ints; \ + __m128i m1 = GenRandom32BitIndex(j, numInterestingInts / 2, 1787); \ + Ret_type ret = 
func(ptr, m1, Tint); \ + char str[256]; \ + tostr(ptr, 4, str); \ + char str2[256]; \ + tostr(&m1, str2); \ + char str3[256]; \ + tostr(&ret, str3); \ + printf("%s(%s, %s, %d) = %s\n", #func, str, str2, Tint, str3); \ + } + +#define Ret_Int64Ptr_I64x2_Tint_body(Ret_type, func, Tint) \ + for (int j = 0; j < 4096; ++j) { \ + int64_t* ptr = (int64_t*)interesting_ints; \ + __m128i m1 = GenRandom64BitIndex(j, numInterestingInts / 2, 1787); \ + Ret_type ret = func(ptr, m1, Tint); \ + char str[256]; \ + tostr(ptr, 4, str); \ + char str2[256]; \ + tostr(&m1, str2); \ + char str3[256]; \ + tostr(&ret, str3); \ + printf("%s(%s, %s, %d) = %s\n", #func, str, str2, Tint, str3); \ + } + +#define Ret_Int64Ptr_I64x4_Tint_body(Ret_type, func, Tint) \ + for (int j = 0; j < 4096; ++j) { \ + int64_t* ptr = (int64_t*)interesting_ints; \ + __m128i tmp = GenRandom64BitIndex(j, numInterestingInts / 2, 1787); \ + __m256i m1 = _mm256_set_m128i(tmp, tmp); \ + Ret_type ret = func(ptr, m1, Tint); \ + char str[256]; \ + tostr(ptr, 4, str); \ + char str2[256]; \ + tostr(&m1, str2); \ + char str3[256]; \ + tostr(&ret, str3); \ + printf("%s(%s, %s, %d) = %s\n", #func, str, str2, Tint, str3); \ + } + +#define Ret_M128d_DoublePtr_I32x4_M128d_Tint_body(Ret_type, func, Tint) \ + for (int i = 0; i < numInterestingDoubles / 2; ++i) \ + for (int k = 0; k < 2; ++k) \ + for (int j = 0; j < 100; ++j) \ + for (int l = 0; l < numInterestingDoubles / 2; ++l) { \ + double* ptr = interesting_doubles; \ + __m128d m1 = \ + E1_Double(interesting_doubles, i * 2 + k, numInterestingDoubles); \ + __m128i m2 = GenRandom32BitIndex(j, numInterestingDoubles, 1787); \ + __m128d m3 = \ + E2_Double(interesting_doubles, l * 2, numInterestingDoubles); \ + Ret_type ret = func(m1, ptr, m2, m3, Tint); \ + char str[256]; \ + tostr(&m1, str); \ + char str2[256]; \ + tostr(ptr, 4, str2); \ + char str3[256]; \ + tostr(&m2, str3); \ + char str4[256]; \ + tostr(&m3, str4); \ + char str5[256]; \ + tostr(&ret, str5); \ + printf("%s(%s, %s, %s, %s %d) = %s\n", \ + #func, \ + str, \ + str2, \ + str3, \ + str4, \ + Tint, \ + str5); \ + } + +#define Ret_M256d_DoublePtr_I32x4_M256d_Tint_body(Ret_type, func, Tint) \ + for (int i = 0; i < numInterestingDoubles / 2; ++i) \ + for (int k = 0; k < 2; ++k) \ + for (int j = 0; j < 100; ++j) \ + for (int l = 0; l < numInterestingDoubles / 2; ++l) { \ + double* ptr = interesting_doubles; \ + __m128d tmp = \ + E1_Double(interesting_doubles, i * 2 + k, numInterestingDoubles); \ + __m256d m1 = _mm256_set_m128d(tmp, tmp); \ + __m128i m2 = GenRandom32BitIndex(j, numInterestingDoubles, 1787); \ + tmp = E2_Double(interesting_doubles, l * 2, numInterestingDoubles); \ + __m256d m3 = _mm256_set_m128d(tmp, tmp); \ + Ret_type ret = func(m1, ptr, m2, m3, Tint); \ + char str[256]; \ + tostr(&m1, str); \ + char str2[256]; \ + tostr(ptr, 4, str2); \ + char str3[256]; \ + tostr(&m2, str3); \ + char str4[256]; \ + tostr(&m3, str4); \ + char str5[256]; \ + tostr(&ret, str5); \ + printf("%s(%s, %s, %s, %s %d) = %s\n", \ + #func, \ + str, \ + str2, \ + str3, \ + str4, \ + Tint, \ + str5); \ + } + +#define Ret_M128d_DoublePtr_I64x2_M128d_Tint_body(Ret_type, func, Tint) \ + for (int i = 0; i < numInterestingDoubles / 2; ++i) \ + for (int k = 0; k < 2; ++k) \ + for (int j = 0; j < 100; ++j) \ + for (int l = 0; l < numInterestingDoubles / 2; ++l) { \ + double* ptr = interesting_doubles; \ + __m128d m1 = \ + E1_Double(interesting_doubles, i * 2 + k, numInterestingDoubles); \ + __m128i m2 = GenRandom64BitIndex(j, numInterestingDoubles, 1787); \ + __m128d 
m3 = \ + E2_Double(interesting_doubles, l * 2, numInterestingDoubles); \ + Ret_type ret = func(m1, ptr, m2, m3, Tint); \ + char str[256]; \ + tostr(&m1, str); \ + char str2[256]; \ + tostr(ptr, 4, str2); \ + char str3[256]; \ + tostr(&m2, str3); \ + char str4[256]; \ + tostr(&m3, str4); \ + char str5[256]; \ + tostr(&ret, str5); \ + printf("%s(%s, %s, %s, %s %d) = %s\n", \ + #func, \ + str, \ + str2, \ + str3, \ + str4, \ + Tint, \ + str5); \ + } + +#define Ret_M256d_DoublePtr_I64x4_M256d_Tint_body(Ret_type, func, Tint) \ + for (int i = 0; i < numInterestingDoubles / 2; ++i) \ + for (int k = 0; k < 2; ++k) \ + for (int j = 0; j < 100; ++j) \ + for (int l = 0; l < numInterestingDoubles / 2; ++l) { \ + double* ptr = interesting_doubles; \ + __m128d tmp = \ + E1_Double(interesting_doubles, i * 2 + k, numInterestingDoubles); \ + __m256d m1 = _mm256_set_m128d(tmp, tmp); \ + __m128i tmp2 = GenRandom64BitIndex(j, numInterestingDoubles, 1787); \ + __m256i m2 = _mm256_set_m128i(tmp2, tmp2); \ + tmp = E2_Double(interesting_doubles, l * 2, numInterestingDoubles); \ + __m256d m3 = _mm256_set_m128d(tmp, tmp); \ + Ret_type ret = func(m1, ptr, m2, m3, Tint); \ + char str[256]; \ + tostr(&m1, str); \ + char str2[256]; \ + tostr(ptr, 4, str2); \ + char str3[256]; \ + tostr(&m2, str3); \ + char str4[256]; \ + tostr(&m3, str4); \ + char str5[256]; \ + tostr(&ret, str5); \ + printf("%s(%s, %s, %s, %s %d) = %s\n", \ + #func, \ + str, \ + str2, \ + str3, \ + str4, \ + Tint, \ + str5); \ + } + +#define Ret_M128_FloatPtr_I32x4_M128_Tint_body(Ret_type, func, Tint) \ + for (int i = 0; i < numInterestingFloats / 4; ++i) \ + for (int k = 0; k < 4; ++k) \ + for (int j = 0; j < 100; ++j) \ + for (int l = 0; l < numInterestingFloats / 4; ++l) { \ + float* ptr = interesting_floats; \ + __m128 m1 = E1(interesting_floats, i * 4 + k, numInterestingFloats); \ + __m128i m2 = GenRandom32BitIndex(j, numInterestingFloats, 1787); \ + __m128 m3 = E2(interesting_floats, l * 4, numInterestingFloats); \ + Ret_type ret = func(m1, ptr, m2, m3, Tint); \ + char str[256]; \ + tostr(&m1, str); \ + char str2[256]; \ + tostr(ptr, 4, str2); \ + char str3[256]; \ + tostr(&m2, str3); \ + char str4[256]; \ + tostr(&m3, str4); \ + char str5[256]; \ + tostr(&ret, str5); \ + printf("%s(%s, %s, %s, %s %d) = %s\n", \ + #func, \ + str, \ + str2, \ + str3, \ + str4, \ + Tint, \ + str5); \ + } + +#define Ret_M256_FloatPtr_I32x8_M256_Tint_body(Ret_type, func, Tint) \ + for (int i = 0; i < numInterestingFloats / 4; ++i) \ + for (int k = 0; k < 4; ++k) \ + for (int j = 0; j < 100; ++j) \ + for (int l = 0; l < numInterestingFloats / 4; ++l) { \ + float* ptr = interesting_floats; \ + __m128 tmp = \ + E1(interesting_floats, i * 4 + k, numInterestingFloats); \ + __m256 m1 = _mm256_set_m128(tmp, tmp); \ + __m128i tmp2 = GenRandom32BitIndex(j, numInterestingFloats, 1787); \ + __m256i m2 = _mm256_set_m128i(tmp2, tmp2); \ + __m128 tmp3 = E2(interesting_floats, l * 4, numInterestingFloats); \ + __m256 m3 = _mm256_set_m128(tmp3, tmp3); \ + Ret_type ret = func(m1, ptr, m2, m3, Tint); \ + char str[256]; \ + tostr(&m1, str); \ + char str2[256]; \ + tostr(ptr, 4, str2); \ + char str3[256]; \ + tostr(&m2, str3); \ + char str4[256]; \ + tostr(&m3, str4); \ + char str5[256]; \ + tostr(&ret, str5); \ + printf("%s(%s, %s, %s, %s %d) = %s\n", \ + #func, \ + str, \ + str2, \ + str3, \ + str4, \ + Tint, \ + str5); \ + } + +#define Ret_M128_FloatPtr_I64x2_M128_Tint_body(Ret_type, func, Tint) \ + for (int i = 0; i < numInterestingFloats / 4; ++i) \ + for (int k = 0; k < 4; ++k) \ 
+ for (int j = 0; j < 100; ++j) \ + for (int l = 0; l < numInterestingFloats / 4; ++l) { \ + float* ptr = interesting_floats; \ + __m128 m1 = E1(interesting_floats, i * 4 + k, numInterestingFloats); \ + __m128i m2 = GenRandom64BitIndex(j, numInterestingFloats, 1787); \ + __m128 m3 = E2(interesting_floats, l * 4, numInterestingFloats); \ + Ret_type ret = func(m1, ptr, m2, m3, Tint); \ + char str[256]; \ + tostr(&m1, str); \ + char str2[256]; \ + tostr(ptr, 4, str2); \ + char str3[256]; \ + tostr(&m2, str3); \ + char str4[256]; \ + tostr(&m3, str4); \ + char str5[256]; \ + tostr(&ret, str5); \ + printf("%s(%s, %s, %s, %s %d) = %s\n", \ + #func, \ + str, \ + str2, \ + str3, \ + str4, \ + Tint, \ + str5); \ + } + +#define Ret_M128_FloatPtr_I64x4_M128_Tint_body(Ret_type, func, Tint) \ + for (int i = 0; i < numInterestingFloats / 4; ++i) \ + for (int k = 0; k < 4; ++k) \ + for (int j = 0; j < 100; ++j) \ + for (int l = 0; l < numInterestingFloats / 4; ++l) { \ + float* ptr = interesting_floats; \ + __m128 m1 = E1(interesting_floats, i * 4 + k, numInterestingFloats); \ + __m128i tmp = GenRandom64BitIndex(j, numInterestingFloats, 1787); \ + __m256i m2 = _mm256_set_m128i(tmp, tmp); \ + __m128 m3 = E2(interesting_floats, l * 4, numInterestingFloats); \ + Ret_type ret = func(m1, ptr, m2, m3, Tint); \ + char str[256]; \ + tostr(&m1, str); \ + char str2[256]; \ + tostr(ptr, 4, str2); \ + char str3[256]; \ + tostr(&m2, str3); \ + char str4[256]; \ + tostr(&m3, str4); \ + char str5[256]; \ + tostr(&ret, str5); \ + printf("%s(%s, %s, %s, %s %d) = %s\n", \ + #func, \ + str, \ + str2, \ + str3, \ + str4, \ + Tint, \ + str5); \ + } + +#define Ret_M128i_Int32Ptr_I32x4_M128i_Tint_body(Ret_type, func, Tint) \ + for (int i = 0; i < numInterestingInts / 4; ++i) \ + for (int k = 0; k < 4; ++k) \ + for (int j = 0; j < 100; ++j) \ + for (int l = 0; l < numInterestingInts / 4; ++l) { \ + int32_t* ptr = (int32_t*)interesting_ints; \ + __m128i m1 = \ + (__m128i)E1_Int(interesting_ints, i * 4 + k, numInterestingInts); \ + __m128i m2 = GenRandom32BitIndex(j, numInterestingInts, 1787); \ + __m128i m3 = \ + (__m128i)E2_Int(interesting_ints, l * 4, numInterestingInts); \ + Ret_type ret = func(m1, ptr, m2, m3, Tint); \ + char str[256]; \ + tostr(&m1, str); \ + char str2[256]; \ + tostr(ptr, 4, str2); \ + char str3[256]; \ + tostr(&m2, str3); \ + char str4[256]; \ + tostr(&m3, str4); \ + char str5[256]; \ + tostr(&ret, str5); \ + printf("%s(%s, %s, %s, %s %d) = %s\n", \ + #func, \ + str, \ + str2, \ + str3, \ + str4, \ + Tint, \ + str5); \ + } + +#define Ret_M256i_Int32Ptr_I32x8_M256i_Tint_body(Ret_type, func, Tint) \ + for (int i = 0; i < numInterestingInts / 4; ++i) \ + for (int k = 0; k < 4; ++k) \ + for (int j = 0; j < 100; ++j) \ + for (int l = 0; l < numInterestingInts / 4; ++l) { \ + int32_t* ptr = (int32_t*)interesting_ints; \ + __m128i tmp1 = \ + (__m128i)E1_Int(interesting_ints, i * 4 + k, numInterestingInts); \ + __m256i m1 = _mm256_set_m128i(tmp1, tmp1); \ + __m128i tmp2 = GenRandom32BitIndex(j, numInterestingInts, 1787); \ + __m256i m2 = _mm256_set_m128i(tmp2, tmp2); \ + __m128i tmp3 = \ + (__m128i)E2_Int(interesting_ints, l * 4, numInterestingInts); \ + __m256i m3 = _mm256_set_m128i(tmp3, tmp3); \ + Ret_type ret = func(m1, ptr, m2, m3, Tint); \ + char str[256]; \ + tostr(&m1, str); \ + char str2[256]; \ + tostr(ptr, 4, str2); \ + char str3[256]; \ + tostr(&m2, str3); \ + char str4[256]; \ + tostr(&m3, str4); \ + char str5[256]; \ + tostr(&ret, str5); \ + printf("%s(%s, %s, %s, %s %d) = %s\n", \ + #func, \ + str, 
\ + str2, \ + str3, \ + str4, \ + Tint, \ + str5); \ + } + +#define Ret_M128i_Int32Ptr_I64x2_M128i_Tint_body(Ret_type, func, Tint) \ + for (int i = 0; i < numInterestingInts / 4; ++i) \ + for (int k = 0; k < 4; ++k) \ + for (int j = 0; j < 100; ++j) \ + for (int l = 0; l < numInterestingInts / 4; ++l) { \ + int32_t* ptr = (int32_t*)interesting_ints; \ + __m128i m1 = \ + (__m128i)E1_Int(interesting_ints, i * 4 + k, numInterestingInts); \ + __m128i m2 = GenRandom64BitIndex(j, numInterestingInts, 1787); \ + __m128i m3 = \ + (__m128i)E2_Int(interesting_ints, l * 4, numInterestingInts); \ + Ret_type ret = func(m1, ptr, m2, m3, Tint); \ + char str[256]; \ + tostr(&m1, str); \ + char str2[256]; \ + tostr(ptr, 4, str2); \ + char str3[256]; \ + tostr(&m2, str3); \ + char str4[256]; \ + tostr(&m3, str4); \ + char str5[256]; \ + tostr(&ret, str5); \ + printf("%s(%s, %s, %s, %s %d) = %s\n", \ + #func, \ + str, \ + str2, \ + str3, \ + str4, \ + Tint, \ + str5); \ + } + +#define Ret_M128i_Int32Ptr_I64x4_M128i_Tint_body(Ret_type, func, Tint) \ + for (int i = 0; i < numInterestingInts / 4; ++i) \ + for (int k = 0; k < 4; ++k) \ + for (int j = 0; j < 100; ++j) \ + for (int l = 0; l < numInterestingInts / 4; ++l) { \ + int32_t* ptr = (int32_t*)interesting_ints; \ + __m128i m1 = \ + (__m128i)E1_Int(interesting_ints, i * 4 + k, numInterestingInts); \ + __m128i tmp = GenRandom64BitIndex(j, numInterestingInts, 1787); \ + __m256i m2 = _mm256_set_m128i(tmp, tmp); \ + __m128i m3 = \ + (__m128i)E2_Int(interesting_ints, l * 4, numInterestingInts); \ + Ret_type ret = func(m1, ptr, m2, m3, Tint); \ + char str[256]; \ + tostr(&m1, str); \ + char str2[256]; \ + tostr(ptr, 4, str2); \ + char str3[256]; \ + tostr(&m2, str3); \ + char str4[256]; \ + tostr(&m3, str4); \ + char str5[256]; \ + tostr(&ret, str5); \ + printf("%s(%s, %s, %s, %s %d) = %s\n", \ + #func, \ + str, \ + str2, \ + str3, \ + str4, \ + Tint, \ + str5); \ + } + +#define Ret_M128i_Int64Ptr_I32x4_M128i_Tint_body(Ret_type, func, Tint) \ + for (int i = 0; i < numInterestingInts / 4; ++i) \ + for (int k = 0; k < 4; ++k) \ + for (int j = 0; j < 100; ++j) \ + for (int l = 0; l < numInterestingInts / 4; ++l) { \ + int64_t* ptr = (int64_t*)interesting_ints; \ + __m128i m1 = \ + (__m128i)E1_Int(interesting_ints, i * 4 + k, numInterestingInts); \ + __m128i m2 = GenRandom32BitIndex(j, numInterestingInts / 2, 1787); \ + __m128i m3 = \ + (__m128i)E2_Int(interesting_ints, l * 4, numInterestingInts); \ + Ret_type ret = func(m1, ptr, m2, m3, Tint); \ + char str[256]; \ + tostr(&m1, str); \ + char str2[256]; \ + tostr(ptr, 4, str2); \ + char str3[256]; \ + tostr(&m2, str3); \ + char str4[256]; \ + tostr(&m3, str4); \ + char str5[256]; \ + tostr(&ret, str5); \ + printf("%s(%s, %s, %s, %s %d) = %s\n", \ + #func, \ + str, \ + str2, \ + str3, \ + str4, \ + Tint, \ + str5); \ + } + +#define Ret_M256i_Int64Ptr_I32x4_M256i_Tint_body(Ret_type, func, Tint) \ + for (int i = 0; i < numInterestingInts / 4; ++i) \ + for (int k = 0; k < 4; ++k) \ + for (int j = 0; j < 100; ++j) \ + for (int l = 0; l < numInterestingInts / 4; ++l) { \ + int64_t* ptr = (int64_t*)interesting_ints; \ + __m128i tmp1 = \ + (__m128i)E1_Int(interesting_ints, i * 4 + k, numInterestingInts); \ + __m256i m1 = _mm256_set_m128i(tmp1, tmp1); \ + __m128i m2 = GenRandom32BitIndex(j, numInterestingInts / 2, 1787); \ + __m128i tmp3 = \ + (__m128i)E2_Int(interesting_ints, l * 4, numInterestingInts); \ + __m256i m3 = _mm256_set_m128i(tmp3, tmp3); \ + Ret_type ret = func(m1, ptr, m2, m3, Tint); \ + char str[256]; \ + 
tostr(&m1, str); \ + char str2[256]; \ + tostr(ptr, 4, str2); \ + char str3[256]; \ + tostr(&m2, str3); \ + char str4[256]; \ + tostr(&m3, str4); \ + char str5[256]; \ + tostr(&ret, str5); \ + printf("%s(%s, %s, %s, %s %d) = %s\n", \ + #func, \ + str, \ + str2, \ + str3, \ + str4, \ + Tint, \ + str5); \ + } + +#define Ret_M128i_Int64Ptr_I64x2_M128i_Tint_body(Ret_type, func, Tint) \ + for (int i = 0; i < numInterestingInts / 4; ++i) \ + for (int k = 0; k < 4; ++k) \ + for (int j = 0; j < 100; ++j) \ + for (int l = 0; l < numInterestingInts / 4; ++l) { \ + int64_t* ptr = (int64_t*)interesting_ints; \ + __m128i m1 = \ + (__m128i)E1_Int(interesting_ints, i * 4 + k, numInterestingInts); \ + __m128i m2 = GenRandom64BitIndex(j, numInterestingInts / 2, 1787); \ + __m128i m3 = \ + (__m128i)E2_Int(interesting_ints, l * 4, numInterestingInts); \ + Ret_type ret = func(m1, ptr, m2, m3, Tint); \ + char str[256]; \ + tostr(&m1, str); \ + char str2[256]; \ + tostr(ptr, 4, str2); \ + char str3[256]; \ + tostr(&m2, str3); \ + char str4[256]; \ + tostr(&m3, str4); \ + char str5[256]; \ + tostr(&ret, str5); \ + printf("%s(%s, %s, %s, %s %d) = %s\n", \ + #func, \ + str, \ + str2, \ + str3, \ + str4, \ + Tint, \ + str5); \ + } + +#define Ret_M256i_Int64Ptr_I64x4_M256i_Tint_body(Ret_type, func, Tint) \ + for (int i = 0; i < numInterestingInts / 4; ++i) \ + for (int k = 0; k < 4; ++k) \ + for (int j = 0; j < 100; ++j) \ + for (int l = 0; l < numInterestingInts / 4; ++l) { \ + int64_t* ptr = (int64_t*)interesting_ints; \ + __m128i tmp1 = \ + (__m128i)E1_Int(interesting_ints, i * 4 + k, numInterestingInts); \ + __m256i m1 = _mm256_set_m128i(tmp1, tmp1); \ + __m128i tmp2 = GenRandom64BitIndex(j, numInterestingInts / 2, 1787); \ + __m256i m2 = _mm256_set_m128i(tmp2, tmp2); \ + __m128i tmp3 = \ + (__m128i)E2_Int(interesting_ints, l * 4, numInterestingInts); \ + __m256i m3 = _mm256_set_m128i(tmp3, tmp3); \ + Ret_type ret = func(m1, ptr, m2, m3, Tint); \ + char str[256]; \ + tostr(&m1, str); \ + char str2[256]; \ + tostr(ptr, 4, str2); \ + char str3[256]; \ + tostr(&m2, str3); \ + char str4[256]; \ + tostr(&m3, str4); \ + char str5[256]; \ + tostr(&ret, str5); \ + printf("%s(%s, %s, %s, %s %d) = %s\n", \ + #func, \ + str, \ + str2, \ + str3, \ + str4, \ + Tint, \ + str5); \ + } + +#endif
diff --git a/test/test_core.py b/test/test_core.py index 12465010b4e0b..9637f15a51c82 100644 --- a/test/test_core.py +++ b/test/test_core.py @@ -5853,14 +5853,11 @@ def test_fs_stat_unnamed_file_descriptor(self): @crossplatform @with_all_fs def test_fs_symlink_resolution(self): - nodefs = '-DNODEFS' in self.emcc_args or '-DNODERAWFS' in self.emcc_args if self.get_setting('WASMFS'): self.set_setting('FORCE_FILESYSTEM') - if nodefs: - if WINDOWS: - self.skipTest('No symlinks on Windows') - if self.get_setting('WASMFS'): - self.skipTest('NODEFS in WasmFS') + nodefs = '-DNODEFS' in self.emcc_args or '-DNODERAWFS' in self.emcc_args + if nodefs and WINDOWS: + self.skipTest('No symlinks on Windows') self.do_runf('fs/test_fs_symlink_resolution.c', 'success') @with_all_fs @@ -5907,11 +5904,8 @@ def test_sigaction_default(self, signal, exit_code, assert_identical): @crossplatform @with_all_fs def test_unistd_access(self): - nodefs = '-DNODEFS' in self.emcc_args or '-DNODERAWFS' in self.emcc_args if self.get_setting('WASMFS'): self.set_setting('FORCE_FILESYSTEM') - if nodefs: - self.skipTest('NODEFS in WasmFS') # On windows we have slightly different output because the same # level of permissions is not available.
For example, on windows # it's not possible to have a file that is not readable, but writable. @@ -5946,11 +5940,8 @@ def test_unistd_dup(self): @with_all_fs def test_unistd_truncate(self): - nodefs = '-DNODEFS' in self.emcc_args or '-DNODERAWFS' in self.emcc_args if self.get_setting('WASMFS'): self.set_setting('FORCE_FILESYSTEM') - if nodefs: - self.skipTest('TODO: NODEFS in WasmFS') if WINDOWS or os.geteuid() == 0: self.skipTest('Root access invalidates this test by being able to write on readonly files') self.do_run_in_out_file_test('unistd/truncate.c') @@ -5982,10 +5973,6 @@ def test_unistd_sysconf_phys_pages(self): @no_windows('https://github.com/emscripten-core/emscripten/issues/8882') @with_all_fs def test_unistd_unlink(self): - nodefs = '-DNODEFS' in self.emcc_args or '-DNODERAWFS' in self.emcc_args - if self.get_setting('WASMFS') and nodefs: - self.skipTest('NODEFS in WasmFS') - # symlinks on node.js on non-linux behave differently (e.g. on Windows they require administrative privileges) # so skip testing those bits on that combination. if '-DNODEFS' in self.emcc_args: @@ -6035,8 +6022,6 @@ def test_unistd_symlink_on_nodefs(self): @also_with_nodefs def test_unistd_io(self): if self.get_setting('WASMFS'): - if '-DNODEFS' in self.emcc_args: - self.skipTest('NODEFS in WasmFS') self.set_setting('FORCE_FILESYSTEM') self.do_run_in_out_file_test('unistd/io.c') @@ -6575,6 +6560,25 @@ def test_avx(self, args): self.maybe_closure() self.do_runf(src, native_result) + # Tests invoking the SIMD API via x86 AVX2 avx2intrin.h header (_mm_x()/_mm256_x() functions) + @wasm_simd + @requires_native_clang + @is_slow_test + @no_asan('local count too large') + @no_ubsan('local count too large') + @parameterized({ + '': ([],), + 'nontrapping': (['-mnontrapping-fptoint'],) + }) + def test_avx2(self, args): + src = test_file('sse/test_avx2.cpp') + self.run_process([shared.CLANG_CXX, src, '-mavx2', '-Wno-argument-outside-range', '-Wpedantic', '-o', 'test_avx2', '-D_CRT_SECURE_NO_WARNINGS=1'] + clang_native.get_clang_native_args(), stdout=PIPE) + native_result = self.run_process('./test_avx2', stdout=PIPE).stdout + + self.emcc_args += ['-I' + test_file('sse'), '-mavx2', '-Wno-argument-outside-range', '-sSTACK_SIZE=1MB'] + args + self.maybe_closure() + self.do_runf(src, native_result) + @wasm_simd def test_sse_diagnostics(self): self.emcc_args.remove('-Werror')
diff --git a/test/test_other.py b/test/test_other.py index 1f1c3ae61c5bf..62558fc613206 100644 --- a/test/test_other.py +++ b/test/test_other.py @@ -4924,7 +4924,7 @@ def test_jslib_include(self): ''') create_file('foo.js', ''' // Include a file from system directory - #include "arrayUtils.js" + #include "IDBStore.js" // Include a local file. #include "inc.js" ''') @@ -9567,7 +9567,8 @@ def test_standalone_system_headers(self): print('header: ' + header) # These headers cannot be included in isolation. # e.g: error: unknown type name 'EGLDisplay' - if header in ['eglext.h', 'SDL_config_macosx.h', 'glext.h', 'gl2ext.h']: + # Don't include avxintrin.h and avx2intrin.h directly, include immintrin.h instead + if header in ['eglext.h', 'SDL_config_macosx.h', 'glext.h', 'gl2ext.h', 'avxintrin.h', 'avx2intrin.h']: continue # These headers are C++ only and cannot be included from C code.
# But we still want to check they can be included on their own without @@ -9581,7 +9582,9 @@ if directory and directory != 'compat': header = f'{directory}/{header}' inc = f'#include <{header}>\n__attribute__((weak)) int foo;\n' - cflags = ['-Werror', '-Wall', '-pedantic', '-mavx', '-msimd128', '-msse3'] + cflags = ['-Werror', '-Wall', '-pedantic', '-msimd128', '-msse4'] + if header == 'immintrin.h': + cflags.append('-mavx2') if cxx_only: create_file('a.cxx', inc) create_file('b.cxx', inc)
diff --git a/test/wasm_worker/wasm_worker_self_id.c b/test/wasm_worker/wasm_worker_self_id.c index d846f9ee51f4f..ea637e5a2e5dc 100644 --- a/test/wasm_worker/wasm_worker_self_id.c +++ b/test/wasm_worker/wasm_worker_self_id.c @@ -17,6 +17,7 @@ void test_success() { } void worker1_main() { + assert(emscripten_current_thread_is_wasm_worker()); assert(emscripten_wasm_worker_self_id() != 0); assert(emscripten_wasm_worker_self_id() == worker1); if (emscripten_wasm_worker_self_id() == worker1) { @@ -25,6 +26,7 @@ void worker2_main() { + assert(emscripten_current_thread_is_wasm_worker()); assert(emscripten_wasm_worker_self_id() != 0); assert(emscripten_wasm_worker_self_id() == worker2); if (emscripten_wasm_worker_self_id() == worker2) { @@ -36,6 +38,7 @@ char stack1[1024]; char stack2[1024]; int main() { + assert(!emscripten_current_thread_is_wasm_worker()); assert(emscripten_wasm_worker_self_id() == 0); worker1 = emscripten_create_wasm_worker(stack1, sizeof(stack1)); worker2 = emscripten_create_wasm_worker(stack2, sizeof(stack2));
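The new asserts pair the two worker-identity queries: emscripten_wasm_worker_self_id() returns 0 on the main thread and the worker's own handle inside a worker, while emscripten_current_thread_is_wasm_worker() reports the same distinction as a boolean. A minimal standalone sketch of the pattern (an illustration, not part of the patch; build with `emcc -sWASM_WORKERS`):

  #include <emscripten/wasm_worker.h>
  #include <assert.h>
  #include <stdio.h>

  char stack[1024];
  emscripten_wasm_worker_t worker;

  void worker_main(void) {
    /* Inside a wasm worker the boolean and id-based checks agree. */
    assert(emscripten_current_thread_is_wasm_worker());
    assert(emscripten_wasm_worker_self_id() == worker);
    printf("running in worker %d\n", (int)emscripten_wasm_worker_self_id());
  }

  int main(void) {
    /* On the main thread: not a worker, and the self id is 0. */
    assert(!emscripten_current_thread_is_wasm_worker());
    assert(emscripten_wasm_worker_self_id() == 0);
    worker = emscripten_create_wasm_worker(stack, sizeof(stack));
    emscripten_wasm_worker_post_function_v(worker, worker_main);
    return 0;
  }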