author    Miodrag Milanovic <mmicko@gmail.com>  2016-08-06 14:20:16 +0200
committer Miodrag Milanovic <mmicko@gmail.com>  2016-08-06 14:20:16 +0200
commit    3917850b6197950e5671f4d3b9cb0b9c921b798d
tree      503b9690868a7c238ba10d73b5c6bece7ca8c338 /3rdparty/bx
parent    057474989c1a92e9e935dbc8cb9d233ba3a4ea3a
Update BX and BGFX (nw)
Diffstat (limited to '3rdparty/bx')
-rw-r--r--   3rdparty/bx/include/bx/crtimpl.h                4
-rw-r--r--   3rdparty/bx/include/bx/float4_langext.h       482
-rw-r--r--   3rdparty/bx/include/bx/float4_neon.h          525
-rw-r--r--   3rdparty/bx/include/bx/float4_ni.h            509
-rw-r--r--   3rdparty/bx/include/bx/float4_sse.h           461
-rw-r--r--   3rdparty/bx/include/bx/float4_swizzle.inl     266
-rw-r--r--   3rdparty/bx/include/bx/float4_t.h              35
-rw-r--r--   3rdparty/bx/include/bx/float4x4_t.h           242
-rw-r--r--   3rdparty/bx/include/bx/fpumath.h               26
-rw-r--r--   3rdparty/bx/include/bx/handlealloc.h            4
-rw-r--r--   3rdparty/bx/include/bx/macros.h                 8
-rw-r--r--   3rdparty/bx/include/bx/os.h                    10
-rw-r--r--   3rdparty/bx/include/bx/platform.h               1
-rw-r--r--   3rdparty/bx/include/bx/readerwriter.h           8
-rw-r--r--   3rdparty/bx/include/bx/simd128_langext.inl    515
-rw-r--r--   3rdparty/bx/include/bx/simd128_neon.inl       562
-rw-r--r--   3rdparty/bx/include/bx/simd128_ref.inl (renamed from 3rdparty/bx/include/bx/float4_ref.h)  386
-rw-r--r--   3rdparty/bx/include/bx/simd128_sse.inl        647
-rw-r--r--   3rdparty/bx/include/bx/simd128_swizzle.inl    266
-rw-r--r--   3rdparty/bx/include/bx/simd256_avx.inl          9
-rw-r--r--   3rdparty/bx/include/bx/simd256_ref.inl          9
-rw-r--r--   3rdparty/bx/include/bx/simd_ni.inl            558
-rw-r--r--   3rdparty/bx/include/bx/simd_t.h               438
-rw-r--r--   3rdparty/bx/include/bx/string.h                 2
-rw-r--r--   3rdparty/bx/scripts/bx.lua                      1
-rw-r--r--   3rdparty/bx/scripts/toolchain.lua              10
-rw-r--r--   3rdparty/bx/tests/float4_t.cpp                309
-rw-r--r--   3rdparty/bx/tests/simd_t.cpp                  309
-rw-r--r--   3rdparty/bx/tests/vector_nodefault.cpp         10
-rwxr-xr-x   3rdparty/bx/tools/bin/darwin/genie            bin 483616 -> 487712 bytes
-rwxr-xr-x   3rdparty/bx/tools/bin/linux/genie             bin 458392 -> 462504 bytes
-rw-r--r--   3rdparty/bx/tools/bin/windows/genie.exe       bin 460800 -> 467456 bytes
32 files changed, 3706 insertions, 2906 deletions
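
The diffstat above carries the substance of this update: the float4_* SIMD headers are deleted and replaced by the simd128_*/simd256_*/simd_ni inline files (simd128_ref.inl is recorded as a rename of float4_ref.h), and the test file moves from float4_t.cpp to simd_t.cpp. Below is a minimal sketch of the general pattern such an abstraction layer follows, selecting a native backend at compile time and falling back to scalar code otherwise; the names (vec4, vec4_add, MYLIB_SIMD_SSE) are made up for illustration and are not bx's API.

#include <cstdint>

#if defined(__SSE2__)
# include <xmmintrin.h>
# define MYLIB_SIMD_SSE 1
#else
# define MYLIB_SIMD_SSE 0
#endif

#if MYLIB_SIMD_SSE
typedef __m128 vec4;

inline vec4 vec4_splat(float _v)       { return _mm_set1_ps(_v); }
inline vec4 vec4_add(vec4 _a, vec4 _b) { return _mm_add_ps(_a, _b); }
#else
struct vec4 { float lane[4]; };

inline vec4 vec4_splat(float _v)
{
	return vec4{ { _v, _v, _v, _v } };
}

inline vec4 vec4_add(vec4 _a, vec4 _b)
{
	vec4 result;
	for (int ii = 0; ii < 4; ++ii) { result.lane[ii] = _a.lane[ii] + _b.lane[ii]; }
	return result;
}
#endif // MYLIB_SIMD_SSE
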
diff --git a/3rdparty/bx/include/bx/crtimpl.h b/3rdparty/bx/include/bx/crtimpl.h
index a4820334ee3..71b9e61e1c6 100644
--- a/3rdparty/bx/include/bx/crtimpl.h
+++ b/3rdparty/bx/include/bx/crtimpl.h
@@ -194,10 +194,10 @@ namespace bx
#if BX_CONFIG_CRT_PROCESS
-#if BX_COMPILER_MSVC_COMPATIBLE
+#if BX_CRT_MSVC
# define popen _popen
# define pclose _pclose
-#endif // BX_COMPILER_MSVC_COMPATIBLE
+#endif // BX_CRT_MSVC
class ProcessReader : public ReaderOpenI, public CloserI, public ReaderI
{
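
The hunk above only changes which macro guards the popen/pclose aliases: the choice depends on the C runtime rather than the compiler front end, so BX_COMPILER_MSVC_COMPATIBLE becomes BX_CRT_MSVC. A minimal standalone sketch of the same shim, assuming only the standard MSVC (_popen/_pclose) and POSIX (popen/pclose) spellings:

#include <cstdio>

#if defined(_MSC_VER)
# define portable_popen  _popen
# define portable_pclose _pclose
#else
# define portable_popen  popen
# define portable_pclose pclose
#endif

int main()
{
	// Run a child process and echo its output; works on both CRT flavors.
	if (FILE* pipe = portable_popen("echo hello", "r"))
	{
		char buf[64];
		while (fgets(buf, sizeof(buf), pipe)) { fputs(buf, stdout); }
		portable_pclose(pipe);
	}
	return 0;
}
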
diff --git a/3rdparty/bx/include/bx/float4_langext.h b/3rdparty/bx/include/bx/float4_langext.h
deleted file mode 100644
index c5c3dddfa02..00000000000
--- a/3rdparty/bx/include/bx/float4_langext.h
+++ /dev/null
@@ -1,482 +0,0 @@
-/*
- * Copyright 2010-2016 Branimir Karadzic. All rights reserved.
- * License: https://github.com/bkaradzic/bx#license-bsd-2-clause
- */
-
-#ifndef BX_FLOAT4_LANGEXT_H_HEADER_GUARD
-#define BX_FLOAT4_LANGEXT_H_HEADER_GUARD
-
-#include <math.h>
-
-namespace bx
-{
- typedef union float4_t
- {
- float __attribute__((vector_size(16))) vf;
- int32_t __attribute__((vector_size(16))) vi;
- uint32_t __attribute__((vector_size(16))) vu;
- float fxyzw[4];
- int32_t ixyzw[4];
- uint32_t uxyzw[4];
-
- } float4_t;
-
-#define ELEMx 0
-#define ELEMy 1
-#define ELEMz 2
-#define ELEMw 3
-#define IMPLEMENT_SWIZZLE(_x, _y, _z, _w) \
- BX_FLOAT4_FORCE_INLINE float4_t float4_swiz_##_x##_y##_z##_w(float4_t _a) \
- { \
- float4_t result; \
- result.vf = __builtin_shufflevector(_a.vf, _a.vf, ELEM##_x, ELEM##_y, ELEM##_z, ELEM##_w); \
- return result; \
- }
-
-#include "float4_swizzle.inl"
-
-#undef IMPLEMENT_SWIZZLE
-#undef ELEMw
-#undef ELEMz
-#undef ELEMy
-#undef ELEMx
-
-#define IMPLEMENT_TEST(_xyzw, _mask) \
- BX_FLOAT4_FORCE_INLINE bool float4_test_any_##_xyzw(float4_t _test) \
- { \
- uint32_t tmp = ( (_test.uxyzw[3]>>31)<<3) \
- | ( (_test.uxyzw[2]>>31)<<2) \
- | ( (_test.uxyzw[1]>>31)<<1) \
- | ( _test.uxyzw[0]>>31) \
- ; \
- return 0 != (tmp&(_mask) ); \
- } \
- \
- BX_FLOAT4_FORCE_INLINE bool float4_test_all_##_xyzw(float4_t _test) \
- { \
- uint32_t tmp = ( (_test.uxyzw[3]>>31)<<3) \
- | ( (_test.uxyzw[2]>>31)<<2) \
- | ( (_test.uxyzw[1]>>31)<<1) \
- | ( _test.uxyzw[0]>>31) \
- ; \
- return (_mask) == (tmp&(_mask) ); \
- }
-
-IMPLEMENT_TEST(x , 0x1);
-IMPLEMENT_TEST(y , 0x2);
-IMPLEMENT_TEST(xy , 0x3);
-IMPLEMENT_TEST(z , 0x4);
-IMPLEMENT_TEST(xz , 0x5);
-IMPLEMENT_TEST(yz , 0x6);
-IMPLEMENT_TEST(xyz , 0x7);
-IMPLEMENT_TEST(w , 0x8);
-IMPLEMENT_TEST(xw , 0x9);
-IMPLEMENT_TEST(yw , 0xa);
-IMPLEMENT_TEST(xyw , 0xb);
-IMPLEMENT_TEST(zw , 0xc);
-IMPLEMENT_TEST(xzw , 0xd);
-IMPLEMENT_TEST(yzw , 0xe);
-IMPLEMENT_TEST(xyzw , 0xf);
-
-#undef IMPLEMENT_TEST
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_shuf_xyAB(float4_t _a, float4_t _b)
- {
- float4_t result;
- result.vf = __builtin_shufflevector(_a.vf, _b.vf, 0, 1, 4, 5);
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_shuf_ABxy(float4_t _a, float4_t _b)
- {
- float4_t result;
- result.vf = __builtin_shufflevector(_a.vf, _b.vf, 4, 5, 0, 1);
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_shuf_CDzw(float4_t _a, float4_t _b)
- {
- float4_t result;
- result.vf = __builtin_shufflevector(_a.vf, _b.vf, 5, 7, 2, 3);
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_shuf_zwCD(float4_t _a, float4_t _b)
- {
- float4_t result;
- result.vf = __builtin_shufflevector(_a.vf, _b.vf, 2, 3, 5, 7);
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_shuf_xAyB(float4_t _a, float4_t _b)
- {
- float4_t result;
- result.vf = __builtin_shufflevector(_a.vf, _b.vf, 0, 4, 1, 5);
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_shuf_yBxA(float4_t _a, float4_t _b)
- {
- float4_t result;
- result.vf = __builtin_shufflevector(_a.vf, _b.vf, 1, 5, 0, 4);
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_shuf_zCwD(float4_t _a, float4_t _b)
- {
- float4_t result;
- result.vf = __builtin_shufflevector(_a.vf, _b.vf, 2, 6, 3, 7);
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_shuf_CzDw(float4_t _a, float4_t _b)
- {
- float4_t result;
- result.vf = __builtin_shufflevector(_a.vf, _b.vf, 6, 2, 7, 3);
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_shuf_xAzC(float4_t _a, float4_t _b)
- {
- float4_t result;
- result.vf = __builtin_shufflevector(_a.vf, _b.vf, 0, 4, 2, 6);
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_shuf_yBwD(float4_t _a, float4_t _b)
- {
- float4_t result;
- result.vf = __builtin_shufflevector(_a.vf, _b.vf, 1, 5, 3, 7);
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float float4_x(float4_t _a)
- {
- return _a.fxyzw[0];
- }
-
- BX_FLOAT4_FORCE_INLINE float float4_y(float4_t _a)
- {
- return _a.fxyzw[1];
- }
-
- BX_FLOAT4_FORCE_INLINE float float4_z(float4_t _a)
- {
- return _a.fxyzw[2];
- }
-
- BX_FLOAT4_FORCE_INLINE float float4_w(float4_t _a)
- {
- return _a.fxyzw[3];
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_ld(const void* _ptr)
- {
- const uint32_t* input = reinterpret_cast<const uint32_t*>(_ptr);
- float4_t result;
- result.uxyzw[0] = input[0];
- result.uxyzw[1] = input[1];
- result.uxyzw[2] = input[2];
- result.uxyzw[3] = input[3];
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE void float4_st(void* _ptr, float4_t _a)
- {
- uint32_t* result = reinterpret_cast<uint32_t*>(_ptr);
- result[0] = _a.uxyzw[0];
- result[1] = _a.uxyzw[1];
- result[2] = _a.uxyzw[2];
- result[3] = _a.uxyzw[3];
- }
-
- BX_FLOAT4_FORCE_INLINE void float4_stx(void* _ptr, float4_t _a)
- {
- uint32_t* result = reinterpret_cast<uint32_t*>(_ptr);
- result[0] = _a.uxyzw[0];
- }
-
- BX_FLOAT4_FORCE_INLINE void float4_stream(void* _ptr, float4_t _a)
- {
- uint32_t* result = reinterpret_cast<uint32_t*>(_ptr);
- result[0] = _a.uxyzw[0];
- result[1] = _a.uxyzw[1];
- result[2] = _a.uxyzw[2];
- result[3] = _a.uxyzw[3];
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_ld(float _x, float _y, float _z, float _w)
- {
- float4_t result;
- result.vf = { _x, _y, _z, _w };
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_ild(uint32_t _x, uint32_t _y, uint32_t _z, uint32_t _w)
- {
- float4_t result;
- result.vu = { _x, _y, _z, _w };
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_splat(const void* _ptr)
- {
- const uint32_t val = *reinterpret_cast<const uint32_t*>(_ptr);
- float4_t result;
- result.vu = { val, val, val, val };
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_splat(float _a)
- {
- return float4_ld(_a, _a, _a, _a);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_isplat(uint32_t _a)
- {
- return float4_ild(_a, _a, _a, _a);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_zero()
- {
- return float4_ild(0, 0, 0, 0);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_itof(float4_t _a)
- {
- float4_t result;
- result.vf = __builtin_convertvector(_a.vi, float __attribute__((vector_size(16))) );
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_ftoi(float4_t _a)
- {
- float4_t result;
- result.vi = __builtin_convertvector(_a.vf, int32_t __attribute__((vector_size(16))) );
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_round(float4_t _a)
- {
- const float4_t tmp = float4_ftoi(_a);
- const float4_t result = float4_itof(tmp);
-
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_add(float4_t _a, float4_t _b)
- {
- float4_t result;
- result.vf = _a.vf + _b.vf;
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_sub(float4_t _a, float4_t _b)
- {
- float4_t result;
- result.vf = _a.vf - _b.vf;
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_mul(float4_t _a, float4_t _b)
- {
- float4_t result;
- result.vf = _a.vf * _b.vf;
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_div(float4_t _a, float4_t _b)
- {
- float4_t result;
- result.vf = _a.vf / _b.vf;
- return result;
- }
-
-#if 0
- BX_FLOAT4_FORCE_INLINE float4_t float4_rcp_est(float4_t _a)
- {
- float4_t result;
- const float4_t one = float4_splat(1.0f);
- result.vf = one / _a.vf;
- return result;
- }
-#endif // 0
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_sqrt(float4_t _a)
- {
- float4_t result;
- result.vf[0] = sqrtf(_a.vf[0]);
- result.vf[1] = sqrtf(_a.vf[1]);
- result.vf[2] = sqrtf(_a.vf[2]);
- result.vf[3] = sqrtf(_a.vf[3]);
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_rsqrt_est(float4_t _a)
- {
- float4_t result;
- result.vf[0] = 1.0f / sqrtf(_a.vf[0]);
- result.vf[1] = 1.0f / sqrtf(_a.vf[1]);
- result.vf[2] = 1.0f / sqrtf(_a.vf[2]);
- result.vf[3] = 1.0f / sqrtf(_a.vf[3]);
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_cmpeq(float4_t _a, float4_t _b)
- {
- float4_t result;
- result.vi = _a.vf == _b.vf;
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_cmplt(float4_t _a, float4_t _b)
- {
- float4_t result;
- result.vi = _a.vf < _b.vf;
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_cmple(float4_t _a, float4_t _b)
- {
- float4_t result;
- result.vi = _a.vf <= _b.vf;
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_cmpgt(float4_t _a, float4_t _b)
- {
- float4_t result;
- result.vi = _a.vf > _b.vf;
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_cmpge(float4_t _a, float4_t _b)
- {
- float4_t result;
- result.vi = _a.vf >= _b.vf;
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_and(float4_t _a, float4_t _b)
- {
- float4_t result;
- result.vu = _a.vu & _b.vu;
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_andc(float4_t _a, float4_t _b)
- {
- float4_t result;
- result.vu = _a.vu & ~_b.vu;
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_or(float4_t _a, float4_t _b)
- {
- float4_t result;
- result.vu = _a.vu | _b.vu;
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_xor(float4_t _a, float4_t _b)
- {
- float4_t result;
- result.vu = _a.vu ^ _b.vu;
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_sll(float4_t _a, int _count)
- {
- float4_t result;
- const float4_t count = float4_isplat(_count);
- result.vu = _a.vu << count.vi;
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_srl(float4_t _a, int _count)
- {
- float4_t result;
- const float4_t count = float4_isplat(_count);
- result.vu = _a.vu >> count.vi;
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_sra(float4_t _a, int _count)
- {
- float4_t result;
- const float4_t count = float4_isplat(_count);
- result.vi = _a.vi >> count.vi;
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_icmpeq(float4_t _a, float4_t _b)
- {
- float4_t result;
- result.vi = _a.vi == _b.vi;
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_icmplt(float4_t _a, float4_t _b)
- {
- float4_t result;
- result.vi = _a.vi < _b.vi;
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_icmpgt(float4_t _a, float4_t _b)
- {
- float4_t result;
- result.vi = _a.vi > _b.vi;
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_iadd(float4_t _a, float4_t _b)
- {
- float4_t result;
- result.vi = _a.vi + _b.vi;
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_isub(float4_t _a, float4_t _b)
- {
- float4_t result;
- result.vi = _a.vi - _b.vi;
- return result;
- }
-
-} // namespace bx
-
-#define float4_rcp float4_rcp_ni
-#define float4_orx float4_orx_ni
-#define float4_orc float4_orc_ni
-#define float4_neg float4_neg_ni
-#define float4_madd float4_madd_ni
-#define float4_nmsub float4_nmsub_ni
-#define float4_div_nr float4_div_nr_ni
-#define float4_selb float4_selb_ni
-#define float4_sels float4_sels_ni
-#define float4_not float4_not_ni
-#define float4_abs float4_abs_ni
-#define float4_clamp float4_clamp_ni
-#define float4_lerp float4_lerp_ni
-#define float4_rcp_est float4_rcp_ni
-#define float4_rsqrt float4_rsqrt_ni
-#define float4_rsqrt_nr float4_rsqrt_nr_ni
-#define float4_rsqrt_carmack float4_rsqrt_carmack_ni
-#define float4_sqrt_nr float4_sqrt_nr_ni
-#define float4_log2 float4_log2_ni
-#define float4_exp2 float4_exp2_ni
-#define float4_pow float4_pow_ni
-#define float4_cross3 float4_cross3_ni
-#define float4_normalize3 float4_normalize3_ni
-#define float4_dot3 float4_dot3_ni
-#define float4_dot float4_dot_ni
-#define float4_ceil float4_ceil_ni
-#define float4_floor float4_floor_ni
-#define float4_min float4_min_ni
-#define float4_max float4_max_ni
-#define float4_imin float4_imin_ni
-#define float4_imax float4_imax_ni
-#include "float4_ni.h"
-
-#endif // BX_FLOAT4_LANGEXT_H_HEADER_GUARD
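
The deleted float4_langext.h backend is built on GCC/Clang vector extensions: a union lets one 128-bit value be viewed as float, int32_t, or uint32_t lanes, ordinary operators act element-wise, and __builtin_shufflevector implements the swizzles. The standalone sketch below shows the same technique outside bx; it assumes Clang (or a GCC recent enough to provide __builtin_shufflevector), and the names are illustrative.

#include <cstdint>
#include <cstdio>

typedef float   f32x4 __attribute__((vector_size(16)));
typedef int32_t i32x4 __attribute__((vector_size(16)));

union v128
{
	f32x4 f; // view the 128 bits as 4 floats
	i32x4 i; // or as 4 signed 32-bit lanes (for comparison masks)
};

int main()
{
	f32x4 a = { 1.0f, 2.0f, 3.0f, 4.0f };
	f32x4 b = { 4.0f, 3.0f, 2.0f, 1.0f };

	v128 sum;  sum.f  = a + b;                               // element-wise add
	v128 mask; mask.i = a < b;                               // compare -> all-ones/all-zeros lanes
	f32x4 wzyx = __builtin_shufflevector(a, a, 3, 2, 1, 0);  // swizzle, like float4_swiz_wzyx

	printf("sum.x=%f mask.x=%d wzyx.x=%f\n", sum.f[0], (int)mask.i[0], wzyx[0]);
	return 0;
}
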
diff --git a/3rdparty/bx/include/bx/float4_neon.h b/3rdparty/bx/include/bx/float4_neon.h
deleted file mode 100644
index 3b6fa185296..00000000000
--- a/3rdparty/bx/include/bx/float4_neon.h
+++ /dev/null
@@ -1,525 +0,0 @@
-/*
- * Copyright 2010-2016 Branimir Karadzic. All rights reserved.
- * License: https://github.com/bkaradzic/bx#license-bsd-2-clause
- */
-
-#ifndef BX_FLOAT4_NEON_H_HEADER_GUARD
-#define BX_FLOAT4_NEON_H_HEADER_GUARD
-
-namespace bx
-{
- typedef __builtin_neon_sf float4_t __attribute__( (__vector_size__(16) ) );
-
- typedef __builtin_neon_sf _f32x2_t __attribute__( (__vector_size__( 8) ) );
- typedef __builtin_neon_si _i32x4_t __attribute__( (__vector_size__(16) ) );
- typedef __builtin_neon_usi _u32x4_t __attribute__( (__vector_size__(16) ) );
-
-#define ELEMx 0
-#define ELEMy 1
-#define ELEMz 2
-#define ELEMw 3
-#define IMPLEMENT_SWIZZLE(_x, _y, _z, _w) \
- BX_FLOAT4_FORCE_INLINE float4_t float4_swiz_##_x##_y##_z##_w(float4_t _a) \
- { \
- return __builtin_shuffle(_a, (_u32x4_t){ ELEM##_x, ELEM##_y, ELEM##_z, ELEM##_w }); \
- }
-
-#include "float4_swizzle.inl"
-
-#undef IMPLEMENT_SWIZZLE
-#undef ELEMw
-#undef ELEMz
-#undef ELEMy
-#undef ELEMx
-
-#define IMPLEMENT_TEST(_xyzw, _swizzle) \
- BX_FLOAT4_FORCE_INLINE bool float4_test_any_##_xyzw(float4_t _test); \
- BX_FLOAT4_FORCE_INLINE bool float4_test_all_##_xyzw(float4_t _test);
-
-IMPLEMENT_TEST(x , xxxx);
-IMPLEMENT_TEST(y , yyyy);
-IMPLEMENT_TEST(xy , xyyy);
-IMPLEMENT_TEST(z , zzzz);
-IMPLEMENT_TEST(xz , xzzz);
-IMPLEMENT_TEST(yz , yzzz);
-IMPLEMENT_TEST(xyz , xyzz);
-IMPLEMENT_TEST(w , wwww);
-IMPLEMENT_TEST(xw , xwww);
-IMPLEMENT_TEST(yw , ywww);
-IMPLEMENT_TEST(xyw , xyww);
-IMPLEMENT_TEST(zw , zwww);
-IMPLEMENT_TEST(xzw , xzww);
-IMPLEMENT_TEST(yzw , yzww);
-IMPLEMENT_TEST(xyzw , xyzw);
-
-#undef IMPLEMENT_TEST
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_shuf_xyAB(float4_t _a, float4_t _b)
- {
- return __builtin_shuffle(_a, _b, (_u32x4_t){ 0, 1, 4, 5 });
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_shuf_ABxy(float4_t _a, float4_t _b)
- {
- return __builtin_shuffle(_a, _b, (_u32x4_t){ 4, 5, 0, 1 });
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_shuf_CDzw(float4_t _a, float4_t _b)
- {
- return __builtin_shuffle(_a, _b, (_u32x4_t){ 6, 7, 2, 3 });
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_shuf_zwCD(float4_t _a, float4_t _b)
- {
- return __builtin_shuffle(_a, _b, (_u32x4_t){ 2, 3, 6, 7 });
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_shuf_xAyB(float4_t _a, float4_t _b)
- {
- return __builtin_shuffle(_a, _b, (_u32x4_t){ 0, 4, 1, 5 });
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_shuf_yBxA(float4_t _a, float4_t _b)
- {
- return __builtin_shuffle(_a, _b, (_u32x4_t){ 1, 5, 0, 4 });
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_shuf_zCwD(float4_t _a, float4_t _b)
- {
- return __builtin_shuffle(_a, _b, (_u32x4_t){ 2, 6, 3, 7 });
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_shuf_CzDw(float4_t _a, float4_t _b)
- {
- return __builtin_shuffle(_a, _b, (_u32x4_t){ 6, 2, 7, 3 });
- }
-
- BX_FLOAT4_FORCE_INLINE float float4_x(float4_t _a)
- {
- return __builtin_neon_vget_lanev4sf(_a, 0, 3);
- }
-
- BX_FLOAT4_FORCE_INLINE float float4_y(float4_t _a)
- {
- return __builtin_neon_vget_lanev4sf(_a, 1, 3);
- }
-
- BX_FLOAT4_FORCE_INLINE float float4_z(float4_t _a)
- {
- return __builtin_neon_vget_lanev4sf(_a, 2, 3);
- }
-
- BX_FLOAT4_FORCE_INLINE float float4_w(float4_t _a)
- {
- return __builtin_neon_vget_lanev4sf(_a, 3, 3);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_ld(const void* _ptr)
- {
- return __builtin_neon_vld1v4sf( (const __builtin_neon_sf*)_ptr);
- }
-
- BX_FLOAT4_FORCE_INLINE void float4_st(void* _ptr, float4_t _a)
- {
- __builtin_neon_vst1v4sf( (__builtin_neon_sf*)_ptr, _a);
- }
-
- BX_FLOAT4_FORCE_INLINE void float4_stx(void* _ptr, float4_t _a)
- {
- __builtin_neon_vst1_lanev4sf( (__builtin_neon_sf*)_ptr, _a, 0);
- }
-
- BX_FLOAT4_FORCE_INLINE void float4_stream(void* _ptr, float4_t _a)
- {
- __builtin_neon_vst1v4sf( (__builtin_neon_sf*)_ptr, _a);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_ld(float _x, float _y, float _z, float _w)
- {
- const float4_t val[4] = {_x, _y, _z, _w};
- return float4_ld(val);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_ild(uint32_t _x, uint32_t _y, uint32_t _z, uint32_t _w)
- {
- const uint32_t val[4] = {_x, _y, _z, _w};
- const _i32x4_t tmp = __builtin_neon_vld1v4si( (const __builtin_neon_si*)val);
- const float4_t result = __builtin_neon_vreinterpretv4sfv4si(tmp);
-
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_splat(const void* _ptr)
- {
- const float4_t tmp0 = __builtin_neon_vld1v4sf( (const __builtin_neon_sf *)_ptr);
- const _f32x2_t tmp1 = __builtin_neon_vget_lowv4sf(tmp0);
- const float4_t result = __builtin_neon_vdup_lanev4sf(tmp1, 0);
-
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_splat(float _a)
- {
- return __builtin_neon_vdup_nv4sf(_a);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_isplat(uint32_t _a)
- {
- const _i32x4_t tmp = __builtin_neon_vdup_nv4si( (__builtin_neon_si)_a);
- const float4_t result = __builtin_neon_vreinterpretv4sfv4si(tmp);
-
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_zero()
- {
- return float4_isplat(0);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_itof(float4_t _a)
- {
- const _i32x4_t itof = __builtin_neon_vreinterpretv4siv4sf(_a);
- const float4_t result = __builtin_neon_vcvtv4si(itof, 1);
-
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_ftoi(float4_t _a)
- {
- const _i32x4_t ftoi = __builtin_neon_vcvtv4sf(_a, 1);
- const float4_t result = __builtin_neon_vreinterpretv4sfv4si(ftoi);
-
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_add(float4_t _a, float4_t _b)
- {
- return __builtin_neon_vaddv4sf(_a, _b, 3);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_sub(float4_t _a, float4_t _b)
- {
- return __builtin_neon_vsubv4sf(_a, _b, 3);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_mul(float4_t _a, float4_t _b)
- {
- return __builtin_neon_vmulv4sf(_a, _b, 3);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_rcp_est(float4_t _a)
- {
- return __builtin_neon_vrecpev4sf(_a, 3);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_rsqrt_est(float4_t _a)
- {
- return __builtin_neon_vrsqrtev4sf(_a, 3);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_cmpeq(float4_t _a, float4_t _b)
- {
- const _i32x4_t tmp = __builtin_neon_vceqv4sf(_a, _b, 3);
- const float4_t result = __builtin_neon_vreinterpretv4sfv4si(tmp);
-
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_cmplt(float4_t _a, float4_t _b)
- {
- const _i32x4_t tmp = __builtin_neon_vcgtv4sf(_b, _a, 3);
- const float4_t result = __builtin_neon_vreinterpretv4sfv4si(tmp);
-
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_cmple(float4_t _a, float4_t _b)
- {
- const _i32x4_t tmp = __builtin_neon_vcgev4sf(_b, _a, 3);
- const float4_t result = __builtin_neon_vreinterpretv4sfv4si(tmp);
-
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_cmpgt(float4_t _a, float4_t _b)
- {
- const _i32x4_t tmp = __builtin_neon_vcgtv4sf(_a, _b, 3);
- const float4_t result = __builtin_neon_vreinterpretv4sfv4si(tmp);
-
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_cmpge(float4_t _a, float4_t _b)
- {
- const _i32x4_t tmp = __builtin_neon_vcgev4sf(_a, _b, 3);
- const float4_t result = __builtin_neon_vreinterpretv4sfv4si(tmp);
-
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_min(float4_t _a, float4_t _b)
- {
- return __builtin_neon_vminv4sf(_a, _b, 3);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_max(float4_t _a, float4_t _b)
- {
- return __builtin_neon_vmaxv4sf(_a, _b, 3);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_and(float4_t _a, float4_t _b)
- {
- const _i32x4_t tmp0 = __builtin_neon_vreinterpretv4siv4sf(_a);
- const _i32x4_t tmp1 = __builtin_neon_vreinterpretv4siv4sf(_b);
- const _i32x4_t tmp2 = __builtin_neon_vandv4si(tmp0, tmp1, 0);
- const float4_t result = __builtin_neon_vreinterpretv4sfv4si(tmp2);
-
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_andc(float4_t _a, float4_t _b)
- {
- const _i32x4_t tmp0 = __builtin_neon_vreinterpretv4siv4sf(_a);
- const _i32x4_t tmp1 = __builtin_neon_vreinterpretv4siv4sf(_b);
- const _i32x4_t tmp2 = __builtin_neon_vbicv4si(tmp0, tmp1, 0);
- const float4_t result = __builtin_neon_vreinterpretv4sfv4si(tmp2);
-
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_or(float4_t _a, float4_t _b)
- {
- const _i32x4_t tmp0 = __builtin_neon_vreinterpretv4siv4sf(_a);
- const _i32x4_t tmp1 = __builtin_neon_vreinterpretv4siv4sf(_b);
- const _i32x4_t tmp2 = __builtin_neon_vorrv4si(tmp0, tmp1, 0);
- const float4_t result = __builtin_neon_vreinterpretv4sfv4si(tmp2);
-
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_xor(float4_t _a, float4_t _b)
- {
- const _i32x4_t tmp0 = __builtin_neon_vreinterpretv4siv4sf(_a);
- const _i32x4_t tmp1 = __builtin_neon_vreinterpretv4siv4sf(_b);
- const _i32x4_t tmp2 = __builtin_neon_veorv4si(tmp0, tmp1, 0);
- const float4_t result = __builtin_neon_vreinterpretv4sfv4si(tmp2);
-
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_sll(float4_t _a, int _count)
- {
- if (__builtin_constant_p(_count) )
- {
- const _i32x4_t tmp0 = __builtin_neon_vreinterpretv4siv4sf(_a);
- const _i32x4_t tmp1 = __builtin_neon_vshl_nv4si(tmp0, _count, 0);
- const float4_t result = __builtin_neon_vreinterpretv4sfv4si(tmp1);
-
- return result;
- }
-
- const _i32x4_t tmp0 = __builtin_neon_vreinterpretv4siv4sf(_a);
- const _i32x4_t shift = __builtin_neon_vdup_nv4si( (__builtin_neon_si)_count);
- const _i32x4_t tmp1 = __builtin_neon_vshlv4si(tmp0, shift, 1);
- const float4_t result = __builtin_neon_vreinterpretv4sfv4si(tmp1);
-
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_srl(float4_t _a, int _count)
- {
- if (__builtin_constant_p(_count) )
- {
- const _i32x4_t tmp0 = __builtin_neon_vreinterpretv4siv4sf(_a);
- const _i32x4_t tmp1 = __builtin_neon_vshr_nv4si(tmp0, _count, 0);
- const float4_t result = __builtin_neon_vreinterpretv4sfv4si(tmp1);
-
- return result;
- }
-
- const _i32x4_t tmp0 = __builtin_neon_vreinterpretv4siv4sf(_a);
- const _i32x4_t shift = __builtin_neon_vdup_nv4si( (__builtin_neon_si)-_count);
- const _i32x4_t tmp1 = __builtin_neon_vshlv4si(tmp0, shift, 1);
- const float4_t result = __builtin_neon_vreinterpretv4sfv4si(tmp1);
-
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_sra(float4_t _a, int _count)
- {
- if (__builtin_constant_p(_count) )
- {
- const _i32x4_t tmp0 = __builtin_neon_vreinterpretv4siv4sf(_a);
- const _i32x4_t tmp1 = __builtin_neon_vshr_nv4si(tmp0, _count, 1);
- const float4_t result = __builtin_neon_vreinterpretv4sfv4si(tmp1);
-
- return result;
- }
-
- const _i32x4_t tmp0 = __builtin_neon_vreinterpretv4siv4sf(_a);
- const _i32x4_t shift = __builtin_neon_vdup_nv4si( (__builtin_neon_si)-_count);
- const _i32x4_t tmp1 = __builtin_neon_vshlv4si(tmp0, shift, 1);
- const float4_t result = __builtin_neon_vreinterpretv4sfv4si(tmp1);
-
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_madd(float4_t _a, float4_t _b, float4_t _c)
- {
- return __builtin_neon_vmlav4sf(_c, _a, _b, 3);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_nmsub(float4_t _a, float4_t _b, float4_t _c)
- {
- return __builtin_neon_vmlsv4sf(_c, _a, _b, 3);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_icmpeq(float4_t _a, float4_t _b)
- {
- const _i32x4_t tmp0 = __builtin_neon_vreinterpretv4siv4sf(_a);
- const _i32x4_t tmp1 = __builtin_neon_vreinterpretv4siv4sf(_b);
- const _i32x4_t tmp2 = __builtin_neon_vceqv4si(tmp0, tmp1, 1);
- const float4_t result = __builtin_neon_vreinterpretv4sfv4si(tmp2);
-
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_icmplt(float4_t _a, float4_t _b)
- {
- const _i32x4_t tmp0 = __builtin_neon_vreinterpretv4siv4sf(_a);
- const _i32x4_t tmp1 = __builtin_neon_vreinterpretv4siv4sf(_b);
- const _i32x4_t tmp2 = __builtin_neon_vcgtv4si(tmp1, tmp0, 1);
- const float4_t result = __builtin_neon_vreinterpretv4sfv4si(tmp2);
-
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_icmpgt(float4_t _a, float4_t _b)
- {
- const _i32x4_t tmp0 = __builtin_neon_vreinterpretv4siv4sf(_a);
- const _i32x4_t tmp1 = __builtin_neon_vreinterpretv4siv4sf(_b);
- const _i32x4_t tmp2 = __builtin_neon_vcgtv4si(tmp0, tmp1, 1);
- const float4_t result = __builtin_neon_vreinterpretv4sfv4si(tmp2);
-
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_imin(float4_t _a, float4_t _b)
- {
- const _i32x4_t tmp0 = __builtin_neon_vreinterpretv4siv4sf(_a);
- const _i32x4_t tmp1 = __builtin_neon_vreinterpretv4siv4sf(_b);
- const _i32x4_t tmp2 = __builtin_neon_vminv4si(tmp0, tmp1, 1);
- const float4_t result = __builtin_neon_vreinterpretv4sfv4si(tmp2);
-
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_imax(float4_t _a, float4_t _b)
- {
- const _i32x4_t tmp0 = __builtin_neon_vreinterpretv4siv4sf(_a);
- const _i32x4_t tmp1 = __builtin_neon_vreinterpretv4siv4sf(_b);
- const _i32x4_t tmp2 = __builtin_neon_vmaxv4si(tmp0, tmp1, 1);
- const float4_t result = __builtin_neon_vreinterpretv4sfv4si(tmp2);
-
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_iadd(float4_t _a, float4_t _b)
- {
- const _i32x4_t tmp0 = __builtin_neon_vreinterpretv4siv4sf(_a);
- const _i32x4_t tmp1 = __builtin_neon_vreinterpretv4siv4sf(_b);
- const _i32x4_t tmp2 = __builtin_neon_vaddv4si(tmp0, tmp1, 1);
- const float4_t result = __builtin_neon_vreinterpretv4sfv4si(tmp2);
-
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_isub(float4_t _a, float4_t _b)
- {
- const _i32x4_t tmp0 = __builtin_neon_vreinterpretv4siv4sf(_a);
- const _i32x4_t tmp1 = __builtin_neon_vreinterpretv4siv4sf(_b);
- const _i32x4_t tmp2 = __builtin_neon_vsubv4si(tmp0, tmp1, 1);
- const float4_t result = __builtin_neon_vreinterpretv4sfv4si(tmp2);
-
- return result;
- }
-
-} // namespace bx
-
-#define float4_shuf_xAzC float4_shuf_xAzC_ni
-#define float4_shuf_yBwD float4_shuf_yBwD_ni
-#define float4_rcp float4_rcp_ni
-#define float4_orx float4_orx_ni
-#define float4_orc float4_orc_ni
-#define float4_neg float4_neg_ni
-#define float4_madd float4_madd_ni
-#define float4_nmsub float4_nmsub_ni
-#define float4_div_nr float4_div_nr_ni
-#define float4_div float4_div_nr_ni
-#define float4_selb float4_selb_ni
-#define float4_sels float4_sels_ni
-#define float4_not float4_not_ni
-#define float4_abs float4_abs_ni
-#define float4_clamp float4_clamp_ni
-#define float4_lerp float4_lerp_ni
-#define float4_rsqrt float4_rsqrt_ni
-#define float4_rsqrt_nr float4_rsqrt_nr_ni
-#define float4_rsqrt_carmack float4_rsqrt_carmack_ni
-#define float4_sqrt_nr float4_sqrt_nr_ni
-#define float4_sqrt float4_sqrt_nr_ni
-#define float4_log2 float4_log2_ni
-#define float4_exp2 float4_exp2_ni
-#define float4_pow float4_pow_ni
-#define float4_cross3 float4_cross3_ni
-#define float4_normalize3 float4_normalize3_ni
-#define float4_dot3 float4_dot3_ni
-#define float4_dot float4_dot_ni
-#define float4_ceil float4_ceil_ni
-#define float4_floor float4_floor_ni
-
-#include "float4_ni.h"
-
-namespace bx
-{
-#define IMPLEMENT_TEST(_xyzw, _swizzle) \
- BX_FLOAT4_FORCE_INLINE bool float4_test_any_##_xyzw(float4_t _test) \
- { \
- const float4_t tmp0 = float4_swiz_##_swizzle(_test); \
- return float4_test_any_ni(tmp0); \
- } \
- \
- BX_FLOAT4_FORCE_INLINE bool float4_test_all_##_xyzw(float4_t _test) \
- { \
- const float4_t tmp0 = float4_swiz_##_swizzle(_test); \
- return float4_test_all_ni(tmp0); \
- }
-
-IMPLEMENT_TEST(x , xxxx);
-IMPLEMENT_TEST(y , yyyy);
-IMPLEMENT_TEST(xy , xyyy);
-IMPLEMENT_TEST(z , zzzz);
-IMPLEMENT_TEST(xz , xzzz);
-IMPLEMENT_TEST(yz , yzzz);
-IMPLEMENT_TEST(xyz , xyzz);
-IMPLEMENT_TEST(w , wwww);
-IMPLEMENT_TEST(xw , xwww);
-IMPLEMENT_TEST(yw , ywww);
-IMPLEMENT_TEST(xyw , xyww);
-IMPLEMENT_TEST(zw , zwww);
-IMPLEMENT_TEST(xzw , xzww);
-IMPLEMENT_TEST(yzw , yzww);
-
- BX_FLOAT4_FORCE_INLINE bool float4_test_any_xyzw(float4_t _test)
- {
- return float4_test_any_ni(_test);
- }
-
- BX_FLOAT4_FORCE_INLINE bool float4_test_all_xyzw(float4_t _test)
- {
- return float4_test_all_ni(_test);
- }
-
-#undef IMPLEMENT_TEST
-} // namespace bx
-
-#endif // BX_FLOAT4_NEON_H_HEADER_GUARD
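
The deleted NEON backend calls raw GCC __builtin_neon_* builtins directly; note the vreinterpret calls that move values between float and integer lanes for the bitwise and comparison operations. The sketch below expresses a few of the same operations with the standard <arm_neon.h> intrinsics instead; the helper names are hypothetical and the intrinsic spellings are not what the deleted header used.

#include <arm_neon.h>
#include <cstdio>

inline float32x4_t v_load(const float* _ptr)             { return vld1q_f32(_ptr); }
inline void        v_store(float* _ptr, float32x4_t _a)  { vst1q_f32(_ptr, _a); }
inline float32x4_t v_splat(float _v)                     { return vdupq_n_f32(_v); }

inline float32x4_t v_madd(float32x4_t _a, float32x4_t _b, float32x4_t _c)
{
	return vmlaq_f32(_c, _a, _b); // _c + _a*_b, same operand order as float4_madd
}

inline float32x4_t v_cmplt(float32x4_t _a, float32x4_t _b)
{
	// Comparisons yield an unsigned-integer mask; reinterpret it back to float
	// lanes, mirroring the vreinterpret pattern in the deleted code.
	return vreinterpretq_f32_u32(vcltq_f32(_a, _b));
}

int main()
{
	float data[4] = { 1.0f, 2.0f, 3.0f, 4.0f };
	const float32x4_t a = v_load(data);
	const float32x4_t r = v_madd(a, v_splat(2.0f), v_splat(1.0f)); // 1 + 2*a per lane
	v_store(data, r);
	printf("%f %f %f %f\n", data[0], data[1], data[2], data[3]);
	return 0;
}
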
diff --git a/3rdparty/bx/include/bx/float4_ni.h b/3rdparty/bx/include/bx/float4_ni.h
deleted file mode 100644
index 644fa6eb386..00000000000
--- a/3rdparty/bx/include/bx/float4_ni.h
+++ /dev/null
@@ -1,509 +0,0 @@
-/*
- * Copyright 2010-2016 Branimir Karadzic. All rights reserved.
- * License: https://github.com/bkaradzic/bx#license-bsd-2-clause
- */
-
-#ifndef BX_FLOAT4_NI_H_HEADER_GUARD
-#define BX_FLOAT4_NI_H_HEADER_GUARD
-
-namespace bx
-{
- BX_FLOAT4_INLINE float4_t float4_rcp_ni(float4_t _a);
-
- BX_FLOAT4_INLINE float4_t float4_shuf_xAzC_ni(float4_t _a, float4_t _b)
- {
- const float4_t xAyB = float4_shuf_xAyB(_a, _b);
- const float4_t zCwD = float4_shuf_zCwD(_a, _b);
- const float4_t result = float4_shuf_xyAB(xAyB, zCwD);
-
- return result;
- }
-
- BX_FLOAT4_INLINE float4_t float4_shuf_yBwD_ni(float4_t _a, float4_t _b)
- {
- const float4_t xAyB = float4_shuf_xAyB(_a, _b);
- const float4_t zCwD = float4_shuf_zCwD(_a, _b);
- const float4_t result = float4_shuf_zwCD(xAyB, zCwD);
-
- return result;
- }
-
- BX_FLOAT4_INLINE float4_t float4_madd_ni(float4_t _a, float4_t _b, float4_t _c)
- {
- const float4_t mul = float4_mul(_a, _b);
- const float4_t result = float4_add(mul, _c);
-
- return result;
- }
-
- BX_FLOAT4_INLINE float4_t float4_nmsub_ni(float4_t _a, float4_t _b, float4_t _c)
- {
- const float4_t mul = float4_mul(_a, _b);
- const float4_t result = float4_sub(_c, mul);
-
- return result;
- }
-
- BX_FLOAT4_INLINE float4_t float4_div_nr_ni(float4_t _a, float4_t _b)
- {
- const float4_t oneish = float4_isplat(0x3f800001);
- const float4_t est = float4_rcp_est(_b);
- const float4_t iter0 = float4_mul(_a, est);
- const float4_t tmp1 = float4_nmsub(_b, est, oneish);
- const float4_t result = float4_madd(tmp1, iter0, iter0);
-
- return result;
- }
-
- BX_FLOAT4_INLINE float4_t float4_rcp_ni(float4_t _a)
- {
- const float4_t one = float4_splat(1.0f);
- const float4_t result = float4_div(one, _a);
-
- return result;
- }
-
- BX_FLOAT4_INLINE float4_t float4_orx_ni(float4_t _a)
- {
- const float4_t zwxy = float4_swiz_zwxy(_a);
- const float4_t tmp0 = float4_or(_a, zwxy);
- const float4_t tmp1 = float4_swiz_yyyy(_a);
- const float4_t tmp2 = float4_or(tmp0, tmp1);
- const float4_t mf000 = float4_ild(UINT32_MAX, 0, 0, 0);
- const float4_t result = float4_and(tmp2, mf000);
-
- return result;
- }
-
- BX_FLOAT4_INLINE float4_t float4_orc_ni(float4_t _a, float4_t _b)
- {
- const float4_t aorb = float4_or(_a, _b);
- const float4_t mffff = float4_isplat(UINT32_MAX);
- const float4_t result = float4_xor(aorb, mffff);
-
- return result;
- }
-
- BX_FLOAT4_INLINE float4_t float4_neg_ni(float4_t _a)
- {
- const float4_t zero = float4_zero();
- const float4_t result = float4_sub(zero, _a);
-
- return result;
- }
-
- BX_FLOAT4_INLINE float4_t float4_selb_ni(float4_t _mask, float4_t _a, float4_t _b)
- {
- const float4_t sel_a = float4_and(_a, _mask);
- const float4_t sel_b = float4_andc(_b, _mask);
- const float4_t result = float4_or(sel_a, sel_b);
-
- return result;
- }
-
- BX_FLOAT4_INLINE float4_t float4_sels_ni(float4_t _test, float4_t _a, float4_t _b)
- {
- const float4_t mask = float4_sra(_test, 31);
- const float4_t result = float4_selb(mask, _a, _b);
-
- return result;
- }
-
- BX_FLOAT4_INLINE float4_t float4_not_ni(float4_t _a)
- {
- const float4_t mffff = float4_isplat(UINT32_MAX);
- const float4_t result = float4_xor(_a, mffff);
-
- return result;
- }
-
- BX_FLOAT4_INLINE float4_t float4_min_ni(float4_t _a, float4_t _b)
- {
- const float4_t mask = float4_cmplt(_a, _b);
- const float4_t result = float4_selb(mask, _a, _b);
-
- return result;
- }
-
- BX_FLOAT4_INLINE float4_t float4_max_ni(float4_t _a, float4_t _b)
- {
- const float4_t mask = float4_cmpgt(_a, _b);
- const float4_t result = float4_selb(mask, _a, _b);
-
- return result;
- }
-
- BX_FLOAT4_INLINE float4_t float4_abs_ni(float4_t _a)
- {
- const float4_t a_neg = float4_neg(_a);
- const float4_t result = float4_max(a_neg, _a);
-
- return result;
- }
-
- BX_FLOAT4_INLINE float4_t float4_imin_ni(float4_t _a, float4_t _b)
- {
- const float4_t mask = float4_icmplt(_a, _b);
- const float4_t result = float4_selb(mask, _a, _b);
-
- return result;
- }
-
- BX_FLOAT4_INLINE float4_t float4_imax_ni(float4_t _a, float4_t _b)
- {
- const float4_t mask = float4_icmpgt(_a, _b);
- const float4_t result = float4_selb(mask, _a, _b);
-
- return result;
- }
-
- BX_FLOAT4_INLINE float4_t float4_clamp_ni(float4_t _a, float4_t _min, float4_t _max)
- {
- const float4_t tmp = float4_min(_a, _max);
- const float4_t result = float4_max(tmp, _min);
-
- return result;
- }
-
- BX_FLOAT4_INLINE float4_t float4_lerp_ni(float4_t _a, float4_t _b, float4_t _s)
- {
- const float4_t ba = float4_sub(_b, _a);
- const float4_t result = float4_madd(_s, ba, _a);
-
- return result;
- }
-
- BX_FLOAT4_INLINE float4_t float4_sqrt_nr_ni(float4_t _a)
- {
- const float4_t half = float4_splat(0.5f);
- const float4_t one = float4_splat(1.0f);
- const float4_t tmp0 = float4_rsqrt_est(_a);
- const float4_t tmp1 = float4_mul(tmp0, _a);
- const float4_t tmp2 = float4_mul(tmp1, half);
- const float4_t tmp3 = float4_nmsub(tmp0, tmp1, one);
- const float4_t result = float4_madd(tmp3, tmp2, tmp1);
-
- return result;
- }
-
- BX_FLOAT4_INLINE float4_t float4_sqrt_nr1_ni(float4_t _a)
- {
- const float4_t half = float4_splat(0.5f);
-
- float4_t result = _a;
- for (uint32_t ii = 0; ii < 11; ++ii)
- {
- const float4_t tmp1 = float4_div(_a, result);
- const float4_t tmp2 = float4_add(tmp1, result);
- result = float4_mul(tmp2, half);
- }
-
- return result;
- }
-
- BX_FLOAT4_INLINE float4_t float4_rsqrt_ni(float4_t _a)
- {
- const float4_t one = float4_splat(1.0f);
- const float4_t sqrt = float4_sqrt(_a);
- const float4_t result = float4_div(one, sqrt);
-
- return result;
- }
-
- BX_FLOAT4_INLINE float4_t float4_rsqrt_nr_ni(float4_t _a)
- {
- const float4_t rsqrt = float4_rsqrt_est(_a);
- const float4_t iter0 = float4_mul(_a, rsqrt);
- const float4_t iter1 = float4_mul(iter0, rsqrt);
- const float4_t half = float4_splat(0.5f);
- const float4_t half_rsqrt = float4_mul(half, rsqrt);
- const float4_t three = float4_splat(3.0f);
- const float4_t three_sub_iter1 = float4_sub(three, iter1);
- const float4_t result = float4_mul(half_rsqrt, three_sub_iter1);
-
- return result;
- }
-
- BX_FLOAT4_INLINE float4_t float4_rsqrt_carmack_ni(float4_t _a)
- {
- const float4_t half = float4_splat(0.5f);
- const float4_t ah = float4_mul(half, _a);
- const float4_t ashift = float4_sra(_a, 1);
- const float4_t magic = float4_isplat(0x5f3759df);
- const float4_t msuba = float4_isub(magic, ashift);
- const float4_t msubasq = float4_mul(msuba, msuba);
- const float4_t tmp0 = float4_splat(1.5f);
- const float4_t tmp1 = float4_mul(ah, msubasq);
- const float4_t tmp2 = float4_sub(tmp0, tmp1);
- const float4_t result = float4_mul(msuba, tmp2);
-
- return result;
- }
-
- namespace float4_logexp_detail
- {
- BX_FLOAT4_INLINE float4_t float4_poly1(float4_t _a, float _b, float _c)
- {
- const float4_t bbbb = float4_splat(_b);
- const float4_t cccc = float4_splat(_c);
- const float4_t result = float4_madd(cccc, _a, bbbb);
-
- return result;
- }
-
- BX_FLOAT4_INLINE float4_t float4_poly2(float4_t _a, float _b, float _c, float _d)
- {
- const float4_t bbbb = float4_splat(_b);
- const float4_t poly = float4_poly1(_a, _c, _d);
- const float4_t result = float4_madd(poly, _a, bbbb);
-
- return result;
- }
-
- BX_FLOAT4_INLINE float4_t float4_poly3(float4_t _a, float _b, float _c, float _d, float _e)
- {
- const float4_t bbbb = float4_splat(_b);
- const float4_t poly = float4_poly2(_a, _c, _d, _e);
- const float4_t result = float4_madd(poly, _a, bbbb);
-
- return result;
- }
-
- BX_FLOAT4_INLINE float4_t float4_poly4(float4_t _a, float _b, float _c, float _d, float _e, float _f)
- {
- const float4_t bbbb = float4_splat(_b);
- const float4_t poly = float4_poly3(_a, _c, _d, _e, _f);
- const float4_t result = float4_madd(poly, _a, bbbb);
-
- return result;
- }
-
- BX_FLOAT4_INLINE float4_t float4_poly5(float4_t _a, float _b, float _c, float _d, float _e, float _f, float _g)
- {
- const float4_t bbbb = float4_splat(_b);
- const float4_t poly = float4_poly4(_a, _c, _d, _e, _f, _g);
- const float4_t result = float4_madd(poly, _a, bbbb);
-
- return result;
- }
-
- BX_FLOAT4_INLINE float4_t float4_logpoly(float4_t _a)
- {
-#if 1
- const float4_t result = float4_poly5(_a
- , 3.11578814719469302614f, -3.32419399085241980044f
- , 2.59883907202499966007f, -1.23152682416275988241f
- , 0.318212422185251071475f, -0.0344359067839062357313f
- );
-#elif 0
- const float4_t result = float4_poly4(_a
- , 2.8882704548164776201f, -2.52074962577807006663f
- , 1.48116647521213171641f, -0.465725644288844778798f
- , 0.0596515482674574969533f
- );
-#elif 0
- const float4_t result = float4_poly3(_a
- , 2.61761038894603480148f, -1.75647175389045657003f
- , 0.688243882994381274313f, -0.107254423828329604454f
- );
-#else
- const float4_t result = float4_poly2(_a
- , 2.28330284476918490682f, -1.04913055217340124191f
- , 0.204446009836232697516f
- );
-#endif
-
- return result;
- }
-
- BX_FLOAT4_INLINE float4_t float4_exppoly(float4_t _a)
- {
-#if 1
- const float4_t result = float4_poly5(_a
- , 9.9999994e-1f, 6.9315308e-1f
- , 2.4015361e-1f, 5.5826318e-2f
- , 8.9893397e-3f, 1.8775767e-3f
- );
-#elif 0
- const float4_t result = float4_poly4(_a
- , 1.0000026f, 6.9300383e-1f
- , 2.4144275e-1f, 5.2011464e-2f
- , 1.3534167e-2f
- );
-#elif 0
- const float4_t result = float4_poly3(_a
- , 9.9992520e-1f, 6.9583356e-1f
- , 2.2606716e-1f, 7.8024521e-2f
- );
-#else
- const float4_t result = float4_poly2(_a
- , 1.0017247f, 6.5763628e-1f
- , 3.3718944e-1f
- );
-#endif // 0
-
- return result;
- }
- } // namespace float4_internal
-
- BX_FLOAT4_INLINE float4_t float4_log2_ni(float4_t _a)
- {
- const float4_t expmask = float4_isplat(0x7f800000);
- const float4_t mantmask = float4_isplat(0x007fffff);
- const float4_t one = float4_splat(1.0f);
-
- const float4_t c127 = float4_isplat(127);
- const float4_t aexp = float4_and(_a, expmask);
- const float4_t aexpsr = float4_srl(aexp, 23);
- const float4_t tmp0 = float4_isub(aexpsr, c127);
- const float4_t exp = float4_itof(tmp0);
-
- const float4_t amask = float4_and(_a, mantmask);
- const float4_t mant = float4_or(amask, one);
-
- const float4_t poly = float4_logexp_detail::float4_logpoly(mant);
-
- const float4_t mandiff = float4_sub(mant, one);
- const float4_t result = float4_madd(poly, mandiff, exp);
-
- return result;
- }
-
- BX_FLOAT4_INLINE float4_t float4_exp2_ni(float4_t _a)
- {
- const float4_t min = float4_splat( 129.0f);
- const float4_t max = float4_splat(-126.99999f);
- const float4_t tmp0 = float4_min(_a, min);
- const float4_t aaaa = float4_max(tmp0, max);
-
- const float4_t half = float4_splat(0.5f);
- const float4_t tmp2 = float4_sub(aaaa, half);
- const float4_t ipart = float4_ftoi(tmp2);
- const float4_t iround = float4_itof(ipart);
- const float4_t fpart = float4_sub(aaaa, iround);
-
- const float4_t c127 = float4_isplat(127);
- const float4_t tmp5 = float4_iadd(ipart, c127);
- const float4_t expipart = float4_sll(tmp5, 23);
-
- const float4_t expfpart = float4_logexp_detail::float4_exppoly(fpart);
-
- const float4_t result = float4_mul(expipart, expfpart);
-
- return result;
- }
-
- BX_FLOAT4_INLINE float4_t float4_pow_ni(float4_t _a, float4_t _b)
- {
- const float4_t alog2 = float4_log2(_a);
- const float4_t alog2b = float4_mul(alog2, _b);
- const float4_t result = float4_exp2(alog2b);
-
- return result;
- }
-
- BX_FLOAT4_INLINE float4_t float4_dot3_ni(float4_t _a, float4_t _b)
- {
- const float4_t xyzw = float4_mul(_a, _b);
- const float4_t xxxx = float4_swiz_xxxx(xyzw);
- const float4_t yyyy = float4_swiz_yyyy(xyzw);
- const float4_t zzzz = float4_swiz_zzzz(xyzw);
- const float4_t tmp1 = float4_add(xxxx, yyyy);
- const float4_t result = float4_add(zzzz, tmp1);
- return result;
- }
-
- BX_FLOAT4_INLINE float4_t float4_cross3_ni(float4_t _a, float4_t _b)
- {
- // a.yzx * b.zxy - a.zxy * b.yzx == (a * b.yzx - a.yzx * b).yzx
-#if 0
- const float4_t a_yzxw = float4_swiz_yzxw(_a);
- const float4_t a_zxyw = float4_swiz_zxyw(_a);
- const float4_t b_zxyw = float4_swiz_zxyw(_b);
- const float4_t b_yzxw = float4_swiz_yzxw(_b);
- const float4_t tmp = float4_mul(a_yzxw, b_zxyw);
- const float4_t result = float4_nmsub(a_zxyw, b_yzxw, tmp);
-#else
- const float4_t a_yzxw = float4_swiz_yzxw(_a);
- const float4_t b_yzxw = float4_swiz_yzxw(_b);
- const float4_t tmp0 = float4_mul(_a, b_yzxw);
- const float4_t tmp1 = float4_nmsub(a_yzxw, _b, tmp0);
- const float4_t result = float4_swiz_yzxw(tmp1);
-#endif
-
- return result;
- }
-
- BX_FLOAT4_INLINE float4_t float4_normalize3_ni(float4_t _a)
- {
- const float4_t dot3 = float4_dot3(_a, _a);
- const float4_t invSqrt = float4_rsqrt(dot3);
- const float4_t result = float4_mul(_a, invSqrt);
-
- return result;
- }
-
- BX_FLOAT4_INLINE float4_t float4_dot_ni(float4_t _a, float4_t _b)
- {
- const float4_t xyzw = float4_mul(_a, _b);
- const float4_t yzwx = float4_swiz_yzwx(xyzw);
- const float4_t tmp0 = float4_add(xyzw, yzwx);
- const float4_t zwxy = float4_swiz_zwxy(tmp0);
- const float4_t result = float4_add(tmp0, zwxy);
-
- return result;
- }
-
- BX_FLOAT4_INLINE float4_t float4_ceil_ni(float4_t _a)
- {
- const float4_t tmp0 = float4_ftoi(_a);
- const float4_t tmp1 = float4_itof(tmp0);
- const float4_t mask = float4_cmplt(tmp1, _a);
- const float4_t one = float4_splat(1.0f);
- const float4_t tmp2 = float4_and(one, mask);
- const float4_t result = float4_add(tmp1, tmp2);
-
- return result;
- }
-
- BX_FLOAT4_INLINE float4_t float4_floor_ni(float4_t _a)
- {
- const float4_t tmp0 = float4_ftoi(_a);
- const float4_t tmp1 = float4_itof(tmp0);
- const float4_t mask = float4_cmpgt(tmp1, _a);
- const float4_t one = float4_splat(1.0f);
- const float4_t tmp2 = float4_and(one, mask);
- const float4_t result = float4_sub(tmp1, tmp2);
-
- return result;
- }
-
- BX_FLOAT4_INLINE bool float4_test_any_ni(float4_t _a)
- {
- const float4_t mask = float4_sra(_a, 31);
- const float4_t zwxy = float4_swiz_zwxy(mask);
- const float4_t tmp0 = float4_or(mask, zwxy);
- const float4_t tmp1 = float4_swiz_yyyy(tmp0);
- const float4_t tmp2 = float4_or(tmp0, tmp1);
- int res;
- float4_stx(&res, tmp2);
- return 0 != res;
- }
-
- BX_FLOAT4_INLINE bool float4_test_all_ni(float4_t _a)
- {
- const float4_t bits = float4_sra(_a, 31);
- const float4_t m1248 = float4_ild(1, 2, 4, 8);
- const float4_t mask = float4_and(bits, m1248);
- const float4_t zwxy = float4_swiz_zwxy(mask);
- const float4_t tmp0 = float4_or(mask, zwxy);
- const float4_t tmp1 = float4_swiz_yyyy(tmp0);
- const float4_t tmp2 = float4_or(tmp0, tmp1);
- int res;
- float4_stx(&res, tmp2);
- return 0xf == res;
- }
-
-} // namespace bx
-
-#endif // BX_FLOAT4_NI_H_HEADER_GUARD
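
float4_ni.h supplies the "not implemented natively" fallbacks composed from the primitive operations; in particular, float4_div_nr_ni and float4_rsqrt_nr_ni sharpen a coarse hardware estimate with one Newton-Raphson step. The scalar sketch below shows the same refinement formulas in isolation (a minimal illustration, not bx code).

#include <cmath>
#include <cstdio>

// One refinement step for a reciprocal estimate x ~= 1/a:  x' = x * (2 - a*x)
static float refine_rcp(float a, float x)
{
	return x * (2.0f - a * x);
}

// One refinement step for a reciprocal-square-root estimate x ~= 1/sqrt(a):
//   x' = 0.5 * x * (3 - a*x*x)   (same form as float4_rsqrt_nr_ni)
static float refine_rsqrt(float a, float x)
{
	return 0.5f * x * (3.0f - a * x * x);
}

int main()
{
	const float a = 2.0f;
	float rcp   = 0.4f; // stand-ins for rough hardware estimates
	float rsqrt = 0.7f;
	for (int ii = 0; ii < 2; ++ii)
	{
		rcp   = refine_rcp(a, rcp);
		rsqrt = refine_rsqrt(a, rsqrt);
	}
	printf("1/2 ~= %f, 1/sqrt(2) ~= %f (exact: %f)\n", rcp, rsqrt, 1.0f / sqrtf(a));
	return 0;
}
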
diff --git a/3rdparty/bx/include/bx/float4_sse.h b/3rdparty/bx/include/bx/float4_sse.h
deleted file mode 100644
index 73272518efe..00000000000
--- a/3rdparty/bx/include/bx/float4_sse.h
+++ /dev/null
@@ -1,461 +0,0 @@
-/*
- * Copyright 2010-2016 Branimir Karadzic. All rights reserved.
- * License: https://github.com/bkaradzic/bx#license-bsd-2-clause
- */
-
-#ifndef BX_FLOAT4_SSE_H_HEADER_GUARD
-#define BX_FLOAT4_SSE_H_HEADER_GUARD
-
-#include <emmintrin.h> // __m128i
-#if defined(__SSE4_1__)
-# include <smmintrin.h>
-#endif // defined(__SSE4_1__)
-#include <xmmintrin.h> // __m128
-
-namespace bx
-{
- typedef __m128 float4_t;
-
-#define ELEMx 0
-#define ELEMy 1
-#define ELEMz 2
-#define ELEMw 3
-#define IMPLEMENT_SWIZZLE(_x, _y, _z, _w) \
- BX_FLOAT4_FORCE_INLINE float4_t float4_swiz_##_x##_y##_z##_w(float4_t _a) \
- { \
- return _mm_shuffle_ps( _a, _a, _MM_SHUFFLE(ELEM##_w, ELEM##_z, ELEM##_y, ELEM##_x ) ); \
- }
-
-#include "float4_swizzle.inl"
-
-#undef IMPLEMENT_SWIZZLE
-#undef ELEMw
-#undef ELEMz
-#undef ELEMy
-#undef ELEMx
-
-#define IMPLEMENT_TEST(_xyzw, _mask) \
- BX_FLOAT4_FORCE_INLINE bool float4_test_any_##_xyzw(float4_t _test) \
- { \
- return 0x0 != (_mm_movemask_ps(_test)&(_mask) ); \
- } \
- \
- BX_FLOAT4_FORCE_INLINE bool float4_test_all_##_xyzw(float4_t _test) \
- { \
- return (_mask) == (_mm_movemask_ps(_test)&(_mask) ); \
- }
-
-IMPLEMENT_TEST(x , 0x1);
-IMPLEMENT_TEST(y , 0x2);
-IMPLEMENT_TEST(xy , 0x3);
-IMPLEMENT_TEST(z , 0x4);
-IMPLEMENT_TEST(xz , 0x5);
-IMPLEMENT_TEST(yz , 0x6);
-IMPLEMENT_TEST(xyz , 0x7);
-IMPLEMENT_TEST(w , 0x8);
-IMPLEMENT_TEST(xw , 0x9);
-IMPLEMENT_TEST(yw , 0xa);
-IMPLEMENT_TEST(xyw , 0xb);
-IMPLEMENT_TEST(zw , 0xc);
-IMPLEMENT_TEST(xzw , 0xd);
-IMPLEMENT_TEST(yzw , 0xe);
-IMPLEMENT_TEST(xyzw , 0xf);
-
-#undef IMPLEMENT_TEST
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_shuf_xyAB(float4_t _a, float4_t _b)
- {
- return _mm_movelh_ps(_a, _b);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_shuf_ABxy(float4_t _a, float4_t _b)
- {
- return _mm_movelh_ps(_b, _a);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_shuf_CDzw(float4_t _a, float4_t _b)
- {
- return _mm_movehl_ps(_a, _b);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_shuf_zwCD(float4_t _a, float4_t _b)
- {
- return _mm_movehl_ps(_b, _a);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_shuf_xAyB(float4_t _a, float4_t _b)
- {
- return _mm_unpacklo_ps(_a, _b);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_shuf_yBxA(float4_t _a, float4_t _b)
- {
- return _mm_unpacklo_ps(_b, _a);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_shuf_zCwD(float4_t _a, float4_t _b)
- {
- return _mm_unpackhi_ps(_a, _b);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_shuf_CzDw(float4_t _a, float4_t _b)
- {
- return _mm_unpackhi_ps(_b, _a);
- }
-
- BX_FLOAT4_FORCE_INLINE float float4_x(float4_t _a)
- {
- return _mm_cvtss_f32(_a);
- }
-
- BX_FLOAT4_FORCE_INLINE float float4_y(float4_t _a)
- {
- const float4_t yyyy = float4_swiz_yyyy(_a);
- const float result = _mm_cvtss_f32(yyyy);
-
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float float4_z(float4_t _a)
- {
- const float4_t zzzz = float4_swiz_zzzz(_a);
- const float result = _mm_cvtss_f32(zzzz);
-
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float float4_w(float4_t _a)
- {
- const float4_t wwww = float4_swiz_wwww(_a);
- const float result = _mm_cvtss_f32(wwww);
-
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_ld(const void* _ptr)
- {
- return _mm_load_ps(reinterpret_cast<const float*>(_ptr) );
- }
-
- BX_FLOAT4_FORCE_INLINE void float4_st(void* _ptr, float4_t _a)
- {
- _mm_store_ps(reinterpret_cast<float*>(_ptr), _a);
- }
-
- BX_FLOAT4_FORCE_INLINE void float4_stx(void* _ptr, float4_t _a)
- {
- _mm_store_ss(reinterpret_cast<float*>(_ptr), _a);
- }
-
- BX_FLOAT4_FORCE_INLINE void float4_stream(void* _ptr, float4_t _a)
- {
- _mm_stream_ps(reinterpret_cast<float*>(_ptr), _a);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_ld(float _x, float _y, float _z, float _w)
- {
- return _mm_set_ps(_w, _z, _y, _x);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_ild(uint32_t _x, uint32_t _y, uint32_t _z, uint32_t _w)
- {
- const __m128i set = _mm_set_epi32(_w, _z, _y, _x);
- const float4_t result = _mm_castsi128_ps(set);
-
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_splat(const void* _ptr)
- {
- const float4_t x___ = _mm_load_ss(reinterpret_cast<const float*>(_ptr) );
- const float4_t result = float4_swiz_xxxx(x___);
-
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_splat(float _a)
- {
- return _mm_set1_ps(_a);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_isplat(uint32_t _a)
- {
- const __m128i splat = _mm_set1_epi32(_a);
- const float4_t result = _mm_castsi128_ps(splat);
-
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_zero()
- {
- return _mm_setzero_ps();
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_itof(float4_t _a)
- {
- const __m128i itof = _mm_castps_si128(_a);
- const float4_t result = _mm_cvtepi32_ps(itof);
-
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_ftoi(float4_t _a)
- {
- const __m128i ftoi = _mm_cvtps_epi32(_a);
- const float4_t result = _mm_castsi128_ps(ftoi);
-
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_round(float4_t _a)
- {
-#if defined(__SSE4_1__)
- return _mm_round_ps(_a, _MM_FROUND_NINT);
-#else
- const __m128i round = _mm_cvtps_epi32(_a);
- const float4_t result = _mm_cvtepi32_ps(round);
-
- return result;
-#endif // defined(__SSE4_1__)
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_add(float4_t _a, float4_t _b)
- {
- return _mm_add_ps(_a, _b);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_sub(float4_t _a, float4_t _b)
- {
- return _mm_sub_ps(_a, _b);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_mul(float4_t _a, float4_t _b)
- {
- return _mm_mul_ps(_a, _b);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_div(float4_t _a, float4_t _b)
- {
- return _mm_div_ps(_a, _b);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_rcp_est(float4_t _a)
- {
- return _mm_rcp_ps(_a);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_sqrt(float4_t _a)
- {
- return _mm_sqrt_ps(_a);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_rsqrt_est(float4_t _a)
- {
- return _mm_rsqrt_ps(_a);
- }
-
-#if defined(__SSE4_1__)
- BX_FLOAT4_FORCE_INLINE float4_t float4_dot3(float4_t _a, float4_t _b)
- {
- return _mm_dp_ps(_a, _b, 0x77);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_dot(float4_t _a, float4_t _b)
- {
- return _mm_dp_ps(_a, _b, 0xFF);
- }
-#endif // defined(__SSE4__)
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_cmpeq(float4_t _a, float4_t _b)
- {
- return _mm_cmpeq_ps(_a, _b);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_cmplt(float4_t _a, float4_t _b)
- {
- return _mm_cmplt_ps(_a, _b);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_cmple(float4_t _a, float4_t _b)
- {
- return _mm_cmple_ps(_a, _b);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_cmpgt(float4_t _a, float4_t _b)
- {
- return _mm_cmpgt_ps(_a, _b);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_cmpge(float4_t _a, float4_t _b)
- {
- return _mm_cmpge_ps(_a, _b);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_min(float4_t _a, float4_t _b)
- {
- return _mm_min_ps(_a, _b);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_max(float4_t _a, float4_t _b)
- {
- return _mm_max_ps(_a, _b);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_and(float4_t _a, float4_t _b)
- {
- return _mm_and_ps(_a, _b);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_andc(float4_t _a, float4_t _b)
- {
- return _mm_andnot_ps(_b, _a);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_or(float4_t _a, float4_t _b)
- {
- return _mm_or_ps(_a, _b);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_xor(float4_t _a, float4_t _b)
- {
- return _mm_xor_ps(_a, _b);
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_sll(float4_t _a, int _count)
- {
- const __m128i a = _mm_castps_si128(_a);
- const __m128i shift = _mm_slli_epi32(a, _count);
- const float4_t result = _mm_castsi128_ps(shift);
-
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_srl(float4_t _a, int _count)
- {
- const __m128i a = _mm_castps_si128(_a);
- const __m128i shift = _mm_srli_epi32(a, _count);
- const float4_t result = _mm_castsi128_ps(shift);
-
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_sra(float4_t _a, int _count)
- {
- const __m128i a = _mm_castps_si128(_a);
- const __m128i shift = _mm_srai_epi32(a, _count);
- const float4_t result = _mm_castsi128_ps(shift);
-
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_icmpeq(float4_t _a, float4_t _b)
- {
- const __m128i tmp0 = _mm_castps_si128(_a);
- const __m128i tmp1 = _mm_castps_si128(_b);
- const __m128i tmp2 = _mm_cmpeq_epi32(tmp0, tmp1);
- const float4_t result = _mm_castsi128_ps(tmp2);
-
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_icmplt(float4_t _a, float4_t _b)
- {
- const __m128i tmp0 = _mm_castps_si128(_a);
- const __m128i tmp1 = _mm_castps_si128(_b);
- const __m128i tmp2 = _mm_cmplt_epi32(tmp0, tmp1);
- const float4_t result = _mm_castsi128_ps(tmp2);
-
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_icmpgt(float4_t _a, float4_t _b)
- {
- const __m128i tmp0 = _mm_castps_si128(_a);
- const __m128i tmp1 = _mm_castps_si128(_b);
- const __m128i tmp2 = _mm_cmpgt_epi32(tmp0, tmp1);
- const float4_t result = _mm_castsi128_ps(tmp2);
-
- return result;
- }
-
-#if defined(__SSE4_1__)
- BX_FLOAT4_FORCE_INLINE float4_t float4_imin(float4_t _a, float4_t _b)
- {
- const __m128i tmp0 = _mm_castps_si128(_a);
- const __m128i tmp1 = _mm_castps_si128(_b);
- const __m128i tmp2 = _mm_min_epi32(tmp0, tmp1);
- const float4_t result = _mm_castsi128_ps(tmp2);
-
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_imax(float4_t _a, float4_t _b)
- {
- const __m128i tmp0 = _mm_castps_si128(_a);
- const __m128i tmp1 = _mm_castps_si128(_b);
- const __m128i tmp2 = _mm_max_epi32(tmp0, tmp1);
- const float4_t result = _mm_castsi128_ps(tmp2);
-
- return result;
- }
-#endif // defined(__SSE4_1__)
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_iadd(float4_t _a, float4_t _b)
- {
- const __m128i a = _mm_castps_si128(_a);
- const __m128i b = _mm_castps_si128(_b);
- const __m128i add = _mm_add_epi32(a, b);
- const float4_t result = _mm_castsi128_ps(add);
-
- return result;
- }
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_isub(float4_t _a, float4_t _b)
- {
- const __m128i a = _mm_castps_si128(_a);
- const __m128i b = _mm_castps_si128(_b);
- const __m128i sub = _mm_sub_epi32(a, b);
- const float4_t result = _mm_castsi128_ps(sub);
-
- return result;
- }
-
-} // namespace bx
-
-#define float4_shuf_xAzC float4_shuf_xAzC_ni
-#define float4_shuf_yBwD float4_shuf_yBwD_ni
-#define float4_rcp float4_rcp_ni
-#define float4_orx float4_orx_ni
-#define float4_orc float4_orc_ni
-#define float4_neg float4_neg_ni
-#define float4_madd float4_madd_ni
-#define float4_nmsub float4_nmsub_ni
-#define float4_div_nr float4_div_nr_ni
-#define float4_selb float4_selb_ni
-#define float4_sels float4_sels_ni
-#define float4_not float4_not_ni
-#define float4_abs float4_abs_ni
-#define float4_clamp float4_clamp_ni
-#define float4_lerp float4_lerp_ni
-#define float4_rsqrt float4_rsqrt_ni
-#define float4_rsqrt_nr float4_rsqrt_nr_ni
-#define float4_rsqrt_carmack float4_rsqrt_carmack_ni
-#define float4_sqrt_nr float4_sqrt_nr_ni
-#define float4_log2 float4_log2_ni
-#define float4_exp2 float4_exp2_ni
-#define float4_pow float4_pow_ni
-#define float4_cross3 float4_cross3_ni
-#define float4_normalize3 float4_normalize3_ni
-#define float4_ceil float4_ceil_ni
-#define float4_floor float4_floor_ni
-
-#if !defined(__SSE4_1__)
-# define float4_dot3 float4_dot3_ni
-# define float4_dot float4_dot_ni
-# define float4_imin float4_imin_ni
-# define float4_imax float4_imax_ni
-#endif // defined(__SSE4_1__)
-
-#include "float4_ni.h"
-
-#endif // BX_FLOAT4_SSE_H_HEADER_GUARD
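The SSE path removed above keeps the public type as a float __m128 and wraps every integer operation in _mm_castps_si128/_mm_castsi128_ps reinterpret casts. The same pattern in isolation (SSE2; a sketch, not bx code — the function name is mine):

    #include <emmintrin.h>

    // Shift each 32-bit lane of _a left by _count bits while keeping the value
    // in a float register, the same way float4_sll above does.
    __m128 shiftLanesLeft(__m128 _a, int _count)
    {
        const __m128i bits  = _mm_castps_si128(_a);          // reinterpret bits, no conversion
        const __m128i shift = _mm_slli_epi32(bits, _count);  // per-lane logical shift left
        return _mm_castsi128_ps(shift);
    }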
diff --git a/3rdparty/bx/include/bx/float4_swizzle.inl b/3rdparty/bx/include/bx/float4_swizzle.inl
deleted file mode 100644
index e53b8f020b2..00000000000
--- a/3rdparty/bx/include/bx/float4_swizzle.inl
+++ /dev/null
@@ -1,266 +0,0 @@
-/*
- * Copyright 2010-2015 Branimir Karadzic. All rights reserved.
- * License: http://www.opensource.org/licenses/BSD-2-Clause
- */
-
-#ifndef BX_FLOAT4_T_H_HEADER_GUARD
-# error "xmacro file, must be included from float4_*.h"
-#endif // BX_FLOAT4_T_H_HEADER_GUARD
-
-// included from float4_t.h
-IMPLEMENT_SWIZZLE(x, x, x, x)
-IMPLEMENT_SWIZZLE(x, x, x, y)
-IMPLEMENT_SWIZZLE(x, x, x, z)
-IMPLEMENT_SWIZZLE(x, x, x, w)
-IMPLEMENT_SWIZZLE(x, x, y, x)
-IMPLEMENT_SWIZZLE(x, x, y, y)
-IMPLEMENT_SWIZZLE(x, x, y, z)
-IMPLEMENT_SWIZZLE(x, x, y, w)
-IMPLEMENT_SWIZZLE(x, x, z, x)
-IMPLEMENT_SWIZZLE(x, x, z, y)
-IMPLEMENT_SWIZZLE(x, x, z, z)
-IMPLEMENT_SWIZZLE(x, x, z, w)
-IMPLEMENT_SWIZZLE(x, x, w, x)
-IMPLEMENT_SWIZZLE(x, x, w, y)
-IMPLEMENT_SWIZZLE(x, x, w, z)
-IMPLEMENT_SWIZZLE(x, x, w, w)
-IMPLEMENT_SWIZZLE(x, y, x, x)
-IMPLEMENT_SWIZZLE(x, y, x, y)
-IMPLEMENT_SWIZZLE(x, y, x, z)
-IMPLEMENT_SWIZZLE(x, y, x, w)
-IMPLEMENT_SWIZZLE(x, y, y, x)
-IMPLEMENT_SWIZZLE(x, y, y, y)
-IMPLEMENT_SWIZZLE(x, y, y, z)
-IMPLEMENT_SWIZZLE(x, y, y, w)
-IMPLEMENT_SWIZZLE(x, y, z, x)
-IMPLEMENT_SWIZZLE(x, y, z, y)
-IMPLEMENT_SWIZZLE(x, y, z, z)
-// IMPLEMENT_SWIZZLE(x, y, z, w)
-IMPLEMENT_SWIZZLE(x, y, w, x)
-IMPLEMENT_SWIZZLE(x, y, w, y)
-IMPLEMENT_SWIZZLE(x, y, w, z)
-IMPLEMENT_SWIZZLE(x, y, w, w)
-IMPLEMENT_SWIZZLE(x, z, x, x)
-IMPLEMENT_SWIZZLE(x, z, x, y)
-IMPLEMENT_SWIZZLE(x, z, x, z)
-IMPLEMENT_SWIZZLE(x, z, x, w)
-IMPLEMENT_SWIZZLE(x, z, y, x)
-IMPLEMENT_SWIZZLE(x, z, y, y)
-IMPLEMENT_SWIZZLE(x, z, y, z)
-IMPLEMENT_SWIZZLE(x, z, y, w)
-IMPLEMENT_SWIZZLE(x, z, z, x)
-IMPLEMENT_SWIZZLE(x, z, z, y)
-IMPLEMENT_SWIZZLE(x, z, z, z)
-IMPLEMENT_SWIZZLE(x, z, z, w)
-IMPLEMENT_SWIZZLE(x, z, w, x)
-IMPLEMENT_SWIZZLE(x, z, w, y)
-IMPLEMENT_SWIZZLE(x, z, w, z)
-IMPLEMENT_SWIZZLE(x, z, w, w)
-IMPLEMENT_SWIZZLE(x, w, x, x)
-IMPLEMENT_SWIZZLE(x, w, x, y)
-IMPLEMENT_SWIZZLE(x, w, x, z)
-IMPLEMENT_SWIZZLE(x, w, x, w)
-IMPLEMENT_SWIZZLE(x, w, y, x)
-IMPLEMENT_SWIZZLE(x, w, y, y)
-IMPLEMENT_SWIZZLE(x, w, y, z)
-IMPLEMENT_SWIZZLE(x, w, y, w)
-IMPLEMENT_SWIZZLE(x, w, z, x)
-IMPLEMENT_SWIZZLE(x, w, z, y)
-IMPLEMENT_SWIZZLE(x, w, z, z)
-IMPLEMENT_SWIZZLE(x, w, z, w)
-IMPLEMENT_SWIZZLE(x, w, w, x)
-IMPLEMENT_SWIZZLE(x, w, w, y)
-IMPLEMENT_SWIZZLE(x, w, w, z)
-IMPLEMENT_SWIZZLE(x, w, w, w)
-IMPLEMENT_SWIZZLE(y, x, x, x)
-IMPLEMENT_SWIZZLE(y, x, x, y)
-IMPLEMENT_SWIZZLE(y, x, x, z)
-IMPLEMENT_SWIZZLE(y, x, x, w)
-IMPLEMENT_SWIZZLE(y, x, y, x)
-IMPLEMENT_SWIZZLE(y, x, y, y)
-IMPLEMENT_SWIZZLE(y, x, y, z)
-IMPLEMENT_SWIZZLE(y, x, y, w)
-IMPLEMENT_SWIZZLE(y, x, z, x)
-IMPLEMENT_SWIZZLE(y, x, z, y)
-IMPLEMENT_SWIZZLE(y, x, z, z)
-IMPLEMENT_SWIZZLE(y, x, z, w)
-IMPLEMENT_SWIZZLE(y, x, w, x)
-IMPLEMENT_SWIZZLE(y, x, w, y)
-IMPLEMENT_SWIZZLE(y, x, w, z)
-IMPLEMENT_SWIZZLE(y, x, w, w)
-IMPLEMENT_SWIZZLE(y, y, x, x)
-IMPLEMENT_SWIZZLE(y, y, x, y)
-IMPLEMENT_SWIZZLE(y, y, x, z)
-IMPLEMENT_SWIZZLE(y, y, x, w)
-IMPLEMENT_SWIZZLE(y, y, y, x)
-IMPLEMENT_SWIZZLE(y, y, y, y)
-IMPLEMENT_SWIZZLE(y, y, y, z)
-IMPLEMENT_SWIZZLE(y, y, y, w)
-IMPLEMENT_SWIZZLE(y, y, z, x)
-IMPLEMENT_SWIZZLE(y, y, z, y)
-IMPLEMENT_SWIZZLE(y, y, z, z)
-IMPLEMENT_SWIZZLE(y, y, z, w)
-IMPLEMENT_SWIZZLE(y, y, w, x)
-IMPLEMENT_SWIZZLE(y, y, w, y)
-IMPLEMENT_SWIZZLE(y, y, w, z)
-IMPLEMENT_SWIZZLE(y, y, w, w)
-IMPLEMENT_SWIZZLE(y, z, x, x)
-IMPLEMENT_SWIZZLE(y, z, x, y)
-IMPLEMENT_SWIZZLE(y, z, x, z)
-IMPLEMENT_SWIZZLE(y, z, x, w)
-IMPLEMENT_SWIZZLE(y, z, y, x)
-IMPLEMENT_SWIZZLE(y, z, y, y)
-IMPLEMENT_SWIZZLE(y, z, y, z)
-IMPLEMENT_SWIZZLE(y, z, y, w)
-IMPLEMENT_SWIZZLE(y, z, z, x)
-IMPLEMENT_SWIZZLE(y, z, z, y)
-IMPLEMENT_SWIZZLE(y, z, z, z)
-IMPLEMENT_SWIZZLE(y, z, z, w)
-IMPLEMENT_SWIZZLE(y, z, w, x)
-IMPLEMENT_SWIZZLE(y, z, w, y)
-IMPLEMENT_SWIZZLE(y, z, w, z)
-IMPLEMENT_SWIZZLE(y, z, w, w)
-IMPLEMENT_SWIZZLE(y, w, x, x)
-IMPLEMENT_SWIZZLE(y, w, x, y)
-IMPLEMENT_SWIZZLE(y, w, x, z)
-IMPLEMENT_SWIZZLE(y, w, x, w)
-IMPLEMENT_SWIZZLE(y, w, y, x)
-IMPLEMENT_SWIZZLE(y, w, y, y)
-IMPLEMENT_SWIZZLE(y, w, y, z)
-IMPLEMENT_SWIZZLE(y, w, y, w)
-IMPLEMENT_SWIZZLE(y, w, z, x)
-IMPLEMENT_SWIZZLE(y, w, z, y)
-IMPLEMENT_SWIZZLE(y, w, z, z)
-IMPLEMENT_SWIZZLE(y, w, z, w)
-IMPLEMENT_SWIZZLE(y, w, w, x)
-IMPLEMENT_SWIZZLE(y, w, w, y)
-IMPLEMENT_SWIZZLE(y, w, w, z)
-IMPLEMENT_SWIZZLE(y, w, w, w)
-IMPLEMENT_SWIZZLE(z, x, x, x)
-IMPLEMENT_SWIZZLE(z, x, x, y)
-IMPLEMENT_SWIZZLE(z, x, x, z)
-IMPLEMENT_SWIZZLE(z, x, x, w)
-IMPLEMENT_SWIZZLE(z, x, y, x)
-IMPLEMENT_SWIZZLE(z, x, y, y)
-IMPLEMENT_SWIZZLE(z, x, y, z)
-IMPLEMENT_SWIZZLE(z, x, y, w)
-IMPLEMENT_SWIZZLE(z, x, z, x)
-IMPLEMENT_SWIZZLE(z, x, z, y)
-IMPLEMENT_SWIZZLE(z, x, z, z)
-IMPLEMENT_SWIZZLE(z, x, z, w)
-IMPLEMENT_SWIZZLE(z, x, w, x)
-IMPLEMENT_SWIZZLE(z, x, w, y)
-IMPLEMENT_SWIZZLE(z, x, w, z)
-IMPLEMENT_SWIZZLE(z, x, w, w)
-IMPLEMENT_SWIZZLE(z, y, x, x)
-IMPLEMENT_SWIZZLE(z, y, x, y)
-IMPLEMENT_SWIZZLE(z, y, x, z)
-IMPLEMENT_SWIZZLE(z, y, x, w)
-IMPLEMENT_SWIZZLE(z, y, y, x)
-IMPLEMENT_SWIZZLE(z, y, y, y)
-IMPLEMENT_SWIZZLE(z, y, y, z)
-IMPLEMENT_SWIZZLE(z, y, y, w)
-IMPLEMENT_SWIZZLE(z, y, z, x)
-IMPLEMENT_SWIZZLE(z, y, z, y)
-IMPLEMENT_SWIZZLE(z, y, z, z)
-IMPLEMENT_SWIZZLE(z, y, z, w)
-IMPLEMENT_SWIZZLE(z, y, w, x)
-IMPLEMENT_SWIZZLE(z, y, w, y)
-IMPLEMENT_SWIZZLE(z, y, w, z)
-IMPLEMENT_SWIZZLE(z, y, w, w)
-IMPLEMENT_SWIZZLE(z, z, x, x)
-IMPLEMENT_SWIZZLE(z, z, x, y)
-IMPLEMENT_SWIZZLE(z, z, x, z)
-IMPLEMENT_SWIZZLE(z, z, x, w)
-IMPLEMENT_SWIZZLE(z, z, y, x)
-IMPLEMENT_SWIZZLE(z, z, y, y)
-IMPLEMENT_SWIZZLE(z, z, y, z)
-IMPLEMENT_SWIZZLE(z, z, y, w)
-IMPLEMENT_SWIZZLE(z, z, z, x)
-IMPLEMENT_SWIZZLE(z, z, z, y)
-IMPLEMENT_SWIZZLE(z, z, z, z)
-IMPLEMENT_SWIZZLE(z, z, z, w)
-IMPLEMENT_SWIZZLE(z, z, w, x)
-IMPLEMENT_SWIZZLE(z, z, w, y)
-IMPLEMENT_SWIZZLE(z, z, w, z)
-IMPLEMENT_SWIZZLE(z, z, w, w)
-IMPLEMENT_SWIZZLE(z, w, x, x)
-IMPLEMENT_SWIZZLE(z, w, x, y)
-IMPLEMENT_SWIZZLE(z, w, x, z)
-IMPLEMENT_SWIZZLE(z, w, x, w)
-IMPLEMENT_SWIZZLE(z, w, y, x)
-IMPLEMENT_SWIZZLE(z, w, y, y)
-IMPLEMENT_SWIZZLE(z, w, y, z)
-IMPLEMENT_SWIZZLE(z, w, y, w)
-IMPLEMENT_SWIZZLE(z, w, z, x)
-IMPLEMENT_SWIZZLE(z, w, z, y)
-IMPLEMENT_SWIZZLE(z, w, z, z)
-IMPLEMENT_SWIZZLE(z, w, z, w)
-IMPLEMENT_SWIZZLE(z, w, w, x)
-IMPLEMENT_SWIZZLE(z, w, w, y)
-IMPLEMENT_SWIZZLE(z, w, w, z)
-IMPLEMENT_SWIZZLE(z, w, w, w)
-IMPLEMENT_SWIZZLE(w, x, x, x)
-IMPLEMENT_SWIZZLE(w, x, x, y)
-IMPLEMENT_SWIZZLE(w, x, x, z)
-IMPLEMENT_SWIZZLE(w, x, x, w)
-IMPLEMENT_SWIZZLE(w, x, y, x)
-IMPLEMENT_SWIZZLE(w, x, y, y)
-IMPLEMENT_SWIZZLE(w, x, y, z)
-IMPLEMENT_SWIZZLE(w, x, y, w)
-IMPLEMENT_SWIZZLE(w, x, z, x)
-IMPLEMENT_SWIZZLE(w, x, z, y)
-IMPLEMENT_SWIZZLE(w, x, z, z)
-IMPLEMENT_SWIZZLE(w, x, z, w)
-IMPLEMENT_SWIZZLE(w, x, w, x)
-IMPLEMENT_SWIZZLE(w, x, w, y)
-IMPLEMENT_SWIZZLE(w, x, w, z)
-IMPLEMENT_SWIZZLE(w, x, w, w)
-IMPLEMENT_SWIZZLE(w, y, x, x)
-IMPLEMENT_SWIZZLE(w, y, x, y)
-IMPLEMENT_SWIZZLE(w, y, x, z)
-IMPLEMENT_SWIZZLE(w, y, x, w)
-IMPLEMENT_SWIZZLE(w, y, y, x)
-IMPLEMENT_SWIZZLE(w, y, y, y)
-IMPLEMENT_SWIZZLE(w, y, y, z)
-IMPLEMENT_SWIZZLE(w, y, y, w)
-IMPLEMENT_SWIZZLE(w, y, z, x)
-IMPLEMENT_SWIZZLE(w, y, z, y)
-IMPLEMENT_SWIZZLE(w, y, z, z)
-IMPLEMENT_SWIZZLE(w, y, z, w)
-IMPLEMENT_SWIZZLE(w, y, w, x)
-IMPLEMENT_SWIZZLE(w, y, w, y)
-IMPLEMENT_SWIZZLE(w, y, w, z)
-IMPLEMENT_SWIZZLE(w, y, w, w)
-IMPLEMENT_SWIZZLE(w, z, x, x)
-IMPLEMENT_SWIZZLE(w, z, x, y)
-IMPLEMENT_SWIZZLE(w, z, x, z)
-IMPLEMENT_SWIZZLE(w, z, x, w)
-IMPLEMENT_SWIZZLE(w, z, y, x)
-IMPLEMENT_SWIZZLE(w, z, y, y)
-IMPLEMENT_SWIZZLE(w, z, y, z)
-IMPLEMENT_SWIZZLE(w, z, y, w)
-IMPLEMENT_SWIZZLE(w, z, z, x)
-IMPLEMENT_SWIZZLE(w, z, z, y)
-IMPLEMENT_SWIZZLE(w, z, z, z)
-IMPLEMENT_SWIZZLE(w, z, z, w)
-IMPLEMENT_SWIZZLE(w, z, w, x)
-IMPLEMENT_SWIZZLE(w, z, w, y)
-IMPLEMENT_SWIZZLE(w, z, w, z)
-IMPLEMENT_SWIZZLE(w, z, w, w)
-IMPLEMENT_SWIZZLE(w, w, x, x)
-IMPLEMENT_SWIZZLE(w, w, x, y)
-IMPLEMENT_SWIZZLE(w, w, x, z)
-IMPLEMENT_SWIZZLE(w, w, x, w)
-IMPLEMENT_SWIZZLE(w, w, y, x)
-IMPLEMENT_SWIZZLE(w, w, y, y)
-IMPLEMENT_SWIZZLE(w, w, y, z)
-IMPLEMENT_SWIZZLE(w, w, y, w)
-IMPLEMENT_SWIZZLE(w, w, z, x)
-IMPLEMENT_SWIZZLE(w, w, z, y)
-IMPLEMENT_SWIZZLE(w, w, z, z)
-IMPLEMENT_SWIZZLE(w, w, z, w)
-IMPLEMENT_SWIZZLE(w, w, w, x)
-IMPLEMENT_SWIZZLE(w, w, w, y)
-IMPLEMENT_SWIZZLE(w, w, w, z)
-IMPLEMENT_SWIZZLE(w, w, w, w)
diff --git a/3rdparty/bx/include/bx/float4_t.h b/3rdparty/bx/include/bx/float4_t.h
deleted file mode 100644
index 78fd4e8ebb5..00000000000
--- a/3rdparty/bx/include/bx/float4_t.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright 2010-2016 Branimir Karadzic. All rights reserved.
- * License: https://github.com/bkaradzic/bx#license-bsd-2-clause
- */
-
-#ifndef BX_FLOAT4_T_H_HEADER_GUARD
-#define BX_FLOAT4_T_H_HEADER_GUARD
-
-#include "bx.h"
-
-#define BX_FLOAT4_FORCE_INLINE BX_FORCE_INLINE
-#define BX_FLOAT4_INLINE static inline
-
-#if defined(__SSE2__) || (BX_COMPILER_MSVC && (BX_ARCH_64BIT || _M_IX86_FP >= 2) )
-# include "float4_sse.h"
-#elif defined(__ARM_NEON__) && !BX_COMPILER_CLANG
-# include "float4_neon.h"
-#elif BX_COMPILER_CLANG \
- && !BX_PLATFORM_EMSCRIPTEN \
- && !BX_PLATFORM_IOS \
- && BX_CLANG_HAS_EXTENSION(attribute_ext_vector_type)
-# include "float4_langext.h"
-#else
-# ifndef BX_FLOAT4_WARN_REFERENCE_IMPL
-# define BX_FLOAT4_WARN_REFERENCE_IMPL 0
-# endif // BX_FLOAT4_WARN_REFERENCE_IMPL
-
-# if BX_FLOAT4_WARN_REFERENCE_IMPL
-# pragma message("************************************\nUsing SIMD reference implementation!\n************************************")
-# endif // BX_FLOAT4_WARN_REFERENCE_IMPL
-
-# include "float4_ref.h"
-#endif //
-
-#endif // BX_FLOAT4_T_H_HEADER_GUARD
diff --git a/3rdparty/bx/include/bx/float4x4_t.h b/3rdparty/bx/include/bx/float4x4_t.h
index e1bc4e1ca61..269dd633d3d 100644
--- a/3rdparty/bx/include/bx/float4x4_t.h
+++ b/3rdparty/bx/include/bx/float4x4_t.h
@@ -6,151 +6,151 @@
#ifndef BX_FLOAT4X4_H_HEADER_GUARD
#define BX_FLOAT4X4_H_HEADER_GUARD
-#include "float4_t.h"
+#include "simd_t.h"
namespace bx
{
BX_ALIGN_DECL_16(struct) float4x4_t
{
- float4_t col[4];
+ simd128_t col[4];
};
- BX_FLOAT4_FORCE_INLINE float4_t float4_mul_xyz1(float4_t _a, const float4x4_t* _b)
+ BX_SIMD_FORCE_INLINE simd128_t simd_mul_xyz1(simd128_t _a, const float4x4_t* _b)
{
- const float4_t xxxx = float4_swiz_xxxx(_a);
- const float4_t yyyy = float4_swiz_yyyy(_a);
- const float4_t zzzz = float4_swiz_zzzz(_a);
- const float4_t col0 = float4_mul(_b->col[0], xxxx);
- const float4_t col1 = float4_mul(_b->col[1], yyyy);
- const float4_t col2 = float4_madd(_b->col[2], zzzz, col0);
- const float4_t col3 = float4_add(_b->col[3], col1);
- const float4_t result = float4_add(col2, col3);
+ const simd128_t xxxx = simd_swiz_xxxx(_a);
+ const simd128_t yyyy = simd_swiz_yyyy(_a);
+ const simd128_t zzzz = simd_swiz_zzzz(_a);
+ const simd128_t col0 = simd_mul(_b->col[0], xxxx);
+ const simd128_t col1 = simd_mul(_b->col[1], yyyy);
+ const simd128_t col2 = simd_madd(_b->col[2], zzzz, col0);
+ const simd128_t col3 = simd_add(_b->col[3], col1);
+ const simd128_t result = simd_add(col2, col3);
return result;
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_mul(float4_t _a, const float4x4_t* _b)
+ BX_SIMD_FORCE_INLINE simd128_t simd_mul(simd128_t _a, const float4x4_t* _b)
{
- const float4_t xxxx = float4_swiz_xxxx(_a);
- const float4_t yyyy = float4_swiz_yyyy(_a);
- const float4_t zzzz = float4_swiz_zzzz(_a);
- const float4_t wwww = float4_swiz_wwww(_a);
- const float4_t col0 = float4_mul(_b->col[0], xxxx);
- const float4_t col1 = float4_mul(_b->col[1], yyyy);
- const float4_t col2 = float4_madd(_b->col[2], zzzz, col0);
- const float4_t col3 = float4_madd(_b->col[3], wwww, col1);
- const float4_t result = float4_add(col2, col3);
+ const simd128_t xxxx = simd_swiz_xxxx(_a);
+ const simd128_t yyyy = simd_swiz_yyyy(_a);
+ const simd128_t zzzz = simd_swiz_zzzz(_a);
+ const simd128_t wwww = simd_swiz_wwww(_a);
+ const simd128_t col0 = simd_mul(_b->col[0], xxxx);
+ const simd128_t col1 = simd_mul(_b->col[1], yyyy);
+ const simd128_t col2 = simd_madd(_b->col[2], zzzz, col0);
+ const simd128_t col3 = simd_madd(_b->col[3], wwww, col1);
+ const simd128_t result = simd_add(col2, col3);
return result;
}
- BX_FLOAT4_INLINE void float4x4_mul(float4x4_t* __restrict _result, const float4x4_t* __restrict _a, const float4x4_t* __restrict _b)
+ BX_SIMD_INLINE void float4x4_mul(float4x4_t* __restrict _result, const float4x4_t* __restrict _a, const float4x4_t* __restrict _b)
{
- _result->col[0] = float4_mul(_a->col[0], _b);
- _result->col[1] = float4_mul(_a->col[1], _b);
- _result->col[2] = float4_mul(_a->col[2], _b);
- _result->col[3] = float4_mul(_a->col[3], _b);
+ _result->col[0] = simd_mul(_a->col[0], _b);
+ _result->col[1] = simd_mul(_a->col[1], _b);
+ _result->col[2] = simd_mul(_a->col[2], _b);
+ _result->col[3] = simd_mul(_a->col[3], _b);
}
- BX_FLOAT4_FORCE_INLINE void float4x4_transpose(float4x4_t* __restrict _result, const float4x4_t* __restrict _mtx)
+ BX_SIMD_FORCE_INLINE void float4x4_transpose(float4x4_t* __restrict _result, const float4x4_t* __restrict _mtx)
{
- const float4_t aibj = float4_shuf_xAyB(_mtx->col[0], _mtx->col[2]); // aibj
- const float4_t emfn = float4_shuf_xAyB(_mtx->col[1], _mtx->col[3]); // emfn
- const float4_t ckdl = float4_shuf_zCwD(_mtx->col[0], _mtx->col[2]); // ckdl
- const float4_t gohp = float4_shuf_zCwD(_mtx->col[1], _mtx->col[3]); // gohp
- _result->col[0] = float4_shuf_xAyB(aibj, emfn); // aeim
- _result->col[1] = float4_shuf_zCwD(aibj, emfn); // bfjn
- _result->col[2] = float4_shuf_xAyB(ckdl, gohp); // cgko
- _result->col[3] = float4_shuf_zCwD(ckdl, gohp); // dhlp
+ const simd128_t aibj = simd_shuf_xAyB(_mtx->col[0], _mtx->col[2]); // aibj
+ const simd128_t emfn = simd_shuf_xAyB(_mtx->col[1], _mtx->col[3]); // emfn
+ const simd128_t ckdl = simd_shuf_zCwD(_mtx->col[0], _mtx->col[2]); // ckdl
+ const simd128_t gohp = simd_shuf_zCwD(_mtx->col[1], _mtx->col[3]); // gohp
+ _result->col[0] = simd_shuf_xAyB(aibj, emfn); // aeim
+ _result->col[1] = simd_shuf_zCwD(aibj, emfn); // bfjn
+ _result->col[2] = simd_shuf_xAyB(ckdl, gohp); // cgko
+ _result->col[3] = simd_shuf_zCwD(ckdl, gohp); // dhlp
}
- BX_FLOAT4_INLINE void float4x4_inverse(float4x4_t* __restrict _result, const float4x4_t* __restrict _a)
+ BX_SIMD_INLINE void float4x4_inverse(float4x4_t* __restrict _result, const float4x4_t* __restrict _a)
{
- const float4_t tmp0 = float4_shuf_xAzC(_a->col[0], _a->col[1]);
- const float4_t tmp1 = float4_shuf_xAzC(_a->col[2], _a->col[3]);
- const float4_t tmp2 = float4_shuf_yBwD(_a->col[0], _a->col[1]);
- const float4_t tmp3 = float4_shuf_yBwD(_a->col[2], _a->col[3]);
- const float4_t t0 = float4_shuf_xyAB(tmp0, tmp1);
- const float4_t t1 = float4_shuf_xyAB(tmp3, tmp2);
- const float4_t t2 = float4_shuf_zwCD(tmp0, tmp1);
- const float4_t t3 = float4_shuf_zwCD(tmp3, tmp2);
-
- const float4_t t23 = float4_mul(t2, t3);
- const float4_t t23_yxwz = float4_swiz_yxwz(t23);
- const float4_t t23_wzyx = float4_swiz_wzyx(t23);
-
- float4_t cof0, cof1, cof2, cof3;
-
- const float4_t zero = float4_zero();
- cof0 = float4_nmsub(t1, t23_yxwz, zero);
- cof0 = float4_madd(t1, t23_wzyx, cof0);
-
- cof1 = float4_nmsub(t0, t23_yxwz, zero);
- cof1 = float4_madd(t0, t23_wzyx, cof1);
- cof1 = float4_swiz_zwxy(cof1);
-
- const float4_t t12 = float4_mul(t1, t2);
- const float4_t t12_yxwz = float4_swiz_yxwz(t12);
- const float4_t t12_wzyx = float4_swiz_wzyx(t12);
-
- cof0 = float4_madd(t3, t12_yxwz, cof0);
- cof0 = float4_nmsub(t3, t12_wzyx, cof0);
-
- cof3 = float4_mul(t0, t12_yxwz);
- cof3 = float4_nmsub(t0, t12_wzyx, cof3);
- cof3 = float4_swiz_zwxy(cof3);
-
- const float4_t t1_zwxy = float4_swiz_zwxy(t1);
- const float4_t t2_zwxy = float4_swiz_zwxy(t2);
-
- const float4_t t13 = float4_mul(t1_zwxy, t3);
- const float4_t t13_yxwz = float4_swiz_yxwz(t13);
- const float4_t t13_wzyx = float4_swiz_wzyx(t13);
-
- cof0 = float4_madd(t2_zwxy, t13_yxwz, cof0);
- cof0 = float4_nmsub(t2_zwxy, t13_wzyx, cof0);
-
- cof2 = float4_mul(t0, t13_yxwz);
- cof2 = float4_nmsub(t0, t13_wzyx, cof2);
- cof2 = float4_swiz_zwxy(cof2);
-
- const float4_t t01 = float4_mul(t0, t1);
- const float4_t t01_yxwz = float4_swiz_yxwz(t01);
- const float4_t t01_wzyx = float4_swiz_wzyx(t01);
-
- cof2 = float4_nmsub(t3, t01_yxwz, cof2);
- cof2 = float4_madd(t3, t01_wzyx, cof2);
-
- cof3 = float4_madd(t2_zwxy, t01_yxwz, cof3);
- cof3 = float4_nmsub(t2_zwxy, t01_wzyx, cof3);
-
- const float4_t t03 = float4_mul(t0, t3);
- const float4_t t03_yxwz = float4_swiz_yxwz(t03);
- const float4_t t03_wzyx = float4_swiz_wzyx(t03);
-
- cof1 = float4_nmsub(t2_zwxy, t03_yxwz, cof1);
- cof1 = float4_madd(t2_zwxy, t03_wzyx, cof1);
-
- cof2 = float4_madd(t1, t03_yxwz, cof2);
- cof2 = float4_nmsub(t1, t03_wzyx, cof2);
-
- const float4_t t02 = float4_mul(t0, t2_zwxy);
- const float4_t t02_yxwz = float4_swiz_yxwz(t02);
- const float4_t t02_wzyx = float4_swiz_wzyx(t02);
-
- cof1 = float4_madd(t3, t02_yxwz, cof1);
- cof1 = float4_nmsub(t3, t02_wzyx, cof1);
-
- cof3 = float4_nmsub(t1, t02_yxwz, cof3);
- cof3 = float4_madd(t1, t02_wzyx, cof3);
-
- const float4_t det = float4_dot(t0, cof0);
- const float4_t invdet = float4_rcp(det);
-
- _result->col[0] = float4_mul(cof0, invdet);
- _result->col[1] = float4_mul(cof1, invdet);
- _result->col[2] = float4_mul(cof2, invdet);
- _result->col[3] = float4_mul(cof3, invdet);
+ const simd128_t tmp0 = simd_shuf_xAzC(_a->col[0], _a->col[1]);
+ const simd128_t tmp1 = simd_shuf_xAzC(_a->col[2], _a->col[3]);
+ const simd128_t tmp2 = simd_shuf_yBwD(_a->col[0], _a->col[1]);
+ const simd128_t tmp3 = simd_shuf_yBwD(_a->col[2], _a->col[3]);
+ const simd128_t t0 = simd_shuf_xyAB(tmp0, tmp1);
+ const simd128_t t1 = simd_shuf_xyAB(tmp3, tmp2);
+ const simd128_t t2 = simd_shuf_zwCD(tmp0, tmp1);
+ const simd128_t t3 = simd_shuf_zwCD(tmp3, tmp2);
+
+ const simd128_t t23 = simd_mul(t2, t3);
+ const simd128_t t23_yxwz = simd_swiz_yxwz(t23);
+ const simd128_t t23_wzyx = simd_swiz_wzyx(t23);
+
+ simd128_t cof0, cof1, cof2, cof3;
+
+ const simd128_t zero = simd_zero();
+ cof0 = simd_nmsub(t1, t23_yxwz, zero);
+ cof0 = simd_madd(t1, t23_wzyx, cof0);
+
+ cof1 = simd_nmsub(t0, t23_yxwz, zero);
+ cof1 = simd_madd(t0, t23_wzyx, cof1);
+ cof1 = simd_swiz_zwxy(cof1);
+
+ const simd128_t t12 = simd_mul(t1, t2);
+ const simd128_t t12_yxwz = simd_swiz_yxwz(t12);
+ const simd128_t t12_wzyx = simd_swiz_wzyx(t12);
+
+ cof0 = simd_madd(t3, t12_yxwz, cof0);
+ cof0 = simd_nmsub(t3, t12_wzyx, cof0);
+
+ cof3 = simd_mul(t0, t12_yxwz);
+ cof3 = simd_nmsub(t0, t12_wzyx, cof3);
+ cof3 = simd_swiz_zwxy(cof3);
+
+ const simd128_t t1_zwxy = simd_swiz_zwxy(t1);
+ const simd128_t t2_zwxy = simd_swiz_zwxy(t2);
+
+ const simd128_t t13 = simd_mul(t1_zwxy, t3);
+ const simd128_t t13_yxwz = simd_swiz_yxwz(t13);
+ const simd128_t t13_wzyx = simd_swiz_wzyx(t13);
+
+ cof0 = simd_madd(t2_zwxy, t13_yxwz, cof0);
+ cof0 = simd_nmsub(t2_zwxy, t13_wzyx, cof0);
+
+ cof2 = simd_mul(t0, t13_yxwz);
+ cof2 = simd_nmsub(t0, t13_wzyx, cof2);
+ cof2 = simd_swiz_zwxy(cof2);
+
+ const simd128_t t01 = simd_mul(t0, t1);
+ const simd128_t t01_yxwz = simd_swiz_yxwz(t01);
+ const simd128_t t01_wzyx = simd_swiz_wzyx(t01);
+
+ cof2 = simd_nmsub(t3, t01_yxwz, cof2);
+ cof2 = simd_madd(t3, t01_wzyx, cof2);
+
+ cof3 = simd_madd(t2_zwxy, t01_yxwz, cof3);
+ cof3 = simd_nmsub(t2_zwxy, t01_wzyx, cof3);
+
+ const simd128_t t03 = simd_mul(t0, t3);
+ const simd128_t t03_yxwz = simd_swiz_yxwz(t03);
+ const simd128_t t03_wzyx = simd_swiz_wzyx(t03);
+
+ cof1 = simd_nmsub(t2_zwxy, t03_yxwz, cof1);
+ cof1 = simd_madd(t2_zwxy, t03_wzyx, cof1);
+
+ cof2 = simd_madd(t1, t03_yxwz, cof2);
+ cof2 = simd_nmsub(t1, t03_wzyx, cof2);
+
+ const simd128_t t02 = simd_mul(t0, t2_zwxy);
+ const simd128_t t02_yxwz = simd_swiz_yxwz(t02);
+ const simd128_t t02_wzyx = simd_swiz_wzyx(t02);
+
+ cof1 = simd_madd(t3, t02_yxwz, cof1);
+ cof1 = simd_nmsub(t3, t02_wzyx, cof1);
+
+ cof3 = simd_nmsub(t1, t02_yxwz, cof3);
+ cof3 = simd_madd(t1, t02_wzyx, cof3);
+
+ const simd128_t det = simd_dot(t0, cof0);
+ const simd128_t invdet = simd_rcp(det);
+
+ _result->col[0] = simd_mul(cof0, invdet);
+ _result->col[1] = simd_mul(cof1, invdet);
+ _result->col[2] = simd_mul(cof2, invdet);
+ _result->col[3] = simd_mul(cof3, invdet);
}
} // namespace bx
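The float4x4_t helpers keep their behavior; only the float4_*/BX_FLOAT4_* names become simd_*/BX_SIMD_*, with simd128_t as the vector type. A minimal usage sketch under that assumption (the function name and the templated simd_ld call, mirroring the specializations in the new .inl files, are mine):

    #include <bx/float4x4_t.h> // also pulls in bx/simd_t.h

    // Transform a point (w treated as 1.0) by a 4x4 matrix using the renamed API.
    void transformPoint(float* _out4, const float* _point4, const bx::float4x4_t* _mtx)
    {
        const bx::simd128_t pos    = bx::simd_ld<bx::simd128_t>(_point4);
        const bx::simd128_t result = bx::simd_mul_xyz1(pos, _mtx);
        bx::simd_st(_out4, result);
    }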
diff --git a/3rdparty/bx/include/bx/fpumath.h b/3rdparty/bx/include/bx/fpumath.h
index 5f64564f688..7d7a2671bb2 100644
--- a/3rdparty/bx/include/bx/fpumath.h
+++ b/3rdparty/bx/include/bx/fpumath.h
@@ -341,7 +341,7 @@ namespace bx
_result[2] = 1.0f / _a[2];
}
- inline void vec3TangentFrame(const float* _n, float* _t, float* _b)
+ inline void vec3TangentFrame(const float* __restrict _n, float* __restrict _t, float* __restrict _b)
{
const float nx = _n[0];
const float ny = _n[1];
@@ -550,6 +550,30 @@ namespace bx
_result[15] = 1.0f;
}
+ inline void mtxScale(float* _result, float _scale)
+ {
+ mtxScale(_result, _scale, _scale, _scale);
+ }
+
+ inline void mtxFromNormal(float* __restrict _result, const float* __restrict _normal, float _scale, const float* __restrict _pos)
+ {
+ float tangent[3];
+ float bitangent[3];
+ vec3TangentFrame(_normal, tangent, bitangent);
+
+ vec3Mul(&_result[ 0], bitangent, _scale);
+ vec3Mul(&_result[ 4], _normal, _scale);
+ vec3Mul(&_result[ 8], tangent, _scale);
+
+ _result[ 3] = 0.0f;
+ _result[ 7] = 0.0f;
+ _result[11] = 0.0f;
+ _result[12] = _pos[0];
+ _result[13] = _pos[1];
+ _result[14] = _pos[2];
+ _result[15] = 1.0f;
+ }
+
inline void mtxQuat(float* __restrict _result, const float* __restrict _quat)
{
const float x = _quat[0];
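The new mtxFromNormal builds a transform whose three basis rows are the bitangent, normal and tangent produced by vec3TangentFrame, each scaled by _scale, with _pos as the translation. A small usage sketch, assuming bx/fpumath.h from this revision (the values and the function name are illustrative):

    #include <bx/fpumath.h>

    // Build a model matrix for a decal lying on an upward-facing surface.
    void makeDecalMatrix(float* _mtx16)
    {
        const float normal[3] = { 0.0f, 1.0f, 0.0f }; // surface normal
        const float pos[3]    = { 1.0f, 0.0f, 2.0f }; // decal position
        bx::mtxFromNormal(_mtx16, normal, 0.5f, pos); // basis scaled by 0.5, translated to pos
    }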
diff --git a/3rdparty/bx/include/bx/handlealloc.h b/3rdparty/bx/include/bx/handlealloc.h
index 7b10209ab2d..aad39cdd6f3 100644
--- a/3rdparty/bx/include/bx/handlealloc.h
+++ b/3rdparty/bx/include/bx/handlealloc.h
@@ -151,8 +151,6 @@ namespace bx
static const uint16_t invalid = UINT16_MAX;
HandleListT()
- : m_front(invalid)
- , m_back(invalid)
{
reset();
}
@@ -250,6 +248,8 @@ namespace bx
void reset()
{
memset(m_links, 0xff, sizeof(m_links) );
+ m_front = invalid;
+ m_back = invalid;
}
private:
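Moving the m_front/m_back initialization from the constructor's init list into reset() means a later reset() restores the exact freshly-constructed state instead of only clearing m_links. The pattern in isolation (a sketch, not the bx class):

    #include <cstdint>
    #include <cstring>

    struct FreeListSketch
    {
        static const uint16_t invalid = UINT16_MAX;

        FreeListSketch() { reset(); } // constructor just delegates to reset()

        void reset()
        {
            memset(m_links, 0xff, sizeof(m_links) );
            m_front = invalid;        // previously only the constructor set these
            m_back  = invalid;
        }

        uint16_t m_links[64];
        uint16_t m_front;
        uint16_t m_back;
    };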
diff --git a/3rdparty/bx/include/bx/macros.h b/3rdparty/bx/include/bx/macros.h
index b4eaeae9287..870f8726738 100644
--- a/3rdparty/bx/include/bx/macros.h
+++ b/3rdparty/bx/include/bx/macros.h
@@ -63,7 +63,7 @@
#if BX_COMPILER_GCC || BX_COMPILER_CLANG
# define BX_ALIGN_DECL(_align, _decl) _decl __attribute__( (aligned(_align) ) )
# define BX_ALLOW_UNUSED __attribute__( (unused) )
-# define BX_FORCE_INLINE __extension__ static __inline __attribute__( (__always_inline__) )
+# define BX_FORCE_INLINE inline __attribute__( (__always_inline__) )
# define BX_FUNCTION __PRETTY_FUNCTION__
# define BX_LIKELY(_x) __builtin_expect(!!(_x), 1)
# define BX_UNLIKELY(_x) __builtin_expect(!!(_x), 0)
@@ -71,7 +71,7 @@
# define BX_NO_RETURN __attribute__( (noreturn) )
# define BX_NO_VTABLE
# define BX_OVERRIDE
-# define BX_PRINTF_ARGS(_format, _args) __attribute__ ( (format(__printf__, _format, _args) ) )
+# define BX_PRINTF_ARGS(_format, _args) __attribute__( (format(__printf__, _format, _args) ) )
# if BX_CLANG_HAS_FEATURE(cxx_thread_local)
# define BX_THREAD_LOCAL __thread
# endif // BX_COMPILER_CLANG
@@ -79,9 +79,9 @@
# define BX_THREAD_LOCAL __thread
# endif // BX_COMPILER_GCC
# define BX_ATTRIBUTE(_x) __attribute__( (_x) )
-# if BX_COMPILER_MSVC_COMPATIBLE
+# if BX_CRT_MSVC
# define __stdcall
-# endif // BX_COMPILER_MSVC_COMPATIBLE
+# endif // BX_CRT_MSVC
#elif BX_COMPILER_MSVC
# define BX_ALIGN_DECL(_align, _decl) __declspec(align(_align) ) _decl
# define BX_ALLOW_UNUSED
diff --git a/3rdparty/bx/include/bx/os.h b/3rdparty/bx/include/bx/os.h
index 80ce4d5c5f9..0bdeb3c1eba 100644
--- a/3rdparty/bx/include/bx/os.h
+++ b/3rdparty/bx/include/bx/os.h
@@ -53,17 +53,17 @@
# elif BX_PLATFORM_OSX
# include <mach/mach.h> // mach_task_basic_info
# elif BX_PLATFORM_HURD
-# include <pthread/pthread.h> // pthread_self
+# include <unistd.h> // getpid
# elif BX_PLATFORM_ANDROID
# include "debug.h" // getTid is not implemented...
# endif // BX_PLATFORM_ANDROID
#endif // BX_PLATFORM_
-#if BX_COMPILER_MSVC_COMPATIBLE
+#if BX_CRT_MSVC
# include <direct.h> // _getcwd
#else
# include <unistd.h> // getcwd
-#endif // BX_COMPILER_MSVC
+#endif // BX_CRT_MSVC
#if BX_PLATFORM_OSX
# define BX_DL_EXT "dylib"
@@ -259,7 +259,7 @@ namespace bx
|| BX_PLATFORM_WINRT
BX_UNUSED(_path);
return -1;
-#elif BX_COMPILER_MSVC_COMPATIBLE
+#elif BX_CRT_MSVC
return ::_chdir(_path);
#else
return ::chdir(_path);
@@ -273,7 +273,7 @@ namespace bx
|| BX_PLATFORM_WINRT
BX_UNUSED(_buffer, _size);
return NULL;
-#elif BX_COMPILER_MSVC_COMPATIBLE
+#elif BX_CRT_MSVC
return ::_getcwd(_buffer, (int)_size);
#else
return ::getcwd(_buffer, _size);
diff --git a/3rdparty/bx/include/bx/platform.h b/3rdparty/bx/include/bx/platform.h
index 1c7b7d01414..02a80045285 100644
--- a/3rdparty/bx/include/bx/platform.h
+++ b/3rdparty/bx/include/bx/platform.h
@@ -15,7 +15,6 @@
#define BX_COMPILER_CLANG_ANALYZER 0
#define BX_COMPILER_GCC 0
#define BX_COMPILER_MSVC 0
-#define BX_COMPILER_MSVC_COMPATIBLE (BX_CRT_MSVC)
// Endianess
#define BX_CPU_ENDIAN_BIG 0
diff --git a/3rdparty/bx/include/bx/readerwriter.h b/3rdparty/bx/include/bx/readerwriter.h
index 5460505a42e..47c5899d3f1 100644
--- a/3rdparty/bx/include/bx/readerwriter.h
+++ b/3rdparty/bx/include/bx/readerwriter.h
@@ -16,7 +16,7 @@
#include "error.h"
#include "uint32_t.h"
-#if BX_COMPILER_MSVC_COMPATIBLE
+#if BX_CRT_MSVC
# define fseeko64 _fseeki64
# define ftello64 _ftelli64
#elif BX_PLATFORM_ANDROID || BX_PLATFORM_BSD || BX_PLATFORM_IOS || BX_PLATFORM_OSX || BX_PLATFORM_QNX
@@ -401,7 +401,7 @@ namespace bx
}
int64_t remainder = m_top-m_pos;
- int32_t size = uint32_min(_size, int32_t(remainder > INT32_MAX ? INT32_MAX : remainder) );
+ int32_t size = uint32_min(_size, uint32_t(int64_min(remainder, INT32_MAX) ) );
m_pos += size;
if (size != _size)
{
@@ -454,7 +454,7 @@ namespace bx
BX_CHECK(NULL != _err, "Reader/Writer interface calling functions must handle errors.");
int64_t remainder = m_top-m_pos;
- int32_t size = uint32_min(_size, int32_t(remainder > INT32_MAX ? INT32_MAX : remainder) );
+ int32_t size = uint32_min(_size, uint32_t(int64_min(remainder, INT32_MAX) ) );
memcpy(_data, &m_data[m_pos], size);
m_pos += size;
if (size != _size)
@@ -535,7 +535,7 @@ namespace bx
}
int64_t remainder = m_size-m_pos;
- int32_t size = uint32_min(_size, int32_t(remainder > INT32_MAX ? INT32_MAX : remainder) );
+ int32_t size = uint32_min(_size, uint32_t(int64_min(remainder, INT32_MAX) ) );
memcpy(&m_data[m_pos], _data, size);
m_pos += size;
m_top = int64_max(m_top, m_pos);
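All three seek/read/write paths now clamp identically: the 64-bit remainder is first capped at INT32_MAX via int64_min, then the smaller of that cap and the requested size wins. Equivalent logic in plain C++ (a sketch; bx's int64_min/uint32_min are simple min helpers):

    #include <algorithm>
    #include <cstdint>

    // How many bytes of a _size-byte request can be served when _remainder bytes are left.
    int32_t clampRequest(int32_t _size, int64_t _remainder)
    {
        const int64_t capped = std::min<int64_t>(_remainder, INT32_MAX); // fits in 32 bits
        return int32_t(std::min<int64_t>(_size, capped) );
    }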
diff --git a/3rdparty/bx/include/bx/simd128_langext.inl b/3rdparty/bx/include/bx/simd128_langext.inl
new file mode 100644
index 00000000000..c89e6123349
--- /dev/null
+++ b/3rdparty/bx/include/bx/simd128_langext.inl
@@ -0,0 +1,515 @@
+/*
+ * Copyright 2010-2016 Branimir Karadzic. All rights reserved.
+ * License: https://github.com/bkaradzic/bx#license-bsd-2-clause
+ */
+
+#ifndef BX_SIMD128_LANGEXT_H_HEADER_GUARD
+#define BX_SIMD128_LANGEXT_H_HEADER_GUARD
+
+#define simd_rcp simd_rcp_ni
+#define simd_orx simd_orx_ni
+#define simd_orc simd_orc_ni
+#define simd_neg simd_neg_ni
+#define simd_madd simd_madd_ni
+#define simd_nmsub simd_nmsub_ni
+#define simd_div_nr simd_div_nr_ni
+#define simd_selb simd_selb_ni
+#define simd_sels simd_sels_ni
+#define simd_not simd_not_ni
+#define simd_abs simd_abs_ni
+#define simd_clamp simd_clamp_ni
+#define simd_lerp simd_lerp_ni
+#define simd_rcp_est simd_rcp_ni
+#define simd_rsqrt simd_rsqrt_ni
+#define simd_rsqrt_nr simd_rsqrt_nr_ni
+#define simd_rsqrt_carmack simd_rsqrt_carmack_ni
+#define simd_sqrt_nr simd_sqrt_nr_ni
+#define simd_log2 simd_log2_ni
+#define simd_exp2 simd_exp2_ni
+#define simd_pow simd_pow_ni
+#define simd_cross3 simd_cross3_ni
+#define simd_normalize3 simd_normalize3_ni
+#define simd_dot3 simd_dot3_ni
+#define simd_dot simd_dot_ni
+#define simd_ceil simd_ceil_ni
+#define simd_floor simd_floor_ni
+#define simd_min simd_min_ni
+#define simd_max simd_max_ni
+#define simd_imin simd_imin_ni
+#define simd_imax simd_imax_ni
+
+#include "simd_ni.inl"
+
+namespace bx
+{
+#define ELEMx 0
+#define ELEMy 1
+#define ELEMz 2
+#define ELEMw 3
+#define BX_SIMD128_IMPLEMENT_SWIZZLE(_x, _y, _z, _w) \
+ template<> \
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_swiz_##_x##_y##_z##_w(simd128_langext_t _a) \
+ { \
+ simd128_langext_t result; \
+ result.vf = __builtin_shufflevector(_a.vf, _a.vf, ELEM##_x, ELEM##_y, ELEM##_z, ELEM##_w); \
+ return result; \
+ }
+
+#include "simd128_swizzle.inl"
+
+#undef BX_SIMD128_IMPLEMENT_SWIZZLE
+#undef ELEMw
+#undef ELEMz
+#undef ELEMy
+#undef ELEMx
+
+#define BX_SIMD128_IMPLEMENT_TEST(_xyzw, _mask) \
+ template<> \
+ BX_SIMD_FORCE_INLINE bool simd_test_any_##_xyzw(simd128_langext_t _test) \
+ { \
+ uint32_t tmp = ( (_test.uxyzw[3]>>31)<<3) \
+ | ( (_test.uxyzw[2]>>31)<<2) \
+ | ( (_test.uxyzw[1]>>31)<<1) \
+ | ( _test.uxyzw[0]>>31) \
+ ; \
+ return 0 != (tmp&(_mask) ); \
+ } \
+ \
+ template<> \
+ BX_SIMD_FORCE_INLINE bool simd_test_all_##_xyzw(simd128_langext_t _test) \
+ { \
+ uint32_t tmp = ( (_test.uxyzw[3]>>31)<<3) \
+ | ( (_test.uxyzw[2]>>31)<<2) \
+ | ( (_test.uxyzw[1]>>31)<<1) \
+ | ( _test.uxyzw[0]>>31) \
+ ; \
+ return (_mask) == (tmp&(_mask) ); \
+ }
+
+BX_SIMD128_IMPLEMENT_TEST(x , 0x1);
+BX_SIMD128_IMPLEMENT_TEST(y , 0x2);
+BX_SIMD128_IMPLEMENT_TEST(xy , 0x3);
+BX_SIMD128_IMPLEMENT_TEST(z , 0x4);
+BX_SIMD128_IMPLEMENT_TEST(xz , 0x5);
+BX_SIMD128_IMPLEMENT_TEST(yz , 0x6);
+BX_SIMD128_IMPLEMENT_TEST(xyz , 0x7);
+BX_SIMD128_IMPLEMENT_TEST(w , 0x8);
+BX_SIMD128_IMPLEMENT_TEST(xw , 0x9);
+BX_SIMD128_IMPLEMENT_TEST(yw , 0xa);
+BX_SIMD128_IMPLEMENT_TEST(xyw , 0xb);
+BX_SIMD128_IMPLEMENT_TEST(zw , 0xc);
+BX_SIMD128_IMPLEMENT_TEST(xzw , 0xd);
+BX_SIMD128_IMPLEMENT_TEST(yzw , 0xe);
+BX_SIMD128_IMPLEMENT_TEST(xyzw , 0xf);
+
+#undef BX_SIMD128_IMPLEMENT_TEST
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_shuf_xyAB(simd128_langext_t _a, simd128_langext_t _b)
+ {
+ simd128_langext_t result;
+ result.vf = __builtin_shufflevector(_a.vf, _b.vf, 0, 1, 4, 5);
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_shuf_ABxy(simd128_langext_t _a, simd128_langext_t _b)
+ {
+ simd128_langext_t result;
+ result.vf = __builtin_shufflevector(_a.vf, _b.vf, 4, 5, 0, 1);
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_shuf_CDzw(simd128_langext_t _a, simd128_langext_t _b)
+ {
+ simd128_langext_t result;
+ result.vf = __builtin_shufflevector(_a.vf, _b.vf, 6, 7, 2, 3);
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_shuf_zwCD(simd128_langext_t _a, simd128_langext_t _b)
+ {
+ simd128_langext_t result;
+ result.vf = __builtin_shufflevector(_a.vf, _b.vf, 2, 3, 6, 7);
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_shuf_xAyB(simd128_langext_t _a, simd128_langext_t _b)
+ {
+ simd128_langext_t result;
+ result.vf = __builtin_shufflevector(_a.vf, _b.vf, 0, 4, 1, 5);
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_shuf_yBxA(simd128_langext_t _a, simd128_langext_t _b)
+ {
+ simd128_langext_t result;
+ result.vf = __builtin_shufflevector(_a.vf, _b.vf, 1, 5, 0, 4);
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_shuf_zCwD(simd128_langext_t _a, simd128_langext_t _b)
+ {
+ simd128_langext_t result;
+ result.vf = __builtin_shufflevector(_a.vf, _b.vf, 2, 6, 3, 7);
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_shuf_CzDw(simd128_langext_t _a, simd128_langext_t _b)
+ {
+ simd128_langext_t result;
+ result.vf = __builtin_shufflevector(_a.vf, _b.vf, 6, 2, 7, 3);
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_shuf_xAzC(simd128_langext_t _a, simd128_langext_t _b)
+ {
+ simd128_langext_t result;
+ result.vf = __builtin_shufflevector(_a.vf, _b.vf, 0, 4, 2, 6);
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_shuf_yBwD(simd128_langext_t _a, simd128_langext_t _b)
+ {
+ simd128_langext_t result;
+ result.vf = __builtin_shufflevector(_a.vf, _b.vf, 1, 5, 3, 7);
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE float simd_x(simd128_langext_t _a)
+ {
+ return _a.fxyzw[0];
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE float simd_y(simd128_langext_t _a)
+ {
+ return _a.fxyzw[1];
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE float simd_z(simd128_langext_t _a)
+ {
+ return _a.fxyzw[2];
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE float simd_w(simd128_langext_t _a)
+ {
+ return _a.fxyzw[3];
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_ld(const void* _ptr)
+ {
+ const uint32_t* input = reinterpret_cast<const uint32_t*>(_ptr);
+ simd128_langext_t result;
+ result.uxyzw[0] = input[0];
+ result.uxyzw[1] = input[1];
+ result.uxyzw[2] = input[2];
+ result.uxyzw[3] = input[3];
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE void simd_st(void* _ptr, simd128_langext_t _a)
+ {
+ uint32_t* result = reinterpret_cast<uint32_t*>(_ptr);
+ result[0] = _a.uxyzw[0];
+ result[1] = _a.uxyzw[1];
+ result[2] = _a.uxyzw[2];
+ result[3] = _a.uxyzw[3];
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE void simd_stx(void* _ptr, simd128_langext_t _a)
+ {
+ uint32_t* result = reinterpret_cast<uint32_t*>(_ptr);
+ result[0] = _a.uxyzw[0];
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE void simd_stream(void* _ptr, simd128_langext_t _a)
+ {
+ uint32_t* result = reinterpret_cast<uint32_t*>(_ptr);
+ result[0] = _a.uxyzw[0];
+ result[1] = _a.uxyzw[1];
+ result[2] = _a.uxyzw[2];
+ result[3] = _a.uxyzw[3];
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_ld(float _x, float _y, float _z, float _w)
+ {
+ simd128_langext_t result;
+ result.vf = (float __attribute__((vector_size(16)))){ _x, _y, _z, _w };
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_ild(uint32_t _x, uint32_t _y, uint32_t _z, uint32_t _w)
+ {
+ simd128_langext_t result;
+ result.vu = (uint32_t __attribute__((vector_size(16)))){ _x, _y, _z, _w };
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_splat(const void* _ptr)
+ {
+ const uint32_t val = *reinterpret_cast<const uint32_t*>(_ptr);
+ simd128_langext_t result;
+ result.vu = (uint32_t __attribute__((vector_size(16)))){ val, val, val, val };
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_splat(float _a)
+ {
+ return simd_ld<simd128_langext_t>(_a, _a, _a, _a);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_isplat(uint32_t _a)
+ {
+ return simd_ild<simd128_langext_t>(_a, _a, _a, _a);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_zero()
+ {
+ return simd_ild<simd128_langext_t>(0, 0, 0, 0);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_itof(simd128_langext_t _a)
+ {
+ simd128_langext_t result;
+ result.vf = __builtin_convertvector(_a.vi, float __attribute__((vector_size(16))) );
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_ftoi(simd128_langext_t _a)
+ {
+ simd128_langext_t result;
+ result.vi = __builtin_convertvector(_a.vf, int32_t __attribute__((vector_size(16))) );
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_round(simd128_langext_t _a)
+ {
+ const simd128_langext_t tmp = simd_ftoi(_a);
+ const simd128_langext_t result = simd_itof(tmp);
+
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_add(simd128_langext_t _a, simd128_langext_t _b)
+ {
+ simd128_langext_t result;
+ result.vf = _a.vf + _b.vf;
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_sub(simd128_langext_t _a, simd128_langext_t _b)
+ {
+ simd128_langext_t result;
+ result.vf = _a.vf - _b.vf;
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_mul(simd128_langext_t _a, simd128_langext_t _b)
+ {
+ simd128_langext_t result;
+ result.vf = _a.vf * _b.vf;
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_div(simd128_langext_t _a, simd128_langext_t _b)
+ {
+ simd128_langext_t result;
+ result.vf = _a.vf / _b.vf;
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_sqrt(simd128_langext_t _a)
+ {
+ simd128_langext_t result;
+ result.vf[0] = sqrtf(_a.vf[0]);
+ result.vf[1] = sqrtf(_a.vf[1]);
+ result.vf[2] = sqrtf(_a.vf[2]);
+ result.vf[3] = sqrtf(_a.vf[3]);
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_rsqrt_est(simd128_langext_t _a)
+ {
+ simd128_langext_t result;
+ result.vf[0] = 1.0f / sqrtf(_a.vf[0]);
+ result.vf[1] = 1.0f / sqrtf(_a.vf[1]);
+ result.vf[2] = 1.0f / sqrtf(_a.vf[2]);
+ result.vf[3] = 1.0f / sqrtf(_a.vf[3]);
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_cmpeq(simd128_langext_t _a, simd128_langext_t _b)
+ {
+ simd128_langext_t result;
+ result.vi = _a.vf == _b.vf;
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_cmplt(simd128_langext_t _a, simd128_langext_t _b)
+ {
+ simd128_langext_t result;
+ result.vi = _a.vf < _b.vf;
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_cmple(simd128_langext_t _a, simd128_langext_t _b)
+ {
+ simd128_langext_t result;
+ result.vi = _a.vf <= _b.vf;
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_cmpgt(simd128_langext_t _a, simd128_langext_t _b)
+ {
+ simd128_langext_t result;
+ result.vi = _a.vf > _b.vf;
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_cmpge(simd128_langext_t _a, simd128_langext_t _b)
+ {
+ simd128_langext_t result;
+ result.vi = _a.vf >= _b.vf;
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_and(simd128_langext_t _a, simd128_langext_t _b)
+ {
+ simd128_langext_t result;
+ result.vu = _a.vu & _b.vu;
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_andc(simd128_langext_t _a, simd128_langext_t _b)
+ {
+ simd128_langext_t result;
+ result.vu = _a.vu & ~_b.vu;
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_or(simd128_langext_t _a, simd128_langext_t _b)
+ {
+ simd128_langext_t result;
+ result.vu = _a.vu | _b.vu;
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_xor(simd128_langext_t _a, simd128_langext_t _b)
+ {
+ simd128_langext_t result;
+ result.vu = _a.vu ^ _b.vu;
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_sll(simd128_langext_t _a, int _count)
+ {
+ simd128_langext_t result;
+ const simd128_langext_t count = simd_isplat<simd128_langext_t>(_count);
+ result.vu = _a.vu << count.vi;
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_srl(simd128_langext_t _a, int _count)
+ {
+ simd128_langext_t result;
+ const simd128_langext_t count = simd_isplat<simd128_langext_t>(_count);
+ result.vu = _a.vu >> count.vi;
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_sra(simd128_langext_t _a, int _count)
+ {
+ simd128_langext_t result;
+ const simd128_langext_t count = simd_isplat<simd128_langext_t>(_count);
+ result.vi = _a.vi >> count.vi;
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_icmpeq(simd128_langext_t _a, simd128_langext_t _b)
+ {
+ simd128_langext_t result;
+ result.vi = _a.vi == _b.vi;
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_icmplt(simd128_langext_t _a, simd128_langext_t _b)
+ {
+ simd128_langext_t result;
+ result.vi = _a.vi < _b.vi;
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_icmpgt(simd128_langext_t _a, simd128_langext_t _b)
+ {
+ simd128_langext_t result;
+ result.vi = _a.vi > _b.vi;
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_iadd(simd128_langext_t _a, simd128_langext_t _b)
+ {
+ simd128_langext_t result;
+ result.vi = _a.vi + _b.vi;
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_langext_t simd_isub(simd128_langext_t _a, simd128_langext_t _b)
+ {
+ simd128_langext_t result;
+ result.vi = _a.vi - _b.vi;
+ return result;
+ }
+
+ typedef simd128_langext_t simd128_t;
+
+} // namespace bx
+
+#endif // BX_SIMD128_LANGEXT_H_HEADER_GUARD
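The langext backend is built entirely on Clang-style vector extensions: a vector_size(16) float type gets element-wise +, -, *, / and comparisons, __builtin_shufflevector does the swizzles and shuffles, and __builtin_convertvector handles int<->float conversion. The same building blocks in isolation (Clang; a sketch, not bx code):

    typedef float f32x4 __attribute__( (vector_size(16) ) );

    // Element-wise add, then swizzle the sum to .yxwz, mirroring simd_add + simd_swiz_yxwz.
    f32x4 addThenSwizYXWZ(f32x4 _a, f32x4 _b)
    {
        const f32x4 sum = _a + _b;
        return __builtin_shufflevector(sum, sum, 1, 0, 3, 2);
    }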
diff --git a/3rdparty/bx/include/bx/simd128_neon.inl b/3rdparty/bx/include/bx/simd128_neon.inl
new file mode 100644
index 00000000000..1dd0d1f12b2
--- /dev/null
+++ b/3rdparty/bx/include/bx/simd128_neon.inl
@@ -0,0 +1,562 @@
+/*
+ * Copyright 2010-2016 Branimir Karadzic. All rights reserved.
+ * License: https://github.com/bkaradzic/bx#license-bsd-2-clause
+ */
+
+#ifndef BX_SIMD128_NEON_H_HEADER_GUARD
+#define BX_SIMD128_NEON_H_HEADER_GUARD
+
+#define simd_rcp simd_rcp_ni
+#define simd_orx simd_orx_ni
+#define simd_orc simd_orc_ni
+#define simd_neg simd_neg_ni
+#define simd_madd simd_madd_ni
+#define simd_nmsub simd_nmsub_ni
+#define simd_div_nr simd_div_nr_ni
+#define simd_div simd_div_nr_ni
+#define simd_selb simd_selb_ni
+#define simd_sels simd_sels_ni
+#define simd_not simd_not_ni
+#define simd_abs simd_abs_ni
+#define simd_clamp simd_clamp_ni
+#define simd_lerp simd_lerp_ni
+#define simd_rsqrt simd_rsqrt_ni
+#define simd_rsqrt_nr simd_rsqrt_nr_ni
+#define simd_rsqrt_carmack simd_rsqrt_carmack_ni
+#define simd_sqrt_nr simd_sqrt_nr_ni
+#define simd_sqrt simd_sqrt_nr_ni
+#define simd_log2 simd_log2_ni
+#define simd_exp2 simd_exp2_ni
+#define simd_pow simd_pow_ni
+#define simd_cross3 simd_cross3_ni
+#define simd_normalize3 simd_normalize3_ni
+#define simd_dot3 simd_dot3_ni
+#define simd_dot simd_dot_ni
+#define simd_ceil simd_ceil_ni
+#define simd_floor simd_floor_ni
+
+#include "simd_ni.inl"
+
+namespace bx
+{
+#define ELEMx 0
+#define ELEMy 1
+#define ELEMz 2
+#define ELEMw 3
+#define BX_SIMD128_IMPLEMENT_SWIZZLE(_x, _y, _z, _w) \
+ template<> \
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_swiz_##_x##_y##_z##_w(simd128_neon_t _a) \
+ { \
+ return __builtin_shuffle(_a, (uint32x4_t){ ELEM##_x, ELEM##_y, ELEM##_z, ELEM##_w }); \
+ }
+
+#include "simd128_swizzle.inl"
+
+#undef BX_SIMD128_IMPLEMENT_SWIZZLE
+#undef ELEMw
+#undef ELEMz
+#undef ELEMy
+#undef ELEMx
+
+#define BX_SIMD128_IMPLEMENT_TEST(_xyzw, _swizzle) \
+ template<> \
+ BX_SIMD_FORCE_INLINE bool simd_test_any_##_xyzw(simd128_neon_t _test) \
+ { \
+ const simd128_neon_t tmp0 = simd_swiz_##_swizzle(_test); \
+ return simd_test_any_ni(tmp0); \
+ } \
+ \
+ template<> \
+ BX_SIMD_FORCE_INLINE bool simd_test_all_##_xyzw(simd128_neon_t _test) \
+ { \
+ const simd128_neon_t tmp0 = simd_swiz_##_swizzle(_test); \
+ return simd_test_all_ni(tmp0); \
+ }
+
+BX_SIMD128_IMPLEMENT_TEST(x, xxxx);
+BX_SIMD128_IMPLEMENT_TEST(y, yyyy);
+BX_SIMD128_IMPLEMENT_TEST(xy, xyyy);
+BX_SIMD128_IMPLEMENT_TEST(z, zzzz);
+BX_SIMD128_IMPLEMENT_TEST(xz, xzzz);
+BX_SIMD128_IMPLEMENT_TEST(yz, yzzz);
+BX_SIMD128_IMPLEMENT_TEST(xyz, xyzz);
+BX_SIMD128_IMPLEMENT_TEST(w, wwww);
+BX_SIMD128_IMPLEMENT_TEST(xw, xwww);
+BX_SIMD128_IMPLEMENT_TEST(yw, ywww);
+BX_SIMD128_IMPLEMENT_TEST(xyw, xyww);
+BX_SIMD128_IMPLEMENT_TEST(zw, zwww);
+BX_SIMD128_IMPLEMENT_TEST(xzw, xzww);
+BX_SIMD128_IMPLEMENT_TEST(yzw, yzww);
+#undef BX_SIMD128_IMPLEMENT_TEST
+
+ template<>
+ BX_SIMD_FORCE_INLINE bool simd_test_any_xyzw(simd128_neon_t _test)
+ {
+ return simd_test_any_ni(_test);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE bool simd_test_all_xyzw(simd128_neon_t _test)
+ {
+ return simd_test_all_ni(_test);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_shuf_xyAB(simd128_neon_t _a, simd128_neon_t _b)
+ {
+ return __builtin_shuffle(_a, _b, (uint32x4_t){ 0, 1, 4, 5 });
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_shuf_ABxy(simd128_neon_t _a, simd128_neon_t _b)
+ {
+ return __builtin_shuffle(_a, _b, (uint32x4_t){ 4, 5, 0, 1 });
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_shuf_CDzw(simd128_neon_t _a, simd128_neon_t _b)
+ {
+ return __builtin_shuffle(_a, _b, (uint32x4_t){ 6, 7, 2, 3 });
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_shuf_zwCD(simd128_neon_t _a, simd128_neon_t _b)
+ {
+ return __builtin_shuffle(_a, _b, (uint32x4_t){ 2, 3, 6, 7 });
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_shuf_xAyB(simd128_neon_t _a, simd128_neon_t _b)
+ {
+ return __builtin_shuffle(_a, _b, (uint32x4_t){ 0, 4, 1, 5 });
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_shuf_yBxA(simd128_neon_t _a, simd128_neon_t _b)
+ {
+ return __builtin_shuffle(_a, _b, (uint32x4_t){ 1, 5, 0, 4 });
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_shuf_zCwD(simd128_neon_t _a, simd128_neon_t _b)
+ {
+ return __builtin_shuffle(_a, _b, (uint32x4_t){ 2, 6, 3, 7 });
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_shuf_CzDw(simd128_neon_t _a, simd128_neon_t _b)
+ {
+ return __builtin_shuffle(_a, _b, (uint32x4_t){ 6, 2, 7, 3 });
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE float simd_x(simd128_neon_t _a)
+ {
+ return vgetq_lane_f32(_a, 0);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE float simd_y(simd128_neon_t _a)
+ {
+ return vgetq_lane_f32(_a, 1);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE float simd_z(simd128_neon_t _a)
+ {
+ return vgetq_lane_f32(_a, 2);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE float simd_w(simd128_neon_t _a)
+ {
+ return vgetq_lane_f32(_a, 3);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_ld(const void* _ptr)
+ {
+ return vld1q_f32( (const float32_t*)_ptr);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE void simd_st(void* _ptr, simd128_neon_t _a)
+ {
+ vst1q_f32( (float32_t*)_ptr, _a);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE void simd_stx(void* _ptr, simd128_neon_t _a)
+ {
+ vst1q_lane_f32( (float32_t*)_ptr, _a, 0);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE void simd_stream(void* _ptr, simd128_neon_t _a)
+ {
+ vst1q_f32( (float32_t*)_ptr, _a);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_ld(float _x, float _y, float _z, float _w)
+ {
+ const float32_t val[4] = {_x, _y, _z, _w};
+ return simd_ld<simd128_neon_t>(val);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_ild(uint32_t _x, uint32_t _y, uint32_t _z, uint32_t _w)
+ {
+ const uint32_t val[4] = {_x, _y, _z, _w};
+ const uint32x4_t tmp = vld1q_u32(val);
+ const simd128_neon_t result = vreinterpretq_f32_u32(tmp);
+
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_splat(const void* _ptr)
+ {
+ const simd128_neon_t tmp0 = vld1q_f32( (const float32_t*)_ptr);
+ const float32x2_t tmp1 = vget_low_f32(tmp0);
+ const simd128_neon_t result = vdupq_lane_f32(tmp1, 0);
+
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_splat(float _a)
+ {
+ return vdupq_n_f32(_a);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_isplat(uint32_t _a)
+ {
+ const int32x4_t tmp = vdupq_n_s32(_a);
+ const simd128_neon_t result = vreinterpretq_f32_s32(tmp);
+
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_zero()
+ {
+ return simd_isplat<simd128_neon_t>(0);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_itof(simd128_neon_t _a)
+ {
+ const int32x4_t itof = vreinterpretq_s32_f32(_a);
+ const simd128_neon_t result = vcvtq_f32_s32(itof);
+
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_ftoi(simd128_neon_t _a)
+ {
+ const int32x4_t ftoi = vcvtq_s32_f32(_a);
+ const simd128_neon_t result = vreinterpretq_f32_s32(ftoi);
+
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_add(simd128_neon_t _a, simd128_neon_t _b)
+ {
+ return vaddq_f32(_a, _b);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_sub(simd128_neon_t _a, simd128_neon_t _b)
+ {
+ return vsubq_f32(_a, _b);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_mul(simd128_neon_t _a, simd128_neon_t _b)
+ {
+ return vmulq_f32(_a, _b);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_rcp_est(simd128_neon_t _a)
+ {
+ return vrecpeq_f32(_a);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_rsqrt_est(simd128_neon_t _a)
+ {
+ return vrsqrteq_f32(_a);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_cmpeq(simd128_neon_t _a, simd128_neon_t _b)
+ {
+ const uint32x4_t tmp = vceqq_f32(_a, _b);
+ const simd128_neon_t result = vreinterpretq_f32_u32(tmp);
+
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_cmplt(simd128_neon_t _a, simd128_neon_t _b)
+ {
+ const uint32x4_t tmp = vcltq_f32(_a, _b);
+ const simd128_neon_t result = vreinterpretq_f32_u32(tmp);
+
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_cmple(simd128_neon_t _a, simd128_neon_t _b)
+ {
+ const uint32x4_t tmp = vcleq_f32(_a, _b);
+ const simd128_neon_t result = vreinterpretq_f32_u32(tmp);
+
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_cmpgt(simd128_neon_t _a, simd128_neon_t _b)
+ {
+ const uint32x4_t tmp = vcgtq_f32(_a, _b);
+ const simd128_neon_t result = vreinterpretq_f32_u32(tmp);
+
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_cmpge(simd128_neon_t _a, simd128_neon_t _b)
+ {
+ const uint32x4_t tmp = vcgeq_f32(_a, _b);
+ const simd128_neon_t result = vreinterpretq_f32_u32(tmp);
+
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_min(simd128_neon_t _a, simd128_neon_t _b)
+ {
+ return vminq_f32(_a, _b);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_max(simd128_neon_t _a, simd128_neon_t _b)
+ {
+ return vmaxq_f32(_a, _b);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_and(simd128_neon_t _a, simd128_neon_t _b)
+ {
+ const int32x4_t tmp0 = vreinterpretq_s32_f32(_a);
+ const int32x4_t tmp1 = vreinterpretq_s32_f32(_b);
+ const int32x4_t tmp2 = vandq_s32(tmp0, tmp1);
+ const simd128_neon_t result = vreinterpretq_f32_s32(tmp2);
+
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_andc(simd128_neon_t _a, simd128_neon_t _b)
+ {
+ const int32x4_t tmp0 = vreinterpretq_s32_f32(_a);
+ const int32x4_t tmp1 = vreinterpretq_s32_f32(_b);
+ const int32x4_t tmp2 = vbicq_s32(tmp0, tmp1);
+ const simd128_neon_t result = vreinterpretq_f32_s32(tmp2);
+
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_or(simd128_neon_t _a, simd128_neon_t _b)
+ {
+ const int32x4_t tmp0 = vreinterpretq_s32_f32(_a);
+ const int32x4_t tmp1 = vreinterpretq_s32_f32(_b);
+ const int32x4_t tmp2 = vorrq_s32(tmp0, tmp1);
+ const simd128_neon_t result = vreinterpretq_f32_s32(tmp2);
+
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_xor(simd128_neon_t _a, simd128_neon_t _b)
+ {
+ const int32x4_t tmp0 = vreinterpretq_s32_f32(_a);
+ const int32x4_t tmp1 = vreinterpretq_s32_f32(_b);
+ const int32x4_t tmp2 = veorq_s32(tmp0, tmp1);
+ const simd128_neon_t result = vreinterpretq_f32_s32(tmp2);
+
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_sll(simd128_neon_t _a, int _count)
+ {
+ if (__builtin_constant_p(_count) )
+ {
+ const uint32x4_t tmp0 = vreinterpretq_u32_f32(_a);
+ const uint32x4_t tmp1 = vshlq_n_u32(tmp0, _count);
+ const simd128_neon_t result = vreinterpretq_f32_u32(tmp1);
+
+ return result;
+ }
+
+ const uint32x4_t tmp0 = vreinterpretq_u32_f32(_a);
+ const int32x4_t shift = vdupq_n_s32(_count);
+ const uint32x4_t tmp1 = vshlq_u32(tmp0, shift);
+ const simd128_neon_t result = vreinterpretq_f32_u32(tmp1);
+
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_srl(simd128_neon_t _a, int _count)
+ {
+ if (__builtin_constant_p(_count) )
+ {
+ const uint32x4_t tmp0 = vreinterpretq_u32_f32(_a);
+ const uint32x4_t tmp1 = vshrq_n_u32(tmp0, _count);
+ const simd128_neon_t result = vreinterpretq_f32_u32(tmp1);
+
+ return result;
+ }
+
+ const uint32x4_t tmp0 = vreinterpretq_u32_f32(_a);
+ const int32x4_t shift = vdupq_n_s32(-_count);
+ const uint32x4_t tmp1 = vshlq_u32(tmp0, shift);
+ const simd128_neon_t result = vreinterpretq_f32_u32(tmp1);
+
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_sra(simd128_neon_t _a, int _count)
+ {
+ if (__builtin_constant_p(_count) )
+ {
+ const int32x4_t tmp0 = vreinterpretq_s32_f32(_a);
+ const int32x4_t tmp1 = vshrq_n_s32(tmp0, _count);
+ const simd128_neon_t result = vreinterpretq_f32_s32(tmp1);
+
+ return result;
+ }
+
+ const int32x4_t tmp0 = vreinterpretq_s32_f32(_a);
+ const int32x4_t shift = vdupq_n_s32(-_count);
+ const int32x4_t tmp1 = vshlq_s32(tmp0, shift);
+ const simd128_neon_t result = vreinterpretq_f32_s32(tmp1);
+
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_madd(simd128_neon_t _a, simd128_neon_t _b, simd128_neon_t _c)
+ {
+ return vmlaq_f32(_c, _a, _b);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_nmsub(simd128_neon_t _a, simd128_neon_t _b, simd128_neon_t _c)
+ {
+ return vmlsq_f32(_c, _a, _b);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_icmpeq(simd128_neon_t _a, simd128_neon_t _b)
+ {
+ const int32x4_t tmp0 = vreinterpretq_s32_f32(_a);
+ const int32x4_t tmp1 = vreinterpretq_s32_f32(_b);
+ const uint32x4_t tmp2 = vceqq_s32(tmp0, tmp1);
+ const simd128_neon_t result = vreinterpretq_f32_u32(tmp2);
+
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_icmplt(simd128_neon_t _a, simd128_neon_t _b)
+ {
+ const int32x4_t tmp0 = vreinterpretq_s32_f32(_a);
+ const int32x4_t tmp1 = vreinterpretq_s32_f32(_b);
+ const uint32x4_t tmp2 = vcltq_s32(tmp0, tmp1);
+ const simd128_neon_t result = vreinterpretq_f32_u32(tmp2);
+
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_icmpgt(simd128_neon_t _a, simd128_neon_t _b)
+ {
+ const int32x4_t tmp0 = vreinterpretq_s32_f32(_a);
+ const int32x4_t tmp1 = vreinterpretq_s32_f32(_b);
+ const uint32x4_t tmp2 = vcgtq_s32(tmp0, tmp1);
+ const simd128_neon_t result = vreinterpretq_f32_u32(tmp2);
+
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_imin(simd128_neon_t _a, simd128_neon_t _b)
+ {
+ const int32x4_t tmp0 = vreinterpretq_s32_f32(_a);
+ const int32x4_t tmp1 = vreinterpretq_s32_f32(_b);
+ const int32x4_t tmp2 = vminq_s32(tmp0, tmp1);
+ const simd128_neon_t result = vreinterpretq_f32_s32(tmp2);
+
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_imax(simd128_neon_t _a, simd128_neon_t _b)
+ {
+ const int32x4_t tmp0 = vreinterpretq_s32_f32(_a);
+ const int32x4_t tmp1 = vreinterpretq_s32_f32(_b);
+ const int32x4_t tmp2 = vmaxq_s32(tmp0, tmp1);
+ const simd128_neon_t result = vreinterpretq_f32_s32(tmp2);
+
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_iadd(simd128_neon_t _a, simd128_neon_t _b)
+ {
+ const int32x4_t tmp0 = vreinterpretq_s32_f32(_a);
+ const int32x4_t tmp1 = vreinterpretq_s32_f32(_b);
+ const int32x4_t tmp2 = vaddq_s32(tmp0, tmp1);
+ const simd128_neon_t result = vreinterpretq_f32_s32(tmp2);
+
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_neon_t simd_isub(simd128_neon_t _a, simd128_neon_t _b)
+ {
+ const int32x4_t tmp0 = vreinterpretq_s32_f32(_a);
+ const int32x4_t tmp1 = vreinterpretq_s32_f32(_b);
+ const int32x4_t tmp2 = vsubq_s32(tmp0, tmp1);
+ const simd128_neon_t result = vreinterpretq_f32_s32(tmp2);
+
+ return result;
+ }
+
+ template<>
+ BX_SIMD_INLINE simd128_neon_t simd_shuf_xAzC(simd128_neon_t _a, simd128_neon_t _b)
+ {
+ return simd_shuf_xAzC_ni(_a, _b);
+ }
+
+ template<>
+ BX_SIMD_INLINE simd128_neon_t simd_shuf_yBwD(simd128_neon_t _a, simd128_neon_t _b)
+ {
+ return simd_shuf_yBwD_ni(_a, _b);
+ }
+
+ typedef simd128_neon_t simd128_t;
+
+} // namespace bx
+
+#endif // BX_SIMD128_NEON_H_HEADER_GUARD
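Since simd128_neon_t is a float32x4_t (judging by the intrinsics above), every integer and bitwise operation round-trips through vreinterpretq_*: reinterpret the float lanes as integers, apply the NEON intrinsic, then reinterpret back. The same pattern in isolation (ARM NEON; a sketch, not bx code):

    #include <arm_neon.h>

    // Bitwise AND of two "float" registers, as the simd_and specialization above does.
    float32x4_t andBits(float32x4_t _a, float32x4_t _b)
    {
        const int32x4_t ia = vreinterpretq_s32_f32(_a);
        const int32x4_t ib = vreinterpretq_s32_f32(_b);
        return vreinterpretq_f32_s32(vandq_s32(ia, ib) );
    }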
diff --git a/3rdparty/bx/include/bx/float4_ref.h b/3rdparty/bx/include/bx/simd128_ref.inl
index e54862ca371..da08f5088b6 100644
--- a/3rdparty/bx/include/bx/float4_ref.h
+++ b/3rdparty/bx/include/bx/simd128_ref.inl
@@ -3,29 +3,53 @@
* License: https://github.com/bkaradzic/bx#license-bsd-2-clause
*/
-#ifndef BX_FLOAT4_REF_H_HEADER_GUARD
-#define BX_FLOAT4_REF_H_HEADER_GUARD
+#ifndef BX_SIMD128_REF_H_HEADER_GUARD
+#define BX_SIMD128_REF_H_HEADER_GUARD
#include <math.h> // sqrtf
+#define simd_shuf_xAzC simd_shuf_xAzC_ni
+#define simd_shuf_yBwD simd_shuf_yBwD_ni
+#define simd_rcp simd_rcp_ni
+#define simd_orx simd_orx_ni
+#define simd_orc simd_orc_ni
+#define simd_neg simd_neg_ni
+#define simd_madd simd_madd_ni
+#define simd_nmsub simd_nmsub_ni
+#define simd_div_nr simd_div_nr_ni
+#define simd_selb simd_selb_ni
+#define simd_sels simd_sels_ni
+#define simd_not simd_not_ni
+#define simd_abs simd_abs_ni
+#define simd_clamp simd_clamp_ni
+#define simd_lerp simd_lerp_ni
+#define simd_rsqrt simd_rsqrt_ni
+#define simd_rsqrt_nr simd_rsqrt_nr_ni
+#define simd_rsqrt_carmack simd_rsqrt_carmack_ni
+#define simd_sqrt_nr simd_sqrt_nr_ni
+#define simd_log2 simd_log2_ni
+#define simd_exp2 simd_exp2_ni
+#define simd_pow simd_pow_ni
+#define simd_cross3 simd_cross3_ni
+#define simd_normalize3 simd_normalize3_ni
+#define simd_dot3 simd_dot3_ni
+#define simd_dot simd_dot_ni
+#define simd_ceil simd_ceil_ni
+#define simd_floor simd_floor_ni
+
+#include "simd_ni.inl"
+
namespace bx
{
- typedef union float4_t
- {
- float fxyzw[4];
- int32_t ixyzw[4];
- uint32_t uxyzw[4];
-
- } float4_t;
-
#define ELEMx 0
#define ELEMy 1
#define ELEMz 2
#define ELEMw 3
-#define IMPLEMENT_SWIZZLE(_x, _y, _z, _w) \
- BX_FLOAT4_FORCE_INLINE float4_t float4_swiz_##_x##_y##_z##_w(float4_t _a) \
+#define BX_SIMD128_IMPLEMENT_SWIZZLE(_x, _y, _z, _w) \
+ template<> \
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_swiz_##_x##_y##_z##_w(simd128_ref_t _a) \
{ \
- float4_t result; \
+ simd128_ref_t result; \
result.ixyzw[0] = _a.ixyzw[ELEM##_x]; \
result.ixyzw[1] = _a.ixyzw[ELEM##_y]; \
result.ixyzw[2] = _a.ixyzw[ELEM##_z]; \
@@ -33,16 +57,17 @@ namespace bx
return result; \
}
-#include "float4_swizzle.inl"
+#include "simd128_swizzle.inl"
-#undef IMPLEMENT_SWIZZLE
+#undef BX_SIMD128_IMPLEMENT_SWIZZLE
#undef ELEMw
#undef ELEMz
#undef ELEMy
#undef ELEMx
-#define IMPLEMENT_TEST(_xyzw, _mask) \
- BX_FLOAT4_FORCE_INLINE bool float4_test_any_##_xyzw(float4_t _test) \
+#define BX_SIMD128_IMPLEMENT_TEST(_xyzw, _mask) \
+ template<> \
+ BX_SIMD_FORCE_INLINE bool simd_test_any_##_xyzw(simd128_ref_t _test) \
{ \
uint32_t tmp = ( (_test.uxyzw[3]>>31)<<3) \
| ( (_test.uxyzw[2]>>31)<<2) \
@@ -52,7 +77,8 @@ namespace bx
return 0 != (tmp&(_mask) ); \
} \
\
- BX_FLOAT4_FORCE_INLINE bool float4_test_all_##_xyzw(float4_t _test) \
+ template<> \
+ BX_SIMD_FORCE_INLINE bool simd_test_all_##_xyzw(simd128_ref_t _test) \
{ \
uint32_t tmp = ( (_test.uxyzw[3]>>31)<<3) \
| ( (_test.uxyzw[2]>>31)<<2) \
@@ -62,27 +88,28 @@ namespace bx
return (_mask) == (tmp&(_mask) ); \
}
-IMPLEMENT_TEST(x , 0x1);
-IMPLEMENT_TEST(y , 0x2);
-IMPLEMENT_TEST(xy , 0x3);
-IMPLEMENT_TEST(z , 0x4);
-IMPLEMENT_TEST(xz , 0x5);
-IMPLEMENT_TEST(yz , 0x6);
-IMPLEMENT_TEST(xyz , 0x7);
-IMPLEMENT_TEST(w , 0x8);
-IMPLEMENT_TEST(xw , 0x9);
-IMPLEMENT_TEST(yw , 0xa);
-IMPLEMENT_TEST(xyw , 0xb);
-IMPLEMENT_TEST(zw , 0xc);
-IMPLEMENT_TEST(xzw , 0xd);
-IMPLEMENT_TEST(yzw , 0xe);
-IMPLEMENT_TEST(xyzw , 0xf);
-
-#undef IMPLEMENT_TEST
-
- BX_FLOAT4_FORCE_INLINE float4_t float4_shuf_xyAB(float4_t _a, float4_t _b)
- {
- float4_t result;
+BX_SIMD128_IMPLEMENT_TEST(x , 0x1);
+BX_SIMD128_IMPLEMENT_TEST(y , 0x2);
+BX_SIMD128_IMPLEMENT_TEST(xy , 0x3);
+BX_SIMD128_IMPLEMENT_TEST(z , 0x4);
+BX_SIMD128_IMPLEMENT_TEST(xz , 0x5);
+BX_SIMD128_IMPLEMENT_TEST(yz , 0x6);
+BX_SIMD128_IMPLEMENT_TEST(xyz , 0x7);
+BX_SIMD128_IMPLEMENT_TEST(w , 0x8);
+BX_SIMD128_IMPLEMENT_TEST(xw , 0x9);
+BX_SIMD128_IMPLEMENT_TEST(yw , 0xa);
+BX_SIMD128_IMPLEMENT_TEST(xyw , 0xb);
+BX_SIMD128_IMPLEMENT_TEST(zw , 0xc);
+BX_SIMD128_IMPLEMENT_TEST(xzw , 0xd);
+BX_SIMD128_IMPLEMENT_TEST(yzw , 0xe);
+BX_SIMD128_IMPLEMENT_TEST(xyzw , 0xf);
+
+#undef BX_SIMD128_IMPLEMENT_TEST
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_shuf_xyAB(simd128_ref_t _a, simd128_ref_t _b)
+ {
+ simd128_ref_t result;
result.uxyzw[0] = _a.uxyzw[0];
result.uxyzw[1] = _a.uxyzw[1];
result.uxyzw[2] = _b.uxyzw[0];
@@ -90,9 +117,10 @@ IMPLEMENT_TEST(xyzw , 0xf);
return result;
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_shuf_ABxy(float4_t _a, float4_t _b)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_shuf_ABxy(simd128_ref_t _a, simd128_ref_t _b)
{
- float4_t result;
+ simd128_ref_t result;
result.uxyzw[0] = _b.uxyzw[0];
result.uxyzw[1] = _b.uxyzw[1];
result.uxyzw[2] = _a.uxyzw[0];
@@ -100,9 +128,10 @@ IMPLEMENT_TEST(xyzw , 0xf);
return result;
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_shuf_CDzw(float4_t _a, float4_t _b)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_shuf_CDzw(simd128_ref_t _a, simd128_ref_t _b)
{
- float4_t result;
+ simd128_ref_t result;
result.uxyzw[0] = _b.uxyzw[2];
result.uxyzw[1] = _b.uxyzw[3];
result.uxyzw[2] = _a.uxyzw[2];
@@ -110,9 +139,10 @@ IMPLEMENT_TEST(xyzw , 0xf);
return result;
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_shuf_zwCD(float4_t _a, float4_t _b)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_shuf_zwCD(simd128_ref_t _a, simd128_ref_t _b)
{
- float4_t result;
+ simd128_ref_t result;
result.uxyzw[0] = _a.uxyzw[2];
result.uxyzw[1] = _a.uxyzw[3];
result.uxyzw[2] = _b.uxyzw[2];
@@ -120,9 +150,10 @@ IMPLEMENT_TEST(xyzw , 0xf);
return result;
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_shuf_xAyB(float4_t _a, float4_t _b)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_shuf_xAyB(simd128_ref_t _a, simd128_ref_t _b)
{
- float4_t result;
+ simd128_ref_t result;
result.uxyzw[0] = _a.uxyzw[0];
result.uxyzw[1] = _b.uxyzw[0];
result.uxyzw[2] = _a.uxyzw[1];
@@ -130,9 +161,10 @@ IMPLEMENT_TEST(xyzw , 0xf);
return result;
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_shuf_yBxA(float4_t _a, float4_t _b)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_shuf_yBxA(simd128_ref_t _a, simd128_ref_t _b)
{
- float4_t result;
+ simd128_ref_t result;
result.uxyzw[0] = _a.uxyzw[1];
result.uxyzw[1] = _b.uxyzw[1];
result.uxyzw[2] = _a.uxyzw[0];
@@ -140,9 +172,10 @@ IMPLEMENT_TEST(xyzw , 0xf);
return result;
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_shuf_zCwD(float4_t _a, float4_t _b)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_shuf_zCwD(simd128_ref_t _a, simd128_ref_t _b)
{
- float4_t result;
+ simd128_ref_t result;
result.uxyzw[0] = _a.uxyzw[2];
result.uxyzw[1] = _b.uxyzw[2];
result.uxyzw[2] = _a.uxyzw[3];
@@ -150,9 +183,10 @@ IMPLEMENT_TEST(xyzw , 0xf);
return result;
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_shuf_CzDw(float4_t _a, float4_t _b)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_shuf_CzDw(simd128_ref_t _a, simd128_ref_t _b)
{
- float4_t result;
+ simd128_ref_t result;
result.uxyzw[0] = _b.uxyzw[2];
result.uxyzw[1] = _a.uxyzw[2];
result.uxyzw[2] = _b.uxyzw[3];
@@ -160,30 +194,35 @@ IMPLEMENT_TEST(xyzw , 0xf);
return result;
}
- BX_FLOAT4_FORCE_INLINE float float4_x(float4_t _a)
+ template<>
+ BX_SIMD_FORCE_INLINE float simd_x(simd128_ref_t _a)
{
return _a.fxyzw[0];
}
- BX_FLOAT4_FORCE_INLINE float float4_y(float4_t _a)
+ template<>
+ BX_SIMD_FORCE_INLINE float simd_y(simd128_ref_t _a)
{
return _a.fxyzw[1];
}
- BX_FLOAT4_FORCE_INLINE float float4_z(float4_t _a)
+ template<>
+ BX_SIMD_FORCE_INLINE float simd_z(simd128_ref_t _a)
{
return _a.fxyzw[2];
}
- BX_FLOAT4_FORCE_INLINE float float4_w(float4_t _a)
+ template<>
+ BX_SIMD_FORCE_INLINE float simd_w(simd128_ref_t _a)
{
return _a.fxyzw[3];
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_ld(const void* _ptr)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_ld(const void* _ptr)
{
const uint32_t* input = reinterpret_cast<const uint32_t*>(_ptr);
- float4_t result;
+ simd128_ref_t result;
result.uxyzw[0] = input[0];
result.uxyzw[1] = input[1];
result.uxyzw[2] = input[2];
@@ -191,7 +230,8 @@ IMPLEMENT_TEST(xyzw , 0xf);
return result;
}
- BX_FLOAT4_FORCE_INLINE void float4_st(void* _ptr, float4_t _a)
+ template<>
+ BX_SIMD_FORCE_INLINE void simd_st(void* _ptr, simd128_ref_t _a)
{
uint32_t* result = reinterpret_cast<uint32_t*>(_ptr);
result[0] = _a.uxyzw[0];
@@ -200,13 +240,15 @@ IMPLEMENT_TEST(xyzw , 0xf);
result[3] = _a.uxyzw[3];
}
- BX_FLOAT4_FORCE_INLINE void float4_stx(void* _ptr, float4_t _a)
+ template<>
+ BX_SIMD_FORCE_INLINE void simd_stx(void* _ptr, simd128_ref_t _a)
{
uint32_t* result = reinterpret_cast<uint32_t*>(_ptr);
result[0] = _a.uxyzw[0];
}
- BX_FLOAT4_FORCE_INLINE void float4_stream(void* _ptr, float4_t _a)
+ template<>
+ BX_SIMD_FORCE_INLINE void simd_stream(void* _ptr, simd128_ref_t _a)
{
uint32_t* result = reinterpret_cast<uint32_t*>(_ptr);
result[0] = _a.uxyzw[0];
@@ -215,9 +257,10 @@ IMPLEMENT_TEST(xyzw , 0xf);
result[3] = _a.uxyzw[3];
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_ld(float _x, float _y, float _z, float _w)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_ld(float _x, float _y, float _z, float _w)
{
- float4_t result;
+ simd128_ref_t result;
result.fxyzw[0] = _x;
result.fxyzw[1] = _y;
result.fxyzw[2] = _z;
@@ -225,9 +268,10 @@ IMPLEMENT_TEST(xyzw , 0xf);
return result;
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_ild(uint32_t _x, uint32_t _y, uint32_t _z, uint32_t _w)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_ild(uint32_t _x, uint32_t _y, uint32_t _z, uint32_t _w)
{
- float4_t result;
+ simd128_ref_t result;
result.uxyzw[0] = _x;
result.uxyzw[1] = _y;
result.uxyzw[2] = _z;
@@ -235,10 +279,11 @@ IMPLEMENT_TEST(xyzw , 0xf);
return result;
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_splat(const void* _ptr)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_splat(const void* _ptr)
{
const uint32_t val = *reinterpret_cast<const uint32_t*>(_ptr);
- float4_t result;
+ simd128_ref_t result;
result.uxyzw[0] = val;
result.uxyzw[1] = val;
result.uxyzw[2] = val;
@@ -246,24 +291,28 @@ IMPLEMENT_TEST(xyzw , 0xf);
return result;
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_splat(float _a)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_splat(float _a)
{
- return float4_ld(_a, _a, _a, _a);
+ return simd_ld<simd128_ref_t>(_a, _a, _a, _a);
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_isplat(uint32_t _a)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_isplat(uint32_t _a)
{
- return float4_ild(_a, _a, _a, _a);
+ return simd_ild<simd128_ref_t>(_a, _a, _a, _a);
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_zero()
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_zero()
{
- return float4_ild(0, 0, 0, 0);
+ return simd_ild<simd128_ref_t>(0, 0, 0, 0);
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_itof(float4_t _a)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_itof(simd128_ref_t _a)
{
- float4_t result;
+ simd128_ref_t result;
result.fxyzw[0] = (float)_a.ixyzw[0];
result.fxyzw[1] = (float)_a.ixyzw[1];
result.fxyzw[2] = (float)_a.ixyzw[2];
@@ -271,9 +320,10 @@ IMPLEMENT_TEST(xyzw , 0xf);
return result;
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_ftoi(float4_t _a)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_ftoi(simd128_ref_t _a)
{
- float4_t result;
+ simd128_ref_t result;
result.ixyzw[0] = (int)_a.fxyzw[0];
result.ixyzw[1] = (int)_a.fxyzw[1];
result.ixyzw[2] = (int)_a.fxyzw[2];
@@ -281,17 +331,16 @@ IMPLEMENT_TEST(xyzw , 0xf);
return result;
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_round(float4_t _a)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_round(simd128_ref_t _a)
{
- const float4_t tmp = float4_ftoi(_a);
- const float4_t result = float4_itof(tmp);
-
- return result;
+ return simd_round_ni(_a);
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_add(float4_t _a, float4_t _b)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_add(simd128_ref_t _a, simd128_ref_t _b)
{
- float4_t result;
+ simd128_ref_t result;
result.fxyzw[0] = _a.fxyzw[0] + _b.fxyzw[0];
result.fxyzw[1] = _a.fxyzw[1] + _b.fxyzw[1];
result.fxyzw[2] = _a.fxyzw[2] + _b.fxyzw[2];
@@ -299,9 +348,10 @@ IMPLEMENT_TEST(xyzw , 0xf);
return result;
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_sub(float4_t _a, float4_t _b)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_sub(simd128_ref_t _a, simd128_ref_t _b)
{
- float4_t result;
+ simd128_ref_t result;
result.fxyzw[0] = _a.fxyzw[0] - _b.fxyzw[0];
result.fxyzw[1] = _a.fxyzw[1] - _b.fxyzw[1];
result.fxyzw[2] = _a.fxyzw[2] - _b.fxyzw[2];
@@ -309,9 +359,10 @@ IMPLEMENT_TEST(xyzw , 0xf);
return result;
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_mul(float4_t _a, float4_t _b)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_mul(simd128_ref_t _a, simd128_ref_t _b)
{
- float4_t result;
+ simd128_ref_t result;
result.fxyzw[0] = _a.fxyzw[0] * _b.fxyzw[0];
result.fxyzw[1] = _a.fxyzw[1] * _b.fxyzw[1];
result.fxyzw[2] = _a.fxyzw[2] * _b.fxyzw[2];
@@ -319,9 +370,10 @@ IMPLEMENT_TEST(xyzw , 0xf);
return result;
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_div(float4_t _a, float4_t _b)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_div(simd128_ref_t _a, simd128_ref_t _b)
{
- float4_t result;
+ simd128_ref_t result;
result.fxyzw[0] = _a.fxyzw[0] / _b.fxyzw[0];
result.fxyzw[1] = _a.fxyzw[1] / _b.fxyzw[1];
result.fxyzw[2] = _a.fxyzw[2] / _b.fxyzw[2];
@@ -329,9 +381,10 @@ IMPLEMENT_TEST(xyzw , 0xf);
return result;
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_rcp_est(float4_t _a)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_rcp_est(simd128_ref_t _a)
{
- float4_t result;
+ simd128_ref_t result;
result.fxyzw[0] = 1.0f / _a.fxyzw[0];
result.fxyzw[1] = 1.0f / _a.fxyzw[1];
result.fxyzw[2] = 1.0f / _a.fxyzw[2];
@@ -339,9 +392,10 @@ IMPLEMENT_TEST(xyzw , 0xf);
return result;
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_sqrt(float4_t _a)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_sqrt(simd128_ref_t _a)
{
- float4_t result;
+ simd128_ref_t result;
result.fxyzw[0] = sqrtf(_a.fxyzw[0]);
result.fxyzw[1] = sqrtf(_a.fxyzw[1]);
result.fxyzw[2] = sqrtf(_a.fxyzw[2]);
@@ -349,9 +403,10 @@ IMPLEMENT_TEST(xyzw , 0xf);
return result;
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_rsqrt_est(float4_t _a)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_rsqrt_est(simd128_ref_t _a)
{
- float4_t result;
+ simd128_ref_t result;
result.fxyzw[0] = 1.0f / sqrtf(_a.fxyzw[0]);
result.fxyzw[1] = 1.0f / sqrtf(_a.fxyzw[1]);
result.fxyzw[2] = 1.0f / sqrtf(_a.fxyzw[2]);
@@ -359,9 +414,10 @@ IMPLEMENT_TEST(xyzw , 0xf);
return result;
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_cmpeq(float4_t _a, float4_t _b)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_cmpeq(simd128_ref_t _a, simd128_ref_t _b)
{
- float4_t result;
+ simd128_ref_t result;
result.ixyzw[0] = _a.fxyzw[0] == _b.fxyzw[0] ? 0xffffffff : 0x0;
result.ixyzw[1] = _a.fxyzw[1] == _b.fxyzw[1] ? 0xffffffff : 0x0;
result.ixyzw[2] = _a.fxyzw[2] == _b.fxyzw[2] ? 0xffffffff : 0x0;
@@ -369,9 +425,10 @@ IMPLEMENT_TEST(xyzw , 0xf);
return result;
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_cmplt(float4_t _a, float4_t _b)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_cmplt(simd128_ref_t _a, simd128_ref_t _b)
{
- float4_t result;
+ simd128_ref_t result;
result.ixyzw[0] = _a.fxyzw[0] < _b.fxyzw[0] ? 0xffffffff : 0x0;
result.ixyzw[1] = _a.fxyzw[1] < _b.fxyzw[1] ? 0xffffffff : 0x0;
result.ixyzw[2] = _a.fxyzw[2] < _b.fxyzw[2] ? 0xffffffff : 0x0;
@@ -379,9 +436,10 @@ IMPLEMENT_TEST(xyzw , 0xf);
return result;
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_cmple(float4_t _a, float4_t _b)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_cmple(simd128_ref_t _a, simd128_ref_t _b)
{
- float4_t result;
+ simd128_ref_t result;
result.ixyzw[0] = _a.fxyzw[0] <= _b.fxyzw[0] ? 0xffffffff : 0x0;
result.ixyzw[1] = _a.fxyzw[1] <= _b.fxyzw[1] ? 0xffffffff : 0x0;
result.ixyzw[2] = _a.fxyzw[2] <= _b.fxyzw[2] ? 0xffffffff : 0x0;
@@ -389,9 +447,10 @@ IMPLEMENT_TEST(xyzw , 0xf);
return result;
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_cmpgt(float4_t _a, float4_t _b)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_cmpgt(simd128_ref_t _a, simd128_ref_t _b)
{
- float4_t result;
+ simd128_ref_t result;
result.ixyzw[0] = _a.fxyzw[0] > _b.fxyzw[0] ? 0xffffffff : 0x0;
result.ixyzw[1] = _a.fxyzw[1] > _b.fxyzw[1] ? 0xffffffff : 0x0;
result.ixyzw[2] = _a.fxyzw[2] > _b.fxyzw[2] ? 0xffffffff : 0x0;
@@ -399,9 +458,10 @@ IMPLEMENT_TEST(xyzw , 0xf);
return result;
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_cmpge(float4_t _a, float4_t _b)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_cmpge(simd128_ref_t _a, simd128_ref_t _b)
{
- float4_t result;
+ simd128_ref_t result;
result.ixyzw[0] = _a.fxyzw[0] >= _b.fxyzw[0] ? 0xffffffff : 0x0;
result.ixyzw[1] = _a.fxyzw[1] >= _b.fxyzw[1] ? 0xffffffff : 0x0;
result.ixyzw[2] = _a.fxyzw[2] >= _b.fxyzw[2] ? 0xffffffff : 0x0;
@@ -409,9 +469,10 @@ IMPLEMENT_TEST(xyzw , 0xf);
return result;
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_min(float4_t _a, float4_t _b)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_min(simd128_ref_t _a, simd128_ref_t _b)
{
- float4_t result;
+ simd128_ref_t result;
result.fxyzw[0] = _a.fxyzw[0] < _b.fxyzw[0] ? _a.fxyzw[0] : _b.fxyzw[0];
result.fxyzw[1] = _a.fxyzw[1] < _b.fxyzw[1] ? _a.fxyzw[1] : _b.fxyzw[1];
result.fxyzw[2] = _a.fxyzw[2] < _b.fxyzw[2] ? _a.fxyzw[2] : _b.fxyzw[2];
@@ -419,9 +480,10 @@ IMPLEMENT_TEST(xyzw , 0xf);
return result;
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_max(float4_t _a, float4_t _b)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_max(simd128_ref_t _a, simd128_ref_t _b)
{
- float4_t result;
+ simd128_ref_t result;
result.fxyzw[0] = _a.fxyzw[0] > _b.fxyzw[0] ? _a.fxyzw[0] : _b.fxyzw[0];
result.fxyzw[1] = _a.fxyzw[1] > _b.fxyzw[1] ? _a.fxyzw[1] : _b.fxyzw[1];
result.fxyzw[2] = _a.fxyzw[2] > _b.fxyzw[2] ? _a.fxyzw[2] : _b.fxyzw[2];
@@ -429,9 +491,10 @@ IMPLEMENT_TEST(xyzw , 0xf);
return result;
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_and(float4_t _a, float4_t _b)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_and(simd128_ref_t _a, simd128_ref_t _b)
{
- float4_t result;
+ simd128_ref_t result;
result.uxyzw[0] = _a.uxyzw[0] & _b.uxyzw[0];
result.uxyzw[1] = _a.uxyzw[1] & _b.uxyzw[1];
result.uxyzw[2] = _a.uxyzw[2] & _b.uxyzw[2];
@@ -439,9 +502,10 @@ IMPLEMENT_TEST(xyzw , 0xf);
return result;
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_andc(float4_t _a, float4_t _b)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_andc(simd128_ref_t _a, simd128_ref_t _b)
{
- float4_t result;
+ simd128_ref_t result;
result.uxyzw[0] = _a.uxyzw[0] & ~_b.uxyzw[0];
result.uxyzw[1] = _a.uxyzw[1] & ~_b.uxyzw[1];
result.uxyzw[2] = _a.uxyzw[2] & ~_b.uxyzw[2];
@@ -449,9 +513,10 @@ IMPLEMENT_TEST(xyzw , 0xf);
return result;
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_or(float4_t _a, float4_t _b)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_or(simd128_ref_t _a, simd128_ref_t _b)
{
- float4_t result;
+ simd128_ref_t result;
result.uxyzw[0] = _a.uxyzw[0] | _b.uxyzw[0];
result.uxyzw[1] = _a.uxyzw[1] | _b.uxyzw[1];
result.uxyzw[2] = _a.uxyzw[2] | _b.uxyzw[2];
@@ -459,9 +524,10 @@ IMPLEMENT_TEST(xyzw , 0xf);
return result;
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_xor(float4_t _a, float4_t _b)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_xor(simd128_ref_t _a, simd128_ref_t _b)
{
- float4_t result;
+ simd128_ref_t result;
result.uxyzw[0] = _a.uxyzw[0] ^ _b.uxyzw[0];
result.uxyzw[1] = _a.uxyzw[1] ^ _b.uxyzw[1];
result.uxyzw[2] = _a.uxyzw[2] ^ _b.uxyzw[2];
@@ -469,9 +535,10 @@ IMPLEMENT_TEST(xyzw , 0xf);
return result;
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_sll(float4_t _a, int _count)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_sll(simd128_ref_t _a, int _count)
{
- float4_t result;
+ simd128_ref_t result;
result.uxyzw[0] = _a.uxyzw[0] << _count;
result.uxyzw[1] = _a.uxyzw[1] << _count;
result.uxyzw[2] = _a.uxyzw[2] << _count;
@@ -479,9 +546,10 @@ IMPLEMENT_TEST(xyzw , 0xf);
return result;
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_srl(float4_t _a, int _count)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_srl(simd128_ref_t _a, int _count)
{
- float4_t result;
+ simd128_ref_t result;
result.uxyzw[0] = _a.uxyzw[0] >> _count;
result.uxyzw[1] = _a.uxyzw[1] >> _count;
result.uxyzw[2] = _a.uxyzw[2] >> _count;
@@ -489,9 +557,10 @@ IMPLEMENT_TEST(xyzw , 0xf);
return result;
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_sra(float4_t _a, int _count)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_sra(simd128_ref_t _a, int _count)
{
- float4_t result;
+ simd128_ref_t result;
result.ixyzw[0] = _a.ixyzw[0] >> _count;
result.ixyzw[1] = _a.ixyzw[1] >> _count;
result.ixyzw[2] = _a.ixyzw[2] >> _count;
@@ -499,9 +568,10 @@ IMPLEMENT_TEST(xyzw , 0xf);
return result;
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_icmpeq(float4_t _a, float4_t _b)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_icmpeq(simd128_ref_t _a, simd128_ref_t _b)
{
- float4_t result;
+ simd128_ref_t result;
result.ixyzw[0] = _a.ixyzw[0] == _b.ixyzw[0] ? 0xffffffff : 0x0;
result.ixyzw[1] = _a.ixyzw[1] == _b.ixyzw[1] ? 0xffffffff : 0x0;
result.ixyzw[2] = _a.ixyzw[2] == _b.ixyzw[2] ? 0xffffffff : 0x0;
@@ -509,9 +579,10 @@ IMPLEMENT_TEST(xyzw , 0xf);
return result;
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_icmplt(float4_t _a, float4_t _b)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_icmplt(simd128_ref_t _a, simd128_ref_t _b)
{
- float4_t result;
+ simd128_ref_t result;
result.ixyzw[0] = _a.ixyzw[0] < _b.ixyzw[0] ? 0xffffffff : 0x0;
result.ixyzw[1] = _a.ixyzw[1] < _b.ixyzw[1] ? 0xffffffff : 0x0;
result.ixyzw[2] = _a.ixyzw[2] < _b.ixyzw[2] ? 0xffffffff : 0x0;
@@ -519,9 +590,10 @@ IMPLEMENT_TEST(xyzw , 0xf);
return result;
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_icmpgt(float4_t _a, float4_t _b)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_icmpgt(simd128_ref_t _a, simd128_ref_t _b)
{
- float4_t result;
+ simd128_ref_t result;
result.ixyzw[0] = _a.ixyzw[0] > _b.ixyzw[0] ? 0xffffffff : 0x0;
result.ixyzw[1] = _a.ixyzw[1] > _b.ixyzw[1] ? 0xffffffff : 0x0;
result.ixyzw[2] = _a.ixyzw[2] > _b.ixyzw[2] ? 0xffffffff : 0x0;
@@ -529,9 +601,10 @@ IMPLEMENT_TEST(xyzw , 0xf);
return result;
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_imin(float4_t _a, float4_t _b)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_imin(simd128_ref_t _a, simd128_ref_t _b)
{
- float4_t result;
+ simd128_ref_t result;
result.ixyzw[0] = _a.ixyzw[0] < _b.ixyzw[0] ? _a.ixyzw[0] : _b.ixyzw[0];
result.ixyzw[1] = _a.ixyzw[1] < _b.ixyzw[1] ? _a.ixyzw[1] : _b.ixyzw[1];
result.ixyzw[2] = _a.ixyzw[2] < _b.ixyzw[2] ? _a.ixyzw[2] : _b.ixyzw[2];
@@ -539,9 +612,10 @@ IMPLEMENT_TEST(xyzw , 0xf);
return result;
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_imax(float4_t _a, float4_t _b)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_imax(simd128_ref_t _a, simd128_ref_t _b)
{
- float4_t result;
+ simd128_ref_t result;
result.ixyzw[0] = _a.ixyzw[0] > _b.ixyzw[0] ? _a.ixyzw[0] : _b.ixyzw[0];
result.ixyzw[1] = _a.ixyzw[1] > _b.ixyzw[1] ? _a.ixyzw[1] : _b.ixyzw[1];
result.ixyzw[2] = _a.ixyzw[2] > _b.ixyzw[2] ? _a.ixyzw[2] : _b.ixyzw[2];
@@ -549,9 +623,10 @@ IMPLEMENT_TEST(xyzw , 0xf);
return result;
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_iadd(float4_t _a, float4_t _b)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_iadd(simd128_ref_t _a, simd128_ref_t _b)
{
- float4_t result;
+ simd128_ref_t result;
result.ixyzw[0] = _a.ixyzw[0] + _b.ixyzw[0];
result.ixyzw[1] = _a.ixyzw[1] + _b.ixyzw[1];
result.ixyzw[2] = _a.ixyzw[2] + _b.ixyzw[2];
@@ -559,9 +634,10 @@ IMPLEMENT_TEST(xyzw , 0xf);
return result;
}
- BX_FLOAT4_FORCE_INLINE float4_t float4_isub(float4_t _a, float4_t _b)
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_ref_t simd_isub(simd128_ref_t _a, simd128_ref_t _b)
{
- float4_t result;
+ simd128_ref_t result;
result.ixyzw[0] = _a.ixyzw[0] - _b.ixyzw[0];
result.ixyzw[1] = _a.ixyzw[1] - _b.ixyzw[1];
result.ixyzw[2] = _a.ixyzw[2] - _b.ixyzw[2];
@@ -571,34 +647,4 @@ IMPLEMENT_TEST(xyzw , 0xf);
} // namespace bx
-#define float4_shuf_xAzC float4_shuf_xAzC_ni
-#define float4_shuf_yBwD float4_shuf_yBwD_ni
-#define float4_rcp float4_rcp_ni
-#define float4_orx float4_orx_ni
-#define float4_orc float4_orc_ni
-#define float4_neg float4_neg_ni
-#define float4_madd float4_madd_ni
-#define float4_nmsub float4_nmsub_ni
-#define float4_div_nr float4_div_nr_ni
-#define float4_selb float4_selb_ni
-#define float4_sels float4_sels_ni
-#define float4_not float4_not_ni
-#define float4_abs float4_abs_ni
-#define float4_clamp float4_clamp_ni
-#define float4_lerp float4_lerp_ni
-#define float4_rsqrt float4_rsqrt_ni
-#define float4_rsqrt_nr float4_rsqrt_nr_ni
-#define float4_rsqrt_carmack float4_rsqrt_carmack_ni
-#define float4_sqrt_nr float4_sqrt_nr_ni
-#define float4_log2 float4_log2_ni
-#define float4_exp2 float4_exp2_ni
-#define float4_pow float4_pow_ni
-#define float4_cross3 float4_cross3_ni
-#define float4_normalize3 float4_normalize3_ni
-#define float4_dot3 float4_dot3_ni
-#define float4_dot float4_dot_ni
-#define float4_ceil float4_ceil_ni
-#define float4_floor float4_floor_ni
-#include "float4_ni.h"
-
-#endif // BX_FLOAT4_REF_H_HEADER_GUARD
+#endif // BX_SIMD128_REF_H_HEADER_GUARD
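In this reference path every compare writes an all-ones or all-zero lane, and the generated test helpers fold the four sign bits into a 4-bit mask, mirroring what _mm_movemask_ps does for the SSE backend. A small illustrative sketch of how those pieces are meant to combine, using only the simd_cmplt/simd_test_*/simd_selb entry points defined in these files:

	bool allBelow(bx::simd128_t _v, bx::simd128_t _limit)
	{
		const bx::simd128_t mask = bx::simd_cmplt(_v, _limit); // 0xffffffff in every passing lane
		return bx::simd_test_all_xyzw(mask);                   // true only if all four sign bits are set
	}

	bx::simd128_t minPerLane(bx::simd128_t _a, bx::simd128_t _b)
	{
		const bx::simd128_t mask = bx::simd_cmplt(_a, _b);
		return bx::simd_selb(mask, _a, _b);                    // branch-free select, same recipe as simd_min_ni
	}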
diff --git a/3rdparty/bx/include/bx/simd128_sse.inl b/3rdparty/bx/include/bx/simd128_sse.inl
new file mode 100644
index 00000000000..b0ed8520ab4
--- /dev/null
+++ b/3rdparty/bx/include/bx/simd128_sse.inl
@@ -0,0 +1,647 @@
+/*
+ * Copyright 2010-2016 Branimir Karadzic. All rights reserved.
+ * License: https://github.com/bkaradzic/bx#license-bsd-2-clause
+ */
+
+#ifndef BX_SIMD128_SSE_H_HEADER_GUARD
+#define BX_SIMD128_SSE_H_HEADER_GUARD
+
+#include "simd_ni.inl"
+
+namespace bx
+{
+#define ELEMx 0
+#define ELEMy 1
+#define ELEMz 2
+#define ELEMw 3
+#define BX_SIMD128_IMPLEMENT_SWIZZLE(_x, _y, _z, _w) \
+ template<> \
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_swiz_##_x##_y##_z##_w(simd128_sse_t _a) \
+ { \
+ return _mm_shuffle_ps( _a, _a, _MM_SHUFFLE(ELEM##_w, ELEM##_z, ELEM##_y, ELEM##_x ) ); \
+ }
+
+#include "simd128_swizzle.inl"
+
+#undef BX_SIMD128_IMPLEMENT_SWIZZLE
+#undef ELEMw
+#undef ELEMz
+#undef ELEMy
+#undef ELEMx
+
+#define BX_SIMD128_IMPLEMENT_TEST(_xyzw, _mask) \
+ template<> \
+ BX_SIMD_FORCE_INLINE bool simd_test_any_##_xyzw(simd128_sse_t _test) \
+ { \
+ return 0x0 != (_mm_movemask_ps(_test)&(_mask) ); \
+ } \
+ \
+ template<> \
+ BX_SIMD_FORCE_INLINE bool simd_test_all_##_xyzw(simd128_sse_t _test) \
+ { \
+ return (_mask) == (_mm_movemask_ps(_test)&(_mask) ); \
+ }
+
+BX_SIMD128_IMPLEMENT_TEST(x , 0x1);
+BX_SIMD128_IMPLEMENT_TEST(y , 0x2);
+BX_SIMD128_IMPLEMENT_TEST(xy , 0x3);
+BX_SIMD128_IMPLEMENT_TEST(z , 0x4);
+BX_SIMD128_IMPLEMENT_TEST(xz , 0x5);
+BX_SIMD128_IMPLEMENT_TEST(yz , 0x6);
+BX_SIMD128_IMPLEMENT_TEST(xyz , 0x7);
+BX_SIMD128_IMPLEMENT_TEST(w , 0x8);
+BX_SIMD128_IMPLEMENT_TEST(xw , 0x9);
+BX_SIMD128_IMPLEMENT_TEST(yw , 0xa);
+BX_SIMD128_IMPLEMENT_TEST(xyw , 0xb);
+BX_SIMD128_IMPLEMENT_TEST(zw , 0xc);
+BX_SIMD128_IMPLEMENT_TEST(xzw , 0xd);
+BX_SIMD128_IMPLEMENT_TEST(yzw , 0xe);
+BX_SIMD128_IMPLEMENT_TEST(xyzw , 0xf);
+
+#undef BX_SIMD128_IMPLEMENT_TEST
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_shuf_xyAB(simd128_sse_t _a, simd128_sse_t _b)
+ {
+ return _mm_movelh_ps(_a, _b);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_shuf_ABxy(simd128_sse_t _a, simd128_sse_t _b)
+ {
+ return _mm_movelh_ps(_b, _a);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_shuf_CDzw(simd128_sse_t _a, simd128_sse_t _b)
+ {
+ return _mm_movehl_ps(_a, _b);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_shuf_zwCD(simd128_sse_t _a, simd128_sse_t _b)
+ {
+ return _mm_movehl_ps(_b, _a);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_shuf_xAyB(simd128_sse_t _a, simd128_sse_t _b)
+ {
+ return _mm_unpacklo_ps(_a, _b);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_shuf_yBxA(simd128_sse_t _a, simd128_sse_t _b)
+ {
+ return _mm_unpacklo_ps(_b, _a);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_shuf_zCwD(simd128_sse_t _a, simd128_sse_t _b)
+ {
+ return _mm_unpackhi_ps(_a, _b);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_shuf_CzDw(simd128_sse_t _a, simd128_sse_t _b)
+ {
+ return _mm_unpackhi_ps(_b, _a);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE float simd_x(simd128_sse_t _a)
+ {
+ return _mm_cvtss_f32(_a);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE float simd_y(simd128_sse_t _a)
+ {
+ const simd128_sse_t yyyy = simd_swiz_yyyy(_a);
+ const float result = _mm_cvtss_f32(yyyy);
+
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE float simd_z(simd128_sse_t _a)
+ {
+ const simd128_sse_t zzzz = simd_swiz_zzzz(_a);
+ const float result = _mm_cvtss_f32(zzzz);
+
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE float simd_w(simd128_sse_t _a)
+ {
+ const simd128_sse_t wwww = simd_swiz_wwww(_a);
+ const float result = _mm_cvtss_f32(wwww);
+
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_ld(const void* _ptr)
+ {
+ return _mm_load_ps(reinterpret_cast<const float*>(_ptr) );
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE void simd_st(void* _ptr, simd128_sse_t _a)
+ {
+ _mm_store_ps(reinterpret_cast<float*>(_ptr), _a);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE void simd_stx(void* _ptr, simd128_sse_t _a)
+ {
+ _mm_store_ss(reinterpret_cast<float*>(_ptr), _a);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE void simd_stream(void* _ptr, simd128_sse_t _a)
+ {
+ _mm_stream_ps(reinterpret_cast<float*>(_ptr), _a);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_ld(float _x, float _y, float _z, float _w)
+ {
+ return _mm_set_ps(_w, _z, _y, _x);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_ild(uint32_t _x, uint32_t _y, uint32_t _z, uint32_t _w)
+ {
+ const __m128i set = _mm_set_epi32(_w, _z, _y, _x);
+ const simd128_sse_t result = _mm_castsi128_ps(set);
+
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_splat(const void* _ptr)
+ {
+ const simd128_sse_t x___ = _mm_load_ss(reinterpret_cast<const float*>(_ptr) );
+ const simd128_sse_t result = simd_swiz_xxxx(x___);
+
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_splat(float _a)
+ {
+ return _mm_set1_ps(_a);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_isplat(uint32_t _a)
+ {
+ const __m128i splat = _mm_set1_epi32(_a);
+ const simd128_sse_t result = _mm_castsi128_ps(splat);
+
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_zero()
+ {
+ return _mm_setzero_ps();
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_itof(simd128_sse_t _a)
+ {
+ const __m128i itof = _mm_castps_si128(_a);
+ const simd128_sse_t result = _mm_cvtepi32_ps(itof);
+
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_ftoi(simd128_sse_t _a)
+ {
+ const __m128i ftoi = _mm_cvtps_epi32(_a);
+ const simd128_sse_t result = _mm_castsi128_ps(ftoi);
+
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_round(simd128_sse_t _a)
+ {
+#if defined(__SSE4_1__)
+ return _mm_round_ps(_a, _MM_FROUND_NINT);
+#else
+ const __m128i round = _mm_cvtps_epi32(_a);
+ const simd128_sse_t result = _mm_cvtepi32_ps(round);
+
+ return result;
+#endif // defined(__SSE4_1__)
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_add(simd128_sse_t _a, simd128_sse_t _b)
+ {
+ return _mm_add_ps(_a, _b);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_sub(simd128_sse_t _a, simd128_sse_t _b)
+ {
+ return _mm_sub_ps(_a, _b);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_mul(simd128_sse_t _a, simd128_sse_t _b)
+ {
+ return _mm_mul_ps(_a, _b);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_div(simd128_sse_t _a, simd128_sse_t _b)
+ {
+ return _mm_div_ps(_a, _b);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_rcp_est(simd128_sse_t _a)
+ {
+ return _mm_rcp_ps(_a);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_sqrt(simd128_sse_t _a)
+ {
+ return _mm_sqrt_ps(_a);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_rsqrt_est(simd128_sse_t _a)
+ {
+ return _mm_rsqrt_ps(_a);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_dot3(simd128_sse_t _a, simd128_sse_t _b)
+ {
+#if defined(__SSE4_1__)
+ return _mm_dp_ps(_a, _b, 0x77);
+#else
+ return simd_dot3_ni(_a, _b);
+#endif // defined(__SSE4_1__)
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_dot(simd128_sse_t _a, simd128_sse_t _b)
+ {
+#if defined(__SSE4_1__)
+ return _mm_dp_ps(_a, _b, 0xFF);
+#else
+ return simd_dot_ni(_a, _b);
+#endif // defined(__SSE4_1__)
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_cmpeq(simd128_sse_t _a, simd128_sse_t _b)
+ {
+ return _mm_cmpeq_ps(_a, _b);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_cmplt(simd128_sse_t _a, simd128_sse_t _b)
+ {
+ return _mm_cmplt_ps(_a, _b);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_cmple(simd128_sse_t _a, simd128_sse_t _b)
+ {
+ return _mm_cmple_ps(_a, _b);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_cmpgt(simd128_sse_t _a, simd128_sse_t _b)
+ {
+ return _mm_cmpgt_ps(_a, _b);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_cmpge(simd128_sse_t _a, simd128_sse_t _b)
+ {
+ return _mm_cmpge_ps(_a, _b);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_min(simd128_sse_t _a, simd128_sse_t _b)
+ {
+ return _mm_min_ps(_a, _b);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_max(simd128_sse_t _a, simd128_sse_t _b)
+ {
+ return _mm_max_ps(_a, _b);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_and(simd128_sse_t _a, simd128_sse_t _b)
+ {
+ return _mm_and_ps(_a, _b);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_andc(simd128_sse_t _a, simd128_sse_t _b)
+ {
+ return _mm_andnot_ps(_b, _a);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_or(simd128_sse_t _a, simd128_sse_t _b)
+ {
+ return _mm_or_ps(_a, _b);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_xor(simd128_sse_t _a, simd128_sse_t _b)
+ {
+ return _mm_xor_ps(_a, _b);
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_sll(simd128_sse_t _a, int _count)
+ {
+ const __m128i a = _mm_castps_si128(_a);
+ const __m128i shift = _mm_slli_epi32(a, _count);
+ const simd128_sse_t result = _mm_castsi128_ps(shift);
+
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_srl(simd128_sse_t _a, int _count)
+ {
+ const __m128i a = _mm_castps_si128(_a);
+ const __m128i shift = _mm_srli_epi32(a, _count);
+ const simd128_sse_t result = _mm_castsi128_ps(shift);
+
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_sra(simd128_sse_t _a, int _count)
+ {
+ const __m128i a = _mm_castps_si128(_a);
+ const __m128i shift = _mm_srai_epi32(a, _count);
+ const simd128_sse_t result = _mm_castsi128_ps(shift);
+
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_icmpeq(simd128_sse_t _a, simd128_sse_t _b)
+ {
+ const __m128i tmp0 = _mm_castps_si128(_a);
+ const __m128i tmp1 = _mm_castps_si128(_b);
+ const __m128i tmp2 = _mm_cmpeq_epi32(tmp0, tmp1);
+ const simd128_sse_t result = _mm_castsi128_ps(tmp2);
+
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_icmplt(simd128_sse_t _a, simd128_sse_t _b)
+ {
+ const __m128i tmp0 = _mm_castps_si128(_a);
+ const __m128i tmp1 = _mm_castps_si128(_b);
+ const __m128i tmp2 = _mm_cmplt_epi32(tmp0, tmp1);
+ const simd128_sse_t result = _mm_castsi128_ps(tmp2);
+
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_icmpgt(simd128_sse_t _a, simd128_sse_t _b)
+ {
+ const __m128i tmp0 = _mm_castps_si128(_a);
+ const __m128i tmp1 = _mm_castps_si128(_b);
+ const __m128i tmp2 = _mm_cmpgt_epi32(tmp0, tmp1);
+ const simd128_sse_t result = _mm_castsi128_ps(tmp2);
+
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_imin(simd128_sse_t _a, simd128_sse_t _b)
+ {
+#if defined(__SSE4_1__)
+ const __m128i tmp0 = _mm_castps_si128(_a);
+ const __m128i tmp1 = _mm_castps_si128(_b);
+ const __m128i tmp2 = _mm_min_epi32(tmp0, tmp1);
+ const simd128_sse_t result = _mm_castsi128_ps(tmp2);
+
+ return result;
+#else
+ return simd_imin_ni(_a, _b);
+#endif // defined(__SSE4_1__)
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_imax(simd128_sse_t _a, simd128_sse_t _b)
+ {
+#if defined(__SSE4_1__)
+ const __m128i tmp0 = _mm_castps_si128(_a);
+ const __m128i tmp1 = _mm_castps_si128(_b);
+ const __m128i tmp2 = _mm_max_epi32(tmp0, tmp1);
+ const simd128_sse_t result = _mm_castsi128_ps(tmp2);
+
+ return result;
+#else
+ return simd_imax_ni(_a, _b);
+#endif // defined(__SSE4_1__)
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_iadd(simd128_sse_t _a, simd128_sse_t _b)
+ {
+ const __m128i a = _mm_castps_si128(_a);
+ const __m128i b = _mm_castps_si128(_b);
+ const __m128i add = _mm_add_epi32(a, b);
+ const simd128_sse_t result = _mm_castsi128_ps(add);
+
+ return result;
+ }
+
+ template<>
+ BX_SIMD_FORCE_INLINE simd128_sse_t simd_isub(simd128_sse_t _a, simd128_sse_t _b)
+ {
+ const __m128i a = _mm_castps_si128(_a);
+ const __m128i b = _mm_castps_si128(_b);
+ const __m128i sub = _mm_sub_epi32(a, b);
+ const simd128_sse_t result = _mm_castsi128_ps(sub);
+
+ return result;
+ }
+
+ template<>
+ BX_SIMD_INLINE simd128_sse_t simd_shuf_xAzC(simd128_sse_t _a, simd128_sse_t _b)
+ {
+ return simd_shuf_xAzC_ni(_a, _b);
+ }
+
+ template<>
+ BX_SIMD_INLINE simd128_sse_t simd_shuf_yBwD(simd128_sse_t _a, simd128_sse_t _b)
+ {
+ return simd_shuf_yBwD_ni(_a, _b);
+ }
+
+ template<>
+ BX_SIMD_INLINE simd128_sse_t simd_rcp(simd128_sse_t _a)
+ {
+ return simd_rcp_ni(_a);
+ }
+
+ template<>
+ BX_SIMD_INLINE simd128_sse_t simd_orx(simd128_sse_t _a)
+ {
+ return simd_orx_ni(_a);
+ }
+
+ template<>
+ BX_SIMD_INLINE simd128_sse_t simd_orc(simd128_sse_t _a, simd128_sse_t _b)
+ {
+ return simd_orc_ni(_a, _b);
+ }
+
+ template<>
+ BX_SIMD_INLINE simd128_sse_t simd_neg(simd128_sse_t _a)
+ {
+ return simd_neg_ni(_a);
+ }
+
+ template<>
+ BX_SIMD_INLINE simd128_sse_t simd_madd(simd128_sse_t _a, simd128_sse_t _b, simd128_sse_t _c)
+ {
+ return simd_madd_ni(_a, _b, _c);
+ }
+
+ template<>
+ BX_SIMD_INLINE simd128_sse_t simd_nmsub(simd128_sse_t _a, simd128_sse_t _b, simd128_sse_t _c)
+ {
+ return simd_nmsub_ni(_a, _b, _c);
+ }
+
+ template<>
+ BX_SIMD_INLINE simd128_sse_t simd_div_nr(simd128_sse_t _a, simd128_sse_t _b)
+ {
+ return simd_div_nr_ni(_a, _b);
+ }
+
+ template<>
+ BX_SIMD_INLINE simd128_sse_t simd_selb(simd128_sse_t _mask, simd128_sse_t _a, simd128_sse_t _b)
+ {
+ return simd_selb_ni(_mask, _a, _b);
+ }
+
+ template<>
+ BX_SIMD_INLINE simd128_sse_t simd_sels(simd128_sse_t _test, simd128_sse_t _a, simd128_sse_t _b)
+ {
+ return simd_sels_ni(_test, _a, _b);
+ }
+
+ template<>
+ BX_SIMD_INLINE simd128_sse_t simd_not(simd128_sse_t _a)
+ {
+ return simd_not_ni(_a);
+ }
+
+ template<>
+ BX_SIMD_INLINE simd128_sse_t simd_abs(simd128_sse_t _a)
+ {
+ return simd_abs_ni(_a);
+ }
+
+ template<>
+ BX_SIMD_INLINE simd128_sse_t simd_clamp(simd128_sse_t _a, simd128_sse_t _min, simd128_sse_t _max)
+ {
+ return simd_clamp_ni(_a, _min, _max);
+ }
+
+ template<>
+ BX_SIMD_INLINE simd128_sse_t simd_lerp(simd128_sse_t _a, simd128_sse_t _b, simd128_sse_t _s)
+ {
+ return simd_lerp_ni(_a, _b, _s);
+ }
+
+ template<>
+ BX_SIMD_INLINE simd128_sse_t simd_rsqrt(simd128_sse_t _a)
+ {
+ return simd_rsqrt_ni(_a);
+ }
+
+ template<>
+ BX_SIMD_INLINE simd128_sse_t simd_rsqrt_nr(simd128_sse_t _a)
+ {
+ return simd_rsqrt_nr_ni(_a);
+ }
+
+ template<>
+ BX_SIMD_INLINE simd128_sse_t simd_rsqrt_carmack(simd128_sse_t _a)
+ {
+ return simd_rsqrt_carmack_ni(_a);
+ }
+
+ template<>
+ BX_SIMD_INLINE simd128_sse_t simd_sqrt_nr(simd128_sse_t _a)
+ {
+ return simd_sqrt_nr_ni(_a);
+ }
+
+ template<>
+ BX_SIMD_INLINE simd128_sse_t simd_log2(simd128_sse_t _a)
+ {
+ return simd_log2_ni(_a);
+ }
+
+ template<>
+ BX_SIMD_INLINE simd128_sse_t simd_exp2(simd128_sse_t _a)
+ {
+ return simd_exp2_ni(_a);
+ }
+
+ template<>
+ BX_SIMD_INLINE simd128_sse_t simd_pow(simd128_sse_t _a, simd128_sse_t _b)
+ {
+ return simd_pow_ni(_a, _b);
+ }
+
+ template<>
+ BX_SIMD_INLINE simd128_sse_t simd_cross3(simd128_sse_t _a, simd128_sse_t _b)
+ {
+ return simd_cross3_ni(_a, _b);
+ }
+
+ template<>
+ BX_SIMD_INLINE simd128_sse_t simd_normalize3(simd128_sse_t _a)
+ {
+ return simd_normalize3_ni(_a);
+ }
+
+ template<>
+ BX_SIMD_INLINE simd128_sse_t simd_ceil(simd128_sse_t _a)
+ {
+ return simd_ceil_ni(_a);
+ }
+
+ template<>
+ BX_SIMD_INLINE simd128_sse_t simd_floor(simd128_sse_t _a)
+ {
+ return simd_floor_ni(_a);
+ }
+
+ typedef simd128_sse_t simd128_t;
+
+} // namespace bx
+
+#endif // BX_SIMD128_SSE_H_HEADER_GUARD
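Two details in the SSE file are worth calling out. The SSE4.1-only paths (_mm_dp_ps, _mm_round_ps, _mm_min_epi32/_mm_max_epi32) fall back to the generic _ni helpers when __SSE4_1__ is not defined, so the file still builds on plain SSE2. And simd_andc keeps the reference semantics _a & ~_b even though the underlying intrinsic _mm_andnot_ps(x, y) computes (~x) & y, hence the swapped operands above. A standalone sketch of just that mapping (illustrative plain SSE, not part of the patch):

	#include <xmmintrin.h>

	static inline __m128 andc(__m128 _a, __m128 _b)
	{
		// _mm_andnot_ps negates its first operand, so passing (_b, _a)
		// yields _a & ~_b, matching the per-lane reference implementation.
		return _mm_andnot_ps(_b, _a);
	}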
diff --git a/3rdparty/bx/include/bx/simd128_swizzle.inl b/3rdparty/bx/include/bx/simd128_swizzle.inl
new file mode 100644
index 00000000000..4185be81b60
--- /dev/null
+++ b/3rdparty/bx/include/bx/simd128_swizzle.inl
@@ -0,0 +1,266 @@
+/*
+ * Copyright 2010-2015 Branimir Karadzic. All rights reserved.
+ * License: http://www.opensource.org/licenses/BSD-2-Clause
+ */
+
+#ifndef BX_SIMD_T_H_HEADER_GUARD
+# error "xmacro file, must be included from simd_*.h"
+#endif // BX_SIMD_T_H_HEADER_GUARD
+
+// included from simd128_*.inl, which define BX_SIMD128_IMPLEMENT_SWIZZLE before including this file
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, x, x, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, x, x, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, x, x, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, x, x, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, x, y, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, x, y, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, x, y, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, x, y, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, x, z, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, x, z, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, x, z, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, x, z, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, x, w, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, x, w, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, x, w, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, x, w, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, y, x, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, y, x, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, y, x, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, y, x, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, y, y, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, y, y, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, y, y, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, y, y, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, y, z, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, y, z, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, y, z, z)
+// BX_SIMD128_IMPLEMENT_SWIZZLE(x, y, z, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, y, w, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, y, w, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, y, w, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, y, w, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, z, x, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, z, x, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, z, x, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, z, x, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, z, y, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, z, y, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, z, y, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, z, y, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, z, z, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, z, z, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, z, z, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, z, z, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, z, w, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, z, w, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, z, w, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, z, w, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, w, x, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, w, x, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, w, x, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, w, x, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, w, y, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, w, y, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, w, y, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, w, y, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, w, z, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, w, z, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, w, z, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, w, z, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, w, w, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, w, w, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, w, w, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(x, w, w, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, x, x, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, x, x, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, x, x, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, x, x, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, x, y, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, x, y, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, x, y, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, x, y, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, x, z, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, x, z, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, x, z, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, x, z, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, x, w, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, x, w, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, x, w, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, x, w, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, y, x, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, y, x, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, y, x, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, y, x, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, y, y, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, y, y, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, y, y, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, y, y, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, y, z, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, y, z, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, y, z, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, y, z, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, y, w, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, y, w, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, y, w, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, y, w, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, z, x, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, z, x, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, z, x, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, z, x, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, z, y, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, z, y, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, z, y, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, z, y, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, z, z, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, z, z, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, z, z, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, z, z, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, z, w, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, z, w, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, z, w, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, z, w, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, w, x, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, w, x, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, w, x, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, w, x, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, w, y, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, w, y, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, w, y, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, w, y, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, w, z, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, w, z, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, w, z, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, w, z, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, w, w, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, w, w, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, w, w, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(y, w, w, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, x, x, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, x, x, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, x, x, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, x, x, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, x, y, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, x, y, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, x, y, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, x, y, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, x, z, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, x, z, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, x, z, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, x, z, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, x, w, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, x, w, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, x, w, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, x, w, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, y, x, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, y, x, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, y, x, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, y, x, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, y, y, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, y, y, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, y, y, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, y, y, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, y, z, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, y, z, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, y, z, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, y, z, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, y, w, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, y, w, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, y, w, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, y, w, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, z, x, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, z, x, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, z, x, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, z, x, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, z, y, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, z, y, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, z, y, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, z, y, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, z, z, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, z, z, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, z, z, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, z, z, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, z, w, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, z, w, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, z, w, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, z, w, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, w, x, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, w, x, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, w, x, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, w, x, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, w, y, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, w, y, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, w, y, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, w, y, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, w, z, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, w, z, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, w, z, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, w, z, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, w, w, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, w, w, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, w, w, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(z, w, w, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, x, x, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, x, x, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, x, x, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, x, x, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, x, y, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, x, y, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, x, y, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, x, y, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, x, z, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, x, z, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, x, z, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, x, z, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, x, w, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, x, w, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, x, w, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, x, w, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, y, x, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, y, x, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, y, x, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, y, x, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, y, y, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, y, y, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, y, y, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, y, y, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, y, z, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, y, z, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, y, z, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, y, z, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, y, w, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, y, w, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, y, w, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, y, w, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, z, x, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, z, x, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, z, x, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, z, x, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, z, y, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, z, y, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, z, y, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, z, y, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, z, z, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, z, z, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, z, z, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, z, z, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, z, w, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, z, w, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, z, w, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, z, w, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, w, x, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, w, x, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, w, x, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, w, x, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, w, y, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, w, y, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, w, y, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, w, y, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, w, z, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, w, z, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, w, z, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, w, z, w)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, w, w, x)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, w, w, y)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, w, w, z)
+BX_SIMD128_IMPLEMENT_SWIZZLE(w, w, w, w)
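simd128_swizzle.inl is an x-macro list: each backend defines BX_SIMD128_IMPLEMENT_SWIZZLE before including it, and all 255 lane permutations except the identity xyzw (commented out above) are generated. As an expansion sketch, under the SSE definition of the macro one instance comes out as (illustrative only):

	template<>
	BX_SIMD_FORCE_INLINE simd128_sse_t simd_swiz_yzxw(simd128_sse_t _a)
	{
		return _mm_shuffle_ps(_a, _a, _MM_SHUFFLE(3, 0, 2, 1) ); // lanes (y, z, x, w)
	}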
diff --git a/3rdparty/bx/include/bx/simd256_avx.inl b/3rdparty/bx/include/bx/simd256_avx.inl
new file mode 100644
index 00000000000..c0f925e4160
--- /dev/null
+++ b/3rdparty/bx/include/bx/simd256_avx.inl
@@ -0,0 +1,9 @@
+/*
+ * Copyright 2010-2016 Branimir Karadzic. All rights reserved.
+ * License: https://github.com/bkaradzic/bx#license-bsd-2-clause
+ */
+
+#ifndef BX_SIMD256_AVX_H_HEADER_GUARD
+#define BX_SIMD256_AVX_H_HEADER_GUARD
+
+#endif // BX_SIMD256_AVX_H_HEADER_GUARD
diff --git a/3rdparty/bx/include/bx/simd256_ref.inl b/3rdparty/bx/include/bx/simd256_ref.inl
new file mode 100644
index 00000000000..84ecd6e5f9f
--- /dev/null
+++ b/3rdparty/bx/include/bx/simd256_ref.inl
@@ -0,0 +1,9 @@
+/*
+ * Copyright 2010-2016 Branimir Karadzic. All rights reserved.
+ * License: https://github.com/bkaradzic/bx#license-bsd-2-clause
+ */
+
+#ifndef BX_SIMD256_REF_H_HEADER_GUARD
+#define BX_SIMD256_REF_H_HEADER_GUARD
+
+#endif // BX_SIMD256_REF_H_HEADER_GUARD
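The simd256_avx.inl and simd256_ref.inl files are placeholders at this point; only the 128-bit types are wired up, and each 128-bit backend ends by typedef-ing its concrete type to simd128_t. That keeps client code backend-neutral, e.g. (sketch, assuming simd_t.h selects the backend and declares the generic templates):

	bx::simd128_t lengthSq3(bx::simd128_t _v)
	{
		// The SSE backend uses _mm_dp_ps(_a, _b, 0x77) on SSE4.1 and simd_dot3_ni otherwise.
		return bx::simd_dot3(_v, _v);
	}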
diff --git a/3rdparty/bx/include/bx/simd_ni.inl b/3rdparty/bx/include/bx/simd_ni.inl
new file mode 100644
index 00000000000..cab10861e5e
--- /dev/null
+++ b/3rdparty/bx/include/bx/simd_ni.inl
@@ -0,0 +1,558 @@
+/*
+ * Copyright 2010-2016 Branimir Karadzic. All rights reserved.
+ * License: https://github.com/bkaradzic/bx#license-bsd-2-clause
+ */
+
+#ifndef BX_SIMD_NI_H_HEADER_GUARD
+#define BX_SIMD_NI_H_HEADER_GUARD
+
+namespace bx
+{
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_shuf_xAzC_ni(Ty _a, Ty _b)
+ {
+ const Ty xAyB = simd_shuf_xAyB(_a, _b);
+ const Ty zCwD = simd_shuf_zCwD(_a, _b);
+ const Ty result = simd_shuf_xyAB(xAyB, zCwD);
+
+ return result;
+ }
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_shuf_yBwD_ni(Ty _a, Ty _b)
+ {
+ const Ty xAyB = simd_shuf_xAyB(_a, _b);
+ const Ty zCwD = simd_shuf_zCwD(_a, _b);
+ const Ty result = simd_shuf_zwCD(xAyB, zCwD);
+
+ return result;
+ }
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_madd_ni(Ty _a, Ty _b, Ty _c)
+ {
+ const Ty mul = simd_mul(_a, _b);
+ const Ty result = simd_add(mul, _c);
+
+ return result;
+ }
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_nmsub_ni(Ty _a, Ty _b, Ty _c)
+ {
+ const Ty mul = simd_mul(_a, _b);
+ const Ty result = simd_sub(_c, mul);
+
+ return result;
+ }
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_div_nr_ni(Ty _a, Ty _b)
+ {
+ const Ty oneish = simd_isplat<Ty>(0x3f800001);
+ const Ty est = simd_rcp_est(_b);
+ const Ty iter0 = simd_mul(_a, est);
+ const Ty tmp1 = simd_nmsub(_b, est, oneish);
+ const Ty result = simd_madd(tmp1, iter0, iter0);
+
+ return result;
+ }
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_rcp_ni(Ty _a)
+ {
+ const Ty one = simd_splat<Ty>(1.0f);
+ const Ty result = simd_div(one, _a);
+
+ return result;
+ }
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_orx_ni(Ty _a)
+ {
+ const Ty zwxy = simd_swiz_zwxy(_a);
+ const Ty tmp0 = simd_or(_a, zwxy);
+ const Ty tmp1 = simd_swiz_yyyy(_a);
+ const Ty tmp2 = simd_or(tmp0, tmp1);
+ const Ty mf000 = simd_ild<Ty>(UINT32_MAX, 0, 0, 0);
+ const Ty result = simd_and(tmp2, mf000);
+
+ return result;
+ }
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_orc_ni(Ty _a, Ty _b)
+ {
+ const Ty aorb = simd_or(_a, _b);
+ const Ty mffff = simd_isplat<Ty>(UINT32_MAX);
+ const Ty result = simd_xor(aorb, mffff);
+
+ return result;
+ }
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_neg_ni(Ty _a)
+ {
+ const Ty zero = simd_zero<Ty>();
+ const Ty result = simd_sub(zero, _a);
+
+ return result;
+ }
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_selb_ni(Ty _mask, Ty _a, Ty _b)
+ {
+ const Ty sel_a = simd_and(_a, _mask);
+ const Ty sel_b = simd_andc(_b, _mask);
+ const Ty result = simd_or(sel_a, sel_b);
+
+ return result;
+ }
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_sels_ni(Ty _test, Ty _a, Ty _b)
+ {
+ const Ty mask = simd_sra(_test, 31);
+ const Ty result = simd_selb(mask, _a, _b);
+
+ return result;
+ }
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_not_ni(Ty _a)
+ {
+ const Ty mffff = simd_isplat<Ty>(UINT32_MAX);
+ const Ty result = simd_xor(_a, mffff);
+
+ return result;
+ }
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_min_ni(Ty _a, Ty _b)
+ {
+ const Ty mask = simd_cmplt(_a, _b);
+ const Ty result = simd_selb(mask, _a, _b);
+
+ return result;
+ }
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_max_ni(Ty _a, Ty _b)
+ {
+ const Ty mask = simd_cmpgt(_a, _b);
+ const Ty result = simd_selb(mask, _a, _b);
+
+ return result;
+ }
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_abs_ni(Ty _a)
+ {
+ const Ty a_neg = simd_neg(_a);
+ const Ty result = simd_max(a_neg, _a);
+
+ return result;
+ }
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_imin_ni(Ty _a, Ty _b)
+ {
+ const Ty mask = simd_icmplt(_a, _b);
+ const Ty result = simd_selb(mask, _a, _b);
+
+ return result;
+ }
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_imax_ni(Ty _a, Ty _b)
+ {
+ const Ty mask = simd_icmpgt(_a, _b);
+ const Ty result = simd_selb(mask, _a, _b);
+
+ return result;
+ }
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_clamp_ni(Ty _a, Ty _min, Ty _max)
+ {
+ const Ty tmp = simd_min(_a, _max);
+ const Ty result = simd_max(tmp, _min);
+
+ return result;
+ }
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_lerp_ni(Ty _a, Ty _b, Ty _s)
+ {
+ const Ty ba = simd_sub(_b, _a);
+ const Ty result = simd_madd(_s, ba, _a);
+
+ return result;
+ }
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_sqrt_nr_ni(Ty _a)
+ {
+ const Ty half = simd_splat<Ty>(0.5f);
+ const Ty one = simd_splat<Ty>(1.0f);
+ const Ty tmp0 = simd_rsqrt_est(_a);
+ const Ty tmp1 = simd_mul(tmp0, _a);
+ const Ty tmp2 = simd_mul(tmp1, half);
+ const Ty tmp3 = simd_nmsub(tmp0, tmp1, one);
+ const Ty result = simd_madd(tmp3, tmp2, tmp1);
+
+ return result;
+ }
+
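+	// Square root by the Babylonian (Heron) method: repeatedly average the current
+	// guess with _a divided by the guess.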
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_sqrt_nr1_ni(Ty _a)
+ {
+ const Ty half = simd_splat<Ty>(0.5f);
+
+ Ty result = _a;
+ for (uint32_t ii = 0; ii < 11; ++ii)
+ {
+ const Ty tmp1 = simd_div(_a, result);
+ const Ty tmp2 = simd_add(tmp1, result);
+ result = simd_mul(tmp2, half);
+ }
+
+ return result;
+ }
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_rsqrt_ni(Ty _a)
+ {
+ const Ty one = simd_splat<Ty>(1.0f);
+ const Ty sqrt = simd_sqrt(_a);
+ const Ty result = simd_div(one, sqrt);
+
+ return result;
+ }
+
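+	// Reciprocal square root: hardware estimate refined with one Newton-Raphson
+	// step, y' = 0.5*y*(3 - a*y*y).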
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_rsqrt_nr_ni(Ty _a)
+ {
+ const Ty rsqrt = simd_rsqrt_est(_a);
+ const Ty iter0 = simd_mul(_a, rsqrt);
+ const Ty iter1 = simd_mul(iter0, rsqrt);
+ const Ty half = simd_splat<Ty>(0.5f);
+ const Ty half_rsqrt = simd_mul(half, rsqrt);
+ const Ty three = simd_splat<Ty>(3.0f);
+ const Ty three_sub_iter1 = simd_sub(three, iter1);
+ const Ty result = simd_mul(half_rsqrt, three_sub_iter1);
+
+ return result;
+ }
+
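+	// Fast inverse square root in the Quake III style: the integer magic
+	// 0x5f3759df minus (bits >> 1) gives an initial estimate, followed by one
+	// Newton-Raphson step y*(1.5 - 0.5*a*y*y).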
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_rsqrt_carmack_ni(Ty _a)
+ {
+ const Ty half = simd_splat<Ty>(0.5f);
+ const Ty ah = simd_mul(half, _a);
+ const Ty ashift = simd_sra(_a, 1);
+ const Ty magic = simd_isplat<Ty>(0x5f3759df);
+ const Ty msuba = simd_isub(magic, ashift);
+ const Ty msubasq = simd_mul(msuba, msuba);
+ const Ty tmp0 = simd_splat<Ty>(1.5f);
+ const Ty tmp1 = simd_mul(ah, msubasq);
+ const Ty tmp2 = simd_sub(tmp0, tmp1);
+ const Ty result = simd_mul(msuba, tmp2);
+
+ return result;
+ }
+
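+	// Polynomial helpers for the log2/exp2 approximations below: simd_polyN
+	// evaluates a degree-N polynomial in _a via Horner's scheme (nested madd),
+	// with coefficients passed lowest order first.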
+ namespace simd_logexp_detail
+ {
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_poly1(Ty _a, float _b, float _c)
+ {
+ const Ty bbbb = simd_splat<Ty>(_b);
+ const Ty cccc = simd_splat<Ty>(_c);
+ const Ty result = simd_madd(cccc, _a, bbbb);
+
+ return result;
+ }
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_poly2(Ty _a, float _b, float _c, float _d)
+ {
+ const Ty bbbb = simd_splat<Ty>(_b);
+ const Ty poly = simd_poly1(_a, _c, _d);
+ const Ty result = simd_madd(poly, _a, bbbb);
+
+ return result;
+ }
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_poly3(Ty _a, float _b, float _c, float _d, float _e)
+ {
+ const Ty bbbb = simd_splat<Ty>(_b);
+ const Ty poly = simd_poly2(_a, _c, _d, _e);
+ const Ty result = simd_madd(poly, _a, bbbb);
+
+ return result;
+ }
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_poly4(Ty _a, float _b, float _c, float _d, float _e, float _f)
+ {
+ const Ty bbbb = simd_splat<Ty>(_b);
+ const Ty poly = simd_poly3(_a, _c, _d, _e, _f);
+ const Ty result = simd_madd(poly, _a, bbbb);
+
+ return result;
+ }
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_poly5(Ty _a, float _b, float _c, float _d, float _e, float _f, float _g)
+ {
+ const Ty bbbb = simd_splat<Ty>(_b);
+ const Ty poly = simd_poly4(_a, _c, _d, _e, _f, _g);
+ const Ty result = simd_madd(poly, _a, bbbb);
+
+ return result;
+ }
+
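+		// Polynomial P(m) with (m-1)*P(m) ~= log2(m) for mantissa m in [1, 2);
+		// the #if branches select progressively cheaper, less accurate fits.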
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_logpoly(Ty _a)
+ {
+#if 1
+ const Ty result = simd_poly5(_a
+ , 3.11578814719469302614f, -3.32419399085241980044f
+ , 2.59883907202499966007f, -1.23152682416275988241f
+ , 0.318212422185251071475f, -0.0344359067839062357313f
+ );
+#elif 0
+ const Ty result = simd_poly4(_a
+ , 2.8882704548164776201f, -2.52074962577807006663f
+ , 1.48116647521213171641f, -0.465725644288844778798f
+ , 0.0596515482674574969533f
+ );
+#elif 0
+ const Ty result = simd_poly3(_a
+ , 2.61761038894603480148f, -1.75647175389045657003f
+ , 0.688243882994381274313f, -0.107254423828329604454f
+ );
+#else
+ const Ty result = simd_poly2(_a
+ , 2.28330284476918490682f, -1.04913055217340124191f
+ , 0.204446009836232697516f
+ );
+#endif
+
+ return result;
+ }
+
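+		// Polynomial approximation of 2^f for the reduced fractional argument used
+		// by simd_exp2_ni; the #if branches trade accuracy for fewer terms.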
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_exppoly(Ty _a)
+ {
+#if 1
+ const Ty result = simd_poly5(_a
+ , 9.9999994e-1f, 6.9315308e-1f
+ , 2.4015361e-1f, 5.5826318e-2f
+ , 8.9893397e-3f, 1.8775767e-3f
+ );
+#elif 0
+ const Ty result = simd_poly4(_a
+ , 1.0000026f, 6.9300383e-1f
+ , 2.4144275e-1f, 5.2011464e-2f
+ , 1.3534167e-2f
+ );
+#elif 0
+ const Ty result = simd_poly3(_a
+ , 9.9992520e-1f, 6.9583356e-1f
+ , 2.2606716e-1f, 7.8024521e-2f
+ );
+#else
+ const Ty result = simd_poly2(_a
+ , 1.0017247f, 6.5763628e-1f
+ , 3.3718944e-1f
+ );
+#endif // 0
+
+ return result;
+ }
+	} // namespace simd_logexp_detail
+
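+	// log2 via IEEE-754 decomposition: extract the (127-biased) exponent e and the
+	// mantissa m in [1, 2), then log2(a) = e + (m - 1)*P(m) with P from simd_logpoly.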
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_log2_ni(Ty _a)
+ {
+ const Ty expmask = simd_isplat<Ty>(0x7f800000);
+ const Ty mantmask = simd_isplat<Ty>(0x007fffff);
+ const Ty one = simd_splat<Ty>(1.0f);
+
+ const Ty c127 = simd_isplat<Ty>(127);
+ const Ty aexp = simd_and(_a, expmask);
+ const Ty aexpsr = simd_srl(aexp, 23);
+ const Ty tmp0 = simd_isub(aexpsr, c127);
+ const Ty exp = simd_itof(tmp0);
+
+ const Ty amask = simd_and(_a, mantmask);
+ const Ty mant = simd_or(amask, one);
+
+ const Ty poly = simd_logexp_detail::simd_logpoly(mant);
+
+ const Ty mandiff = simd_sub(mant, one);
+ const Ty result = simd_madd(poly, mandiff, exp);
+
+ return result;
+ }
+
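+	// exp2 via IEEE-754 composition: clamp the argument (note that `min` holds the
+	// upper bound 129.0f and `max` the lower bound -126.99999f), split it into
+	// integer and fractional parts, build 2^i by shifting i+127 into the exponent
+	// field, and scale by the polynomial approximation of 2^f.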
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_exp2_ni(Ty _a)
+ {
+ const Ty min = simd_splat<Ty>( 129.0f);
+ const Ty max = simd_splat<Ty>(-126.99999f);
+ const Ty tmp0 = simd_min(_a, min);
+ const Ty aaaa = simd_max(tmp0, max);
+
+ const Ty half = simd_splat<Ty>(0.5f);
+ const Ty tmp2 = simd_sub(aaaa, half);
+ const Ty ipart = simd_ftoi(tmp2);
+ const Ty iround = simd_itof(ipart);
+ const Ty fpart = simd_sub(aaaa, iround);
+
+ const Ty c127 = simd_isplat<Ty>(127);
+ const Ty tmp5 = simd_iadd(ipart, c127);
+ const Ty expipart = simd_sll(tmp5, 23);
+
+ const Ty expfpart = simd_logexp_detail::simd_exppoly(fpart);
+
+ const Ty result = simd_mul(expipart, expfpart);
+
+ return result;
+ }
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_pow_ni(Ty _a, Ty _b)
+ {
+ const Ty alog2 = simd_log2(_a);
+ const Ty alog2b = simd_mul(alog2, _b);
+ const Ty result = simd_exp2(alog2b);
+
+ return result;
+ }
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_dot3_ni(Ty _a, Ty _b)
+ {
+ const Ty xyzw = simd_mul(_a, _b);
+ const Ty xxxx = simd_swiz_xxxx(xyzw);
+ const Ty yyyy = simd_swiz_yyyy(xyzw);
+ const Ty zzzz = simd_swiz_zzzz(xyzw);
+ const Ty tmp1 = simd_add(xxxx, yyyy);
+ const Ty result = simd_add(zzzz, tmp1);
+ return result;
+ }
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_cross3_ni(Ty _a, Ty _b)
+ {
+ // a.yzx * b.zxy - a.zxy * b.yzx == (a * b.yzx - a.yzx * b).yzx
+#if 0
+ const Ty a_yzxw = simd_swiz_yzxw(_a);
+ const Ty a_zxyw = simd_swiz_zxyw(_a);
+ const Ty b_zxyw = simd_swiz_zxyw(_b);
+ const Ty b_yzxw = simd_swiz_yzxw(_b);
+ const Ty tmp = simd_mul(a_yzxw, b_zxyw);
+ const Ty result = simd_nmsub(a_zxyw, b_yzxw, tmp);
+#else
+ const Ty a_yzxw = simd_swiz_yzxw(_a);
+ const Ty b_yzxw = simd_swiz_yzxw(_b);
+ const Ty tmp0 = simd_mul(_a, b_yzxw);
+ const Ty tmp1 = simd_nmsub(a_yzxw, _b, tmp0);
+ const Ty result = simd_swiz_yzxw(tmp1);
+#endif
+
+ return result;
+ }
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_normalize3_ni(Ty _a)
+ {
+ const Ty dot3 = simd_dot3(_a, _a);
+ const Ty invSqrt = simd_rsqrt(dot3);
+ const Ty result = simd_mul(_a, invSqrt);
+
+ return result;
+ }
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_dot_ni(Ty _a, Ty _b)
+ {
+ const Ty xyzw = simd_mul(_a, _b);
+ const Ty yzwx = simd_swiz_yzwx(xyzw);
+ const Ty tmp0 = simd_add(xyzw, yzwx);
+ const Ty zwxy = simd_swiz_zwxy(tmp0);
+ const Ty result = simd_add(tmp0, zwxy);
+
+ return result;
+ }
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_ceil_ni(Ty _a)
+ {
+ const Ty tmp0 = simd_ftoi(_a);
+ const Ty tmp1 = simd_itof(tmp0);
+ const Ty mask = simd_cmplt(tmp1, _a);
+ const Ty one = simd_splat<Ty>(1.0f);
+ const Ty tmp2 = simd_and(one, mask);
+ const Ty result = simd_add(tmp1, tmp2);
+
+ return result;
+ }
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_floor_ni(Ty _a)
+ {
+ const Ty tmp0 = simd_ftoi(_a);
+ const Ty tmp1 = simd_itof(tmp0);
+ const Ty mask = simd_cmpgt(tmp1, _a);
+ const Ty one = simd_splat<Ty>(1.0f);
+ const Ty tmp2 = simd_and(one, mask);
+ const Ty result = simd_sub(tmp1, tmp2);
+
+ return result;
+ }
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_round_ni(Ty _a)
+ {
+ const Ty tmp = simd_ftoi(_a);
+ const Ty result = simd_itof(tmp);
+
+ return result;
+ }
+
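+	// True if any lane of _a has its sign bit set: smear the sign bits across each
+	// lane, OR all lanes into x, and test the stored x element.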
+ template<typename Ty>
+ BX_SIMD_INLINE bool simd_test_any_ni(Ty _a)
+ {
+ const Ty mask = simd_sra(_a, 31);
+ const Ty zwxy = simd_swiz_zwxy(mask);
+ const Ty tmp0 = simd_or(mask, zwxy);
+ const Ty tmp1 = simd_swiz_yyyy(tmp0);
+ const Ty tmp2 = simd_or(tmp0, tmp1);
+ int res;
+ simd_stx(&res, tmp2);
+ return 0 != res;
+ }
+
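+	// True only if every lane of _a has its sign bit set: tag the lanes with the
+	// weights 1, 2, 4, 8, OR them into x, and check for 0xf.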
+ template<typename Ty>
+ BX_SIMD_INLINE bool simd_test_all_ni(Ty _a)
+ {
+ const Ty bits = simd_sra(_a, 31);
+ const Ty m1248 = simd_ild<Ty>(1, 2, 4, 8);
+ const Ty mask = simd_and(bits, m1248);
+ const Ty zwxy = simd_swiz_zwxy(mask);
+ const Ty tmp0 = simd_or(mask, zwxy);
+ const Ty tmp1 = simd_swiz_yyyy(tmp0);
+ const Ty tmp2 = simd_or(tmp0, tmp1);
+ int res;
+ simd_stx(&res, tmp2);
+ return 0xf == res;
+ }
+
+} // namespace bx
+
+#endif // BX_SIMD_NI_H_HEADER_GUARD
diff --git a/3rdparty/bx/include/bx/simd_t.h b/3rdparty/bx/include/bx/simd_t.h
new file mode 100644
index 00000000000..a2884f6e734
--- /dev/null
+++ b/3rdparty/bx/include/bx/simd_t.h
@@ -0,0 +1,438 @@
+/*
+ * Copyright 2010-2016 Branimir Karadzic. All rights reserved.
+ * License: https://github.com/bkaradzic/bx#license-bsd-2-clause
+ */
+
+#ifndef BX_SIMD_T_H_HEADER_GUARD
+#define BX_SIMD_T_H_HEADER_GUARD
+
+#include "bx.h"
+
+#define BX_SIMD_FORCE_INLINE BX_FORCE_INLINE
+#define BX_SIMD_INLINE inline
+
+#define BX_SIMD_AVX 0
+#define BX_SIMD_LANGEXT 0
+#define BX_SIMD_NEON 0
+#define BX_SIMD_SSE 0
+
+#if defined(__AVX__) || defined(__AVX2__)
+# include <immintrin.h>
+# undef BX_SIMD_AVX
+# define BX_SIMD_AVX 1
+#endif //
+
+#if defined(__SSE2__) || (BX_COMPILER_MSVC && (BX_ARCH_64BIT || _M_IX86_FP >= 2) )
+# include <emmintrin.h> // __m128i
+# if defined(__SSE4_1__)
+# include <smmintrin.h>
+# endif // defined(__SSE4_1__)
+# include <xmmintrin.h> // __m128
+# undef BX_SIMD_SSE
+# define BX_SIMD_SSE 1
+#elif defined(__ARM_NEON__) && !BX_COMPILER_CLANG
+# include <arm_neon.h>
+# undef BX_SIMD_NEON
+# define BX_SIMD_NEON 1
+#elif BX_COMPILER_CLANG \
+ && !BX_PLATFORM_EMSCRIPTEN \
+ && !BX_PLATFORM_IOS \
+ && BX_CLANG_HAS_EXTENSION(attribute_ext_vector_type)
+# include <math.h>
+# undef BX_SIMD_LANGEXT
+# define BX_SIMD_LANGEXT 1
+#endif //
+
+namespace bx
+{
+#define ELEMx 0
+#define ELEMy 1
+#define ELEMz 2
+#define ELEMw 3
+#define BX_SIMD128_IMPLEMENT_SWIZZLE(_x, _y, _z, _w) \
+ template<typename Ty> \
+ BX_SIMD_FORCE_INLINE Ty simd_swiz_##_x##_y##_z##_w(Ty _a);
+#include "simd128_swizzle.inl"
+
+#undef BX_SIMD128_IMPLEMENT_SWIZZLE
+#undef ELEMw
+#undef ELEMz
+#undef ELEMy
+#undef ELEMx
+
+#define BX_SIMD128_IMPLEMENT_TEST(_xyzw) \
+ template<typename Ty> \
+ BX_SIMD_FORCE_INLINE bool simd_test_any_##_xyzw(Ty _test); \
+ \
+ template<typename Ty> \
+ BX_SIMD_FORCE_INLINE bool simd_test_all_##_xyzw(Ty _test)
+
+BX_SIMD128_IMPLEMENT_TEST(x );
+BX_SIMD128_IMPLEMENT_TEST(y );
+BX_SIMD128_IMPLEMENT_TEST(xy );
+BX_SIMD128_IMPLEMENT_TEST(z );
+BX_SIMD128_IMPLEMENT_TEST(xz );
+BX_SIMD128_IMPLEMENT_TEST(yz );
+BX_SIMD128_IMPLEMENT_TEST(xyz );
+BX_SIMD128_IMPLEMENT_TEST(w );
+BX_SIMD128_IMPLEMENT_TEST(xw );
+BX_SIMD128_IMPLEMENT_TEST(yw );
+BX_SIMD128_IMPLEMENT_TEST(xyw );
+BX_SIMD128_IMPLEMENT_TEST(zw );
+BX_SIMD128_IMPLEMENT_TEST(xzw );
+BX_SIMD128_IMPLEMENT_TEST(yzw );
+BX_SIMD128_IMPLEMENT_TEST(xyzw);
+#undef BX_SIMD128_IMPLEMENT_TEST
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_shuf_xyAB(Ty _a, Ty _b);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_shuf_ABxy(Ty _a, Ty _b);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_shuf_CDzw(Ty _a, Ty _b);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_shuf_zwCD(Ty _a, Ty _b);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_shuf_xAyB(Ty _a, Ty _b);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_shuf_yBxA(Ty _a, Ty _b);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_shuf_zCwD(Ty _a, Ty _b);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_shuf_CzDw(Ty _a, Ty _b);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE float simd_x(Ty _a);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE float simd_y(Ty _a);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE float simd_z(Ty _a);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE float simd_w(Ty _a);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_ld(const void* _ptr);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE void simd_st(void* _ptr, Ty _a);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE void simd_stx(void* _ptr, Ty _a);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE void simd_stream(void* _ptr, Ty _a);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_ld(float _x, float _y, float _z, float _w);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_ild(uint32_t _x, uint32_t _y, uint32_t _z, uint32_t _w);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_splat(const void* _ptr);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_splat(float _a);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_isplat(uint32_t _a);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_zero();
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_itof(Ty _a);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_ftoi(Ty _a);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_round(Ty _a);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_add(Ty _a, Ty _b);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_sub(Ty _a, Ty _b);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_mul(Ty _a, Ty _b);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_div(Ty _a, Ty _b);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_rcp_est(Ty _a);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_sqrt(Ty _a);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_rsqrt_est(Ty _a);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_dot3(Ty _a, Ty _b);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_dot(Ty _a, Ty _b);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_cmpeq(Ty _a, Ty _b);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_cmplt(Ty _a, Ty _b);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_cmple(Ty _a, Ty _b);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_cmpgt(Ty _a, Ty _b);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_cmpge(Ty _a, Ty _b);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_min(Ty _a, Ty _b);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_max(Ty _a, Ty _b);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_and(Ty _a, Ty _b);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_andc(Ty _a, Ty _b);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_or(Ty _a, Ty _b);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_xor(Ty _a, Ty _b);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_sll(Ty _a, int _count);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_srl(Ty _a, int _count);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_sra(Ty _a, int _count);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_icmpeq(Ty _a, Ty _b);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_icmplt(Ty _a, Ty _b);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_icmpgt(Ty _a, Ty _b);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_imin(Ty _a, Ty _b);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_imax(Ty _a, Ty _b);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_iadd(Ty _a, Ty _b);
+
+ template<typename Ty>
+ BX_SIMD_FORCE_INLINE Ty simd_isub(Ty _a, Ty _b);
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_shuf_xAzC(Ty _a, Ty _b);
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_shuf_yBwD(Ty _a, Ty _b);
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_rcp(Ty _a);
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_orx(Ty _a);
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_orc(Ty _a, Ty _b);
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_neg(Ty _a);
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_madd(Ty _a, Ty _b, Ty _c);
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_nmsub(Ty _a, Ty _b, Ty _c);
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_div_nr(Ty _a, Ty _b);
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_selb(Ty _mask, Ty _a, Ty _b);
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_sels(Ty _test, Ty _a, Ty _b);
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_not(Ty _a);
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_abs(Ty _a);
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_clamp(Ty _a, Ty _min, Ty _max);
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_lerp(Ty _a, Ty _b, Ty _s);
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_rsqrt(Ty _a);
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_rsqrt_nr(Ty _a);
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_rsqrt_carmack(Ty _a);
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_sqrt_nr(Ty _a);
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_log2(Ty _a);
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_exp2(Ty _a);
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_pow(Ty _a, Ty _b);
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_cross3(Ty _a, Ty _b);
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_normalize3(Ty _a);
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_ceil(Ty _a);
+
+ template<typename Ty>
+ BX_SIMD_INLINE Ty simd_floor(Ty _a);
+
+#if BX_SIMD_AVX
+ typedef __m256 simd256_avx_t;
+#endif // BX_SIMD_AVX
+
+#if BX_SIMD_LANGEXT
+ union simd128_langext_t
+ {
+ float __attribute__((vector_size(16))) vf;
+ int32_t __attribute__((vector_size(16))) vi;
+ uint32_t __attribute__((vector_size(16))) vu;
+ float fxyzw[4];
+ int32_t ixyzw[4];
+ uint32_t uxyzw[4];
+
+ };
+#endif // BX_SIMD_LANGEXT
+
+#if BX_SIMD_NEON
+ typedef float32x4_t simd128_neon_t;
+#endif // BX_SIMD_NEON
+
+#if BX_SIMD_SSE
+ typedef __m128 simd128_sse_t;
+#endif // BX_SIMD_SSE
+
+ union simd128_ref_t
+ {
+ float fxyzw[4];
+ int32_t ixyzw[4];
+ uint32_t uxyzw[4];
+
+ };
+
+} // namespace bx
+
+#if BX_SIMD_AVX
+# include "simd256_avx.inl"
+#endif // BX_SIMD_AVX
+
+#if BX_SIMD_LANGEXT
+# include "simd128_langext.inl"
+#endif // BX_SIMD_LANGEXT
+
+#if BX_SIMD_NEON
+# include "simd128_neon.inl"
+#endif // BX_SIMD_NEON
+
+#if BX_SIMD_SSE
+# include "simd128_sse.inl"
+#endif // BX_SIMD_SSE
+
+#include "simd128_ref.inl"
+#include "simd256_ref.inl"
+
+namespace bx
+{
+#if !( BX_SIMD_AVX \
+ || BX_SIMD_LANGEXT \
+ || BX_SIMD_NEON \
+ || BX_SIMD_SSE \
+ )
+# ifndef BX_SIMD_WARN_REFERENCE_IMPL
+# define BX_SIMD_WARN_REFERENCE_IMPL 0
+# endif // BX_SIMD_WARN_REFERENCE_IMPL
+
+# if BX_SIMD_WARN_REFERENCE_IMPL
+# pragma message("************************************\nUsing SIMD reference implementation!\n************************************")
+# endif // BX_SIMD_WARN_REFERENCE_IMPL
+
+ typedef simd128_ref_t simd128_t;
+#endif //
+
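+	// Non-template convenience overloads that forward to the simd128_t
+	// specializations declared above.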
+ BX_SIMD_FORCE_INLINE simd128_t simd_zero()
+ {
+ return simd_zero<simd128_t>();
+ }
+
+ BX_SIMD_FORCE_INLINE simd128_t simd_ld(const void* _ptr)
+ {
+ return simd_ld<simd128_t>(_ptr);
+ }
+
+ BX_SIMD_FORCE_INLINE simd128_t simd_ld(float _x, float _y, float _z, float _w)
+ {
+ return simd_ld<simd128_t>(_x, _y, _z, _w);
+ }
+
+ BX_SIMD_FORCE_INLINE simd128_t simd_ild(uint32_t _x, uint32_t _y, uint32_t _z, uint32_t _w)
+ {
+ return simd_ild<simd128_t>(_x, _y, _z, _w);
+ }
+
+ BX_SIMD_FORCE_INLINE simd128_t simd_splat(const void* _ptr)
+ {
+ return simd_splat<simd128_t>(_ptr);
+ }
+
+ BX_SIMD_FORCE_INLINE simd128_t simd_splat(float _a)
+ {
+ return simd_splat<simd128_t>(_a);
+ }
+
+ BX_SIMD_FORCE_INLINE simd128_t simd_isplat(uint32_t _a)
+ {
+ return simd_isplat<simd128_t>(_a);
+ }
+} // namespace bx
+
+#endif // BX_SIMD_T_H_HEADER_GUARD
diff --git a/3rdparty/bx/include/bx/string.h b/3rdparty/bx/include/bx/string.h
index 937f15d1def..1bd322528b3 100644
--- a/3rdparty/bx/include/bx/string.h
+++ b/3rdparty/bx/include/bx/string.h
@@ -30,7 +30,7 @@ namespace bx
/// Case insensitive string compare.
inline int32_t stricmp(const char* _a, const char* _b)
{
-#if BX_COMPILER_MSVC_COMPATIBLE
+#if BX_CRT_MSVC
return ::_stricmp(_a, _b);
#else
return ::strcasecmp(_a, _b);
diff --git a/3rdparty/bx/scripts/bx.lua b/3rdparty/bx/scripts/bx.lua
index aa09b050f8a..8cae8b419e8 100644
--- a/3rdparty/bx/scripts/bx.lua
+++ b/3rdparty/bx/scripts/bx.lua
@@ -21,4 +21,5 @@ project "bx"
files {
"../include/**.h",
+ "../include/**.inl",
}
diff --git a/3rdparty/bx/scripts/toolchain.lua b/3rdparty/bx/scripts/toolchain.lua
index 5ff98ccf8c1..0f751dd3aa4 100644
--- a/3rdparty/bx/scripts/toolchain.lua
+++ b/3rdparty/bx/scripts/toolchain.lua
@@ -717,7 +717,7 @@ function toolchain(_buildDir, _libDir)
"NoImportLib",
}
includedirs {
- "$(ANDROID_NDK_ROOT)/sources/cxx-stl/gnu-libstdc++/4.8/include",
+ "$(ANDROID_NDK_ROOT)/sources/cxx-stl/gnu-libstdc++/4.9/include",
"$(ANDROID_NDK_ROOT)/sources/android/native_app_glue",
}
linkoptions {
@@ -811,10 +811,10 @@ function toolchain(_buildDir, _libDir)
objdir (path.join(_buildDir, "android-mips/obj"))
libdirs {
path.join(_libDir, "lib/android-mips"),
- "$(ANDROID_NDK_ROOT)/sources/cxx-stl/gnu-libstdc++/4.8/libs/mips",
+ "$(ANDROID_NDK_ROOT)/sources/cxx-stl/gnu-libstdc++/4.9/libs/mips",
}
includedirs {
- "$(ANDROID_NDK_ROOT)/sources/cxx-stl/gnu-libstdc++/4.8/libs/mips/include",
+ "$(ANDROID_NDK_ROOT)/sources/cxx-stl/gnu-libstdc++/4.9/libs/mips/include",
}
buildoptions {
"--sysroot=" .. path.join("$(ANDROID_NDK_ROOT)/platforms", androidPlatform, "arch-mips"),
@@ -832,10 +832,10 @@ function toolchain(_buildDir, _libDir)
objdir (path.join(_buildDir, "android-x86/obj"))
libdirs {
path.join(_libDir, "lib/android-x86"),
- "$(ANDROID_NDK_ROOT)/sources/cxx-stl/gnu-libstdc++/4.8/libs/x86",
+ "$(ANDROID_NDK_ROOT)/sources/cxx-stl/gnu-libstdc++/4.9/libs/x86",
}
includedirs {
- "$(ANDROID_NDK_ROOT)/sources/cxx-stl/gnu-libstdc++/4.8/libs/x86/include",
+ "$(ANDROID_NDK_ROOT)/sources/cxx-stl/gnu-libstdc++/4.9/libs/x86/include",
}
buildoptions {
"--sysroot=" .. path.join("$(ANDROID_NDK_ROOT)/platforms", androidPlatform, "arch-x86"),
diff --git a/3rdparty/bx/tests/float4_t.cpp b/3rdparty/bx/tests/float4_t.cpp
deleted file mode 100644
index 3bdfb1976b1..00000000000
--- a/3rdparty/bx/tests/float4_t.cpp
+++ /dev/null
@@ -1,309 +0,0 @@
-/*
- * Copyright 2010-2016 Branimir Karadzic. All rights reserved.
- * License: https://github.com/bkaradzic/bx#license-bsd-2-clause
- */
-
-#include "test.h"
-#include <bx/float4_t.h>
-#include <bx/fpumath.h>
-#include <string.h>
-
-using namespace bx;
-
-union float4_cast
-{
- bx::float4_t f4;
- float f[4];
- uint32_t ui[4];
- int32_t i[4];
- char c[16];
-};
-
-void float4_check_bool(const char* _str, bool _a, bool _0)
-{
- DBG("%s %d == %d"
- , _str
- , _a
- , _0
- );
-
- CHECK_EQUAL(_a, _0);
-}
-
-void float4_check_int32(const char* _str, bx::float4_t _a, int32_t _0, int32_t _1, int32_t _2, int32_t _3)
-{
- float4_cast c; c.f4 = _a;
- DBG("%s (%d, %d, %d, %d) == (%d, %d, %d, %d)"
- , _str
- , c.i[0], c.i[1], c.i[2], c.i[3]
- , _0, _1, _2, _3
- );
-
- CHECK_EQUAL(c.i[0], _0);
- CHECK_EQUAL(c.i[1], _1);
- CHECK_EQUAL(c.i[2], _2);
- CHECK_EQUAL(c.i[3], _3);
-}
-
-void float4_check_uint32(const char* _str, bx::float4_t _a, uint32_t _0, uint32_t _1, uint32_t _2, uint32_t _3)
-{
- float4_cast c; c.f4 = _a;
-
- DBG("%s (0x%08x, 0x%08x, 0x%08x, 0x%08x) == (0x%08x, 0x%08x, 0x%08x, 0x%08x)"
- , _str
- , c.ui[0], c.ui[1], c.ui[2], c.ui[3]
- , _0, _1, _2, _3
- );
-
- CHECK_EQUAL(c.ui[0], _0);
- CHECK_EQUAL(c.ui[1], _1);
- CHECK_EQUAL(c.ui[2], _2);
- CHECK_EQUAL(c.ui[3], _3);
-}
-
-void float4_check_float(const char* _str, bx::float4_t _a, float _0, float _1, float _2, float _3)
-{
- float4_cast c; c.f4 = _a;
-
- DBG("%s (%f, %f, %f, %f) == (%f, %f, %f, %f)"
- , _str
- , c.f[0], c.f[1], c.f[2], c.f[3]
- , _0, _1, _2, _3
- );
-
- CHECK(bx::fequal(c.f[0], _0, 0.0001f) );
- CHECK(bx::fequal(c.f[1], _1, 0.0001f) );
- CHECK(bx::fequal(c.f[2], _2, 0.0001f) );
- CHECK(bx::fequal(c.f[3], _3, 0.0001f) );
-}
-
-void float4_check_string(const char* _str, bx::float4_t _a)
-{
- float4_cast c; c.f4 = _a;
- const char test[5] = { c.c[0], c.c[4], c.c[8], c.c[12], '\0' };
-
- DBG("%s %s", _str, test);
-
- CHECK(0 == strcmp(_str, test) );
-}
-
-TEST(float4_swizzle)
-{
- const float4_t xyzw = float4_ild(0x78787878, 0x79797979, 0x7a7a7a7a, 0x77777777);
-
-#define ELEMx 0
-#define ELEMy 1
-#define ELEMz 2
-#define ELEMw 3
-#define IMPLEMENT_SWIZZLE(_x, _y, _z, _w) \
- float4_check_string("" #_x #_y #_z #_w "", float4_swiz_##_x##_y##_z##_w(xyzw) ); \
-
-#include <bx/float4_swizzle.inl>
-
-#undef IMPLEMENT_SWIZZLE
-#undef ELEMw
-#undef ELEMz
-#undef ELEMy
-#undef ELEMx
-}
-
-TEST(float4_shuffle)
-{
- const float4_t xyzw = float4_ild(0x78787878, 0x79797979, 0x7a7a7a7a, 0x77777777);
- const float4_t ABCD = float4_ild(0x41414141, 0x42424242, 0x43434343, 0x44444444);
- float4_check_string("xyAB", float4_shuf_xyAB(xyzw, ABCD) );
- float4_check_string("ABxy", float4_shuf_ABxy(xyzw, ABCD) );
- float4_check_string("zwCD", float4_shuf_zwCD(xyzw, ABCD) );
- float4_check_string("CDzw", float4_shuf_CDzw(xyzw, ABCD) );
- float4_check_string("xAyB", float4_shuf_xAyB(xyzw, ABCD) );
- float4_check_string("zCwD", float4_shuf_zCwD(xyzw, ABCD) );
- float4_check_string("xAzC", float4_shuf_xAzC(xyzw, ABCD) );
- float4_check_string("yBwD", float4_shuf_yBwD(xyzw, ABCD) );
- float4_check_string("CzDw", float4_shuf_CzDw(xyzw, ABCD) );
-}
-
-TEST(float4_compare)
-{
- float4_check_uint32("cmpeq"
- , float4_cmpeq(float4_ld(1.0f, 2.0f, 3.0f, 4.0f), float4_ld(0.0f, 2.0f, 0.0f, 3.0f) )
- , 0, 0xffffffff, 0, 0
- );
-
- float4_check_uint32("cmplt"
- , float4_cmplt(float4_ld(1.0f, 2.0f, 3.0f, 4.0f), float4_ld(0.0f, 2.0f, 0.0f, 3.0f) )
- , 0, 0, 0, 0
- );
-
- float4_check_uint32("cmple"
- , float4_cmple(float4_ld(1.0f, 2.0f, 3.0f, 4.0f), float4_ld(0.0f, 2.0f, 0.0f, 3.0f) )
- , 0, 0xffffffff, 0, 0
- );
-
- float4_check_uint32("cmpgt"
- , float4_cmpgt(float4_ld(1.0f, 2.0f, 3.0f, 4.0f), float4_ld(0.0f, 2.0f, 0.0f, 3.0f) )
- , 0xffffffff, 0, 0xffffffff, 0xffffffff
- );
-
- float4_check_uint32("cmpge"
- , float4_cmpge(float4_ld(1.0f, 2.0f, 3.0f, 4.0f), float4_ld(0.0f, 2.0f, 0.0f, 3.0f) )
- , 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff
- );
-
- float4_check_uint32("icmpeq"
- , float4_icmpeq(float4_ild(0, 1, 2, 3), float4_ild(0, uint32_t(-2), 1, 3) )
- , 0xffffffff, 0, 0, 0xffffffff
- );
-
- float4_check_uint32("icmplt"
- , float4_icmplt(float4_ild(0, 1, 2, 3), float4_ild(0, uint32_t(-2), 1, 3) )
- , 0, 0, 0, 0
- );
-
- float4_check_uint32("icmpgt"
- , float4_icmpgt(float4_ild(0, 1, 2, 3), float4_ild(0, uint32_t(-2), 1, 3) )
- , 0, 0xffffffff, 0xffffffff, 0
- );
-}
-
-TEST(float4_test)
-{
- float4_check_bool("test_any_xyzw"
- , float4_test_any_xyzw(float4_ild(0xffffffff, 0, 0, 0) )
- , true
- );
-
- float4_check_bool("test_all_xyzw"
- , float4_test_all_xyzw(float4_ild(0xffffffff, 0, 0xffffffff, 0) )
- , false
- );
-
- float4_check_bool("test_all_xyzw"
- , float4_test_all_xyzw(float4_ild(0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff) )
- , true
- );
-
- float4_check_bool("test_all_xw"
- , float4_test_all_xw(float4_ild(0xffffffff, 0, 0, 0xffffffff) )
- , true
- );
-
- float4_check_bool("test_all_xzw"
- , float4_test_all_xzw(float4_ild(0xffffffff, 0, 0, 0xffffffff) )
- , false
- );
-}
-
-TEST(float4_load)
-{
- float4_check_float("ld"
- , float4_ld(0.0f, 1.0f, 2.0f, 3.0f)
- , 0.0f, 1.0f, 2.0f, 3.0f
- );
-
- float4_check_int32("ild"
- , float4_ild(uint32_t(-1), 0, 1, 2)
- , uint32_t(-1), 0, 1, 2
- );
-
- float4_check_int32("ild"
- , float4_ild(uint32_t(-1), uint32_t(-2), uint32_t(-3), uint32_t(-4) )
- , uint32_t(-1), uint32_t(-2), uint32_t(-3), uint32_t(-4)
- );
-
- float4_check_uint32("zero", float4_zero()
- , 0, 0, 0, 0
- );
-
- float4_check_uint32("isplat", float4_isplat(0x80000001)
- , 0x80000001, 0x80000001, 0x80000001, 0x80000001
- );
-
- float4_check_float("isplat", float4_splat(1.0f)
- , 1.0f, 1.0f, 1.0f, 1.0f
- );
-}
-
-TEST(float4_arithmetic)
-{
- float4_check_float("madd"
- , float4_madd(float4_ld(0.0f, 1.0f, 2.0f, 3.0f), float4_ld(4.0f, 5.0f, 6.0f, 7.0f), float4_ld(8.0f, 9.0f, 10.0f, 11.0f) )
- , 8.0f, 14.0f, 22.0f, 32.0f
- );
-
- float4_check_float("cross3"
- , float4_cross3(float4_ld(1.0f, 0.0f, 0.0f, 0.0f), float4_ld(0.0f, 1.0f, 0.0f, 0.0f) )
- , 0.0f, 0.0f, 1.0f, 0.0f
- );
-}
-
-TEST(float4_sqrt)
-{
- float4_check_float("float4_sqrt"
- , float4_sqrt(float4_ld(1.0f, 16.0f, 65536.0f, 123456.0f) )
- , 1.0f, 4.0f, 256.0f, 351.363060096f
- );
-
- float4_check_float("float4_sqrt_nr_ni"
- , float4_sqrt_nr_ni(float4_ld(1.0f, 16.0f, 65536.0f, 123456.0f) )
- , 1.0f, 4.0f, 256.0f, 351.363060096f
- );
-
- float4_check_float("float4_sqrt_nr1_ni"
- , float4_sqrt_nr1_ni(float4_ld(1.0f, 16.0f, 65536.0f, 123456.0f) )
- , 1.0f, 4.0f, 256.0f, 351.363060096f
- );
-}
-
-TEST(float4)
-{
- const float4_t isplat = float4_isplat(0x80000001);
- float4_check_uint32("sll"
- , float4_sll(isplat, 1)
- , 0x00000002, 0x00000002, 0x00000002, 0x00000002
- );
-
- float4_check_uint32("srl"
- , float4_srl(isplat, 1)
- , 0x40000000, 0x40000000, 0x40000000, 0x40000000
- );
-
- float4_check_uint32("sra"
- , float4_sra(isplat, 1)
- , 0xc0000000, 0xc0000000, 0xc0000000, 0xc0000000
- );
-
- float4_check_uint32("and"
- , float4_and(float4_isplat(0x55555555), float4_isplat(0xaaaaaaaa) )
- , 0, 0, 0, 0
- );
-
- float4_check_uint32("or "
- , float4_or(float4_isplat(0x55555555), float4_isplat(0xaaaaaaaa) )
- , uint32_t(-1), uint32_t(-1), uint32_t(-1), uint32_t(-1)
- );
-
- float4_check_uint32("xor"
- , float4_or(float4_isplat(0x55555555), float4_isplat(0xaaaaaaaa) )
- , uint32_t(-1), uint32_t(-1), uint32_t(-1), uint32_t(-1)
- );
-
- float4_check_int32("imin"
- , float4_imin(float4_ild(0, 1, 2, 3), float4_ild(uint32_t(-1), 2, uint32_t(-2), 1) )
- , uint32_t(-1), 1, uint32_t(-2), 1
- );
-
- float4_check_float("min"
- , float4_min(float4_ld(0.0f, 1.0f, 2.0f, 3.0f), float4_ld(-1.0f, 2.0f, -2.0f, 1.0f) )
- , -1.0f, 1.0f, -2.0f, 1.0f
- );
-
- float4_check_int32("imax"
- , float4_imax(float4_ild(0, 1, 2, 3), float4_ild(uint32_t(-1), 2, uint32_t(-2), 1) )
- , 0, 2, 2, 3
- );
-
- float4_check_float("max"
- , float4_max(float4_ld(0.0f, 1.0f, 2.0f, 3.0f), float4_ld(-1.0f, 2.0f, -2.0f, 1.0f) )
- , 0.0f, 2.0f, 2.0f, 3.0f
- );
-}
diff --git a/3rdparty/bx/tests/simd_t.cpp b/3rdparty/bx/tests/simd_t.cpp
new file mode 100644
index 00000000000..999438a234a
--- /dev/null
+++ b/3rdparty/bx/tests/simd_t.cpp
@@ -0,0 +1,309 @@
+/*
+ * Copyright 2010-2016 Branimir Karadzic. All rights reserved.
+ * License: https://github.com/bkaradzic/bx#license-bsd-2-clause
+ */
+
+#include "test.h"
+#include <bx/simd_t.h>
+#include <bx/fpumath.h>
+#include <string.h>
+
+using namespace bx;
+
+union simd_cast
+{
+ bx::simd128_t f4;
+ float f[4];
+ uint32_t ui[4];
+ int32_t i[4];
+ char c[16];
+};
+
+void simd_check_bool(const char* _str, bool _a, bool _0)
+{
+ DBG("%s %d == %d"
+ , _str
+ , _a
+ , _0
+ );
+
+ CHECK_EQUAL(_a, _0);
+}
+
+void simd_check_int32(const char* _str, bx::simd128_t _a, int32_t _0, int32_t _1, int32_t _2, int32_t _3)
+{
+ simd_cast c; c.f4 = _a;
+ DBG("%s (%d, %d, %d, %d) == (%d, %d, %d, %d)"
+ , _str
+ , c.i[0], c.i[1], c.i[2], c.i[3]
+ , _0, _1, _2, _3
+ );
+
+ CHECK_EQUAL(c.i[0], _0);
+ CHECK_EQUAL(c.i[1], _1);
+ CHECK_EQUAL(c.i[2], _2);
+ CHECK_EQUAL(c.i[3], _3);
+}
+
+void simd_check_uint32(const char* _str, bx::simd128_t _a, uint32_t _0, uint32_t _1, uint32_t _2, uint32_t _3)
+{
+ simd_cast c; c.f4 = _a;
+
+ DBG("%s (0x%08x, 0x%08x, 0x%08x, 0x%08x) == (0x%08x, 0x%08x, 0x%08x, 0x%08x)"
+ , _str
+ , c.ui[0], c.ui[1], c.ui[2], c.ui[3]
+ , _0, _1, _2, _3
+ );
+
+ CHECK_EQUAL(c.ui[0], _0);
+ CHECK_EQUAL(c.ui[1], _1);
+ CHECK_EQUAL(c.ui[2], _2);
+ CHECK_EQUAL(c.ui[3], _3);
+}
+
+void simd_check_float(const char* _str, bx::simd128_t _a, float _0, float _1, float _2, float _3)
+{
+ simd_cast c; c.f4 = _a;
+
+ DBG("%s (%f, %f, %f, %f) == (%f, %f, %f, %f)"
+ , _str
+ , c.f[0], c.f[1], c.f[2], c.f[3]
+ , _0, _1, _2, _3
+ );
+
+ CHECK(bx::fequal(c.f[0], _0, 0.0001f) );
+ CHECK(bx::fequal(c.f[1], _1, 0.0001f) );
+ CHECK(bx::fequal(c.f[2], _2, 0.0001f) );
+ CHECK(bx::fequal(c.f[3], _3, 0.0001f) );
+}
+
+void simd_check_string(const char* _str, bx::simd128_t _a)
+{
+ simd_cast c; c.f4 = _a;
+ const char test[5] = { c.c[0], c.c[4], c.c[8], c.c[12], '\0' };
+
+ DBG("%s %s", _str, test);
+
+ CHECK(0 == strcmp(_str, test) );
+}
+
+TEST(simd_swizzle)
+{
+ const simd128_t xyzw = simd_ild(0x78787878, 0x79797979, 0x7a7a7a7a, 0x77777777);
+
+#define ELEMx 0
+#define ELEMy 1
+#define ELEMz 2
+#define ELEMw 3
+#define BX_SIMD128_IMPLEMENT_SWIZZLE(_x, _y, _z, _w) \
+ simd_check_string("" #_x #_y #_z #_w "", simd_swiz_##_x##_y##_z##_w(xyzw) ); \
+
+#include <bx/simd128_swizzle.inl>
+
+#undef BX_SIMD128_IMPLEMENT_SWIZZLE
+#undef ELEMw
+#undef ELEMz
+#undef ELEMy
+#undef ELEMx
+}
+
+TEST(simd_shuffle)
+{
+ const simd128_t xyzw = simd_ild(0x78787878, 0x79797979, 0x7a7a7a7a, 0x77777777);
+ const simd128_t ABCD = simd_ild(0x41414141, 0x42424242, 0x43434343, 0x44444444);
+ simd_check_string("xyAB", simd_shuf_xyAB(xyzw, ABCD) );
+ simd_check_string("ABxy", simd_shuf_ABxy(xyzw, ABCD) );
+ simd_check_string("zwCD", simd_shuf_zwCD(xyzw, ABCD) );
+ simd_check_string("CDzw", simd_shuf_CDzw(xyzw, ABCD) );
+ simd_check_string("xAyB", simd_shuf_xAyB(xyzw, ABCD) );
+ simd_check_string("zCwD", simd_shuf_zCwD(xyzw, ABCD) );
+ simd_check_string("xAzC", simd_shuf_xAzC(xyzw, ABCD) );
+ simd_check_string("yBwD", simd_shuf_yBwD(xyzw, ABCD) );
+ simd_check_string("CzDw", simd_shuf_CzDw(xyzw, ABCD) );
+}
+
+TEST(simd_compare)
+{
+ simd_check_uint32("cmpeq"
+ , simd_cmpeq(simd_ld(1.0f, 2.0f, 3.0f, 4.0f), simd_ld(0.0f, 2.0f, 0.0f, 3.0f) )
+ , 0, 0xffffffff, 0, 0
+ );
+
+ simd_check_uint32("cmplt"
+ , simd_cmplt(simd_ld(1.0f, 2.0f, 3.0f, 4.0f), simd_ld(0.0f, 2.0f, 0.0f, 3.0f) )
+ , 0, 0, 0, 0
+ );
+
+ simd_check_uint32("cmple"
+ , simd_cmple(simd_ld(1.0f, 2.0f, 3.0f, 4.0f), simd_ld(0.0f, 2.0f, 0.0f, 3.0f) )
+ , 0, 0xffffffff, 0, 0
+ );
+
+ simd_check_uint32("cmpgt"
+ , simd_cmpgt(simd_ld(1.0f, 2.0f, 3.0f, 4.0f), simd_ld(0.0f, 2.0f, 0.0f, 3.0f) )
+ , 0xffffffff, 0, 0xffffffff, 0xffffffff
+ );
+
+ simd_check_uint32("cmpge"
+ , simd_cmpge(simd_ld(1.0f, 2.0f, 3.0f, 4.0f), simd_ld(0.0f, 2.0f, 0.0f, 3.0f) )
+ , 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff
+ );
+
+ simd_check_uint32("icmpeq"
+ , simd_icmpeq(simd_ild(0, 1, 2, 3), simd_ild(0, uint32_t(-2), 1, 3) )
+ , 0xffffffff, 0, 0, 0xffffffff
+ );
+
+ simd_check_uint32("icmplt"
+ , simd_icmplt(simd_ild(0, 1, 2, 3), simd_ild(0, uint32_t(-2), 1, 3) )
+ , 0, 0, 0, 0
+ );
+
+ simd_check_uint32("icmpgt"
+ , simd_icmpgt(simd_ild(0, 1, 2, 3), simd_ild(0, uint32_t(-2), 1, 3) )
+ , 0, 0xffffffff, 0xffffffff, 0
+ );
+}
+
+TEST(simd_test)
+{
+ simd_check_bool("test_any_xyzw"
+ , simd_test_any_xyzw(simd_ild(0xffffffff, 0, 0, 0) )
+ , true
+ );
+
+ simd_check_bool("test_all_xyzw"
+ , simd_test_all_xyzw(simd_ild(0xffffffff, 0, 0xffffffff, 0) )
+ , false
+ );
+
+ simd_check_bool("test_all_xyzw"
+ , simd_test_all_xyzw(simd_ild(0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff) )
+ , true
+ );
+
+ simd_check_bool("test_all_xw"
+ , simd_test_all_xw(simd_ild(0xffffffff, 0, 0, 0xffffffff) )
+ , true
+ );
+
+ simd_check_bool("test_all_xzw"
+ , simd_test_all_xzw(simd_ild(0xffffffff, 0, 0, 0xffffffff) )
+ , false
+ );
+}
+
+TEST(simd_load)
+{
+ simd_check_float("ld"
+ , simd_ld(0.0f, 1.0f, 2.0f, 3.0f)
+ , 0.0f, 1.0f, 2.0f, 3.0f
+ );
+
+ simd_check_int32("ild"
+ , simd_ild(uint32_t(-1), 0, 1, 2)
+ , uint32_t(-1), 0, 1, 2
+ );
+
+ simd_check_int32("ild"
+ , simd_ild(uint32_t(-1), uint32_t(-2), uint32_t(-3), uint32_t(-4) )
+ , uint32_t(-1), uint32_t(-2), uint32_t(-3), uint32_t(-4)
+ );
+
+ simd_check_uint32("zero", simd_zero()
+ , 0, 0, 0, 0
+ );
+
+ simd_check_uint32("isplat", simd_isplat(0x80000001)
+ , 0x80000001, 0x80000001, 0x80000001, 0x80000001
+ );
+
+	simd_check_float("splat", simd_splat(1.0f)
+ , 1.0f, 1.0f, 1.0f, 1.0f
+ );
+}
+
+TEST(simd_arithmetic)
+{
+ simd_check_float("madd"
+ , simd_madd(simd_ld(0.0f, 1.0f, 2.0f, 3.0f), simd_ld(4.0f, 5.0f, 6.0f, 7.0f), simd_ld(8.0f, 9.0f, 10.0f, 11.0f) )
+ , 8.0f, 14.0f, 22.0f, 32.0f
+ );
+
+ simd_check_float("cross3"
+ , simd_cross3(simd_ld(1.0f, 0.0f, 0.0f, 0.0f), simd_ld(0.0f, 1.0f, 0.0f, 0.0f) )
+ , 0.0f, 0.0f, 1.0f, 0.0f
+ );
+}
+
+TEST(simd_sqrt)
+{
+ simd_check_float("simd_sqrt"
+ , simd_sqrt(simd_ld(1.0f, 16.0f, 65536.0f, 123456.0f) )
+ , 1.0f, 4.0f, 256.0f, 351.363060096f
+ );
+
+ simd_check_float("simd_sqrt_nr_ni"
+ , simd_sqrt_nr_ni(simd_ld(1.0f, 16.0f, 65536.0f, 123456.0f) )
+ , 1.0f, 4.0f, 256.0f, 351.363060096f
+ );
+
+ simd_check_float("simd_sqrt_nr1_ni"
+ , simd_sqrt_nr1_ni(simd_ld(1.0f, 16.0f, 65536.0f, 123456.0f) )
+ , 1.0f, 4.0f, 256.0f, 351.363060096f
+ );
+}
+
+TEST(simd)
+{
+ const simd128_t isplat = simd_isplat(0x80000001);
+ simd_check_uint32("sll"
+ , simd_sll(isplat, 1)
+ , 0x00000002, 0x00000002, 0x00000002, 0x00000002
+ );
+
+ simd_check_uint32("srl"
+ , simd_srl(isplat, 1)
+ , 0x40000000, 0x40000000, 0x40000000, 0x40000000
+ );
+
+ simd_check_uint32("sra"
+ , simd_sra(isplat, 1)
+ , 0xc0000000, 0xc0000000, 0xc0000000, 0xc0000000
+ );
+
+ simd_check_uint32("and"
+ , simd_and(simd_isplat(0x55555555), simd_isplat(0xaaaaaaaa) )
+ , 0, 0, 0, 0
+ );
+
+ simd_check_uint32("or "
+ , simd_or(simd_isplat(0x55555555), simd_isplat(0xaaaaaaaa) )
+ , uint32_t(-1), uint32_t(-1), uint32_t(-1), uint32_t(-1)
+ );
+
+ simd_check_uint32("xor"
+		, simd_xor(simd_isplat(0x55555555), simd_isplat(0xaaaaaaaa) )
+ , uint32_t(-1), uint32_t(-1), uint32_t(-1), uint32_t(-1)
+ );
+
+ simd_check_int32("imin"
+ , simd_imin(simd_ild(0, 1, 2, 3), simd_ild(uint32_t(-1), 2, uint32_t(-2), 1) )
+ , uint32_t(-1), 1, uint32_t(-2), 1
+ );
+
+ simd_check_float("min"
+ , simd_min(simd_ld(0.0f, 1.0f, 2.0f, 3.0f), simd_ld(-1.0f, 2.0f, -2.0f, 1.0f) )
+ , -1.0f, 1.0f, -2.0f, 1.0f
+ );
+
+ simd_check_int32("imax"
+ , simd_imax(simd_ild(0, 1, 2, 3), simd_ild(uint32_t(-1), 2, uint32_t(-2), 1) )
+ , 0, 2, 2, 3
+ );
+
+ simd_check_float("max"
+ , simd_max(simd_ld(0.0f, 1.0f, 2.0f, 3.0f), simd_ld(-1.0f, 2.0f, -2.0f, 1.0f) )
+ , 0.0f, 2.0f, 2.0f, 3.0f
+ );
+}
diff --git a/3rdparty/bx/tests/vector_nodefault.cpp b/3rdparty/bx/tests/vector_nodefault.cpp
index 4a8bc8cf1dd..1340b0c0b3d 100644
--- a/3rdparty/bx/tests/vector_nodefault.cpp
+++ b/3rdparty/bx/tests/vector_nodefault.cpp
@@ -33,10 +33,14 @@
#include <string.h>
#include <stdlib.h>
+#if !BX_CRT_MSVC
+# define _strdup strdup
+#endif // !BX_CRT_MSVC
+
struct nodefault {
- nodefault(const char* s) { data = strdup(s); }
+ nodefault(const char* s) { data = _strdup(s); }
~nodefault() { free(data); }
- nodefault(const nodefault& other) { data = 0; if (other.data) data = strdup(other.data); }
+ nodefault(const nodefault& other) { data = 0; if (other.data) data = _strdup(other.data); }
nodefault& operator=(const nodefault& other) { nodefault(other).swap(*this); return *this; }
void swap(nodefault& other) { std::swap(data, other.data); }
@@ -148,7 +152,7 @@ TEST(vector_nodefault_popback) {
v.push_back("24");
CHECK(v.back() == "24");
-
+
v.pop_back();
CHECK(v.back() == "12");
diff --git a/3rdparty/bx/tools/bin/darwin/genie b/3rdparty/bx/tools/bin/darwin/genie
index a092d05c1d2..ca02eb91450 100755
--- a/3rdparty/bx/tools/bin/darwin/genie
+++ b/3rdparty/bx/tools/bin/darwin/genie
Binary files differ
diff --git a/3rdparty/bx/tools/bin/linux/genie b/3rdparty/bx/tools/bin/linux/genie
index b497f5dc9b1..7b68c735e1b 100755
--- a/3rdparty/bx/tools/bin/linux/genie
+++ b/3rdparty/bx/tools/bin/linux/genie
Binary files differ
diff --git a/3rdparty/bx/tools/bin/windows/genie.exe b/3rdparty/bx/tools/bin/windows/genie.exe
index 8111412afcf..1fd05d21798 100644
--- a/3rdparty/bx/tools/bin/windows/genie.exe
+++ b/3rdparty/bx/tools/bin/windows/genie.exe
Binary files differ