// license:BSD-3-Clause
// copyright-holders:Tyler J. Stachecki,Ryan Holtz
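// Shared SSE implementation of the RSP vector compares VEQ, VGE, VLT and VNE.
// Bits 0-1 of the instruction word (iw) select the operation; the per-lane
// comparison mask is written to *le and used to select elements from vs or vt.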
inline rsp_vec_t vec_veq_vge_vlt_vne(uint32_t iw, rsp_vec_t vs, rsp_vec_t vt, rsp_vec_t zero, rsp_vec_t *le, rsp_vec_t eq, rsp_vec_t sign)
{
rsp_vec_t equal = _mm_cmpeq_epi16(vs, vt);
if (iw & 0x2) // VNE & VGE
{
if (iw & 0x1) // VGE
{
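			// greater-or-equal: strictly-greater lanes, plus equal lanes
			// except those where both the eq and sign flags are set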
rsp_vec_t gt = _mm_cmpgt_epi16(vs, vt);
rsp_vec_t equalsign = _mm_and_si128(eq, sign);
equal = _mm_andnot_si128(equalsign, equal);
*le = _mm_or_si128(gt, equal);
}
else // VNE
{
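			// not-equal: lanes where vs != vt, plus equal lanes where
			// the eq flag is set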
rsp_vec_t nequal = _mm_cmpeq_epi16(equal, zero);
*le = _mm_and_si128(eq, equal);
*le = _mm_or_si128(*le, nequal);
}
}
else // VEQ & VLT
{
if (iw & 0x1) // VEQ
{
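			// equal: lanes where vs == vt and the eq flag is clear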
*le = _mm_andnot_si128(eq, equal);
}
else // VLT
{
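			// less-than: strictly-less lanes, plus equal lanes where
			// both the eq and sign flags are set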
rsp_vec_t lt = _mm_cmplt_epi16(vs, vt);
equal = _mm_and_si128(eq, equal);
equal = _mm_and_si128(sign, equal);
*le = _mm_or_si128(lt, equal);
}
}
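
	// select vs lanes where the mask in *le is set, vt lanes otherwise;
	// the non-SSE4.1 path emulates blendv with and/andnot/or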
#if (defined(__SSE4_1__) || defined(_MSC_VER))
return _mm_blendv_epi8(vt, vs, *le);
#else
vs = _mm_and_si128(*le, vs);
vt = _mm_andnot_si128(*le, vt);
return _mm_or_si128(vs, vt);
#endif
}