common.h

#pragma once

#include "platform.h"

#if GLM_ARCH & GLM_ARCH_SSE2_BIT

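// Component-wise arithmetic on four packed floats. The glm_vec4_* variants use the
// packed (_ps) intrinsics and operate on all four lanes; the glm_vec1_* variants use
// the scalar (_ss) intrinsics, which compute only the lowest lane and pass the upper
// three lanes of the first operand through unchanged.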
GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_add(glm_f32vec4 a, glm_f32vec4 b)
{
	return _mm_add_ps(a, b);
}

GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec1_add(glm_f32vec4 a, glm_f32vec4 b)
{
	return _mm_add_ss(a, b);
}

GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_sub(glm_f32vec4 a, glm_f32vec4 b)
{
	return _mm_sub_ps(a, b);
}

GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec1_sub(glm_f32vec4 a, glm_f32vec4 b)
{
	return _mm_sub_ss(a, b);
}

GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_mul(glm_f32vec4 a, glm_f32vec4 b)
{
	return _mm_mul_ps(a, b);
}

GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec1_mul(glm_f32vec4 a, glm_f32vec4 b)
{
	return _mm_mul_ss(a, b);
}

GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_div(glm_f32vec4 a, glm_f32vec4 b)
{
	return _mm_div_ps(a, b);
}

GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec1_div(glm_f32vec4 a, glm_f32vec4 b)
{
	return _mm_div_ss(a, b);
}

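// Low-precision division: multiplies by the _mm_rcp_ps reciprocal approximation
// (about 12 bits of precision) instead of issuing a full-precision divide.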
GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_div_lowp(glm_f32vec4 a, glm_f32vec4 b)
{
	return glm_vec4_mul(a, _mm_rcp_ps(b));
}

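// Identity swizzle: _MM_SHUFFLE(3, 2, 1, 0) keeps the lanes in x, y, z, w order;
// with AVX2 enabled, _mm_permute_ps encodes it with a single source operand.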
GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_swizzle_xyzw(glm_f32vec4 a)
{
#	if GLM_ARCH & GLM_ARCH_AVX2_BIT
		return _mm_permute_ps(a, _MM_SHUFFLE(3, 2, 1, 0));
#	else
		return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 2, 1, 0));
#	endif
}

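// Fused multiply-add a * b + c. When compiling for AVX2 (and not with Clang, which is
// excluded here) the fused intrinsics round once; the fallback multiply-then-add
// rounds twice, so results can differ in the last bit.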
GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec1_fma(glm_f32vec4 a, glm_f32vec4 b, glm_f32vec4 c)
{
#	if (GLM_ARCH & GLM_ARCH_AVX2_BIT) && !(GLM_COMPILER & GLM_COMPILER_CLANG)
		return _mm_fmadd_ss(a, b, c);
#	else
		return _mm_add_ss(_mm_mul_ss(a, b), c);
#	endif
}

GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_fma(glm_f32vec4 a, glm_f32vec4 b, glm_f32vec4 c)
{
#	if (GLM_ARCH & GLM_ARCH_AVX2_BIT) && !(GLM_COMPILER & GLM_COMPILER_CLANG)
		return _mm_fmadd_ps(a, b, c);
#	else
		return glm_vec4_add(glm_vec4_mul(a, b), c);
#	endif
}

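// |x| by clearing the IEEE-754 sign bit with a 0x7FFFFFFF mask.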
GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_abs(glm_f32vec4 x)
{
	return _mm_and_ps(x, _mm_castsi128_ps(_mm_set1_epi32(0x7FFFFFFF)));
}

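// Integer |x|. SSSE3 has _mm_sign_epi32(x, x); the SSE2 fallback derives a sign mask
// with an arithmetic right shift and applies the two's-complement identity
// |x| = (x ^ sign) - sign.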
GLM_FUNC_QUALIFIER glm_ivec4 glm_ivec4_abs(glm_ivec4 x)
{
#	if GLM_ARCH & GLM_ARCH_SSSE3_BIT
		return _mm_sign_epi32(x, x);
#	else
		glm_ivec4 const sgn0 = _mm_srai_epi32(x, 31);
		glm_ivec4 const inv0 = _mm_xor_si128(x, sgn0);
		glm_ivec4 const sub0 = _mm_sub_epi32(inv0, sgn0);
		return sub0;
#	endif
}

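// sign(x): masks -1.0f where x < 0 and +1.0f where x > 0, then ORs the two, giving
// -1.0, 0.0 or +1.0 per component.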
GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_sign(glm_vec4 x)
{
	glm_vec4 const zro0 = _mm_setzero_ps();
	glm_vec4 const cmp0 = _mm_cmplt_ps(x, zro0);
	glm_vec4 const cmp1 = _mm_cmpgt_ps(x, zro0);
	glm_vec4 const and0 = _mm_and_ps(cmp0, _mm_set1_ps(-1.0f));
	glm_vec4 const and1 = _mm_and_ps(cmp1, _mm_set1_ps(1.0f));
	glm_vec4 const or0 = _mm_or_ps(and0, and1);
	return or0;
}

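// round(x). Without SSE4.1 this uses the 2^23 magic-number trick: adding 8388608.0f
// (with x's sign copied onto it) pushes the fraction bits out of the significand in
// the current rounding mode, and subtracting it back leaves the rounded value.
// Only valid for |x| < 2^23, the range of integers exactly representable in a float.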
GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_round(glm_vec4 x)
{
#	if GLM_ARCH & GLM_ARCH_SSE41_BIT
		return _mm_round_ps(x, _MM_FROUND_TO_NEAREST_INT);
#	else
		glm_vec4 const sgn0 = _mm_castsi128_ps(_mm_set1_epi32(int(0x80000000)));
		glm_vec4 const and0 = _mm_and_ps(sgn0, x);
		glm_vec4 const or0 = _mm_or_ps(and0, _mm_set_ps1(8388608.0f));
		glm_vec4 const add0 = glm_vec4_add(x, or0);
		glm_vec4 const sub0 = glm_vec4_sub(add0, or0);
		return sub0;
#	endif
}

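// floor(x): round to nearest, then subtract 1 from lanes where the rounded value
// exceeds x. glm_vec4_ceil below mirrors this with a compare-greater and an add.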
GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_floor(glm_vec4 x)
{
#	if GLM_ARCH & GLM_ARCH_SSE41_BIT
		return _mm_floor_ps(x);
#	else
		glm_vec4 const rnd0 = glm_vec4_round(x);
		glm_vec4 const cmp0 = _mm_cmplt_ps(x, rnd0);
		glm_vec4 const and0 = _mm_and_ps(cmp0, _mm_set1_ps(1.0f));
		glm_vec4 const sub0 = glm_vec4_sub(rnd0, and0);
		return sub0;
#	endif
}

/* trunc TODO
GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_trunc(glm_vec4 x)
{
	return glm_vec4();
}
*/

//roundEven
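// Same magic-number trick as the glm_vec4_round fallback; because the default MXCSR
// rounding mode is round-to-nearest-even, halfway cases land on the even integer,
// which is exactly what roundEven requires. Only valid for |x| < 2^23.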
GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_roundEven(glm_vec4 x)
{
	glm_vec4 const sgn0 = _mm_castsi128_ps(_mm_set1_epi32(int(0x80000000)));
	glm_vec4 const and0 = _mm_and_ps(sgn0, x);
	glm_vec4 const or0 = _mm_or_ps(and0, _mm_set_ps1(8388608.0f));
	glm_vec4 const add0 = glm_vec4_add(x, or0);
	glm_vec4 const sub0 = glm_vec4_sub(add0, or0);
	return sub0;
}

GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_ceil(glm_vec4 x)
{
#	if GLM_ARCH & GLM_ARCH_SSE41_BIT
		return _mm_ceil_ps(x);
#	else
		glm_vec4 const rnd0 = glm_vec4_round(x);
		glm_vec4 const cmp0 = _mm_cmpgt_ps(x, rnd0);
		glm_vec4 const and0 = _mm_and_ps(cmp0, _mm_set1_ps(1.0f));
		glm_vec4 const add0 = glm_vec4_add(rnd0, and0);
		return add0;
#	endif
}

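// fract(x) = x - floor(x).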
GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_fract(glm_vec4 x)
{
	glm_vec4 const flr0 = glm_vec4_floor(x);
	glm_vec4 const sub0 = glm_vec4_sub(x, flr0);
	return sub0;
}

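// GLSL mod: x - y * floor(x / y); the result takes the sign of y, unlike C's fmod.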
GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_mod(glm_vec4 x, glm_vec4 y)
{
	glm_vec4 const div0 = glm_vec4_div(x, y);
	glm_vec4 const flr0 = glm_vec4_floor(div0);
	glm_vec4 const mul0 = glm_vec4_mul(y, flr0);
	glm_vec4 const sub0 = glm_vec4_sub(x, mul0);
	return sub0;
}

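// clamp(v, minVal, maxVal) = max(min(v, maxVal), minVal).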
GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_clamp(glm_vec4 v, glm_vec4 minVal, glm_vec4 maxVal)
{
	glm_vec4 const min0 = _mm_min_ps(v, maxVal);
	glm_vec4 const max0 = _mm_max_ps(min0, minVal);
	return max0;
}

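// Linear interpolation: v1 * (1 - a) + v2 * a, with the v2 * a term folded into a
// fused multiply-add via glm_vec4_fma.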
GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_mix(glm_vec4 v1, glm_vec4 v2, glm_vec4 a)
{
	glm_vec4 const sub0 = glm_vec4_sub(_mm_set1_ps(1.0f), a);
	glm_vec4 const mul0 = glm_vec4_mul(v1, sub0);
	glm_vec4 const mad0 = glm_vec4_fma(v2, a, mul0);
	return mad0;
}

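// GLSL step(edge, x): 0.0 where x < edge, 1.0 where x >= edge, evaluated per component
// by masking 1.0f with the comparison result.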
GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_step(glm_vec4 edge, glm_vec4 x)
{
	glm_vec4 const cmp0 = _mm_cmple_ps(edge, x);
	glm_vec4 const and0 = _mm_and_ps(cmp0, _mm_set1_ps(1.0f));
	return and0;
}

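// smoothstep: t = clamp((x - edge0) / (edge1 - edge0), 0, 1), then t * t * (3 - 2 * t).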
GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_smoothstep(glm_vec4 edge0, glm_vec4 edge1, glm_vec4 x)
{
	glm_vec4 const sub0 = glm_vec4_sub(x, edge0);
	glm_vec4 const sub1 = glm_vec4_sub(edge1, edge0);
	glm_vec4 const div0 = glm_vec4_div(sub0, sub1);
	glm_vec4 const clp0 = glm_vec4_clamp(div0, _mm_setzero_ps(), _mm_set1_ps(1.0f));
	glm_vec4 const mul0 = glm_vec4_mul(_mm_set1_ps(2.0f), clp0);
	glm_vec4 const sub2 = glm_vec4_sub(_mm_set1_ps(3.0f), mul0);
	glm_vec4 const mul1 = glm_vec4_mul(clp0, clp0);
	glm_vec4 const mul2 = glm_vec4_mul(mul1, sub2);
	return mul2;
}

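// isnan(x): after shifting out the sign bit, a lane is NaN exactly when its exponent
// bits are all 1s and its fraction bits are not all zero.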
// Agner Fog method
GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_nan(glm_vec4 x)
{
	glm_ivec4 const t1 = _mm_castps_si128(x);						// reinterpret as 32-bit integer
	glm_ivec4 const t2 = _mm_sll_epi32(t1, _mm_cvtsi32_si128(1));	// shift out sign bit
	glm_ivec4 const t3 = _mm_set1_epi32(int(0xFF000000));			// exponent mask
	glm_ivec4 const t4 = _mm_and_si128(t2, t3);						// exponent
	glm_ivec4 const t5 = _mm_andnot_si128(t3, t2);					// fraction
	glm_ivec4 const Equal = _mm_cmpeq_epi32(t3, t4);				// exponent == all 1s
	glm_ivec4 const FracZero = _mm_cmpeq_epi32(t5, _mm_setzero_si128());	// fraction == 0
	glm_ivec4 const And = _mm_andnot_si128(FracZero, Equal);		// exponent = all 1s and fraction != 0
	return _mm_castsi128_ps(And);
}

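// isinf(x): after shifting out the sign bit, +/-infinity is the unique pattern whose
// exponent bits are all 1s and whose fraction bits are all zero, i.e. exactly 0xFF000000.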
// Agner Fog method
GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_inf(glm_vec4 x)
{
	glm_ivec4 const t1 = _mm_castps_si128(x);						// reinterpret as 32-bit integer
	glm_ivec4 const t2 = _mm_sll_epi32(t1, _mm_cvtsi32_si128(1));	// shift out sign bit
	return _mm_castsi128_ps(_mm_cmpeq_epi32(t2, _mm_set1_epi32(int(0xFF000000))));	// exponent is all 1s, fraction is 0
}

#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT