Blender V4.3
math_half.cc
/* SPDX-FileCopyrightText: 2024 Blender Authors
 *
 * SPDX-License-Identifier: GPL-2.0-or-later */

/** \file
 * \ingroup bli
 */

#include "BLI_math_half.hh"

#if defined(__ARM_NEON)
/* Use ARM FP16 conversion instructions. */
#  define USE_HARDWARE_FP16_NEON
#  include <arm_neon.h>
#endif
#if (defined(__x86_64__) || defined(_M_X64))
/* All AVX2 CPUs have F16C instructions, so use those if we're compiling for AVX2.
 * Otherwise use "manual" SSE2 4x-wide conversion. */
#  if defined(__AVX2__)
#    define USE_HARDWARE_FP16_F16C
#  else
#    define USE_SSE2_FP16
#  endif
#  include <immintrin.h>
#endif

uint16_t blender::math::float_to_half(float v)
{
#if defined(USE_HARDWARE_FP16_NEON)
  float16x4_t h4 = vcvt_f16_f32(vdupq_n_f32(v));
  float16_t h = vget_lane_f16(h4, 0);
  return *(uint16_t *)&h;
#else
  /* Based on float_to_half_fast3_rtne from public domain https://gist.github.com/rygorous/2156668
   * see corresponding blog post https://fgiesen.wordpress.com/2012/03/28/half-to-float-done-quic/
   */
  union FP32 {
    uint32_t u;
    float f;
  };
  FP32 f;
  f.f = v;
  FP32 f32infty = {255 << 23};
  FP32 f16max = {(127 + 16) << 23};
  FP32 denorm_magic = {((127 - 15) + (23 - 10) + 1) << 23};
  uint32_t sign_mask = 0x80000000u;
  uint16_t o = 0;

  uint32_t sign = f.u & sign_mask;
  f.u ^= sign;

  /*
   * NOTE: all the integer compares in this function can be safely
   * compiled into signed compares, since all operands are below
   * 0x80000000. This is important if you want fast, straight SSE2 code
   * (since there's no unsigned PCMPGTD).
   */
  if (f.u >= f16max.u) {
    /* Result is Inf or NaN (all exponent bits set). */
    o = (f.u > f32infty.u) ? 0x7e00 : 0x7c00; /* NaN->qNaN and Inf->Inf */
  }
  else {
    /* (De)normalized number or zero. */
    if (f.u < (113 << 23)) {
      /* Resulting FP16 is subnormal or zero.
       * Use a magic value to align our 10 mantissa bits at the bottom of
       * the float. As long as FP addition is round-to-nearest-even, this
       * just works. */
      f.f += denorm_magic.f;

      /* And one integer subtract of the bias later, we have our final float! */
      o = f.u - denorm_magic.u;
    }
    else {
      uint32_t mant_odd = (f.u >> 13) & 1; /* resulting mantissa is odd */

      /* Update exponent, rounding bias part 1. */
      f.u += (uint32_t(15 - 127) << 23) + 0xfff;
      /* Rounding bias part 2. */
      f.u += mant_odd;
      /* Take the bits! */
      o = f.u >> 13;
    }
  }

  o |= sign >> 16;
  return o;
#endif
}
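
/* Illustrative example (editorial addition, not part of the original file):
 * a few conversions this function performs, with expected bit patterns that
 * follow from the IEEE 754 binary16 layout (1 sign, 5 exponent, 10 mantissa
 * bits). Kept under `#if 0` so it does not affect the translation unit. */
#if 0
#  include <cassert>

static void example_float_to_half()
{
  using blender::math::float_to_half;
  assert(float_to_half(0.0f) == 0x0000);     /* +0 */
  assert(float_to_half(1.0f) == 0x3C00);     /* exponent 15, mantissa 0 */
  assert(float_to_half(-2.0f) == 0xC000);    /* sign bit set */
  assert(float_to_half(65504.0f) == 0x7BFF); /* largest finite FP16 */
  assert(float_to_half(65536.0f) == 0x7C00); /* >= 2^16 overflows to +Inf */
}
#endif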

float blender::math::half_to_float(uint16_t v)
{
#if defined(USE_HARDWARE_FP16_NEON)
  uint16x4_t v4 = vdup_n_u16(v);
  float16x4_t h4 = vreinterpret_f16_u16(v4);
  float32x4_t f4 = vcvt_f32_f16(h4);
  return vgetq_lane_f32(f4, 0);
#else
  /* Based on half_to_float_fast4 from public domain https://gist.github.com/rygorous/2144712
   * see corresponding blog post https://fgiesen.wordpress.com/2012/03/28/half-to-float-done-quic/
   */
  union FP32 {
    uint32_t u;
    float f;
  };
  constexpr FP32 magic = {113 << 23};
  constexpr uint32_t shifted_exp = 0x7c00 << 13; /* exponent mask after shift */
  FP32 o;

  o.u = (v & 0x7fff) << 13;         /* exponent/mantissa bits */
  uint32_t exp = shifted_exp & o.u; /* just the exponent */
  o.u += (127 - 15) << 23;          /* exponent adjust */

  /* Handle exponent special cases. */
  if (exp == shifted_exp) {  /* Inf/NaN? */
    o.u += (128 - 16) << 23; /* extra exp adjust */
  }
  else if (exp == 0) { /* Zero/Denormal? */
    o.u += 1 << 23;    /* extra exp adjust */
    o.f -= magic.f;    /* renormalize */
  }

  o.u |= (v & 0x8000) << 16; /* sign bit */
  return o.f;
#endif
}
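
/* Illustrative example (editorial addition, not part of the original file):
 * decodings covering the normal, subnormal and Inf/NaN branches above.
 * Kept under `#if 0` so it does not affect the translation unit. */
#if 0
#  include <cassert>
#  include <cmath>

static void example_half_to_float()
{
  using blender::math::half_to_float;
  assert(half_to_float(0x3C00) == 1.0f);     /* normal number */
  assert(half_to_float(0x0001) == 0x1p-24f); /* smallest FP16 subnormal */
  assert(std::isinf(half_to_float(0x7C00))); /* +Inf */
  assert(std::isnan(half_to_float(0x7E00))); /* quiet NaN */
}
#endif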

#ifdef USE_SSE2_FP16
/* 4x wide float<->half conversion using SSE2 code, based on
 * https://gist.github.com/rygorous/4d9e9e88cab13c703773dc767a23575f */

/* Float->half conversion with round-to-nearest-even, SSE2+.
 * Leaves half-floats in 32-bit lanes (sign extended). */
static inline __m128i F32_to_F16_4x(const __m128 &f)
{
  const __m128 mask_sign = _mm_set1_ps(-0.0f);
  /* All FP32 values >= this round to +inf. */
  const __m128i c_f16max = _mm_set1_epi32((127 + 16) << 23);
  const __m128i c_nanbit = _mm_set1_epi32(0x200);
  const __m128i c_nanlobits = _mm_set1_epi32(0x1ff);
  const __m128i c_infty_as_fp16 = _mm_set1_epi32(0x7c00);
  /* Smallest FP32 that yields a normalized FP16. */
  const __m128i c_min_normal = _mm_set1_epi32((127 - 14) << 23);
  const __m128i c_subnorm_magic = _mm_set1_epi32(((127 - 15) + (23 - 10) + 1) << 23);
  /* Adjust exponent and add mantissa rounding. */
  const __m128i c_normal_bias = _mm_set1_epi32(0xfff - ((127 - 15) << 23));

  __m128 justsign = _mm_and_ps(f, mask_sign);
  __m128 absf = _mm_andnot_ps(mask_sign, f); /* f & ~mask_sign */
  /* The cast is "free" (extra bypass latency, but no throughput hit). */
  __m128i absf_int = _mm_castps_si128(absf);
  __m128 b_isnan = _mm_cmpunord_ps(absf, absf);              /* is this a NaN? */
  __m128i b_isregular = _mm_cmpgt_epi32(c_f16max, absf_int); /* (sub)normalized or special? */
  __m128i nan_payload = _mm_and_si128(_mm_srli_epi32(absf_int, 13),
                                      c_nanlobits);        /* payload bits for NaNs */
  __m128i nan_quiet = _mm_or_si128(nan_payload, c_nanbit); /* and set quiet bit */
  __m128i nanfinal = _mm_and_si128(_mm_castps_si128(b_isnan), nan_quiet);
  __m128i inf_or_nan = _mm_or_si128(nanfinal, c_infty_as_fp16); /* output for specials */

  /* Subnormal? */
  __m128i b_issub = _mm_cmpgt_epi32(c_min_normal, absf_int);

  /* "Result is subnormal" path. */
  __m128 subnorm1 = _mm_add_ps(
      absf, _mm_castsi128_ps(c_subnorm_magic)); /* magic value to round output mantissa */
  __m128i subnorm2 = _mm_sub_epi32(_mm_castps_si128(subnorm1),
                                   c_subnorm_magic); /* subtract out bias */

  /* "Result is normal" path. */
  __m128i mantoddbit = _mm_slli_epi32(absf_int, 31 - 13); /* shift bit 13 (mantissa LSB) to sign */
  __m128i mantodd = _mm_srai_epi32(mantoddbit, 31);       /* -1 if FP16 mantissa odd, else 0 */

  __m128i round1 = _mm_add_epi32(absf_int, c_normal_bias);
  /* If the mantissa LSB is odd, bias towards rounding up (RTNE). */
  __m128i round2 = _mm_sub_epi32(round1, mantodd);
  __m128i normal = _mm_srli_epi32(round2, 13); /* rounded result */

  /* Combine the two non-special cases. */
  __m128i nonspecial = _mm_or_si128(_mm_and_si128(subnorm2, b_issub),
                                    _mm_andnot_si128(b_issub, normal));

  /* Merge in the specials as well. */
  __m128i joined = _mm_or_si128(_mm_and_si128(nonspecial, b_isregular),
                                _mm_andnot_si128(b_isregular, inf_or_nan));

  __m128i sign_shift = _mm_srai_epi32(_mm_castps_si128(justsign), 16);
  __m128i result = _mm_or_si128(joined, sign_shift);

  return result;
}
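
/* Illustrative usage (editorial addition, not part of the original file):
 * the results sit sign-extended in 32-bit lanes, so a signed saturating pack
 * recovers the four 16-bit halves, mirroring float_to_half_array() below.
 * Kept under `#if 0` so it does not affect the translation unit. */
#if 0
static inline void example_f32_to_f16_4x(const float in[4], uint16_t out[4])
{
  __m128i h4 = F32_to_F16_4x(_mm_loadu_ps(in));
  /* Each lane already fits in int16 range once sign-extended, so
   * _mm_packs_epi32 never actually saturates here. */
  _mm_storeu_si64(out, _mm_packs_epi32(h4, h4));
}
#endif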

/* Half->float conversion, SSE2+. Input is in 32-bit lanes. */
static inline __m128 F16_to_F32_4x(const __m128i &h)
{
  const __m128i mask_nosign = _mm_set1_epi32(0x7fff);
  const __m128 magic_mult = _mm_castsi128_ps(_mm_set1_epi32((254 - 15) << 23));
  const __m128i was_infnan = _mm_set1_epi32(0x7bff);
  const __m128 exp_infnan = _mm_castsi128_ps(_mm_set1_epi32(255 << 23));
  const __m128i was_nan = _mm_set1_epi32(0x7c00);
  const __m128i nan_quiet = _mm_set1_epi32(1 << 22);

  __m128i expmant = _mm_and_si128(mask_nosign, h);
  __m128i justsign = _mm_xor_si128(h, expmant);
  __m128i shifted = _mm_slli_epi32(expmant, 13);
  __m128 scaled = _mm_mul_ps(_mm_castsi128_ps(shifted), magic_mult);
  __m128i b_wasinfnan = _mm_cmpgt_epi32(expmant, was_infnan);
  __m128i sign = _mm_slli_epi32(justsign, 16);
  __m128 infnanexp = _mm_and_ps(_mm_castsi128_ps(b_wasinfnan), exp_infnan);
  __m128i b_wasnan = _mm_cmpgt_epi32(expmant, was_nan);
  __m128i nanquiet = _mm_and_si128(b_wasnan, nan_quiet);
  __m128 infnandone = _mm_or_ps(infnanexp, _mm_castsi128_ps(nanquiet));

  __m128 sign_inf = _mm_or_ps(_mm_castsi128_ps(sign), infnandone);
  __m128 result = _mm_or_ps(scaled, sign_inf);

  return result;
}
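
/* Illustrative usage (editorial addition, not part of the original file):
 * input halves must first be widened into 32-bit lanes. Duplicating each
 * 16-bit value with an unpack is enough: the stray high copy is masked or
 * shifted out inside F16_to_F32_4x(). Kept under `#if 0`. */
#if 0
static inline void example_f16_to_f32_4x(const uint16_t in[4], float out[4])
{
  __m128i h4 = _mm_loadu_si64(in);
  _mm_storeu_ps(out, F16_to_F32_4x(_mm_unpacklo_epi16(h4, h4)));
}
#endif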

#endif // USE_SSE2_FP16

void blender::math::float_to_half_array(const float *src, uint16_t *dst, size_t length)
{
  size_t i = 0;
#if defined(USE_HARDWARE_FP16_F16C) /* 8-wide loop using AVX2 F16C */
  for (; i + 7 < length; i += 8) {
    __m256 src8 = _mm256_loadu_ps(src);
    __m128i h8 = _mm256_cvtps_ph(src8, _MM_FROUND_TO_NEAREST_INT);
    /* Plain SSE2 unaligned store; _mm_storeu_epi32 would require AVX-512VL,
     * which the AVX2-only guard above does not imply. */
    _mm_storeu_si128((__m128i *)dst, h8);
    src += 8;
    dst += 8;
  }
#elif defined(USE_SSE2_FP16) /* 4-wide loop using SSE2 */
  for (; i + 3 < length; i += 4) {
    __m128 src4 = _mm_loadu_ps(src);
    __m128i h4 = F32_to_F16_4x(src4);
    __m128i h4_packed = _mm_packs_epi32(h4, h4);
    _mm_storeu_si64(dst, h4_packed);
    src += 4;
    dst += 4;
  }
#elif defined(USE_HARDWARE_FP16_NEON) /* 4-wide loop using NEON */
  for (; i + 3 < length; i += 4) {
    float32x4_t src4 = vld1q_f32(src);
    float16x4_t h4 = vcvt_f16_f32(src4);
    vst1_f16((float16_t *)dst, h4);
    src += 4;
    dst += 4;
  }
#endif
  /* Use the scalar path to convert the tail of the array (or the whole array
   * if none of the wider paths above were used). */
  for (; i < length; i++) {
    *dst++ = float_to_half(*src++);
  }
}
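
/* Usage sketch (editorial addition, not part of the original file): lengths
 * that are not a multiple of the SIMD width fall through to the scalar tail
 * loop above. Kept under `#if 0` so it does not affect the translation unit. */
#if 0
static void example_float_to_half_array()
{
  const float src[6] = {0.0f, 1.0f, -2.5f, 3.5f, 65504.0f, 1.0e-7f};
  uint16_t dst[6];
  blender::math::float_to_half_array(src, dst, 6);
  /* dst[1] == 0x3C00 and dst[4] == 0x7BFF; 1.0e-7f is below the smallest
   * normal FP16 and rounds to the subnormal 0x0002 (about 1.19e-7). */
}
#endif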

void blender::math::half_to_float_array(const uint16_t *src, float *dst, size_t length)
{
  size_t i = 0;
#if defined(USE_HARDWARE_FP16_F16C) /* 8-wide loop using AVX2 F16C */
  for (; i + 7 < length; i += 8) {
    /* Plain SSE2 unaligned load; _mm_loadu_epi32 would require AVX-512VL,
     * which the AVX2-only guard above does not imply. */
    __m128i src8 = _mm_loadu_si128((const __m128i *)src);
    __m256 f8 = _mm256_cvtph_ps(src8);
    _mm256_storeu_ps(dst, f8);
    src += 8;
    dst += 8;
  }
#elif defined(USE_SSE2_FP16) /* 4-wide loop using SSE2 */
  for (; i + 3 < length; i += 4) {
    __m128i src4 = _mm_loadu_si64(src);
    src4 = _mm_unpacklo_epi16(src4, src4);
    __m128 f4 = F16_to_F32_4x(src4);
    _mm_storeu_ps(dst, f4);
    src += 4;
    dst += 4;
  }
#elif defined(USE_HARDWARE_FP16_NEON) /* 4-wide loop using NEON */
  for (; i + 3 < length; i += 4) {
    float16x4_t src4 = vld1_f16((const float16_t *)src);
    float32x4_t f4 = vcvt_f32_f16(src4);
    vst1q_f32(dst, f4);
    src += 4;
    dst += 4;
  }
#endif
  /* Use the scalar path to convert the tail of the array (or the whole array
   * if none of the wider paths above were used). */
  for (; i < length; i++) {
    *dst++ = half_to_float(*src++);
  }
}
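
/* Usage sketch (editorial addition, not part of the original file): any value
 * that is exactly representable as a half survives a round trip through this
 * pair of functions unchanged. Kept under `#if 0`. */
#if 0
static void example_half_to_float_array()
{
  const uint16_t halves[4] = {0x0000, 0x3C00, 0xC000, 0x7BFF};
  float floats[4];
  blender::math::half_to_float_array(halves, floats, 4);
  /* floats == {0.0f, 1.0f, -2.0f, 65504.0f}; feeding these back through
   * float_to_half_array() reproduces the original bit patterns. */
}
#endif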

#ifdef USE_HARDWARE_FP16_NEON
#  undef USE_HARDWARE_FP16_NEON
#endif