Blender V5.0
GPU_attribute_convert.hh
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2025 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
5#pragma once
6
#include <algorithm>
#include <limits>

#include "BLI_math_vector_types.hh"
#include "BLI_span.hh"
12
13namespace blender::gpu {
14
16 int x : 10;
17 int y : 10;
18 int z : 10;
19 int w : 2; /* 0 by default, can manually set to { -2, -1, 0, 1 } */
20
21 PackedNormal() = default;
22 PackedNormal(int _x, int _y, int _z, int _w = 0) : x(_x), y(_y), z(_z), w(_w) {}
23
24 /* Cast from int to float. */
25 operator float4()
26 {
27 return float4(x, y, z, w);
28 }
29};
30
/**
 * Quantize a normalized float (expected in [-1, 1]) to a signed 10-bit integer.
 * Out-of-range input is clamped to the representable range [-512, 511].
 */
inline int convert_normalized_f32_to_i10(float x)
{
  /* OpenGL ES packs in a different order as desktop GL but component conversion is the same.
   * Of the code here, only PackedNormal needs to change. */
  constexpr int signed_int_10_max = 511;
  constexpr int signed_int_10_min = -512;

  /* Truncation (not rounding) toward zero, matching a plain float-to-int cast. */
  const int quantized = int(x * signed_int_10_max);
  return std::clamp(quantized, signed_int_10_min, signed_int_10_max);
}
41
/* Convert a single float normal to the GPU vertex-format type `GPUType`.
 * Only the explicit specializations below are provided; there is no generic body. */
template<typename GPUType> inline GPUType convert_normal(const float3 &src);
43
44template<> inline PackedNormal convert_normal(const float3 &src)
45{
46 return {
50 0,
51 };
52}
53
54template<> inline short4 convert_normal(const float3 &src)
55{
56 return short4(src * float(std::numeric_limits<short>::max()), 0);
57}
58
/* Bulk variant: convert every normal in `src` into `dst`.
 * Declared only; the definition lives outside this header. */
template<typename GPUType> void convert_normals(Span<float3> src, MutableSpan<GPUType> dst);
60
61} // namespace blender::gpu
GPUType
GPUType convert_normal(const float3 &src)
void convert_normals(Span< float3 > src, MutableSpan< GPUType > dst)
int convert_normalized_f32_to_i10(float x)
blender::VecBase< int16_t, 4 > short4
VecBase< float, 4 > float4
VecBase< float, 3 > float3
PackedNormal(int _x, int _y, int _z, int _w=0)