Blender V4.3 — Doxygen source listing of draw_cache_impl_pointcloud.cc (point-cloud draw-cache implementation).
1/* SPDX-FileCopyrightText: 2017 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
11#include <cstring>
12
13#include "MEM_guardedalloc.h"
14
15#include "BLI_listbase.h"
16#include "BLI_math_base.h"
17#include "BLI_math_color.hh"
18#include "BLI_math_vector.h"
19#include "BLI_task.hh"
20#include "BLI_utildefines.h"
21
22#include "DNA_object_types.h"
24
25#include "BKE_attribute.hh"
26#include "BKE_pointcloud.hh"
27
28#include "GPU_batch.hh"
29#include "GPU_material.hh"
30
31#include "draw_attributes.hh"
32#include "draw_cache_impl.hh"
33#include "draw_cache_inline.hh"
34#include "draw_pointcloud_private.hh" /* own include */
35
36namespace blender::draw {
37
38/* -------------------------------------------------------------------- */
/* NOTE(review): fragment of the evaluated draw-cache struct. Its declaration
 * line and several members (index buffer, position VBO, attribute VBOs,
 * mat_len, timestamps) were lost in extraction -- compare against the full
 * source before editing. */
43 /* Dot primitive types. */
44 gpu::Batch *dots;
45 /* Triangle primitive types. */
46 gpu::Batch *surface;
47 gpu::Batch **surface_per_mat;
48
49 /* Triangles indices to draw the points. */
51
52 /* Position and radius. */
54 /* Active attribute in 3D view. */
56 /* Requested attributes */
58
66
73
75};
76
/* NOTE(review): fragment of PointCloudBatchCache. The eval-cache member and
 * the dirty flag / material count fields referenced elsewhere in this file
 * (cache->eval_cache, cache->is_dirty) were lost in extraction. */
79
80 /* settings to determine if cache is invalid */
82
/* Guards concurrent merges into the attribute request sets (see
 * drw_attributes_merge call sites below). */
88 std::mutex render_mutex;
89};
90
/* Return the PointCloudBatchCache stored on the point-cloud, or null when it
 * has not been created yet. NOTE(review): signature line lost in extraction. */
92{
93 return static_cast<PointCloudBatchCache *>(pointcloud.batch_cache);
94}
95
/* True when the existing batch cache can be reused as-is: it must exist, its
 * material count must still match the object's, and it must not have been
 * tagged dirty. NOTE(review): the line fetching `cache`
 * (pointcloud_batch_cache_get) was lost in extraction. */
97{
99
100 if (cache == nullptr) {
101 return false;
102 }
103 if (cache->eval_cache.mat_len != DRW_pointcloud_material_count_get(&pointcloud)) {
104 return false;
105 }
106 return cache->is_dirty == false;
107}
108
/* Create the batch cache if missing, otherwise reset only its evaluated data,
 * then (re)allocate the per-material surface batch array and clear the dirty
 * flag. NOTE(review): the lines fetching `cache` and assigning
 * `eval_cache.mat_len` were lost in extraction. */
110{
112
113 if (!cache) {
114 cache = MEM_new<PointCloudBatchCache>(__func__);
115 pointcloud.batch_cache = cache;
116 }
117 else {
118 cache->eval_cache = {};
119 }
120
/* One slot per material; zero-initialized so batches are requested lazily. */
122 cache->eval_cache.surface_per_mat = static_cast<gpu::Batch **>(
123 MEM_callocN(sizeof(gpu::Batch *) * cache->eval_cache.mat_len, __func__));
124
125 cache->is_dirty = false;
126}
127
/* Tag the cache dirty so it is rebuilt on the next validate pass. Asserts on
 * unknown modes. NOTE(review): the `case` label (presumably
 * BKE_POINTCLOUD_BATCH_DIRTY_ALL) was lost in extraction. */
129{
131 if (cache == nullptr) {
132 return;
133 }
134 switch (mode) {
136 cache->is_dirty = true;
137 break;
138 default:
139 BLI_assert(0);
140 }
141}
142
151
/* Discard all GPU resources held by the cache (batches, VBOs, index buffers).
 * NOTE(review): the individual GPU_*_DISCARD_SAFE / MEM_SAFE_FREE lines were
 * lost in extraction -- only the per-material loop shell remains visible. */
153{
155 if (!cache) {
156 return;
157 }
158
164
165 if (cache->eval_cache.surface_per_mat) {
166 for (int i = 0; i < cache->eval_cache.mat_len; i++) {
168 }
169 }
171
173}
174
/* Public entry point: rebuild the cache when it is missing, tagged dirty, or
 * its material count no longer matches (see pointcloud_batch_cache_valid). */
176{
177 if (!pointcloud_batch_cache_valid(*pointcloud)) {
178 pointcloud_batch_cache_clear(*pointcloud);
179 pointcloud_batch_cache_init(*pointcloud);
180 }
181}
182
/* Free the whole batch cache: discard GPU data first, then delete the cache
 * object itself and clear the owning pointer on the point-cloud. */
184{
185 pointcloud_batch_cache_clear(*pointcloud);
186 MEM_delete(static_cast<PointCloudBatchCache *>(pointcloud->batch_cache));
187 pointcloud->batch_cache = nullptr;
188}
189
/* Garbage-collect attribute data that has not been requested recently:
 * refresh `last_attr_matching_time` while the cached attributes still match
 * (the `if` condition on the refresh was lost in extraction -- presumably a
 * drw_attributes_overlap() check; verify against the full source), then
 * discard once more than U.vbotimeout has elapsed since the last match. */
191{
193 if (!cache) {
194 return;
195 }
196
197 bool do_discard = false;
198
200 {
201 cache->eval_cache.last_attr_matching_time = ctime;
202 }
203
/* Timeout exceeded: the requested attributes diverged for too long. */
204 if (ctime - cache->eval_cache.last_attr_matching_time > U.vbotimeout) {
205 do_discard = true;
206 }
207
209
210 if (do_discard) {
212 }
213}
214
217/* -------------------------------------------------------------------- */
221static const uint half_octahedron_tris[4][3] = {
222 {0, 1, 2},
223 {0, 2, 3},
224 {0, 3, 4},
225 {0, 4, 1},
226};
227
/* Build the triangle index buffer that expands each point into a half
 * octahedron. A vertex id packs the point index in the bits above bit 3 and
 * the shape-corner index (0..4) in the low 3 bits, so the vertex shader can
 * recover both without instancing or a separate point-index buffer.
 * NOTE(review): the line obtaining `data` (GPU_indexbuf_get_data() cast to
 * uint3) was lost in extraction. `totpoint << 3` would overflow uint32 above
 * 2^29 points -- presumably out of range in practice; verify upstream. */
228static void pointcloud_extract_indices(const PointCloud &pointcloud, PointCloudBatchCache &cache)
229{
230 /* Overlap shape and point indices to avoid both having to store the indices into a separate
231 * buffer and avoid rendering points as instances. */
232 uint32_t vertid_max = pointcloud.totpoint << 3;
233 constexpr uint32_t tri_count_per_point = ARRAY_SIZE(half_octahedron_tris);
234 uint32_t primitive_len = pointcloud.totpoint * tri_count_per_point;
235
236 GPUIndexBufBuilder builder;
237 GPU_indexbuf_init(&builder, GPU_PRIM_TRIS, primitive_len, vertid_max);
239
240 /* TODO(fclem): Could be build on GPU or not be built at all. */
241 threading::parallel_for(IndexRange(pointcloud.totpoint), 1024, [&](const IndexRange range) {
242 for (int p : range) {
243 for (int i : IndexRange(tri_count_per_point)) {
244 data[p * tri_count_per_point + i] = uint3(half_octahedron_tris[i]) | (p << 3);
245 }
246 }
247 });
248
250 &builder, 0, primitive_len * 3, false, cache.eval_cache.geom_indices);
251}
252
/* Fill the packed position+radius VBO: xyz comes from the point positions,
 * w is the "radius" attribute (times 100, see the in-code TODO) or 1.0 when
 * the point-cloud has no radius attribute. NOTE(review): the signature line,
 * the GPU_vertformat_attr_add() call and the vertbuf-init lines were lost in
 * extraction. */
255{
256 const bke::AttributeAccessor attributes = pointcloud.attributes();
257 const Span<float3> positions = pointcloud.positions();
258 const VArray<float> radii = *attributes.lookup<float>("radius");
/* Static format: built once and reused for every point-cloud. */
259 static GPUVertFormat format = {0};
260 if (format.attr_len == 0) {
262 }
263
266
267 GPU_vertbuf_data_alloc(*cache.eval_cache.pos_rad, positions.size());
268 MutableSpan<float4> vbo_data = cache.eval_cache.pos_rad->data<float4>();
269 if (radii) {
270 const VArraySpan<float> radii_span(std::move(radii));
271 threading::parallel_for(vbo_data.index_range(), 4096, [&](IndexRange range) {
272 for (const int i : range) {
273 vbo_data[i].x = positions[i].x;
274 vbo_data[i].y = positions[i].y;
275 vbo_data[i].z = positions[i].z;
276 /* TODO(fclem): remove multiplication. Here only for keeping the size correct for now. */
277 vbo_data[i].w = radii_span[i] * 100.0f;
278 }
279 });
280 }
281 else {
/* No radius attribute: fall back to a unit radius for every point. */
282 threading::parallel_for(vbo_data.index_range(), 4096, [&](IndexRange range) {
283 for (const int i : range) {
284 vbo_data[i].x = positions[i].x;
285 vbo_data[i].y = positions[i].y;
286 vbo_data[i].z = positions[i].z;
287 vbo_data[i].w = 1.0f;
288 }
289 });
290 }
291}
292
/* Materialize one requested point attribute into its VBO as float4 colors;
 * missing attributes fall back to opaque black via lookup_or_default().
 * NOTE(review): the `PointCloudBatchCache &cache` parameter line, the
 * GPU_vertformat_attr_add() call and the `usage_flag` declaration were lost
 * in extraction. */
293static void pointcloud_extract_attribute(const PointCloud &pointcloud,
295 const DRW_AttributeRequest &request,
296 int index)
297{
298 gpu::VertBuf &attr_buf = *cache.eval_cache.attributes_buf[index];
299
300 const bke::AttributeAccessor attributes = pointcloud.attributes();
301
302 /* TODO(@kevindietrich): float4 is used for scalar attributes as the implicit conversion done
303 * by OpenGL to vec4 for a scalar `s` will produce a `vec4(s, 0, 0, 1)`. However, following
304 * the Blender convention, it should be `vec4(s, s, s, 1)`. This could be resolved using a
305 * similar texture state swizzle to map the attribute correctly as for volume attributes, so we
306 * can control the conversion ourselves. */
307 bke::AttributeReader<ColorGeometry4f> attribute = attributes.lookup_or_default<ColorGeometry4f>(
308 request.attribute_name, request.domain, {0.0f, 0.0f, 0.0f, 1.0f});
309
310 static GPUVertFormat format = {0};
311 if (format.attr_len == 0) {
313 }
315 GPU_vertbuf_init_with_format_ex(attr_buf, format, usage_flag);
316 GPU_vertbuf_data_alloc(attr_buf, pointcloud.totpoint);
317
/* Evaluate the (possibly virtual) attribute array directly into the VBO. */
318 attribute.varray.materialize(attr_buf.data<ColorGeometry4f>());
319}
320
323/* -------------------------------------------------------------------- */
333
/* Collect the attributes referenced by the given GPU materials, merge them
 * into the cache's request sets (freeing all attribute buffers first when a
 * genuinely new attribute appeared), and return the per-material surface
 * batch array. NOTE(review): the function's first signature line, the
 * `cache` fetch and the per-buffer discard line inside the loop were lost in
 * extraction. */
335 GPUMaterial **gpu_materials,
336 int mat_len)
337{
339 DRW_Attributes attrs_needed;
340 drw_attributes_clear(&attrs_needed);
341
342 for (GPUMaterial *gpu_material : Span<GPUMaterial *>(gpu_materials, mat_len)) {
343 ListBase gpu_attrs = GPU_material_attributes(gpu_material);
344 LISTBASE_FOREACH (GPUMaterialAttribute *, gpu_attr, &gpu_attrs) {
345 const char *name = gpu_attr->name;
346
347 int layer_index;
348 eCustomDataType type;
349 bke::AttrDomain domain = bke::AttrDomain::Point;
/* Skip attributes the point-cloud does not actually have. */
350 if (!drw_custom_data_match_attribute(pointcloud->pdata, name, &layer_index, &type)) {
351 continue;
352 }
353
354 drw_attributes_add_request(&attrs_needed, name, type, layer_index, domain);
355 }
356 }
357
358 if (!drw_attributes_overlap(&cache->eval_cache.attr_used, &attrs_needed)) {
359 /* Some new attributes have been added, free all and start over. */
360 for (const int i : IndexRange(GPU_MAX_ATTR)) {
362 }
363 drw_attributes_merge(&cache->eval_cache.attr_used, &attrs_needed, cache->render_mutex);
364 }
/* Track everything ever requested, for the timeout-based GC in free_old. */
365 drw_attributes_merge(&cache->eval_cache.attr_used_over_time, &attrs_needed, cache->render_mutex);
366
368 return cache->eval_cache.surface_per_mat;
369}
370
/* Request (and lazily create) the surface batch for this point-cloud.
 * NOTE(review): the `cache` fetch line was lost in extraction. */
371gpu::Batch *pointcloud_surface_get(PointCloud *pointcloud)
372{
374 return DRW_batch_request(&cache->eval_cache.surface);
375}
376
379/* -------------------------------------------------------------------- */
/* Request the point-primitive ("dots") batch for the object's point-cloud
 * data. NOTE(review): signature and `cache` fetch lines lost in extraction. */
384{
385 PointCloud &pointcloud = *static_cast<PointCloud *>(ob->data);
387 return DRW_batch_request(&cache->eval_cache.dots);
388}
389
/* Return the packed position+radius VBO for the object's point-cloud data.
 * NOTE(review): signature line lost in extraction. */
391{
392 PointCloud &pointcloud = *static_cast<PointCloud *>(ob->data);
393 return pointcloud_position_and_radius_get(&pointcloud);
394}
395
/* Look up (registering a request for) the VBO of a named point attribute.
 * Returns null when `name` is not a point-domain attribute of this
 * point-cloud. NOTE(review): the signature and `cache` fetch lines were lost
 * in extraction. */
397{
399
400 int layer_index;
401 eCustomDataType type;
402 bke::AttrDomain domain = bke::AttrDomain::Point;
/* Only register a request when the attribute actually exists. */
403 if (drw_custom_data_match_attribute(pointcloud->pdata, name, &layer_index, &type)) {
404 DRW_Attributes attributes{};
405 drw_attributes_add_request(&attributes, name, type, layer_index, domain);
406 drw_attributes_merge(&cache.eval_cache.attr_used, &attributes, cache.render_mutex);
407 }
408
/* Find the slot the (possibly pre-existing) request occupies. */
409 int request_i = -1;
410 for (const int i : IndexRange(cache.eval_cache.attr_used.num_requests)) {
411 if (STREQ(cache.eval_cache.attr_used.requests[i].attribute_name, name)) {
412 request_i = i;
413 break;
414 }
415 }
416 if (request_i == -1) {
417 return nullptr;
418 }
419 return &cache.eval_cache.attributes_buf[request_i];
420}
421
/* Point-clouds are drawn with at least one material slot even when
 * `totcol` is zero. NOTE(review): signature line lost in extraction. */
423{
424 return max_ii(1, pointcloud->totcol);
425}
426
/* Fill every batch/VBO/IBO that was requested for this object's point-cloud:
 * wire VBO/IBO requests into the dots and surface batches, then extract the
 * requested attributes, the shared index buffer and the position+radius VBO.
 * NOTE(review): most of the DRW_*_requested() guard lines and batch/VBO
 * request calls were lost in extraction (only closing braces and loop shells
 * remain) -- verify against the full source before editing. */
428{
429 PointCloud *pointcloud = static_cast<PointCloud *>(ob->data);
431
434 }
435
439 }
/* One surface batch per material slot. */
440 for (int i = 0; i < cache.eval_cache.mat_len; i++) {
442 /* TODO(fclem): Per material ranges. */
444 }
445 }
/* Extract each attribute VBO that something actually requested. */
446 for (int j = 0; j < cache.eval_cache.attr_used.num_requests; j++) {
447 DRW_vbo_request(nullptr, &cache.eval_cache.attributes_buf[j]);
448
450 pointcloud_extract_attribute(*pointcloud, cache, cache.eval_cache.attr_used.requests[j], j);
451 }
452 }
453
455 pointcloud_extract_indices(*pointcloud, cache);
456 }
457
459 pointcloud_extract_position_and_radius(*pointcloud, cache);
460 }
461}
462
465} // namespace blender::draw
General operations for point clouds.
@ BKE_POINTCLOUD_BATCH_DIRTY_ALL
#define BLI_assert(a)
Definition BLI_assert.h:50
#define LISTBASE_FOREACH(type, var, list)
MINLINE int max_ii(int a, int b)
unsigned int uint
#define ARRAY_SIZE(arr)
#define STREQ(a, b)
Object is a sort of wrapper for general info.
#define GPU_BATCH_DISCARD_SAFE(batch)
Definition GPU_batch.hh:205
void GPU_indexbuf_build_in_place_ex(GPUIndexBufBuilder *builder, uint index_min, uint index_max, bool uses_restart_indices, blender::gpu::IndexBuf *elem)
blender::MutableSpan< uint32_t > GPU_indexbuf_get_data(GPUIndexBufBuilder *)
#define GPU_INDEXBUF_DISCARD_SAFE(elem)
void GPU_indexbuf_init(GPUIndexBufBuilder *, GPUPrimType, uint prim_len, uint vertex_len)
ListBase GPU_material_attributes(const GPUMaterial *material)
@ GPU_PRIM_POINTS
@ GPU_PRIM_TRIS
#define GPU_MAX_ATTR
Definition GPU_shader.hh:29
#define GPU_VERTBUF_DISCARD_SAFE(verts)
void GPU_vertbuf_data_alloc(blender::gpu::VertBuf &verts, uint v_len)
void GPU_vertbuf_init_with_format_ex(blender::gpu::VertBuf &verts, const GPUVertFormat &format, GPUUsageType)
@ GPU_USAGE_STATIC
@ GPU_USAGE_FLAG_BUFFER_TEXTURE_ONLY
@ GPU_FETCH_FLOAT
uint GPU_vertformat_attr_add(GPUVertFormat *, const char *name, GPUVertCompType, uint comp_len, GPUVertFetchMode)
@ GPU_COMP_F32
Read Guarded memory(de)allocation.
#define MEM_SAFE_FREE(v)
unsigned int U
Definition btGjkEpa3.h:78
AttributeSet attributes
constexpr MutableSpan< NewT > cast() const
Definition BLI_span.hh:736
constexpr IndexRange index_range() const
Definition BLI_span.hh:671
MutableSpan< T > data()
Utilities for rendering attributes.
bool DRW_batch_requested(blender::gpu::Batch *batch, GPUPrimType prim_type)
blender::gpu::Batch * DRW_batch_request(blender::gpu::Batch **batch)
void DRW_vbo_request(blender::gpu::Batch *batch, blender::gpu::VertBuf **vbo)
bool DRW_vbo_requested(blender::gpu::VertBuf *vbo)
void DRW_ibo_request(blender::gpu::Batch *batch, blender::gpu::IndexBuf **ibo)
bool DRW_ibo_requested(blender::gpu::IndexBuf *ibo)
format
void *(* MEM_callocN)(size_t len, const char *str)
Definition mallocn.cc:42
static PointCloudBatchCache * pointcloud_batch_cache_get(PointCloud &pointcloud)
gpu::Batch * pointcloud_surface_get(PointCloud *pointcloud)
void drw_attributes_clear(DRW_Attributes *attributes)
static void pointcloud_extract_position_and_radius(const PointCloud &pointcloud, PointCloudBatchCache &cache)
static void pointcloud_discard_attributes(PointCloudBatchCache &cache)
void DRW_pointcloud_batch_cache_dirty_tag(PointCloud *pointcloud, int mode)
bool drw_custom_data_match_attribute(const CustomData &custom_data, const char *name, int *r_layer_index, eCustomDataType *r_type)
static void pointcloud_batch_cache_init(PointCloud &pointcloud)
gpu::Batch ** pointcloud_surface_shaded_get(PointCloud *pointcloud, GPUMaterial **gpu_materials, int mat_len)
blender::gpu::Batch * DRW_pointcloud_batch_cache_get_dots(Object *ob)
static void pointcloud_batch_cache_clear(PointCloud &pointcloud)
static void pointcloud_extract_indices(const PointCloud &pointcloud, PointCloudBatchCache &cache)
void DRW_pointcloud_batch_cache_free_old(PointCloud *pointcloud, int ctime)
static const uint half_octahedron_tris[4][3]
void DRW_pointcloud_batch_cache_create_requested(Object *ob)
void DRW_pointcloud_batch_cache_validate(PointCloud *pointcloud)
int DRW_pointcloud_material_count_get(const PointCloud *pointcloud)
void drw_attributes_merge(DRW_Attributes *dst, const DRW_Attributes *src, std::mutex &render_mutex)
static bool pointcloud_batch_cache_valid(PointCloud &pointcloud)
gpu::VertBuf ** DRW_pointcloud_evaluated_attribute(PointCloud *pointcloud, const char *name)
static void pointcloud_extract_attribute(const PointCloud &pointcloud, PointCloudBatchCache &cache, const DRW_AttributeRequest &request, int index)
void DRW_pointcloud_batch_cache_free(PointCloud *pointcloud)
void drw_attributes_add_request(DRW_Attributes *attrs, const char *name, const eCustomDataType type, const int layer_index, const blender::bke::AttrDomain domain)
gpu::VertBuf * pointcloud_position_and_radius_get(PointCloud *pointcloud)
gpu::VertBuf * DRW_pointcloud_position_and_radius_buffer_get(Object *ob)
bool drw_attributes_overlap(const DRW_Attributes *a, const DRW_Attributes *b)
void parallel_for(const IndexRange range, const int64_t grain_size, const Function &function, const TaskSizeHints &size_hints=detail::TaskSizeHints_Static(1))
Definition BLI_task.hh:95
unsigned int uint32_t
Definition stdint.h:80
struct CustomData pdata
DRW_AttributeRequest requests[GPU_MAX_ATTR]