Blender V5.0
draw_cache_impl_pointcloud.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2017 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
10
11#include <cstring>
12
13#include "MEM_guardedalloc.h"
14
15#include "BLI_color.hh"
16#include "BLI_listbase.h"
17#include "BLI_task.hh"
18#include "BLI_utildefines.h"
19
20#include "DNA_object_types.h"
22#include "DNA_userdef_types.h"
23
24#include "BKE_attribute.hh"
25#include "BKE_material.hh"
26#include "BKE_pointcloud.hh"
27
28#include "GPU_batch.hh"
29#include "GPU_material.hh"
30
31#include "DRW_render.hh"
32
33#include "draw_attributes.hh"
34#include "draw_cache_impl.hh"
35#include "draw_cache_inline.hh"
36#include "draw_pointcloud_private.hh" /* own include */
37
38namespace blender::draw {
39
40/* -------------------------------------------------------------------- */
43
/* Evaluated point-cloud GPU cache: the batches plus the VBO/IBO resources they
 * reference. NOTE(review): the struct's opening line and several member
 * declarations (original lines 44, 52, 55, 57, 59, 61-76 — e.g. `geom_indices`,
 * `pos_rad`, `attributes_buf`, `attr_used`, `mat_len`,
 * `last_attr_matching_time`, all used elsewhere in this file) were dropped by
 * the extraction; confirm against the original source. */
45 /* Dot primitive types. */
46 gpu::Batch *dots;
47 /* Triangle primitive types. */
48 gpu::Batch *surface;
/* One surface batch per material slot; array of length `mat_len`,
 * allocated in pointcloud_batch_cache_init(). */
49 gpu::Batch **surface_per_mat;
50
51 /* Triangles indices to draw the points. */
53
54 /* Position and radius. */
56 /* Active attribute in 3D view. */
58 /* Requested attributes */
60
68
75
77};
78
/* Top-level batch cache stored on `PointCloud.batch_cache`.
 * NOTE(review): the struct's opening line and several members (presumably
 * `eval_cache`, `edit_selection_indices` and `is_dirty`, which the functions
 * below access) are missing from this extraction — verify against the
 * original file. */
81
/* Point batch for drawing the edit-mode selection overlay;
 * built lazily in DRW_pointcloud_batch_cache_get_edit_dots(). */
83 gpu::Batch *edit_selection = nullptr;
84
85 /* settings to determine if cache is invalid */
87};
88
/* Return the PointCloudBatchCache stored on the point cloud, or null when the
 * cache has not been created yet. NOTE(review): the signature line (original
 * line 89) was dropped by the extraction. */
90{
91 return static_cast<PointCloudBatchCache *>(pointcloud.batch_cache);
92}
93
/* True when a batch cache exists and is not flagged dirty.
 * NOTE(review): the signature line (94), the `cache` lookup (96) and the
 * condition guarding the second early-return (101) are missing from this
 * extraction — do not infer what that condition tests. */
95{
97
98 if (cache == nullptr) {
99 return false;
100 }
/* Missing condition line (101) above this early-return. */
102 return false;
103 }
104 return cache->is_dirty == false;
105}
106
/* Create the batch cache on first use, or reset an existing one, then allocate
 * the per-material surface-batch pointer array and clear the dirty flag.
 * NOTE(review): the signature line (107), the `cache` lookup (109) and the
 * statement initializing `eval_cache.mat_len` (121) are missing from this
 * extraction. */
108{
110
111 if (!cache) {
112 cache = MEM_new<PointCloudBatchCache>(__func__);
113 pointcloud.batch_cache = cache;
114 }
115 else {
/* Reuse the allocation: drop evaluated data and edit-mode batches. */
116 cache->eval_cache = {};
117 cache->edit_selection = nullptr;
118 cache->edit_selection_indices = nullptr;
119 }
120
/* Zero-initialized so each slot is lazily requested/filled later. */
122 cache->eval_cache.surface_per_mat = static_cast<gpu::Batch **>(
123 MEM_callocN(sizeof(gpu::Batch *) * cache->eval_cache.mat_len, __func__));
124
125 cache->is_dirty = false;
126}
127
/* Mark the point cloud's batch cache dirty so it gets rebuilt on next
 * validation; unknown modes are a programming error.
 * NOTE(review): the signature line (128), the `cache` lookup (130) and the
 * `case` label (135 — presumably BKE_POINTCLOUD_BATCH_DIRTY_ALL, which the
 * symbol index references) are missing from this extraction. */
129{
131 if (cache == nullptr) {
132 return;
133 }
134 switch (mode) {
136 cache->is_dirty = true;
137 break;
138 default:
139 BLI_assert(0);
140 }
141}
142
/* Free all per-attribute GPU buffers and forget which attributes were in use.
 * NOTE(review): the signature line (143) and the loop body statement (146 —
 * presumably discarding `attributes_buf[j]`, cf. GPU_VERTBUF_DISCARD_SAFE in
 * the symbol index) are missing from this extraction. */
144{
145 for (const int j : IndexRange(GPU_MAX_ATTR)) {
147 }
148
149 cache.eval_cache.attr_used.clear();
150}
151
177
/* Ensure the batch cache exists and is up to date: if it is missing or dirty,
 * clear it and re-initialize from scratch. NOTE(review): the signature line
 * (178) was dropped by the extraction. */
179{
180 if (!pointcloud_batch_cache_valid(*pointcloud)) {
181 pointcloud_batch_cache_clear(*pointcloud);
182 pointcloud_batch_cache_init(*pointcloud);
183 }
184}
185
187{
188 pointcloud_batch_cache_clear(*pointcloud);
189 MEM_delete(static_cast<PointCloudBatchCache *>(pointcloud->batch_cache));
190 pointcloud->batch_cache = nullptr;
191}
192
/* Garbage-collect attribute data that has not been requested within the user's
 * VBO timeout (`U.vbotimeout`). NOTE(review): the signature line (193), the
 * `cache` lookup (195), the condition guarding the timestamp refresh (202),
 * and lines 211/214 (the discard call inside `if (do_discard)`) are missing
 * from this extraction — the exact refresh condition and what gets discarded
 * (presumably via pointcloud_discard_attributes()) must be confirmed against
 * the original source. */
194{
196 if (!cache) {
197 return;
198 }
199
200 bool do_discard = false;
201
/* Missing condition line (202) above this refresh block. */
203 {
204 cache->eval_cache.last_attr_matching_time = ctime;
205 }
206
/* Stale beyond the user-configurable timeout: schedule a discard. */
207 if (ctime - cache->eval_cache.last_attr_matching_time > U.vbotimeout) {
208 do_discard = true;
209 }
210
212
213 if (do_discard) {
215 }
216}
217
219
220/* -------------------------------------------------------------------- */
223
/* Triangle topology of a half octahedron (square pyramid): a fan of 4
 * triangles sharing apex vertex 0 and winding around base-ring vertices 1-4.
 * pointcloud_extract_indices() stamps one copy of this shape per point,
 * offsetting the vertex ids by `point_index << 3`. */
224static const uint half_octahedron_tris[4][3] = {
225 {0, 1, 2},
226 {0, 2, 3},
227 {0, 3, 4},
228 {0, 4, 1},
229};
230
/* Build the triangle index buffer that expands every point into a half
 * octahedron. Each vertex id packs the point index in the high bits
 * (`p << 3`) and the shape-local corner index (0-7 range) in the low 3 bits,
 * so no instancing or separate per-point index buffer is needed.
 * NOTE(review): this extraction is missing the continuation of the
 * `max_totpoint` expression (244), the `data` span declaration (252 —
 * presumably from GPU_indexbuf_get_data()), and the start of the final
 * build call (263). */
231static void pointcloud_extract_indices(const PointCloud &pointcloud, PointCloudBatchCache &cache)
232{
233 /* Overlap shape and point indices to avoid both having to store the indices into a separate
234 * buffer and avoid rendering points as instances. */
235 uint32_t vertid_max = pointcloud.totpoint << 3;
236 constexpr uint32_t tri_count_per_point = ARRAY_SIZE(half_octahedron_tris);
237 uint32_t primitive_len = pointcloud.totpoint * tri_count_per_point;
238
239 GPUIndexBufBuilder builder;
240
241 /* Max allowed points to ensure the size of the index buffer will not overflow.
242 * NOTE: pointcloud.totpoint is an int we assume that we can safely use 31 bits. */
243 const uint32_t max_totpoint = INT32_MAX / uint32_t(tri_count_per_point *
/* Too many points: emit an empty index buffer instead of overflowing. */
245 if (pointcloud.totpoint > max_totpoint) {
246 GPU_indexbuf_init(&builder, GPU_PRIM_TRIS, 0, 0);
247 GPU_indexbuf_build_in_place_ex(&builder, 0, 0, false, cache.eval_cache.geom_indices);
248 return;
249 }
250
251 GPU_indexbuf_init(&builder, GPU_PRIM_TRIS, primitive_len, vertid_max);
253
254 /* TODO(fclem): Could be build on GPU or not be built at all. */
255 threading::parallel_for(IndexRange(pointcloud.totpoint), 1024, [&](const IndexRange range) {
256 for (int p : range) {
257 for (int i : IndexRange(tri_count_per_point)) {
/* Shape corner id in low 3 bits, point index in the high bits. */
258 data[p * tri_count_per_point + i] = uint3(half_octahedron_tris[i]) | (p << 3);
259 }
260 }
261 });
262
264 &builder, 0, primitive_len * 3, false, cache.eval_cache.geom_indices);
265}
266
/* Fill the `pos_rad` VBO: one float4 per point packing position.xyz in the
 * first three components and the per-point radius in `.w`. When the point
 * cloud has no "radius" attribute, a hard-coded 0.01f default radius is used.
 * NOTE(review): the signature lines (267-268), the format-lambda's local
 * declaration (274) and the VBO creation lines (280-281) are missing from
 * this extraction. */
269{
270 const bke::AttributeAccessor attributes = pointcloud.attributes();
271 const Span<float3> positions = pointcloud.positions();
/* Null VArray when the attribute does not exist — checked below. */
272 const VArray<float> radii = *attributes.lookup<float>("radius");
/* Format built once; "pos_rad" is an alias so shaders can use either name. */
273 static const GPUVertFormat format = [&]() {
275 GPU_vertformat_attr_add(&format, "pos", gpu::VertAttrType::SFLOAT_32_32_32_32);
276 GPU_vertformat_alias_add(&format, "pos_rad");
277 return format;
278 }();
279
282
283 GPU_vertbuf_data_alloc(*cache.eval_cache.pos_rad, positions.size());
284 MutableSpan<float4> vbo_data = cache.eval_cache.pos_rad->data<float4>();
285 if (radii) {
286 const VArraySpan<float> radii_span(std::move(radii));
287 threading::parallel_for(vbo_data.index_range(), 4096, [&](IndexRange range) {
288 for (const int i : range) {
289 vbo_data[i].x = positions[i].x;
290 vbo_data[i].y = positions[i].y;
291 vbo_data[i].z = positions[i].z;
292 vbo_data[i].w = radii_span[i];
293 }
294 });
295 }
296 else {
297 threading::parallel_for(vbo_data.index_range(), 4096, [&](IndexRange range) {
298 for (const int i : range) {
299 vbo_data[i].x = positions[i].x;
300 vbo_data[i].y = positions[i].y;
301 vbo_data[i].z = positions[i].z;
/* Fallback radius when no "radius" attribute is present. */
302 vbo_data[i].w = 0.01f;
303 }
304 });
305 }
306}
307
/* Materialize the point-domain attribute `name` into the float4 vertex buffer
 * at `attributes_buf[index]`, defaulting missing values to (0, 0, 0, 1).
 * NOTE(review): the second parameter line (309) is missing from this
 * extraction — per the symbol index it is `PointCloudBatchCache &cache`; the
 * usage-flag declaration (330) is also missing. */
308static void pointcloud_extract_attribute(const PointCloud &pointcloud,
310 const StringRef name,
311 int index)
312{
313 gpu::VertBuf &attr_buf = *cache.eval_cache.attributes_buf[index];
314
315 const bke::AttributeAccessor attributes = pointcloud.attributes();
316
317 /* TODO(@kevindietrich): float4 is used for scalar attributes as the implicit conversion done
318 * by OpenGL to float4 for a scalar `s` will produce a `float4(s, 0, 0, 1)`. However, following
319 * the Blender convention, it should be `float4(s, s, s, 1)`. This could be resolved using a
320 * similar texture state swizzle to map the attribute correctly as for volume attributes, so we
321 * can control the conversion ourselves. */
323 name, bke::AttrDomain::Point, {0.0f, 0.0f, 0.0f, 1.0f});
324
/* One shared float4 "attr" format for every generic attribute buffer. */
325 static const GPUVertFormat format = [&]() {
327 GPU_vertformat_attr_add(&format, "attr", gpu::VertAttrType::SFLOAT_32_32_32_32);
328 return format;
329 }();
331 GPU_vertbuf_init_with_format_ex(attr_buf, format, usage_flag);
332 GPU_vertbuf_data_alloc(attr_buf, pointcloud.totpoint);
333
/* Convert/copy the (possibly virtual) attribute array straight into the VBO. */
334 attribute.varray.materialize(attr_buf.data<ColorGeometry4f>());
335}
336
338
339/* -------------------------------------------------------------------- */
342
/* Return the per-material surface batches, after recording which attributes
 * the given GPU materials need. If a material requests an attribute not yet
 * tracked, all attribute buffers are rebuilt from scratch.
 * NOTE(review): the first signature line (350), the `cache` lookup (355), the
 * loop-body discard statement (372), and lines 376/378 are missing from this
 * extraction. */
349
351 GPUMaterial **gpu_materials,
352 int mat_len)
353{
354 const bke::AttributeAccessor attributes = pointcloud->attributes();
356 VectorSet<std::string> attrs_needed;
357
/* Collect every material attribute that actually exists on the point cloud. */
358 for (GPUMaterial *gpu_material : Span<GPUMaterial *>(gpu_materials, mat_len)) {
359 ListBase gpu_attrs = GPU_material_attributes(gpu_material);
360 LISTBASE_FOREACH (GPUMaterialAttribute *, gpu_attr, &gpu_attrs) {
361 const StringRef name = gpu_attr->name;
362 if (!attributes.contains(name)) {
363 continue;
364 }
365 drw_attributes_add_request(&attrs_needed, name);
366 }
367 }
368
369 if (!drw_attributes_overlap(&cache->eval_cache.attr_used, &attrs_needed)) {
370 /* Some new attributes have been added, free all and start over. */
371 for (const int i : IndexRange(GPU_MAX_ATTR)) {
373 }
374 drw_attributes_merge(&cache->eval_cache.attr_used, &attrs_needed);
375 }
377
379 return cache->eval_cache.surface_per_mat;
380}
381
/* Request (create-on-demand) the single surface batch for the point cloud.
 * NOTE(review): the `cache` lookup line (384) is missing from this
 * extraction. */
382gpu::Batch *pointcloud_surface_get(PointCloud *pointcloud)
383{
385 return DRW_batch_request(&cache->eval_cache.surface);
386}
387
389
390/* -------------------------------------------------------------------- */
393
400
406
/* Look up (and register a request for) the evaluated GPU buffer of a named
 * point attribute; returns null when the attribute does not exist or is not
 * among the tracked requests. NOTE(review): the signature lines (407 and
 * earlier), the `cache` lookup (410) and the request-add line (417) are
 * missing from this extraction. */
408{
409 const bke::AttributeAccessor attributes = pointcloud->attributes();
411
412 if (!attributes.contains(name)) {
413 return nullptr;
414 }
/* Scoped: merge this name into the set of requested attributes. */
415 {
416 VectorSet<std::string> requests{};
418 drw_attributes_merge(&cache.eval_cache.attr_used, &requests);
419 }
420
/* Find which slot the attribute landed in. */
421 int request_i = -1;
422 for (const int i : IndexRange(cache.eval_cache.attr_used.index_range())) {
423 if (cache.eval_cache.attr_used[i] == name) {
424 request_i = i;
425 break;
426 }
427 }
428 if (request_i == -1) {
429 return nullptr;
430 }
431 return &cache.eval_cache.attributes_buf[request_i];
432}
433
/* Write the indices selected by `mask` into a GPU_PRIM_POINTS index buffer.
 * NOTE(review): the signature line (434 — per the symbol index,
 * `static void index_mask_to_ibo(const IndexMask &mask, gpu::IndexBuf &ibo)`)
 * and the `data` span declaration (439, presumably from
 * GPU_indexbuf_get_data()) are missing from this extraction. */
435{
436 const int max_index = mask.min_array_size();
437 GPUIndexBufBuilder builder;
438 GPU_indexbuf_init(&builder, GPU_PRIM_POINTS, mask.size(), max_index);
440 mask.to_indices<int>(data.cast<int>());
441 GPU_indexbuf_build_in_place_ex(&builder, 0, max_index, false, &ibo);
442}
443
/* Build the edit-mode selection index buffer from the ".selection" bool
 * attribute (points default to selected when the attribute is absent); does
 * nothing when the selection is empty. NOTE(review): the final statement
 * (453) is missing from this extraction — presumably
 * `index_mask_to_ibo(mask, ibo);`, to be confirmed against the original. */
444static void build_edit_selection_indices(const PointCloud &pointcloud, gpu::IndexBuf &ibo)
445{
446 const VArray selection = *pointcloud.attributes().lookup_or_default<bool>(
447 ".selection", bke::AttrDomain::Point, true);
448 IndexMaskMemory memory;
449 const IndexMask mask = IndexMask::from_bools(selection, memory);
450 if (mask.is_empty()) {
451 return;
452 }
454}
455
/* Fill every batch/VBO/IBO that was requested this frame: per-material surface
 * batches, generic attribute buffers, and the geometry index buffer.
 * NOTE(review): this extraction dropped the signature and most of the control
 * flow (original lines 456, 458-459, 461-462, 465-467, 470-472, 475, 477,
 * 483, 488-489, 492, 496-497) — the conditions guarding each section (likely
 * DRW_batch_requested / DRW_vbo_requested / DRW_ibo_requested checks, per the
 * symbol index) cannot be reconstructed from what is visible; consult the
 * original source before modifying. */
457{
460
463 }
464
468 }
469
473 }
/* Hook shared VBO/IBO resources into each per-material surface batch. */
474 for (int i = 0; i < cache.eval_cache.mat_len; i++) {
476 /* TODO(fclem): Per material ranges. */
478 }
479 }
/* Request each tracked attribute buffer and extract it if newly requested. */
480 for (const int j : cache.eval_cache.attr_used.index_range()) {
481 DRW_vbo_request(nullptr, &cache.eval_cache.attributes_buf[j]);
482
484 pointcloud_extract_attribute(pointcloud, cache, cache.eval_cache.attr_used[j], j);
485 }
486 }
487
490 }
491
/* Rebuild the half-octahedron index buffer when it was requested. */
493 pointcloud_extract_indices(pointcloud, cache);
494 }
495
498 }
499}
500
/* Request (create-on-demand) the edit-mode selection dots batch.
 * NOTE(review): the signature line (501) and the `cache` lookup (503) are
 * missing from this extraction. */
502{
504 return DRW_batch_request(&cache->edit_selection);
505}
506
508
509} // namespace blender::draw
General operations, lookup, etc. for materials.
int BKE_id_material_used_with_fallback_eval(const ID &id)
General operations for point clouds.
@ BKE_POINTCLOUD_BATCH_DIRTY_ALL
#define BLI_assert(a)
Definition BLI_assert.h:46
#define LISTBASE_FOREACH(type, var, list)
unsigned int uint
#define ARRAY_SIZE(arr)
Object is a sort of wrapper for general info.
T & DRW_object_get_data_for_drawing(const Object &object)
#define GPU_BATCH_DISCARD_SAFE(batch)
Definition GPU_batch.hh:197
void GPU_indexbuf_build_in_place_ex(GPUIndexBufBuilder *builder, uint index_min, uint index_max, bool uses_restart_indices, blender::gpu::IndexBuf *elem)
int GPU_indexbuf_primitive_len(GPUPrimType prim_type)
blender::MutableSpan< uint32_t > GPU_indexbuf_get_data(GPUIndexBufBuilder *)
#define GPU_INDEXBUF_DISCARD_SAFE(elem)
void GPU_indexbuf_init(GPUIndexBufBuilder *, GPUPrimType, uint prim_len, uint vertex_len)
ListBase GPU_material_attributes(const GPUMaterial *material)
@ GPU_PRIM_POINTS
@ GPU_PRIM_TRIS
static constexpr int GPU_MAX_ATTR
Definition GPU_shader.hh:33
#define GPU_VERTBUF_DISCARD_SAFE(verts)
void GPU_vertbuf_data_alloc(blender::gpu::VertBuf &verts, uint v_len)
void GPU_vertbuf_init_with_format_ex(blender::gpu::VertBuf &verts, const GPUVertFormat &format, GPUUsageType)
@ GPU_USAGE_STATIC
@ GPU_USAGE_FLAG_BUFFER_TEXTURE_ONLY
void GPU_vertformat_alias_add(GPUVertFormat *, blender::StringRef alias)
uint GPU_vertformat_attr_add(GPUVertFormat *format, blender::StringRef name, blender::gpu::VertAttrType type)
Read Guarded memory(de)allocation.
#define MEM_SAFE_FREE(v)
#define U
BMesh const char void * data
AttributeSet attributes
static IndexMask from_bools(Span< bool > bools, IndexMaskMemory &memory)
IndexRange index_range() const
constexpr IndexRange index_range() const
Definition BLI_span.hh:670
constexpr int64_t size() const
Definition BLI_span.hh:252
GAttributeReader lookup_or_default(StringRef attribute_id, AttrDomain domain, AttrType data_type, const void *default_value=nullptr) const
bool contains(StringRef attribute_id) const
GAttributeReader lookup(const StringRef attribute_id) const
MutableSpan< T > data()
Utilities for rendering attributes.
bool DRW_batch_requested(blender::gpu::Batch *batch, GPUPrimType prim_type)
blender::gpu::Batch * DRW_batch_request(blender::gpu::Batch **batch)
void DRW_vbo_request(blender::gpu::Batch *batch, blender::gpu::VertBuf **vbo)
bool DRW_vbo_requested(blender::gpu::VertBuf *vbo)
void DRW_ibo_request(blender::gpu::Batch *batch, blender::gpu::IndexBuf **ibo)
bool DRW_ibo_requested(blender::gpu::IndexBuf *ibo)
#define INT32_MAX
format
void * MEM_callocN(size_t len, const char *str)
Definition mallocn.cc:118
ccl_device_inline float2 mask(const MaskType mask, const float2 a)
void drw_attributes_add_request(VectorSet< std::string > *attrs, const StringRef name)
static PointCloudBatchCache * pointcloud_batch_cache_get(PointCloud &pointcloud)
gpu::Batch * pointcloud_surface_get(PointCloud *pointcloud)
static void pointcloud_extract_attribute(const PointCloud &pointcloud, PointCloudBatchCache &cache, const StringRef name, int index)
static void pointcloud_extract_position_and_radius(const PointCloud &pointcloud, PointCloudBatchCache &cache)
static void build_edit_selection_indices(const PointCloud &pointcloud, gpu::IndexBuf &ibo)
static void pointcloud_discard_attributes(PointCloudBatchCache &cache)
void DRW_pointcloud_batch_cache_dirty_tag(PointCloud *pointcloud, int mode)
static void pointcloud_batch_cache_init(PointCloud &pointcloud)
gpu::Batch ** pointcloud_surface_shaded_get(PointCloud *pointcloud, GPUMaterial **gpu_materials, int mat_len)
blender::gpu::Batch * DRW_pointcloud_batch_cache_get_dots(Object *ob)
static void pointcloud_batch_cache_clear(PointCloud &pointcloud)
static void pointcloud_extract_indices(const PointCloud &pointcloud, PointCloudBatchCache &cache)
void DRW_pointcloud_batch_cache_free_old(PointCloud *pointcloud, int ctime)
blender::gpu::Batch * DRW_pointcloud_batch_cache_get_edit_dots(PointCloud *pointcloud)
bool drw_attributes_overlap(const VectorSet< std::string > *a, const VectorSet< std::string > *b)
gpu::VertBuf ** DRW_pointcloud_evaluated_attribute(PointCloud *pointcloud, StringRef name)
static const uint half_octahedron_tris[4][3]
void DRW_pointcloud_batch_cache_create_requested(Object *ob)
void DRW_pointcloud_batch_cache_validate(PointCloud *pointcloud)
static void index_mask_to_ibo(const IndexMask &mask, gpu::IndexBuf &ibo)
static bool pointcloud_batch_cache_valid(PointCloud &pointcloud)
void drw_attributes_merge(VectorSet< std::string > *dst, const VectorSet< std::string > *src)
void DRW_pointcloud_batch_cache_free(PointCloud *pointcloud)
gpu::VertBuf * pointcloud_position_and_radius_get(PointCloud *pointcloud)
gpu::VertBuf * DRW_pointcloud_position_and_radius_buffer_get(Object *ob)
void parallel_for(const IndexRange range, const int64_t grain_size, const Function &function, const TaskSizeHints &size_hints=detail::TaskSizeHints_Static(1))
Definition BLI_task.hh:93
VecBase< uint32_t, 3 > uint3
VecBase< float, 4 > float4
ColorSceneLinear4f< eAlpha::Premultiplied > ColorGeometry4f
const char * name
i
Definition text_draw.cc:230