/* SPDX-FileCopyrightText: 2017 Blender Authors
 *
 * SPDX-License-Identifier: GPL-2.0-or-later */

/** \file
 * \ingroup draw
 */

#include "MEM_guardedalloc.h"

#include "BLI_utildefines.h"

#include "DNA_curve_types.h"
#include "DNA_lattice_types.h"
#include "DNA_meshdata_types.h"

#include "BKE_deform.hh"
#include "BKE_lattice.hh"

#include "GPU_batch.hh"

#include "draw_cache_impl.hh" /* own include */

#define SELECT 1

namespace blender::draw {

static void lattice_batch_cache_clear(Lattice *lt);

/* ---------------------------------------------------------------------- */
/* Lattice Interface, direct access to basic data. */
34
/**
 * Number of points in a `u * v * w` lattice grid.
 * A non-positive dimension means there is nothing to draw, so return 0.
 */
static int vert_len_calc(int u, int v, int w)
{
  if (u <= 0 || v <= 0 || w <= 0) {
    return 0;
  }
  return u * v * w;
}
42
/**
 * Number of edges in a `u * v * w` lattice grid:
 * per W-level the U-direction edges (`(u - 1) * v`) plus the V-direction
 * edges (`(v - 1) * u`), and `(w - 1) * u * v` edges linking adjacent levels.
 * Returns 0 when any dimension is non-positive.
 */
static int edge_len_calc(int u, int v, int w)
{
  if (u <= 0 || v <= 0 || w <= 0) {
    return 0;
  }
  return (((((u - 1) * v) + ((v - 1) * u)) * w) + ((w - 1) * (u * v)));
}
50
52{
53 if (lt->editlatt) {
54 lt = lt->editlatt->latt;
55 }
56
57 const int u = lt->pntsu;
58 const int v = lt->pntsv;
59 const int w = lt->pntsw;
60
61 if ((lt->flag & LT_OUTSIDE) == 0) {
62 return vert_len_calc(u, v, w);
63 }
64
65 /* TODO: remove internal coords. */
66 return vert_len_calc(u, v, w);
67}
68
70{
71 if (lt->editlatt) {
72 lt = lt->editlatt->latt;
73 }
74
75 const int u = lt->pntsu;
76 const int v = lt->pntsv;
77 const int w = lt->pntsw;
78
79 if ((lt->flag & LT_OUTSIDE) == 0) {
80 return edge_len_calc(u, v, w);
81 }
82
83 /* TODO: remove internal coords. */
84 return edge_len_calc(u, v, w);
85}
86
87/* ---------------------------------------------------------------------- */
88/* Lattice Interface, indirect, partially cached access to complex data. */
89
91 int types;
92
95
96 struct {
100
103
104 int actbp;
105
107};
108
/* Data requested from #lattice_render_data_create. */
enum {
  LR_DATATYPE_VERT = 1 << 0,
  LR_DATATYPE_EDGE = 1 << 1,
  LR_DATATYPE_OVERLAY = 1 << 2,
};
114
116{
117 LatticeRenderData *rdata = static_cast<LatticeRenderData *>(
118 MEM_callocN(sizeof(*rdata), __func__));
119 rdata->types = types;
120
121 if (lt->editlatt) {
122 EditLatt *editlatt = lt->editlatt;
123 lt = editlatt->latt;
124
125 rdata->edit_latt = editlatt;
126
127 rdata->dvert = lt->dvert;
128
129 if (types & (LR_DATATYPE_VERT)) {
131 }
132 if (types & (LR_DATATYPE_EDGE)) {
134 }
136 rdata->actbp = lt->actbp;
137 }
138 }
139 else {
140 rdata->dvert = nullptr;
141
142 if (types & (LR_DATATYPE_VERT)) {
144 }
145 if (types & (LR_DATATYPE_EDGE)) {
147 /* No edge data. */
148 }
149 }
150
151 rdata->bp = lt->def;
152
153 rdata->dims.u_len = lt->pntsu;
154 rdata->dims.v_len = lt->pntsv;
155 rdata->dims.w_len = lt->pntsw;
156
157 rdata->show_only_outside = (lt->flag & LT_OUTSIDE) != 0;
158 rdata->actbp = lt->actbp;
159
160 return rdata;
161}
162
164{
165#if 0
166 if (rdata->loose_verts) {
167 MEM_freeN(rdata->loose_verts);
168 }
169#endif
170 MEM_freeN(rdata);
171}
172
174{
176 return rdata->vert_len;
177}
178
180{
182 return rdata->edge_len;
183}
184
186 const int vert_idx)
187{
189 return &rdata->bp[vert_idx];
190}
191
192/* ---------------------------------------------------------------------- */
193/* Lattice gpu::Batch Cache */
194
198
199 gpu::Batch *all_verts;
200 gpu::Batch *all_edges;
201
202 gpu::Batch *overlay_verts;
203
204 /* settings to determine if cache is invalid */
206
207 struct {
211
213};
214
215/* gpu::Batch cache management. */
216
218{
219 LatticeBatchCache *cache = static_cast<LatticeBatchCache *>(lt->batch_cache);
220
221 if (cache == nullptr) {
222 return false;
223 }
224
225 if (cache->is_editmode != (lt->editlatt != nullptr)) {
226 return false;
227 }
228
229 if (cache->is_dirty) {
230 return false;
231 }
232
233 if ((cache->dims.u_len != lt->pntsu) || (cache->dims.v_len != lt->pntsv) ||
234 (cache->dims.w_len != lt->pntsw) ||
235 (cache->show_only_outside != ((lt->flag & LT_OUTSIDE) != 0)))
236 {
237 return false;
238 }
239
240 return true;
241}
242
244{
245 LatticeBatchCache *cache = static_cast<LatticeBatchCache *>(lt->batch_cache);
246
247 if (!cache) {
248 cache = static_cast<LatticeBatchCache *>(
249 lt->batch_cache = MEM_callocN(sizeof(*cache), __func__));
250 }
251 else {
252 memset(cache, 0, sizeof(*cache));
253 }
254
255 cache->dims.u_len = lt->pntsu;
256 cache->dims.v_len = lt->pntsv;
257 cache->dims.w_len = lt->pntsw;
258 cache->show_only_outside = (lt->flag & LT_OUTSIDE) != 0;
259
260 cache->is_editmode = lt->editlatt != nullptr;
261
262 cache->is_dirty = false;
263}
264
272
274{
275 return static_cast<LatticeBatchCache *>(lt->batch_cache);
276}
277
279{
280 LatticeBatchCache *cache = static_cast<LatticeBatchCache *>(lt->batch_cache);
281 if (cache == nullptr) {
282 return;
283 }
284 switch (mode) {
286 cache->is_dirty = true;
287 break;
289 /* TODO: Separate Flag VBO. */
291 break;
292 default:
293 BLI_assert(0);
294 }
295}
296
298{
299 LatticeBatchCache *cache = static_cast<LatticeBatchCache *>(lt->batch_cache);
300 if (!cache) {
301 return;
302 }
303
307
310}
311
317
318/* gpu::Batch cache usage. */
320 LatticeBatchCache *cache,
321 bool use_weight,
322 const int actdef)
323{
325
326 if (cache->pos == nullptr) {
327 GPUVertFormat format = {0};
328 struct {
329 uint pos, col;
330 } attr_id;
331
332 attr_id.pos = GPU_vertformat_attr_add(&format, "pos", gpu::VertAttrType::SFLOAT_32_32_32);
333 if (use_weight) {
334 attr_id.col = GPU_vertformat_attr_add(&format, "weight", gpu::VertAttrType::SFLOAT_32);
335 }
336
337 const int vert_len = lattice_render_data_verts_len_get(rdata);
338
340 GPU_vertbuf_data_alloc(*cache->pos, vert_len);
341 for (int i = 0; i < vert_len; i++) {
342 const BPoint *bp = lattice_render_data_vert_bpoint(rdata, i);
343 GPU_vertbuf_attr_set(cache->pos, attr_id.pos, i, bp->vec);
344
345 if (use_weight) {
346 const float no_active_weight = 666.0f;
347 float weight = (actdef > -1) ? BKE_defvert_find_weight(rdata->dvert + i, actdef) :
348 no_active_weight;
349 GPU_vertbuf_attr_set(cache->pos, attr_id.col, i, &weight);
350 }
351 }
352 }
353
354 return cache->pos;
355}
356
358 LatticeBatchCache *cache)
359{
361
362 if (cache->edges == nullptr) {
363 const int vert_len = lattice_render_data_verts_len_get(rdata);
364 const int edge_len = lattice_render_data_edges_len_get(rdata);
365
366 GPUIndexBufBuilder builder;
367 GPU_indexbuf_init(&builder, GPU_PRIM_LINES, edge_len, vert_len);
369 int line_index = 0;
370
371#define LATT_INDEX(u, v, w) ((((w) * rdata->dims.v_len + (v)) * rdata->dims.u_len) + (u))
372
373 for (int w = 0; w < rdata->dims.w_len; w++) {
374 int wxt = ELEM(w, 0, rdata->dims.w_len - 1);
375 for (int v = 0; v < rdata->dims.v_len; v++) {
376 int vxt = ELEM(v, 0, rdata->dims.v_len - 1);
377 for (int u = 0; u < rdata->dims.u_len; u++) {
378 int uxt = ELEM(u, 0, rdata->dims.u_len - 1);
379
380 if (w && ((uxt || vxt) || !rdata->show_only_outside)) {
381 data[line_index++] = uint2(LATT_INDEX(u, v, w - 1), LATT_INDEX(u, v, w));
382 }
383 if (v && ((uxt || wxt) || !rdata->show_only_outside)) {
384 data[line_index++] = uint2(LATT_INDEX(u, v - 1, w), LATT_INDEX(u, v, w));
385 }
386 if (u && ((vxt || wxt) || !rdata->show_only_outside)) {
387 data[line_index++] = uint2(LATT_INDEX(u - 1, v, w), LATT_INDEX(u, v, w));
388 }
389 }
390 }
391 }
392
393#undef LATT_INDEX
394
395 if (rdata->show_only_outside) {
396 BLI_assert(line_index <= edge_len);
397 }
398 else {
399 BLI_assert(line_index == edge_len);
400 }
401
402 cache->edges = GPU_indexbuf_build_ex(&builder, 0, vert_len, false);
403 }
404
405 return cache->edges;
406}
407
409{
410 /* Since LR_DATATYPE_OVERLAY is slow to generate, generate them all at once */
412
415
416 if (cache->overlay_verts == nullptr) {
417 static struct {
418 uint pos, data;
419 } attr_id;
420 static const GPUVertFormat format = [&]() {
422 attr_id.pos = GPU_vertformat_attr_add(&format, "pos", gpu::VertAttrType::SFLOAT_32_32_32);
423 attr_id.data = GPU_vertformat_attr_add(&format, "data", gpu::VertAttrType::UINT_32);
424 return format;
425 }();
426
427 const int vert_len = lattice_render_data_verts_len_get(rdata);
428
430 GPU_vertbuf_data_alloc(*vbo, vert_len);
431 for (int i = 0; i < vert_len; i++) {
432 const BPoint *bp = lattice_render_data_vert_bpoint(rdata, i);
433
434 uint32_t vflag = 0;
435 if (bp->f1 & SELECT) {
436 if (i == rdata->actbp) {
437 vflag |= VFLAG_VERT_ACTIVE;
438 }
439 else {
440 vflag |= VFLAG_VERT_SELECTED;
441 }
442 }
443
444 GPU_vertbuf_attr_set(vbo, attr_id.pos, i, bp->vec);
445 GPU_vertbuf_attr_set(vbo, attr_id.data, i, &vflag);
446 }
447
449 }
450
452}
453
454gpu::Batch *DRW_lattice_batch_cache_get_all_edges(Lattice *lt, bool use_weight, const int actdef)
455{
457
458 if (cache->all_edges == nullptr) {
459 /* create batch from Lattice */
461
464 lattice_batch_cache_get_pos(rdata, cache, use_weight, actdef),
465 lattice_batch_cache_get_edges(rdata, cache));
466
468 }
469
470 return cache->all_edges;
471}
472
474{
476
477 if (cache->all_verts == nullptr) {
479
481 GPU_PRIM_POINTS, lattice_batch_cache_get_pos(rdata, cache, false, -1), nullptr);
482
484 }
485
486 return cache->all_verts;
487}
488
490{
492
493 if (cache->overlay_verts == nullptr) {
495 }
496
497 return cache->overlay_verts;
498}
499
} // namespace blender::draw
support for deformation groups and hooks.
float BKE_defvert_find_weight(const MDeformVert *dvert, int defgroup)
Definition deform.cc:774
@ BKE_LATTICE_BATCH_DIRTY_SELECT
@ BKE_LATTICE_BATCH_DIRTY_ALL
#define BLI_assert(a)
Definition BLI_assert.h:46
unsigned int uint
#define ELEM(...)
@ LT_OUTSIDE
#define GPU_batch_create(primitive_type, vertex_buf, index_buf)
Definition GPU_batch.hh:141
#define GPU_BATCH_DISCARD_SAFE(batch)
Definition GPU_batch.hh:197
blender::gpu::Batch * GPU_batch_create_ex(GPUPrimType primitive_type, blender::gpu::VertBuf *vertex_buf, blender::gpu::IndexBuf *index_buf, GPUBatchFlag owns_flag)
Definition gpu_batch.cc:51
@ GPU_BATCH_OWNS_VBO
Definition GPU_batch.hh:42
blender::MutableSpan< uint32_t > GPU_indexbuf_get_data(GPUIndexBufBuilder *)
#define GPU_INDEXBUF_DISCARD_SAFE(elem)
void GPU_indexbuf_init(GPUIndexBufBuilder *, GPUPrimType, uint prim_len, uint vertex_len)
blender::gpu::IndexBuf * GPU_indexbuf_build_ex(GPUIndexBufBuilder *builder, uint index_min, uint index_max, bool uses_restart_indices)
@ GPU_PRIM_LINES
@ GPU_PRIM_POINTS
static blender::gpu::VertBuf * GPU_vertbuf_create_with_format(const GPUVertFormat &format)
void GPU_vertbuf_attr_set(blender::gpu::VertBuf *, uint a_idx, uint v_idx, const void *data)
#define GPU_VERTBUF_DISCARD_SAFE(verts)
void GPU_vertbuf_data_alloc(blender::gpu::VertBuf &verts, uint v_len)
uint GPU_vertformat_attr_add(GPUVertFormat *format, blender::StringRef name, blender::gpu::VertAttrType type)
Read Guarded memory(de)allocation.
#define MEM_SAFE_FREE(v)
BMesh const char void * data
ATTR_WARN_UNUSED_RESULT const BMVert * v
SIMD_FORCE_INLINE const btScalar & w() const
Return the w value.
Definition btQuadWord.h:119
#define SELECT
CCL_NAMESPACE_BEGIN struct Options options
#define LATT_INDEX(u, v, w)
uint pos
uint col
struct @021025263243242147216143265077100330027142264337::@240232116316110053135047106323056371161236243121 attr_id
format
static char ** types
Definition makesdna.cc:71
void * MEM_callocN(size_t len, const char *str)
Definition mallocn.cc:118
void MEM_freeN(void *vmemh)
Definition mallocn.cc:113
blender::gpu::Batch * DRW_lattice_batch_cache_get_all_verts(Lattice *lt)
void DRW_lattice_batch_cache_validate(Lattice *lt)
static int edge_len_calc(int u, int v, int w)
blender::gpu::Batch * DRW_lattice_batch_cache_get_all_edges(Lattice *lt, bool use_weight, int actdef)
void DRW_lattice_batch_cache_dirty_tag(Lattice *lt, int mode)
blender::gpu::Batch * DRW_lattice_batch_cache_get_edit_verts(Lattice *lt)
void DRW_lattice_batch_cache_free(Lattice *lt)
static void lattice_batch_cache_init(Lattice *lt)
static void lattice_batch_cache_clear(Lattice *lt)
static gpu::IndexBuf * lattice_batch_cache_get_edges(LatticeRenderData *rdata, LatticeBatchCache *cache)
static int lattice_render_data_edges_len_get(const LatticeRenderData *rdata)
static const BPoint * lattice_render_data_vert_bpoint(const LatticeRenderData *rdata, const int vert_idx)
static LatticeBatchCache * lattice_batch_cache_get(Lattice *lt)
static int lattice_render_data_verts_len_get(const LatticeRenderData *rdata)
static LatticeRenderData * lattice_render_data_create(Lattice *lt, const int types)
static void lattice_batch_cache_create_overlay_batches(Lattice *lt)
static bool lattice_batch_cache_valid(Lattice *lt)
static void lattice_render_data_free(LatticeRenderData *rdata)
static gpu::VertBuf * lattice_batch_cache_get_pos(LatticeRenderData *rdata, LatticeBatchCache *cache, bool use_weight, const int actdef)
static int lattice_render_verts_len_get(Lattice *lt)
static int lattice_render_edges_len_get(Lattice *lt)
static int vert_len_calc(int u, int v, int w)
VecBase< uint32_t, 2 > uint2
uint8_t f1
float vec[4]
struct Lattice * latt
void * batch_cache
struct MDeformVert * dvert
struct EditLatt * editlatt
struct BPoint * def
struct blender::draw::LatticeBatchCache::@072343233107120301027277004040321045100036216216 dims
struct blender::draw::LatticeRenderData::@031124354060133262320031101265231207120015077316 dims
i
Definition text_draw.cc:230