/* Doxygen listing header (non-code): Blender V4.3 — draw_cache_impl_lattice.cc. */

/* SPDX-FileCopyrightText: 2017 Blender Authors
 *
 * SPDX-License-Identifier: GPL-2.0-or-later */

#include "MEM_guardedalloc.h"

#include "BLI_math_vector.h"
#include "BLI_utildefines.h"

#include "DNA_curve_types.h"
#include "DNA_lattice_types.h"
#include "DNA_meshdata_types.h"
#include "DNA_userdef_types.h"

#include "BKE_deform.hh"
#include "BKE_lattice.hh"

#include "GPU_batch.hh"

#include "draw_cache_impl.hh" /* own include */

#define SELECT 1

namespace blender::draw {

static void lattice_batch_cache_clear(Lattice *lt);
33
/* ---------------------------------------------------------------------- */
/* Lattice Interface, direct access to basic data. */
36
/* Total number of lattice control points for a `u * v * w` grid.
 * Returns 0 when any dimension is degenerate (non-positive). */
static int vert_len_calc(int u, int v, int w)
{
  const bool degenerate = (u <= 0) || (v <= 0) || (w <= 0);
  return degenerate ? 0 : u * v * w;
}
44
/* Total number of lattice edges for a `u * v * w` grid:
 * U- and V-aligned edges within each of the `w` planes, plus the
 * W-aligned edges linking consecutive planes. 0 for degenerate dims. */
static int edge_len_calc(int u, int v, int w)
{
  if (u <= 0 || v <= 0 || w <= 0) {
    return 0;
  }
  const int per_plane = ((u - 1) * v) + ((v - 1) * u);
  return (per_plane * w) + ((w - 1) * (u * v));
}
52
54{
55 if (lt->editlatt) {
56 lt = lt->editlatt->latt;
57 }
58
59 const int u = lt->pntsu;
60 const int v = lt->pntsv;
61 const int w = lt->pntsw;
62
63 if ((lt->flag & LT_OUTSIDE) == 0) {
64 return vert_len_calc(u, v, w);
65 }
66
67 /* TODO: remove internal coords. */
68 return vert_len_calc(u, v, w);
69}
70
72{
73 if (lt->editlatt) {
74 lt = lt->editlatt->latt;
75 }
76
77 const int u = lt->pntsu;
78 const int v = lt->pntsv;
79 const int w = lt->pntsw;
80
81 if ((lt->flag & LT_OUTSIDE) == 0) {
82 return edge_len_calc(u, v, w);
83 }
84
85 /* TODO: remove internal coords. */
86 return edge_len_calc(u, v, w);
87}
88
/* ---------------------------------------------------------------------- */
/* Lattice Interface, indirect, partially cached access to complex data. */
91
93 int types;
94
97
98 struct {
102
105
106 int actbp;
107
109};
110
/* Bit-flags selecting which data `lattice_render_data_create` gathers.
 * NOTE(review): enumerator lines were dropped by the extraction; names come
 * from their uses in this file, values are conventional bit-flags. */
enum {
  LR_DATATYPE_VERT = 1 << 0,
  LR_DATATYPE_EDGE = 1 << 1,
  LR_DATATYPE_OVERLAY = 1 << 2,
};
116
118{
119 LatticeRenderData *rdata = static_cast<LatticeRenderData *>(
120 MEM_callocN(sizeof(*rdata), __func__));
121 rdata->types = types;
122
123 if (lt->editlatt) {
124 EditLatt *editlatt = lt->editlatt;
125 lt = editlatt->latt;
126
127 rdata->edit_latt = editlatt;
128
129 rdata->dvert = lt->dvert;
130
131 if (types & (LR_DATATYPE_VERT)) {
133 }
134 if (types & (LR_DATATYPE_EDGE)) {
136 }
137 if (types & LR_DATATYPE_OVERLAY) {
138 rdata->actbp = lt->actbp;
139 }
140 }
141 else {
142 rdata->dvert = nullptr;
143
144 if (types & (LR_DATATYPE_VERT)) {
146 }
147 if (types & (LR_DATATYPE_EDGE)) {
149 /* No edge data. */
150 }
151 }
152
153 rdata->bp = lt->def;
154
155 rdata->dims.u_len = lt->pntsu;
156 rdata->dims.v_len = lt->pntsv;
157 rdata->dims.w_len = lt->pntsw;
158
159 rdata->show_only_outside = (lt->flag & LT_OUTSIDE) != 0;
160 rdata->actbp = lt->actbp;
161
162 return rdata;
163}
164
166{
167#if 0
168 if (rdata->loose_verts) {
169 MEM_freeN(rdata->loose_verts);
170 }
171#endif
172 MEM_freeN(rdata);
173}
174
176{
178 return rdata->vert_len;
179}
180
182{
184 return rdata->edge_len;
185}
186
188 const int vert_idx)
189{
191 return &rdata->bp[vert_idx];
192}
193
/* ---------------------------------------------------------------------- */
/* Lattice gpu::Batch Cache */
196
200
201 gpu::Batch *all_verts;
202 gpu::Batch *all_edges;
203
204 gpu::Batch *overlay_verts;
205
206 /* settings to determine if cache is invalid */
208
209 struct {
213
215};
216
/* gpu::Batch cache management. */
218
220{
221 LatticeBatchCache *cache = static_cast<LatticeBatchCache *>(lt->batch_cache);
222
223 if (cache == nullptr) {
224 return false;
225 }
226
227 if (cache->is_editmode != (lt->editlatt != nullptr)) {
228 return false;
229 }
230
231 if (cache->is_dirty) {
232 return false;
233 }
234
235 if ((cache->dims.u_len != lt->pntsu) || (cache->dims.v_len != lt->pntsv) ||
236 (cache->dims.w_len != lt->pntsw) ||
237 (cache->show_only_outside != ((lt->flag & LT_OUTSIDE) != 0)))
238 {
239 return false;
240 }
241
242 return true;
243}
244
246{
247 LatticeBatchCache *cache = static_cast<LatticeBatchCache *>(lt->batch_cache);
248
249 if (!cache) {
250 cache = static_cast<LatticeBatchCache *>(
251 lt->batch_cache = MEM_callocN(sizeof(*cache), __func__));
252 }
253 else {
254 memset(cache, 0, sizeof(*cache));
255 }
256
257 cache->dims.u_len = lt->pntsu;
258 cache->dims.v_len = lt->pntsv;
259 cache->dims.w_len = lt->pntsw;
260 cache->show_only_outside = (lt->flag & LT_OUTSIDE) != 0;
261
262 cache->is_editmode = lt->editlatt != nullptr;
263
264 cache->is_dirty = false;
265}
266
274
276{
277 return static_cast<LatticeBatchCache *>(lt->batch_cache);
278}
279
281{
282 LatticeBatchCache *cache = static_cast<LatticeBatchCache *>(lt->batch_cache);
283 if (cache == nullptr) {
284 return;
285 }
286 switch (mode) {
288 cache->is_dirty = true;
289 break;
291 /* TODO: Separate Flag VBO. */
293 break;
294 default:
295 BLI_assert(0);
296 }
297}
298
300{
301 LatticeBatchCache *cache = static_cast<LatticeBatchCache *>(lt->batch_cache);
302 if (!cache) {
303 return;
304 }
305
309
312}
313
319
320/* gpu::Batch cache usage. */
322 LatticeBatchCache *cache,
323 bool use_weight,
324 const int actdef)
325{
327
328 if (cache->pos == nullptr) {
329 GPUVertFormat format = {0};
330 struct {
331 uint pos, col;
332 } attr_id;
333
335 if (use_weight) {
337 }
338
339 const int vert_len = lattice_render_data_verts_len_get(rdata);
340
342 GPU_vertbuf_data_alloc(*cache->pos, vert_len);
343 for (int i = 0; i < vert_len; i++) {
344 const BPoint *bp = lattice_render_data_vert_bpoint(rdata, i);
345 GPU_vertbuf_attr_set(cache->pos, attr_id.pos, i, bp->vec);
346
347 if (use_weight) {
348 const float no_active_weight = 666.0f;
349 float weight = (actdef > -1) ? BKE_defvert_find_weight(rdata->dvert + i, actdef) :
350 no_active_weight;
351 GPU_vertbuf_attr_set(cache->pos, attr_id.col, i, &weight);
352 }
353 }
354 }
355
356 return cache->pos;
357}
358
360 LatticeBatchCache *cache)
361{
363
364 if (cache->edges == nullptr) {
365 const int vert_len = lattice_render_data_verts_len_get(rdata);
366 const int edge_len = lattice_render_data_edges_len_get(rdata);
367 int edge_len_real = 0;
368
370 GPU_indexbuf_init(&elb, GPU_PRIM_LINES, edge_len, vert_len);
371
372#define LATT_INDEX(u, v, w) ((((w) * rdata->dims.v_len + (v)) * rdata->dims.u_len) + (u))
373
374 for (int w = 0; w < rdata->dims.w_len; w++) {
375 int wxt = ELEM(w, 0, rdata->dims.w_len - 1);
376 for (int v = 0; v < rdata->dims.v_len; v++) {
377 int vxt = ELEM(v, 0, rdata->dims.v_len - 1);
378 for (int u = 0; u < rdata->dims.u_len; u++) {
379 int uxt = ELEM(u, 0, rdata->dims.u_len - 1);
380
381 if (w && ((uxt || vxt) || !rdata->show_only_outside)) {
382 GPU_indexbuf_add_line_verts(&elb, LATT_INDEX(u, v, w - 1), LATT_INDEX(u, v, w));
383 BLI_assert(edge_len_real <= edge_len);
384 edge_len_real++;
385 }
386 if (v && ((uxt || wxt) || !rdata->show_only_outside)) {
387 GPU_indexbuf_add_line_verts(&elb, LATT_INDEX(u, v - 1, w), LATT_INDEX(u, v, w));
388 BLI_assert(edge_len_real <= edge_len);
389 edge_len_real++;
390 }
391 if (u && ((vxt || wxt) || !rdata->show_only_outside)) {
392 GPU_indexbuf_add_line_verts(&elb, LATT_INDEX(u - 1, v, w), LATT_INDEX(u, v, w));
393 BLI_assert(edge_len_real <= edge_len);
394 edge_len_real++;
395 }
396 }
397 }
398 }
399
400#undef LATT_INDEX
401
402 if (rdata->show_only_outside) {
403 BLI_assert(edge_len_real <= edge_len);
404 }
405 else {
406 BLI_assert(edge_len_real == edge_len);
407 }
408 UNUSED_VARS_NDEBUG(edge_len_real);
409
410 cache->edges = GPU_indexbuf_build(&elb);
411 }
412
413 return cache->edges;
414}
415
417{
418 /* Since LR_DATATYPE_OVERLAY is slow to generate, generate them all at once */
420
423
424 if (cache->overlay_verts == nullptr) {
425 static GPUVertFormat format = {0};
426 static struct {
427 uint pos, data;
428 } attr_id;
429 if (format.attr_len == 0) {
430 /* initialize vertex format */
433 }
434
435 const int vert_len = lattice_render_data_verts_len_get(rdata);
436
438 GPU_vertbuf_data_alloc(*vbo, vert_len);
439 for (int i = 0; i < vert_len; i++) {
440 const BPoint *bp = lattice_render_data_vert_bpoint(rdata, i);
441
442 char vflag = 0;
443 if (bp->f1 & SELECT) {
444 if (i == rdata->actbp) {
445 vflag |= VFLAG_VERT_ACTIVE;
446 }
447 else {
448 vflag |= VFLAG_VERT_SELECTED;
449 }
450 }
451
452 GPU_vertbuf_attr_set(vbo, attr_id.pos, i, bp->vec);
453 GPU_vertbuf_attr_set(vbo, attr_id.data, i, &vflag);
454 }
455
457 }
458
460}
461
462gpu::Batch *DRW_lattice_batch_cache_get_all_edges(Lattice *lt, bool use_weight, const int actdef)
463{
465
466 if (cache->all_edges == nullptr) {
467 /* create batch from Lattice */
469
472 lattice_batch_cache_get_pos(rdata, cache, use_weight, actdef),
473 lattice_batch_cache_get_edges(rdata, cache));
474
476 }
477
478 return cache->all_edges;
479}
480
482{
484
485 if (cache->all_verts == nullptr) {
487
489 GPU_PRIM_POINTS, lattice_batch_cache_get_pos(rdata, cache, false, -1), nullptr);
490
492 }
493
494 return cache->all_verts;
495}
496
498{
500
501 if (cache->overlay_verts == nullptr) {
503 }
504
505 return cache->overlay_verts;
506}
507
}  // namespace blender::draw
support for deformation groups and hooks.
float BKE_defvert_find_weight(const MDeformVert *dvert, int defgroup)
Definition deform.cc:770
@ BKE_LATTICE_BATCH_DIRTY_SELECT
@ BKE_LATTICE_BATCH_DIRTY_ALL
#define BLI_assert(a)
Definition BLI_assert.h:50
unsigned int uint
#define UNUSED_VARS_NDEBUG(...)
#define ELEM(...)
@ LT_OUTSIDE
blender::gpu::Batch * GPU_batch_create_ex(GPUPrimType primitive_type, blender::gpu::VertBuf *vertex_buf, blender::gpu::IndexBuf *index_buf, eGPUBatchFlag owns_flag)
Definition gpu_batch.cc:56
#define GPU_batch_create(primitive_type, vertex_buf, index_buf)
Definition GPU_batch.hh:149
#define GPU_BATCH_DISCARD_SAFE(batch)
Definition GPU_batch.hh:205
@ GPU_BATCH_OWNS_VBO
Definition GPU_batch.hh:42
#define GPU_INDEXBUF_DISCARD_SAFE(elem)
void GPU_indexbuf_init(GPUIndexBufBuilder *, GPUPrimType, uint prim_len, uint vertex_len)
blender::gpu::IndexBuf * GPU_indexbuf_build(GPUIndexBufBuilder *)
void GPU_indexbuf_add_line_verts(GPUIndexBufBuilder *, uint v1, uint v2)
@ GPU_PRIM_LINES
@ GPU_PRIM_POINTS
#define GPU_vertbuf_create_with_format(format)
void GPU_vertbuf_attr_set(blender::gpu::VertBuf *, uint a_idx, uint v_idx, const void *data)
#define GPU_VERTBUF_DISCARD_SAFE(verts)
void GPU_vertbuf_data_alloc(blender::gpu::VertBuf &verts, uint v_len)
@ GPU_FETCH_FLOAT
@ GPU_FETCH_INT
uint GPU_vertformat_attr_add(GPUVertFormat *, const char *name, GPUVertCompType, uint comp_len, GPUVertFetchMode)
@ GPU_COMP_F32
@ GPU_COMP_U8
Read Guarded memory(de)allocation.
#define MEM_SAFE_FREE(v)
ATTR_WARN_UNUSED_RESULT const BMVert * v
SIMD_FORCE_INLINE const btScalar & w() const
Return the w value.
Definition btQuadWord.h:119
MutableSpan< T > data()
#define SELECT
CCL_NAMESPACE_BEGIN struct Options options
#define LATT_INDEX(u, v, w)
struct @620::@623 attr_id
uint col
format
static char ** types
Definition makesdna.cc:71
void MEM_freeN(void *vmemh)
Definition mallocn.cc:105
void *(* MEM_callocN)(size_t len, const char *str)
Definition mallocn.cc:42
blender::gpu::Batch * DRW_lattice_batch_cache_get_all_verts(Lattice *lt)
void DRW_lattice_batch_cache_validate(Lattice *lt)
static int edge_len_calc(int u, int v, int w)
blender::gpu::Batch * DRW_lattice_batch_cache_get_all_edges(Lattice *lt, bool use_weight, int actdef)
void DRW_lattice_batch_cache_dirty_tag(Lattice *lt, int mode)
blender::gpu::Batch * DRW_lattice_batch_cache_get_edit_verts(Lattice *lt)
void DRW_lattice_batch_cache_free(Lattice *lt)
static void lattice_batch_cache_init(Lattice *lt)
static void lattice_batch_cache_clear(Lattice *lt)
static gpu::IndexBuf * lattice_batch_cache_get_edges(LatticeRenderData *rdata, LatticeBatchCache *cache)
static int lattice_render_data_edges_len_get(const LatticeRenderData *rdata)
static const BPoint * lattice_render_data_vert_bpoint(const LatticeRenderData *rdata, const int vert_idx)
static LatticeBatchCache * lattice_batch_cache_get(Lattice *lt)
static int lattice_render_data_verts_len_get(const LatticeRenderData *rdata)
static LatticeRenderData * lattice_render_data_create(Lattice *lt, const int types)
static void lattice_batch_cache_create_overlay_batches(Lattice *lt)
static bool lattice_batch_cache_valid(Lattice *lt)
static void lattice_render_data_free(LatticeRenderData *rdata)
static gpu::VertBuf * lattice_batch_cache_get_pos(LatticeRenderData *rdata, LatticeBatchCache *cache, bool use_weight, const int actdef)
static int lattice_render_verts_len_get(Lattice *lt)
static int lattice_render_edges_len_get(Lattice *lt)
static int vert_len_calc(int u, int v, int w)
uint8_t f1
float vec[4]
struct Lattice * latt
void * batch_cache
struct MDeformVert * dvert
struct EditLatt * editlatt
struct BPoint * def
struct blender::draw::LatticeBatchCache::@263 dims
struct blender::draw::LatticeRenderData::@262 dims