Blender V4.3
image_drawing_mode.hh
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2021 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
9#pragma once
10
12
13#include "IMB_imbuf_types.hh"
14#include "IMB_interp.hh"
15
18
19#include "image_batches.hh"
20#include "image_private.hh"
21
23
/* Tolerance for UV-bounds rectangle comparisons.
 * NOTE(review): not referenced in the visible portion of this listing (the visible
 * BLI_rctf_compare call uses a 0.001 literal) -- presumably used elsewhere; confirm. */
24 constexpr float EPSILON_UV_BOUNDS = 0.00001f;
25
/* NOTE(review): incomplete extraction -- the `class BaseTextureMethod` declaration, its
 * `instance_data` member and constructor lines are missing from this listing. */
27 protected:
29
30 protected:
32
33 public:
/** Ensure enough texture infos are allocated in `instance_data` (per the cross-reference
 * index of this listing). */
37 virtual void ensure_texture_infos() = 0;
38
/** Update the uv and region bounds of all texture_infos of `instance_data`. */
42 virtual void update_bounds(const ARegion *region) = 0;
43
45};
46
/* Screen-space method using a single texture covering the whole region.
 * NOTE(review): incomplete extraction -- the `class OneTexture` declaration, constructor
 * and statement lines 56, 61 and 88 are missing from this listing. */
52 public:
/** Ensure enough texture infos are allocated (body line 56 missing from this listing). */
54 void ensure_texture_infos() override
55 {
57 }
58
/** Recompute the uv and screen-space bounds of the single texture; flags a full update
 * whenever either changed. */
59 void update_bounds(const ARegion *region) override
60 {
/* NOTE(review): `mat` is used below but its declaration (line 61) is missing from this
 * listing -- presumably derived from the instance's screen-to-texture transform; confirm. */
62 float2 region_uv_min = math::transform_point(mat, float3(0.0f, 0.0f, 0.0f)).xy();
63 float2 region_uv_max = math::transform_point(mat, float3(1.0f, 1.0f, 0.0f)).xy();
64
65 TextureInfo &texture_info = instance_data->texture_infos[0];
66 texture_info.tile_id = int2(0);
67 texture_info.need_full_update = false;
68 rctf new_clipping_uv_bounds;
69 BLI_rctf_init(&new_clipping_uv_bounds,
70 region_uv_min.x,
71 region_uv_max.x,
72 region_uv_min.y,
73 region_uv_max.y);
74
/* Exact-bits comparison of the float rect; any change forces a full texture rebuild. */
75 if (memcmp(&new_clipping_uv_bounds, &texture_info.clipping_uv_bounds, sizeof(rctf))) {
76 texture_info.clipping_uv_bounds = new_clipping_uv_bounds;
77 texture_info.need_full_update = true;
78 }
79
80 rcti new_clipping_bounds;
81 BLI_rcti_init(&new_clipping_bounds, 0, region->winx, 0, region->winy);
82 if (memcmp(&new_clipping_bounds, &texture_info.clipping_bounds, sizeof(rcti))) {
83 texture_info.clipping_bounds = new_clipping_bounds;
84 texture_info.need_full_update = true;
85 }
86 }
87
/* Allocate the GPU texture sized to the screen-space clipping bounds.
 * NOTE(review): the signature line (line 88, `ensure_gpu_textures_allocation` per the
 * cross-reference index) is missing from this listing. */
89 {
90 TextureInfo &texture_info = instance_data->texture_infos[0];
91 int2 texture_size = int2(BLI_rcti_size_x(&texture_info.clipping_bounds),
92 BLI_rcti_size_y(&texture_info.clipping_bounds));
93 texture_info.ensure_gpu_texture(texture_size);
94 }
95};
96
/* Screen-space method using a grid of (Divisions + 1)^2 textures covering the region.
 * NOTE(review): incomplete extraction -- lines 106-107 (`VerticesPerDimension` per the
 * loops below), 121-125 (constructor) and 128 (ensure body) are missing from this listing. */
103template<size_t Divisions> class ScreenTileTextures : public BaseTextureMethod {
104 public:
105 static const size_t TexturesPerDimension = Divisions + 1;
108
109 private:
/* Pairs a recycled TextureInfo with the uv bounds/tile slot it should cover this frame. */
113 struct TextureInfoBounds {
114 TextureInfo *info = nullptr;
115 rctf uv_bounds;
116 /* Offset of this tile to be drawn on the screen (number of tiles from bottom left corner). */
117 int2 tile_id;
118 };
119
120 public:
122
/** Ensure enough texture infos are allocated (body line 128 missing from this listing). */
126 void ensure_texture_infos() override
127 {
129 }
130
130
/** Recompute which uv tiles are visible, recycle off-screen textures into the visible
 * slots, then derive each texture's screen-space bounds from the uv bounds. */
134 void update_bounds(const ARegion *region) override
135 {
136 /* determine uv_area of the region. */
137 Vector<TextureInfo *> unassigned_textures;
/* NOTE(review): `mat` is used below but its declaration (line 138) is missing from this
 * listing -- confirm against the full source. */
139 float2 region_uv_min = math::transform_point(mat, float3(0.0f, 0.0f, 0.0f)).xy();
140 float2 region_uv_max = math::transform_point(mat, float3(1.0f, 1.0f, 0.0f)).xy();
141 float2 region_uv_span = region_uv_max - region_uv_min;
142
143 /* Calculate uv coordinates of each vert in the grid of textures. */
144
145 /* Construct the uv bounds of the 4 textures that are needed to fill the region. */
146 Vector<TextureInfoBounds> info_bounds = create_uv_bounds(region_uv_span, region_uv_min);
147 assign_texture_infos_by_uv_bounds(info_bounds, unassigned_textures);
148 assign_unused_texture_infos(info_bounds, unassigned_textures);
149
150 /* Calculate the region bounds from the uv bounds. */
151 rctf region_uv_bounds;
/* NOTE(review): the call opening this argument list (line 152, presumably BLI_rctf_init)
 * is missing from this listing. */
153 &region_uv_bounds, region_uv_min.x, region_uv_max.x, region_uv_min.y, region_uv_max.y);
154 update_region_bounds_from_uv_bounds(region_uv_bounds, int2(region->winx, region->winy));
155 }
156
/* Size of one grid texture: the viewport divided into `Divisions` parts, rounded up.
 * NOTE(review): the signature line (line 160, `gpu_texture_size` per the calls below) is
 * missing from this listing. */
161 {
162 float2 viewport_size = DRW_viewport_size_get();
163 int2 texture_size(ceil(viewport_size.x / Divisions), ceil(viewport_size.y / Divisions));
164 return texture_size;
165 }
166
/* Allocate every grid texture at the shared size.
 * NOTE(review): the signature line (167, `ensure_gpu_textures_allocation`) and the loop
 * header over texture_infos (line 170, which introduces `info`) are missing from this
 * listing. */
168 {
169 int2 texture_size = gpu_texture_size();
171 info.ensure_gpu_texture(texture_size);
172 }
173 }
174
175 private:
/** Build the uv bounds + tile id for each texture in the grid, snapped to multiples of a
 * tile span so scrolling recycles rather than rebuilds textures. */
176 Vector<TextureInfoBounds> create_uv_bounds(float2 region_uv_span, float2 region_uv_min)
177 {
/* NOTE(review): the declaration of `uv_coords` (line 178) is missing from this listing --
 * from its use below, presumably a (VerticesPerDimension)^2 grid of float2; confirm. */
179 float2 region_tile_uv_span = region_uv_span / float2(float(Divisions));
/* Snap the grid origin to the next multiple of a tile span above region_uv_min. */
180 float2 onscreen_multiple = (blender::math::floor(region_uv_min / region_tile_uv_span) +
181 float2(1.0f)) *
182 region_tile_uv_span;
183 for (int y = 0; y < VerticesPerDimension; y++) {
184 for (int x = 0; x < VerticesPerDimension; x++) {
/* (x - 1, y - 1): the grid starts one tile below/left of the snapped origin. */
185 uv_coords[x][y] = region_tile_uv_span * float2(float(x - 1), float(y - 1)) +
186 onscreen_multiple;
187 }
188 }
189
190 Vector<TextureInfoBounds> info_bounds;
191 for (int x = 0; x < TexturesPerDimension; x++) {
192 for (int y = 0; y < TexturesPerDimension; y++) {
193 TextureInfoBounds texture_info_bounds;
194 texture_info_bounds.tile_id = int2(x, y);
195 BLI_rctf_init(&texture_info_bounds.uv_bounds,
196 uv_coords[x][y].x,
197 uv_coords[x + 1][y + 1].x,
198 uv_coords[x][y].y,
199 uv_coords[x + 1][y + 1].y);
200 info_bounds.append(texture_info_bounds);
201 }
202 }
203 return info_bounds;
204 }
205
206 void assign_texture_infos_by_uv_bounds(Vector<TextureInfoBounds> &info_bounds,
207 Vector<TextureInfo *> &r_unassigned_textures)
208 {
209 for (TextureInfo &info : instance_data->texture_infos) {
210 bool assigned = false;
211 for (TextureInfoBounds &info_bound : info_bounds) {
212 if (info_bound.info == nullptr &&
213 BLI_rctf_compare(&info_bound.uv_bounds, &info.clipping_uv_bounds, 0.001))
214 {
215 info_bound.info = &info;
216 info.tile_id = info_bound.tile_id;
217 assigned = true;
218 break;
219 }
220 }
221 if (!assigned) {
222 r_unassigned_textures.append(&info);
223 }
224 }
225 }
226
227 void assign_unused_texture_infos(Vector<TextureInfoBounds> &info_bounds,
228 Vector<TextureInfo *> &unassigned_textures)
229 {
230 for (TextureInfoBounds &info_bound : info_bounds) {
231 if (info_bound.info == nullptr) {
232 info_bound.info = unassigned_textures.pop_last();
233 info_bound.info->tile_id = info_bound.tile_id;
234 info_bound.info->need_full_update = true;
235 info_bound.info->clipping_uv_bounds = info_bound.uv_bounds;
236 }
237 }
238 }
239
240 void update_region_bounds_from_uv_bounds(const rctf &region_uv_bounds, const int2 region_size)
241 {
242 rctf region_bounds;
243 BLI_rctf_init(&region_bounds, 0.0, region_size.x, 0.0, region_size.y);
244 float4x4 uv_to_screen;
245 BLI_rctf_transform_calc_m4_pivot_min(&region_uv_bounds, &region_bounds, uv_to_screen.ptr());
246 int2 tile_origin(0);
247 for (const TextureInfo &info : instance_data->texture_infos) {
248 if (info.tile_id == int2(0)) {
249 tile_origin = int2(math::transform_point(
250 uv_to_screen,
251 float3(info.clipping_uv_bounds.xmin, info.clipping_uv_bounds.ymin, 0.0)));
252 break;
253 }
254 }
255
256 const int2 texture_size = gpu_texture_size();
257 for (TextureInfo &info : instance_data->texture_infos) {
258 int2 bottom_left = tile_origin + texture_size * info.tile_id;
259 int2 top_right = bottom_left + texture_size;
260 BLI_rcti_init(&info.clipping_bounds, bottom_left.x, top_right.x, bottom_left.y, top_right.y);
261 }
262 }
263};
264
266using namespace blender::bke::image;
267
/* Drawing mode that renders the image via screen-space textures managed by TextureMethod
 * (OneTexture or ScreenTileTextures). */
268template<typename TextureMethod> class ScreenSpaceDrawingMode : public AbstractDrawingMode {
269 private:
/* Create the main image draw pass.
 * NOTE(review): the DRWState setup (lines 272-273) is missing from this listing, so the
 * pass state used below is not visible here. */
270 DRWPass *create_image_pass() const
271 {
274 return DRW_pass_create("Image", state);
275 }
276
/* Create the depth pre-pass (state line 281 missing from this listing). */
277 DRWPass *create_depth_pass() const
278 {
279 /* Depth is needed for background overlay rendering. Near depth is used for
280 * transparency checker and Far depth is used for indicating the image size. */
282 return DRW_pass_create("Depth", state);
283 }
284
/* Register one shading sub-group per screen-space texture on the image pass, with the
 * shared shader parameters bound once on the parent group.
 * NOTE(review): lines 288-289 (declarations of `shader` and `dtxl`, per the cross-reference
 * index presumably IMAGE_shader_image_get() and DRW_viewport_texture_list_get()) and line
 * 302 (the DRW_shgroup_uniform_texture_ex call opening the argument list below) are missing
 * from this listing. */
285 void add_shgroups(const IMAGE_InstanceData *instance_data) const
286 {
287 const ShaderParameters &sh_params = instance_data->sh_params;
290
291 DRWShadingGroup *shgrp = DRW_shgroup_create(shader, instance_data->passes.image_pass);
292 DRW_shgroup_uniform_vec2_copy(shgrp, "farNearDistances", sh_params.far_near);
293 DRW_shgroup_uniform_vec4_copy(shgrp, "shuffle", sh_params.shuffle);
294 DRW_shgroup_uniform_int_copy(shgrp, "drawFlags", static_cast<int32_t>(sh_params.flags));
295 DRW_shgroup_uniform_bool_copy(shgrp, "imgPremultiplied", sh_params.use_premul_alpha);
296 DRW_shgroup_uniform_texture(shgrp, "depth_texture", dtxl->depth);
297 float image_mat[4][4];
298 unit_m4(image_mat);
299 for (const TextureInfo &info : instance_data->texture_infos) {
300 DRWShadingGroup *shgrp_sub = DRW_shgroup_create_sub(shgrp);
301 DRW_shgroup_uniform_ivec2_copy(shgrp_sub, "offset", info.offset());
303 shgrp_sub, "imageTexture", info.texture, GPUSamplerState::default_sampler());
304 DRW_shgroup_call_obmat(shgrp_sub, info.batch, image_mat);
305 }
306 }
307
/* Register depth sub-groups: one per (screen texture, UDIM tile) pair that actually has an
 * image buffer. The acquired buffers are marked used so the float-buffer cache keeps them.
 * NOTE(review): line 317 (the `shader` declaration, presumably IMAGE_shader_depth_get())
 * and line 342 (the `shsub` sub-group creation used below) are missing from this listing. */
313 void add_depth_shgroups(IMAGE_InstanceData &instance_data,
314 Image *image,
315 ImageUser *image_user) const
316 {
318 DRWShadingGroup *shgrp = DRW_shgroup_create(shader, instance_data.passes.depth_pass);
319
320 float image_mat[4][4];
321 unit_m4(image_mat);
322
323 ImageUser tile_user = {0};
324 if (image_user) {
325 tile_user = *image_user;
326 }
327
328 for (const TextureInfo &info : instance_data.texture_infos) {
329 LISTBASE_FOREACH (ImageTile *, image_tile_ptr, &image->tiles) {
330 const ImageTileWrapper image_tile(image_tile_ptr);
331 const int tile_x = image_tile.get_tile_x_offset();
332 const int tile_y = image_tile.get_tile_y_offset();
333 tile_user.tile = image_tile.get_tile_number();
334
335 /* NOTE: `BKE_image_has_ibuf` doesn't work as it fails for render results. That could be a
336 * bug or a feature. For now we just acquire to determine if there is a texture. */
337 void *lock;
338 ImBuf *tile_buffer = BKE_image_acquire_ibuf(image, &tile_user, &lock);
339 if (tile_buffer != nullptr) {
340 instance_data.float_buffers.mark_used(tile_buffer);
341
/* UDIM tile (tile_x, tile_y) covers the unit uv square starting at that offset. */
343 float4 min_max_uv(tile_x, tile_y, tile_x + 1, tile_y + 1);
344 DRW_shgroup_uniform_vec4_copy(shsub, "min_max_uv", min_max_uv);
345 DRW_shgroup_call_obmat(shsub, info.batch, image_mat);
346 }
347 BKE_image_release_ibuf(image, tile_buffer, lock);
348 }
349 }
350 }
351
/* Synchronize GPU textures with the image: collect changes since the last draw, then do a
 * partial or full upload depending on what changed. Tiled (wrap-repeat) drawing always
 * falls back to a full update.
 * NOTE(review): lines 362 and 364 (the PartialUpdateChecker construction and the
 * `changes` collection, per `collect_changes` in the cross-reference index) are missing
 * from this listing. */
358 void update_textures(IMAGE_InstanceData &instance_data,
359 Image *image,
360 ImageUser *image_user) const
361 {
363 image, image_user, instance_data.partial_update.user);
365
366 switch (changes.get_result_code()) {
367 case ePartialUpdateCollectResult::FullUpdateNeeded:
368 instance_data.mark_all_texture_slots_dirty();
369 instance_data.float_buffers.clear();
370 break;
371 case ePartialUpdateCollectResult::NoChangesDetected:
372 break;
373 case ePartialUpdateCollectResult::PartialChangesDetected:
374 /* Partial update when wrap repeat is enabled is not supported. */
375 if (instance_data.flags.do_tile_drawing) {
376 instance_data.float_buffers.clear();
377 instance_data.mark_all_texture_slots_dirty();
378 }
379 else {
380 do_partial_update(changes, instance_data);
381 }
382 break;
383 }
/* Any slot marked dirty above (or by bounds changes) is rebuilt here. */
384 do_full_update_for_dirty_textures(instance_data, image_user);
385 }
386
390 void do_partial_update_float_buffer(
391 ImBuf *float_buffer, PartialUpdateChecker<ImageTileData>::CollectResult &iterator) const
392 {
393 ImBuf *src = iterator.tile_data.tile_buffer;
394 BLI_assert(float_buffer->float_buffer.data != nullptr);
395 BLI_assert(float_buffer->byte_buffer.data == nullptr);
396 BLI_assert(src->float_buffer.data == nullptr);
397 BLI_assert(src->byte_buffer.data != nullptr);
398
399 /* Calculate the overlap between the updated region and the buffer size. Partial Update Checker
400 * always returns a tile (256x256). Which could lay partially outside the buffer when using
401 * different resolutions.
402 */
403 rcti buffer_rect;
404 BLI_rcti_init(&buffer_rect, 0, float_buffer->x, 0, float_buffer->y);
405 rcti clipped_update_region;
406 const bool has_overlap = BLI_rcti_isect(
407 &buffer_rect, &iterator.changed_region.region, &clipped_update_region);
408 if (!has_overlap) {
409 return;
410 }
411
412 IMB_float_from_rect_ex(float_buffer, src, &clipped_update_region);
413 }
414
/* Apply each changed region reported by the partial-update iterator to every GPU texture
 * it overlaps: convert the change to uv space, clip against the texture's uv bounds,
 * resample the tile buffer into a temporary ImBuf and upload that sub-rectangle.
 * NOTE(review): several call-opening lines are missing from this listing -- 446/467/479
 * (presumably BLI_rctf_init / BLI_rcti_init twice), 492 (IMB_initImBuf, per the sizes
 * below), 507 (the per-pixel sample, presumably interpolate_nearest_border_fl per the
 * cross-reference index) and 516-517 (GPU_texture_update_sub on info.texture). */
415 void do_partial_update(PartialUpdateChecker<ImageTileData>::CollectResult &iterator,
416 IMAGE_InstanceData &instance_data) const
417 {
418 while (iterator.get_next_change() == ePartialUpdateIterResult::ChangeAvailable) {
419 /* Quick exit when tile_buffer isn't available. */
420 if (iterator.tile_data.tile_buffer == nullptr) {
421 continue;
422 }
/* Byte buffers are shadowed by a cached float buffer; keep the cache in sync first. */
423 ImBuf *tile_buffer = instance_data.float_buffers.cached_float_buffer(
424 iterator.tile_data.tile_buffer);
425 if (tile_buffer != iterator.tile_data.tile_buffer) {
426 do_partial_update_float_buffer(tile_buffer, iterator);
427 }
428
429 const float tile_width = float(iterator.tile_data.tile_buffer->x);
430 const float tile_height = float(iterator.tile_data.tile_buffer->y);
431
432 for (const TextureInfo &info : instance_data.texture_infos) {
433 /* Dirty images will receive a full update. No need to do a partial one now. */
434 if (info.need_full_update) {
435 continue;
436 }
437 GPUTexture *texture = info.texture;
438 const float texture_width = GPU_texture_width(texture);
439 const float texture_height = GPU_texture_height(texture);
440 /* TODO: early bound check. */
441 ImageTileWrapper tile_accessor(iterator.tile_data.tile);
442 float tile_offset_x = float(tile_accessor.get_tile_x_offset());
443 float tile_offset_y = float(tile_accessor.get_tile_y_offset());
/* Changed region: texel space -> uv space (UDIM tile offset included). */
444 rcti *changed_region_in_texel_space = &iterator.changed_region.region;
445 rctf changed_region_in_uv_space;
447 &changed_region_in_uv_space,
448 float(changed_region_in_texel_space->xmin) / float(iterator.tile_data.tile_buffer->x) +
449 tile_offset_x,
450 float(changed_region_in_texel_space->xmax) / float(iterator.tile_data.tile_buffer->x) +
451 tile_offset_x,
452 float(changed_region_in_texel_space->ymin) / float(iterator.tile_data.tile_buffer->y) +
453 tile_offset_y,
454 float(changed_region_in_texel_space->ymax) / float(iterator.tile_data.tile_buffer->y) +
455 tile_offset_y);
456 rctf changed_overlapping_region_in_uv_space;
457 const bool region_overlap = BLI_rctf_isect(&info.clipping_uv_bounds,
458 &changed_region_in_uv_space,
459 &changed_overlapping_region_in_uv_space);
460 if (!region_overlap) {
461 continue;
462 }
463 /* Convert the overlapping region to texel space and to ss_pixel space...
464 * TODO: first convert to ss_pixel space as integer based. and from there go back to texel
465 * space. But perhaps this isn't needed and we could use an extraction offset somehow. */
466 rcti gpu_texture_region_to_update;
468 &gpu_texture_region_to_update,
469 floor((changed_overlapping_region_in_uv_space.xmin - info.clipping_uv_bounds.xmin) *
470 texture_width / BLI_rctf_size_x(&info.clipping_uv_bounds)),
471 floor((changed_overlapping_region_in_uv_space.xmax - info.clipping_uv_bounds.xmin) *
472 texture_width / BLI_rctf_size_x(&info.clipping_uv_bounds)),
473 ceil((changed_overlapping_region_in_uv_space.ymin - info.clipping_uv_bounds.ymin) *
474 texture_height / BLI_rctf_size_y(&info.clipping_uv_bounds)),
475 ceil((changed_overlapping_region_in_uv_space.ymax - info.clipping_uv_bounds.ymin) *
476 texture_height / BLI_rctf_size_y(&info.clipping_uv_bounds)));
477
478 rcti tile_region_to_extract;
480 &tile_region_to_extract,
481 floor((changed_overlapping_region_in_uv_space.xmin - tile_offset_x) * tile_width),
482 floor((changed_overlapping_region_in_uv_space.xmax - tile_offset_x) * tile_width),
483 ceil((changed_overlapping_region_in_uv_space.ymin - tile_offset_y) * tile_height),
484 ceil((changed_overlapping_region_in_uv_space.ymax - tile_offset_y) * tile_height));
485
486 /* Create an image buffer with a size.
487 * Extract and scale into an imbuf. */
488 const int texture_region_width = BLI_rcti_size_x(&gpu_texture_region_to_update);
489 const int texture_region_height = BLI_rcti_size_y(&gpu_texture_region_to_update);
490
491 ImBuf extracted_buffer;
493 &extracted_buffer, texture_region_width, texture_region_height, 32, IB_rectfloat);
494
/* Resample: for every destination texel, sample the tile buffer at the matching uv. */
495 int offset = 0;
496 for (int y = gpu_texture_region_to_update.ymin; y < gpu_texture_region_to_update.ymax; y++)
497 {
498 float yf = y / (float)texture_height;
499 float v = info.clipping_uv_bounds.ymax * yf + info.clipping_uv_bounds.ymin * (1.0 - yf) -
500 tile_offset_y;
501 for (int x = gpu_texture_region_to_update.xmin; x < gpu_texture_region_to_update.xmax;
502 x++)
503 {
504 float xf = x / (float)texture_width;
505 float u = info.clipping_uv_bounds.xmax * xf +
506 info.clipping_uv_bounds.xmin * (1.0 - xf) - tile_offset_x;
508 &extracted_buffer.float_buffer.data[offset * 4],
509 u * tile_buffer->x,
510 v * tile_buffer->y);
511 offset++;
512 }
513 }
514 IMB_gpu_clamp_half_float(&extracted_buffer);
515
518 extracted_buffer.float_buffer.data,
519 gpu_texture_region_to_update.xmin,
520 gpu_texture_region_to_update.ymin,
521 0,
522 extracted_buffer.x,
523 extracted_buffer.y,
524 0);
525 imb_freerectImbuf_all(&extracted_buffer);
526 }
527 }
528 }
529
530 void do_full_update_for_dirty_textures(IMAGE_InstanceData &instance_data,
531 const ImageUser *image_user) const
532 {
533 for (TextureInfo &info : instance_data.texture_infos) {
534 if (!info.need_full_update) {
535 continue;
536 }
537 do_full_update_gpu_texture(info, instance_data, image_user);
538 }
539 }
540
/* Fully rebuild one GPU texture: composite every UDIM tile buffer that overlaps it into a
 * CPU-side float buffer, then upload.
 * NOTE(review): line 568 (the upload call, presumably GPU_texture_update(info.texture,
 * GPU_DATA_FLOAT, ...) per the cross-reference index) is missing from this listing. */
541 void do_full_update_gpu_texture(TextureInfo &info,
542 IMAGE_InstanceData &instance_data,
543 const ImageUser *image_user) const
544 {
545 ImBuf texture_buffer;
546 const int texture_width = GPU_texture_width(info.texture);
547 const int texture_height = GPU_texture_height(info.texture);
548 IMB_initImBuf(&texture_buffer, texture_width, texture_height, 0, IB_rectfloat);
549 ImageUser tile_user = {0};
550 if (image_user) {
551 tile_user = *image_user;
552 }
553
554 void *lock;
555
556 Image *image = instance_data.image;
557 LISTBASE_FOREACH (ImageTile *, image_tile_ptr, &image->tiles) {
558 const ImageTileWrapper image_tile(image_tile_ptr);
559 tile_user.tile = image_tile.get_tile_number();
560
561 ImBuf *tile_buffer = BKE_image_acquire_ibuf(image, &tile_user, &lock);
562 if (tile_buffer != nullptr) {
563 do_full_update_texture_slot(instance_data, info, texture_buffer, *tile_buffer, image_tile);
564 }
565 BKE_image_release_ibuf(image, tile_buffer, lock);
566 }
567 IMB_gpu_clamp_half_float(&texture_buffer);
569 imb_freerectImbuf_all(&texture_buffer);
570 }
571
/* Transform one UDIM tile's (cached float) buffer into the texture buffer, mapping the
 * texture's uv clipping bounds onto the tile's texel space.
 * NOTE(review): lines 594 (the call opening the tile_area argument list, presumably
 * BLI_rctf_init) and 618 (the filter argument, presumably IMB_FILTER_NEAREST per the
 * cross-reference index) are missing from this listing. */
576 void do_full_update_texture_slot(IMAGE_InstanceData &instance_data,
577 const TextureInfo &texture_info,
578 ImBuf &texture_buffer,
579 ImBuf &tile_buffer,
580 const ImageTileWrapper &image_tile) const
581 {
582 const int texture_width = texture_buffer.x;
583 const int texture_height = texture_buffer.y;
584 ImBuf *float_tile_buffer = instance_data.float_buffers.cached_float_buffer(&tile_buffer);
585
586 /* IMB_transform works in a non-consistent space. This should be documented or fixed!.
587 * Construct a variant of the info_uv_to_texture that adds the texel space
588 * transformation. */
589 float4x4 uv_to_texel;
590 rctf texture_area;
591 rctf tile_area;
592
593 BLI_rctf_init(&texture_area, 0.0, texture_width, 0.0, texture_height);
/* Tile area expressed in the tile's own texel space, offset by the UDIM tile origin. */
595 &tile_area,
596 tile_buffer.x * (texture_info.clipping_uv_bounds.xmin - image_tile.get_tile_x_offset()),
597 tile_buffer.x * (texture_info.clipping_uv_bounds.xmax - image_tile.get_tile_x_offset()),
598 tile_buffer.y * (texture_info.clipping_uv_bounds.ymin - image_tile.get_tile_y_offset()),
599 tile_buffer.y * (texture_info.clipping_uv_bounds.ymax - image_tile.get_tile_y_offset()));
600 BLI_rctf_transform_calc_m4_pivot_min(&tile_area, &texture_area, uv_to_texel.ptr());
601 uv_to_texel = math::invert(uv_to_texel);
602
603 rctf crop_rect;
604 const rctf *crop_rect_ptr = nullptr;
605 eIMBTransformMode transform_mode;
606 if (instance_data.flags.do_tile_drawing) {
607 transform_mode = IMB_TRANSFORM_MODE_WRAP_REPEAT;
608 }
609 else {
610 BLI_rctf_init(&crop_rect, 0.0, tile_buffer.x, 0.0, tile_buffer.y);
611 crop_rect_ptr = &crop_rect;
612 transform_mode = IMB_TRANSFORM_MODE_CROP_SRC;
613 }
614
615 IMB_transform(float_tile_buffer,
616 &texture_buffer,
617 transform_mode,
619 uv_to_texel.ptr(),
620 crop_rect_ptr);
621 }
622
623 public:
624 void begin_sync(IMAGE_Data *vedata) const override
625 {
626 IMAGE_InstanceData *instance_data = vedata->instance_data;
627 instance_data->passes.image_pass = create_image_pass();
628 instance_data->passes.depth_pass = create_depth_pass();
629 }
630
631 void image_sync(IMAGE_Data *vedata, Image *image, ImageUser *iuser) const override
632 {
633 const DRWContextState *draw_ctx = DRW_context_state_get();
634 IMAGE_InstanceData *instance_data = vedata->instance_data;
635
636 TextureMethod method(instance_data);
637 method.ensure_texture_infos();
638
639 instance_data->partial_update.ensure_image(image);
640 instance_data->clear_need_full_update_flag();
641 instance_data->float_buffers.reset_usage_flags();
642
643 /* Step: Find out which screen space textures are needed to draw on the screen. Recycle
644 * textures that are not on screen anymore. */
645 const ARegion *region = draw_ctx->region;
646 method.update_bounds(region);
647
648 /* Step: Check for changes in the image user compared to the last time. */
649 instance_data->update_image_usage(iuser);
650
651 /* Step: Update the GPU textures based on the changes in the image. */
652 method.ensure_gpu_textures_allocation();
653 update_textures(*instance_data, image, iuser);
654
655 /* Step: Add the GPU textures to the shgroup. */
656 instance_data->update_batches();
657 if (!instance_data->flags.do_tile_drawing) {
658 add_depth_shgroups(*instance_data, image, iuser);
659 }
660 add_shgroups(instance_data);
661 }
662
663 void draw_finish(IMAGE_Data *vedata) const override
664 {
665 IMAGE_InstanceData *instance_data = vedata->instance_data;
666 instance_data->float_buffers.remove_unused_buffers();
667 }
668
/* Clear the framebuffer and draw the depth then image passes with the instance view.
 * NOTE(review): lines 673-674 (the `dfbl` framebuffer-list declaration and initial bind,
 * per DRW_viewport_framebuffer_list_get in the cross-reference index) and lines 682/685
 * (framebuffer rebinds between/after the passes) are missing from this listing. */
669 void draw_viewport(IMAGE_Data *vedata) const override
670 {
671 IMAGE_InstanceData *instance_data = vedata->instance_data;
672
675
676 static float clear_col[4] = {0.0f, 0.0f, 0.0f, 0.0f};
/* Tiled drawing uses 0.75 so the checker overlay can distinguish inside/outside. */
677 float clear_depth = instance_data->flags.do_tile_drawing ? 0.75 : 1.0f;
678 GPU_framebuffer_clear_color_depth(dfbl->default_fb, clear_col, clear_depth);
679
680 DRW_view_set_active(instance_data->view);
681 DRW_draw_pass(instance_data->passes.depth_pass);
683 DRW_draw_pass(instance_data->passes.image_pass);
684 DRW_view_set_active(nullptr);
686 }
687};
688
689} // namespace blender::draw::image_engine
ImBuf * BKE_image_acquire_ibuf(Image *ima, ImageUser *iuser, void **r_lock)
void BKE_image_release_ibuf(Image *ima, ImBuf *ibuf, void *lock)
#define BLI_assert(a)
Definition BLI_assert.h:50
#define LISTBASE_FOREACH(type, var, list)
void unit_m4(float m[4][4])
Definition rct.c:1127
BLI_INLINE int BLI_rcti_size_y(const struct rcti *rct)
Definition BLI_rect.h:193
bool BLI_rctf_isect(const struct rctf *src1, const struct rctf *src2, struct rctf *dest)
void BLI_rcti_init(struct rcti *rect, int xmin, int xmax, int ymin, int ymax)
Definition rct.c:418
void BLI_rctf_transform_calc_m4_pivot_min(const rctf *dst, const rctf *src, float matrix[4][4])
Definition rct.c:555
void BLI_rctf_init(struct rctf *rect, float xmin, float xmax, float ymin, float ymax)
Definition rct.c:408
bool BLI_rcti_isect(const struct rcti *src1, const struct rcti *src2, struct rcti *dest)
BLI_INLINE int BLI_rcti_size_x(const struct rcti *rct)
Definition BLI_rect.h:189
BLI_INLINE float BLI_rctf_size_x(const struct rctf *rct)
Definition BLI_rect.h:197
BLI_INLINE float BLI_rctf_size_y(const struct rctf *rct)
Definition BLI_rect.h:201
bool BLI_rctf_compare(const struct rctf *rect_a, const struct rctf *rect_b, float limit)
#define DRW_shgroup_call_obmat(shgroup, geom, obmat)
void GPU_framebuffer_clear_color_depth(GPUFrameBuffer *fb, const float clear_col[4], float clear_depth)
void GPU_framebuffer_bind(GPUFrameBuffer *framebuffer)
int GPU_texture_height(const GPUTexture *texture)
int GPU_texture_width(const GPUTexture *texture)
@ GPU_DATA_FLOAT
void GPU_texture_update_sub(GPUTexture *texture, eGPUDataFormat data_format, const void *pixels, int offset_x, int offset_y, int offset_z, int width, int height, int depth)
void GPU_texture_update(GPUTexture *texture, eGPUDataFormat data_format, const void *data)
void IMB_float_from_rect_ex(ImBuf *dst, const ImBuf *src, const rcti *region_to_update)
Definition divers.cc:748
void imb_freerectImbuf_all(ImBuf *ibuf)
eIMBTransformMode
Transform modes to use for IMB_transform function.
Definition IMB_imbuf.hh:676
@ IMB_TRANSFORM_MODE_WRAP_REPEAT
Wrap repeat the source buffer. Only supported in with nearest filtering.
Definition IMB_imbuf.hh:682
@ IMB_TRANSFORM_MODE_CROP_SRC
Crop the source buffer.
Definition IMB_imbuf.hh:680
void IMB_transform(const ImBuf *src, ImBuf *dst, eIMBTransformMode mode, eIMBInterpolationFilterMode filter, const float transform_matrix[4][4], const rctf *src_crop)
Transform source image buffer onto destination image buffer using a transform matrix.
bool IMB_initImBuf(ImBuf *ibuf, unsigned int x, unsigned int y, unsigned char planes, unsigned int flags)
@ IMB_FILTER_NEAREST
Definition IMB_imbuf.hh:289
void IMB_gpu_clamp_half_float(ImBuf *image_buffer)
Definition util_gpu.cc:404
Contains defines and structs used throughout the imbuf module.
@ IB_rectfloat
volatile int lock
struct GPUShader GPUShader
ATTR_WARN_UNUSED_RESULT const BMVert * v
void append(const T &value)
virtual void update_bounds(const ARegion *region)=0
Update the uv and region bounds of all texture_infos of instance_data.
virtual void ensure_texture_infos()=0
Ensure enough texture infos are allocated in instance_data.
BaseTextureMethod(IMAGE_InstanceData *instance_data)
OneTexture(IMAGE_InstanceData *instance_data)
void ensure_texture_infos() override
Ensure enough texture infos are allocated in instance_data.
void update_bounds(const ARegion *region) override
Update the uv and region bounds of all texture_infos of instance_data.
void image_sync(IMAGE_Data *vedata, Image *image, ImageUser *iuser) const override
void draw_viewport(IMAGE_Data *vedata) const override
void draw_finish(IMAGE_Data *vedata) const override
void begin_sync(IMAGE_Data *vedata) const override
Screen space method using a multiple textures covering the region.
void ensure_texture_infos() override
Ensure enough texture infos are allocated in instance_data.
void update_bounds(const ARegion *region) override
Update the uv and region bounds of all texture_infos of instance_data.
ScreenTileTextures(IMAGE_InstanceData *instance_data)
DefaultFramebufferList * DRW_viewport_framebuffer_list_get()
const float * DRW_viewport_size_get()
DefaultTextureList * DRW_viewport_texture_list_get()
const DRWContextState * DRW_context_state_get()
DRWShadingGroup * DRW_shgroup_create(GPUShader *shader, DRWPass *pass)
void DRW_shgroup_uniform_texture_ex(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex, GPUSamplerState sampler_state)
void DRW_shgroup_uniform_texture(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex)
DRWPass * DRW_pass_create(const char *name, DRWState state)
void DRW_shgroup_uniform_ivec2_copy(DRWShadingGroup *shgroup, const char *name, const int *value)
DRWShadingGroup * DRW_shgroup_create_sub(DRWShadingGroup *shgroup)
void DRW_shgroup_uniform_int_copy(DRWShadingGroup *shgroup, const char *name, const int value)
void DRW_shgroup_uniform_vec4_copy(DRWShadingGroup *shgroup, const char *name, const float *value)
void DRW_shgroup_uniform_bool_copy(DRWShadingGroup *shgroup, const char *name, const bool value)
void DRW_shgroup_uniform_vec2_copy(DRWShadingGroup *shgroup, const char *name, const float *value)
void DRW_draw_pass(DRWPass *pass)
void DRW_view_set_active(const DRWView *view)
DRWState
Definition draw_state.hh:25
@ DRW_STATE_WRITE_DEPTH
Definition draw_state.hh:29
@ DRW_STATE_WRITE_COLOR
Definition draw_state.hh:30
@ DRW_STATE_DEPTH_LESS_EQUAL
Definition draw_state.hh:38
@ DRW_STATE_DEPTH_ALWAYS
Definition draw_state.hh:36
@ DRW_STATE_BLEND_ALPHA_PREMUL
Definition draw_state.hh:57
draw_view in_light_buf[] float
static ulong state[N]
GPUShader * IMAGE_shader_image_get()
GPUShader * IMAGE_shader_depth_get()
float4 interpolate_nearest_border_fl(const ImBuf *in, float u, float v)
Definition IMB_interp.hh:27
T floor(const T &a)
CartesianBasis invert(const CartesianBasis &basis)
T ceil(const T &a)
VecBase< T, 3 > transform_point(const CartesianBasis &basis, const VecBase< T, 3 > &v)
MatBase< float, 4, 4 > float4x4
VecBase< int32_t, 2 > int2
VecBase< float, 2 > float2
VecBase< float, 3 > float3
signed int int32_t
Definition stdint.h:77
ARegion * region
GPUFrameBuffer * color_only_fb
GPUFrameBuffer * default_fb
static constexpr GPUSamplerState default_sampler()
ImBufFloatBuffer float_buffer
ImBufByteBuffer byte_buffer
void ensure_image(const Image *new_image)
Ensure that there is a partial update user for the given image.
PartialUpdateUser * user
const c_style_mat & ptr() const
VecBase< T, 2 > xy() const
ePartialUpdateIterResult get_next_change()
Load the next changed region.
CollectResult collect_changes()
Check for new changes since the last time this method was invoked for this user.
struct rcti region
region of the image that has been updated. Region can be bigger than actual changes.
struct blender::draw::image_engine::IMAGE_InstanceData::@206 passes
struct blender::draw::image_engine::IMAGE_InstanceData::@205 flags
void update_image_usage(const ImageUser *image_user)
bool do_tile_drawing
should we perform tiled drawing (wrap repeat).
float ss_to_texture[4][4]
Transform matrix to convert a normalized screen space coordinates to texture space.
GPUTexture * texture
GPU Texture for a partial region of the image editor.
bool need_full_update
does this texture need a full update.
rcti clipping_bounds
area of the texture in screen space.
rctf clipping_uv_bounds
uv area of the texture in screen space.
int x
Definition types_int2.h:15
int y
Definition types_int2.h:15
float xmax
float xmin
float ymax
float ymin
int ymin
int ymax
int xmin
int xmax