Blender V5.0
guarded_allocator.h
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2011-2022 Blender Foundation
2 *
3 * SPDX-License-Identifier: Apache-2.0 */
4
5#pragma once
6
#include <cstddef>
#include <cstdlib>
#include <new>

#ifdef WITH_BLENDER_GUARDEDALLOC
#  include "MEM_guardedalloc.h"
#endif
14
16
17/* Internal use only. */
18void util_guarded_mem_alloc(const size_t n);
19void util_guarded_mem_free(const size_t n);
20
21/* Guarded allocator for the use with STL. */
22template<typename T> class GuardedAllocator {
23 public:
24 using size_type = size_t;
25 using difference_type = ptrdiff_t;
26 using pointer = T *;
27 using const_pointer = const T *;
28 using reference = T &;
29 using const_reference = const T &;
30 using value_type = T;
31
32 GuardedAllocator() = default;
33 GuardedAllocator(const GuardedAllocator & /*unused*/) = default;
34
35 T *allocate(const size_t n, const void *hint = nullptr)
36 {
37 (void)hint;
38 size_t size = n * sizeof(T);
40 if (n == 0) {
41 return nullptr;
42 }
43 T *mem;
44#ifdef WITH_BLENDER_GUARDEDALLOC
45 /* C++ standard requires allocation functions to allocate memory suitably
46 * aligned for any standard type. This is 16 bytes for 64 bit platform as
47 * far as i concerned. We might over-align on 32bit here, but that should
48 * be all safe actually.
49 */
50 mem = (T *)MEM_mallocN_aligned(size, 16, "Cycles Alloc");
51#else
52 mem = (T *)malloc(size);
53#endif
54 if (mem == nullptr) {
55 throw std::bad_alloc();
56 }
57 return mem;
58 }
59
60 void deallocate(T *p, const size_t n)
61 {
62 util_guarded_mem_free(n * sizeof(T));
63 if (p != nullptr) {
64#ifdef WITH_BLENDER_GUARDEDALLOC
65 MEM_freeN(const_cast<void *>(static_cast<const void *>(p)));
66#else
67 free(p);
68#endif
69 }
70 }
71
72 T *address(T &x) const
73 {
74 return &x;
75 }
76
77 const T *address(const T &x) const
78 {
79 return &x;
80 }
81
82 GuardedAllocator<T> &operator=(const GuardedAllocator & /*unused*/) = default;
83
84 size_t max_size() const
85 {
86 return size_t(-1);
87 }
88
89 template<class U> struct rebind {
91 };
92
93 template<class U> GuardedAllocator(const GuardedAllocator<U> & /*unused*/) {}
94
95 template<class U> GuardedAllocator &operator=(const GuardedAllocator<U> & /*unused*/)
96 {
97 return *this;
98 }
99
100 bool operator==(const GuardedAllocator & /*other*/) const
101 {
102 return true;
103 }
104 bool operator!=(const GuardedAllocator &other) const
105 {
106 return !operator==(other);
107 }
108
109#ifdef _MSC_VER
110 /* Welcome to the black magic here.
111 *
112 * The issue is that MSVC C++ allocates container proxy on any
113 * vector initialization, including static vectors which don't
114 * have any data yet. This leads to several issues:
115 *
116 * - Static objects initialization fiasco (global_stats from
117 * util_stats.h might not be initialized yet).
118 * - If main() function changes allocator type (for example,
119 * this might happen with `blender --debug-memory`) nobody
120 * will know how to convert already allocated memory to a new
121 * guarded allocator.
122 *
123 * Here we work this around by making it so container proxy does
124 * not use guarded allocation. A bit fragile, unfortunately.
125 */
126 template<> struct rebind<std::_Container_proxy> {
127 typedef std::allocator<std::_Container_proxy> other;
128 };
129
130 operator std::allocator<std::_Container_proxy>() const
131 {
132 return std::allocator<std::_Container_proxy>();
133 }
134#endif
135};
136
/* Get memory usage and peak from the guarded STL allocator.
 * `used` is the number of bytes currently allocated, `peak` the historical
 * maximum of that counter. */
size_t util_guarded_get_mem_used();
size_t util_guarded_get_mem_peak();
141/* Call given function and keep track if it runs out of memory.
142 *
 * If it does run out of memory, stop execution and set progress
144 * to do a global cancel.
145 *
146 * It's not fully robust, but good enough to catch obvious issues
147 * when running out of memory.
148 */
/* NOTE: `progress` must be a pointer to an object with a set_error(const char *)
 * style member; `func` is invoked with the remaining arguments. Only
 * std::bad_alloc is intercepted — other exceptions propagate unchanged. */
#define MEM_GUARDED_CALL(progress, func, ...) \
  do { \
    try { \
      (func)(__VA_ARGS__); \
    } \
    catch (std::bad_alloc &) { \
      /* Report and request a global cancel rather than crashing. */ \
      LOG_ERROR << "Out of memory"; \
      fflush(stderr); \
      (progress)->set_error("Out of memory"); \
    } \
  } while (false)
160
void BLI_kdtree_nd_ free(KDTree *tree)
Read Guarded memory(de)allocation.
static DBVT_INLINE btScalar size(const btDbvtVolume &a)
Definition btDbvt.cpp:52
bool operator!=(const GuardedAllocator &other) const
GuardedAllocator< T > & operator=(const GuardedAllocator &)=default
bool operator==(const GuardedAllocator &) const
T * allocate(const size_t n, const void *hint=nullptr)
GuardedAllocator & operator=(const GuardedAllocator< U > &)
T * address(T &x) const
GuardedAllocator(const GuardedAllocator< U > &)
GuardedAllocator()=default
const T * address(const T &x) const
GuardedAllocator(const GuardedAllocator &)=default
size_t max_size() const
void deallocate(T *p, const size_t n)
#define CCL_NAMESPACE_END
size_t util_guarded_get_mem_used()
size_t util_guarded_get_mem_peak()
CCL_NAMESPACE_BEGIN void util_guarded_mem_alloc(const size_t n)
void util_guarded_mem_free(const size_t n)
void * MEM_mallocN_aligned(size_t len, size_t alignment, const char *str)
Definition mallocn.cc:138
void MEM_freeN(void *vmemh)
Definition mallocn.cc:113
#define T
GuardedAllocator< U > other