1//
2// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
3// All rights reserved.
4//
5// Redistribution and use in source and binary forms, with or without
6// modification, are permitted provided that the following conditions
7// are met:
8//
9// Redistributions of source code must retain the above copyright
10// notice, this list of conditions and the following disclaimer.
11//
12// Redistributions in binary form must reproduce the above
13// copyright notice, this list of conditions and the following
14// disclaimer in the documentation and/or other materials provided
15// with the distribution.
16//
17// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
18// contributors may be used to endorse or promote products derived
19// from this software without specific prior written permission.
20//
21// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
27// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
29// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
31// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32// POSSIBILITY OF SUCH DAMAGE.
33//
34
35#include "../Include/Common.h"
36#include "../Include/PoolAlloc.h"
37
38#include "../Include/InitializeGlobals.h"
39#include "../OSDependent/osinclude.h"
40
41namespace QtShaderTools {
42namespace glslang {
43
// Process-wide TLS index. Each thread stores its current TPoolAllocator*
// in this slot (see SetThreadPoolAllocator / GetThreadPoolAllocator).
// The slot is allocated once by InitializePoolIndex().
OS_TLSIndex PoolIndex;
46
47// Return the thread-specific current pool.
48TPoolAllocator& GetThreadPoolAllocator()
49{
50 return *static_cast<TPoolAllocator*>(OS_GetTLSValue(nIndex: PoolIndex));
51}
52
53// Set the thread-specific current pool.
54void SetThreadPoolAllocator(TPoolAllocator* poolAllocator)
55{
56 OS_SetTLSValue(nIndex: PoolIndex, lpvValue: poolAllocator);
57}
58
59// Process-wide set up of the TLS pool storage.
60bool InitializePoolIndex()
61{
62 // Allocate a TLS index.
63 if ((PoolIndex = OS_AllocTLSIndex()) == OS_INVALID_TLS_INDEX)
64 return false;
65
66 return true;
67}
68
69//
70// Implement the functionality of the TPoolAllocator class, which
71// is documented in PoolAlloc.h.
72//
73TPoolAllocator::TPoolAllocator(int growthIncrement, int allocationAlignment) :
74 pageSize(growthIncrement),
75 alignment(allocationAlignment),
76 freeList(nullptr),
77 inUseList(nullptr),
78 numCalls(0)
79{
80 //
81 // Don't allow page sizes we know are smaller than all common
82 // OS page sizes.
83 //
84 if (pageSize < 4*1024)
85 pageSize = 4*1024;
86
87 //
88 // A large currentPageOffset indicates a new page needs to
89 // be obtained to allocate memory.
90 //
91 currentPageOffset = pageSize;
92
93 //
94 // Adjust alignment to be at least pointer aligned and
95 // power of 2.
96 //
97 size_t minAlign = sizeof(void*);
98 alignment &= ~(minAlign - 1);
99 if (alignment < minAlign)
100 alignment = minAlign;
101 size_t a = 1;
102 while (a < alignment)
103 a <<= 1;
104 alignment = a;
105 alignmentMask = a - 1;
106
107 //
108 // Align header skip
109 //
110 headerSkip = minAlign;
111 if (headerSkip < sizeof(tHeader)) {
112 headerSkip = (sizeof(tHeader) + alignmentMask) & ~alignmentMask;
113 }
114
115 push();
116}
117
118TPoolAllocator::~TPoolAllocator()
119{
120 while (inUseList) {
121 tHeader* next = inUseList->nextPage;
122 inUseList->~tHeader();
123 delete [] reinterpret_cast<char*>(inUseList);
124 inUseList = next;
125 }
126
127 //
128 // Always delete the free list memory - it can't be being
129 // (correctly) referenced, whether the pool allocator was
130 // global or not. We should not check the guard blocks
131 // here, because we did it already when the block was
132 // placed into the free list.
133 //
134 while (freeList) {
135 tHeader* next = freeList->nextPage;
136 delete [] reinterpret_cast<char*>(freeList);
137 freeList = next;
138 }
139}
140
// Fill bytes used to detect memory stomps in debug builds:
// guardBlockBeginVal/guardBlockEndVal fill the guard bands written around
// each allocation; userDataFill pre-fills the user bytes so reads of
// never-written pool memory are recognizable in a debugger.
const unsigned char TAllocation::guardBlockBeginVal = 0xfb;
const unsigned char TAllocation::guardBlockEndVal = 0xfe;
const unsigned char TAllocation::userDataFill = 0xcd;

// Guard bands exist only when GUARD_BLOCKS is defined; otherwise their
// size is 0 and all guard handling compiles away.
# ifdef GUARD_BLOCKS
    const size_t TAllocation::guardBlockSize = 16;
# else
    const size_t TAllocation::guardBlockSize = 0;
# endif
150
//
// Check a single guard block for damage.
//
// blockMem: start of the guard band to verify.
// val:      the byte the band was filled with (guardBlockBeginVal or
//           guardBlockEndVal).
// locText:  text describing which band is being checked, used only in
//           the (unprinted) diagnostic message.
//
// When GUARD_BLOCKS is not defined the parameters are unused and the
// function is effectively a no-op (guardBlockSize is 0).
//
#ifdef GUARD_BLOCKS
void TAllocation::checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const
#else
void TAllocation::checkGuardBlock(unsigned char*, unsigned char, const char*) const
#endif
{
#ifdef GUARD_BLOCKS
    // Scan the band; any byte differing from the fill value means some
    // code wrote outside its allocation.
    for (size_t x = 0; x < guardBlockSize; x++) {
        if (blockMem[x] != val) {
            const int maxSize = 80;
            char assertMsg[maxSize];

            // We don't print the assert message. It's here just to be helpful.
            snprintf(assertMsg, maxSize, "PoolAlloc: Damage %s %zu byte allocation at 0x%p\n",
                     locText, size, data());
            assert(0 && "PoolAlloc: Damage in guard block");
        }
    }
#else
    assert(guardBlockSize == 0);
#endif
}
176
177void TPoolAllocator::push()
178{
179 tAllocState state = { .offset: currentPageOffset, .page: inUseList };
180
181 stack.push_back(x: state);
182
183 //
184 // Indicate there is no current page to allocate from.
185 //
186 currentPageOffset = pageSize;
187}
188
//
// Do a mass-deallocation of all the individual allocations
// that have occurred since the last push(), or since the
// last pop(), or since the object's creation.
//
// The deallocated pages are saved for future allocations.
//
// No-op if there is no state on the stack to restore.
//
void TPoolAllocator::pop()
{
    if (stack.size() < 1)
        return;

    // Restore the page and offset captured by the matching push().
    tHeader* page = stack.back().page;
    currentPageOffset = stack.back().offset;

    // Walk the in-use list back to the page that was current at push()
    // time, releasing every page allocated since then.
    while (inUseList != page) {
        tHeader* nextInUse = inUseList->nextPage;
        size_t pageCount = inUseList->pageCount;

        // This technically ends the lifetime of the header as C++ object,
        // but we will still control the memory and reuse it.
        inUseList->~tHeader(); // currently, just a debug allocation checker

        if (pageCount > 1) {
            // Multi-page block: return it straight to the heap.
            delete [] reinterpret_cast<char*>(inUseList);
        } else {
            // Single page: recycle it onto the free list for reuse.
            // Writing nextPage reuses the storage after the header's
            // destructor ran -- see the lifetime note above.
            inUseList->nextPage = freeList;
            freeList = inUseList;
        }
        inUseList = nextInUse;
    }

    stack.pop_back();
}
223
224//
225// Do a mass-deallocation of all the individual allocations
226// that have occurred.
227//
228void TPoolAllocator::popAll()
229{
230 while (stack.size() > 0)
231 pop();
232}
233
234void* TPoolAllocator::allocate(size_t numBytes)
235{
236 // If we are using guard blocks, all allocations are bracketed by
237 // them: [guardblock][allocation][guardblock]. numBytes is how
238 // much memory the caller asked for. allocationSize is the total
239 // size including guard blocks. In release build,
240 // guardBlockSize=0 and this all gets optimized away.
241 size_t allocationSize = TAllocation::allocationSize(size: numBytes);
242
243 //
244 // Just keep some interesting statistics.
245 //
246 ++numCalls;
247 totalBytes += numBytes;
248
249 //
250 // Do the allocation, most likely case first, for efficiency.
251 // This step could be moved to be inline sometime.
252 //
253 if (currentPageOffset + allocationSize <= pageSize) {
254 //
255 // Safe to allocate from currentPageOffset.
256 //
257 unsigned char* memory = reinterpret_cast<unsigned char*>(inUseList) + currentPageOffset;
258 currentPageOffset += allocationSize;
259 currentPageOffset = (currentPageOffset + alignmentMask) & ~alignmentMask;
260
261 return initializeAllocation(inUseList, memory, numBytes);
262 }
263
264 if (allocationSize + headerSkip > pageSize) {
265 //
266 // Do a multi-page allocation. Don't mix these with the others.
267 // The OS is efficient and allocating and free-ing multiple pages.
268 //
269 size_t numBytesToAlloc = allocationSize + headerSkip;
270 tHeader* memory = reinterpret_cast<tHeader*>(::new char[numBytesToAlloc]);
271 if (memory == 0)
272 return 0;
273
274 // Use placement-new to initialize header
275 new(memory) tHeader(inUseList, (numBytesToAlloc + pageSize - 1) / pageSize);
276 inUseList = memory;
277
278 currentPageOffset = pageSize; // make next allocation come from a new page
279
280 // No guard blocks for multi-page allocations (yet)
281 return reinterpret_cast<void*>(reinterpret_cast<UINT_PTR>(memory) + headerSkip);
282 }
283
284 //
285 // Need a simple page to allocate from.
286 //
287 tHeader* memory;
288 if (freeList) {
289 memory = freeList;
290 freeList = freeList->nextPage;
291 } else {
292 memory = reinterpret_cast<tHeader*>(::new char[pageSize]);
293 if (memory == 0)
294 return 0;
295 }
296
297 // Use placement-new to initialize header
298 new(memory) tHeader(inUseList, 1);
299 inUseList = memory;
300
301 unsigned char* ret = reinterpret_cast<unsigned char*>(inUseList) + headerSkip;
302 currentPageOffset = (headerSkip + allocationSize + alignmentMask) & ~alignmentMask;
303
304 return initializeAllocation(inUseList, memory: ret, numBytes);
305}
306
307//
308// Check all allocations in a list for damage by calling check on each.
309//
310void TAllocation::checkAllocList() const
311{
312 for (const TAllocation* alloc = this; alloc != 0; alloc = alloc->prevAlloc)
313 alloc->check();
314}
315
316} // end namespace glslang
317} // namespace QtShaderTools
318

source code of qtshadertools/src/3rdparty/glslang/glslang/MachineIndependent/PoolAlloc.cpp