//===-- release_test.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include "list.h"
#include "release.h"
#include "size_class_map.h"

#include <string.h>

#include <algorithm>
#include <memory>
#include <random>
#include <set>
#include <vector>

TEST(ScudoReleaseTest, RegionPageMap) {
  for (scudo::uptr I = 0; I < SCUDO_WORDSIZE; I++) {
    // Various valid counter max values packed into one word.
    scudo::RegionPageMap PageMap2N(1U, 1U, 1UL << I);
    ASSERT_TRUE(PageMap2N.isAllocated());
    EXPECT_EQ(1U, PageMap2N.getBufferNumElements());
    // Check the "all bit set" values too.
    scudo::RegionPageMap PageMap2N1_1(1U, 1U, ~0UL >> I);
    ASSERT_TRUE(PageMap2N1_1.isAllocated());
    EXPECT_EQ(1U, PageMap2N1_1.getBufferNumElements());
    // Verify the packing ratio; the counter is expected to be packed into the
    // closest power of 2 bits.
    scudo::RegionPageMap PageMap(1U, SCUDO_WORDSIZE, 1UL << I);
    ASSERT_TRUE(PageMap.isAllocated());
    EXPECT_EQ(scudo::roundUpPowerOfTwo(I + 1), PageMap.getBufferNumElements());
  }

  // Go through 1, 2, 4, 8, .. {32,64} bits per counter.
  for (scudo::uptr I = 0; (SCUDO_WORDSIZE >> I) != 0; I++) {
    // Make sure counters request one memory page for the buffer.
    const scudo::uptr NumCounters =
        (scudo::getPageSizeCached() / 8) * (SCUDO_WORDSIZE >> I);
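    // The max value below has only the top bit of a 2^I-bit value set
    // (1UL << ((1UL << I) - 1)), so each counter needs exactly 2^I bits.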
    scudo::RegionPageMap PageMap(1U, NumCounters,
                                 1UL << ((1UL << I) - 1));
    ASSERT_TRUE(PageMap.isAllocated());
    PageMap.inc(0U, 0U);
    for (scudo::uptr C = 1; C < NumCounters - 1; C++) {
      EXPECT_EQ(0UL, PageMap.get(0U, C));
      PageMap.inc(0U, C);
      EXPECT_EQ(1UL, PageMap.get(0U, C - 1));
    }
    EXPECT_EQ(0UL, PageMap.get(0U, NumCounters - 1));
    PageMap.inc(0U, NumCounters - 1);
    if (I > 0) {
      PageMap.incRange(0u, 0U, NumCounters - 1);
      for (scudo::uptr C = 0; C < NumCounters; C++)
        EXPECT_EQ(2UL, PageMap.get(0U, C));
    }
  }

  // Similar to the above except that we are using incN().
  for (scudo::uptr I = 0; (SCUDO_WORDSIZE >> I) != 0; I++) {
    // Make sure counters request one memory page for the buffer.
    const scudo::uptr NumCounters =
        (scudo::getPageSizeCached() / 8) * (SCUDO_WORDSIZE >> I);
    scudo::uptr MaxValue = 1UL << ((1UL << I) - 1);
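    // N = MaxValue / 2 below would be zero for 1-bit counters, so skip them.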
    if (MaxValue <= 1U)
      continue;

    scudo::RegionPageMap PageMap(1U, NumCounters, MaxValue);

    scudo::uptr N = MaxValue / 2;
    PageMap.incN(0U, 0, N);
    for (scudo::uptr C = 1; C < NumCounters; C++) {
      EXPECT_EQ(0UL, PageMap.get(0U, C));
      PageMap.incN(0U, C, N);
      EXPECT_EQ(N, PageMap.get(0U, C - 1));
    }
    EXPECT_EQ(N, PageMap.get(0U, NumCounters - 1));
  }
}

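// Records the ranges passed by FreePagesRangeTracker as a string of '.'
// (kept) and 'x' (released) pages, scaled down to page granularity.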
class StringRangeRecorder {
public:
  std::string ReportedPages;

  StringRangeRecorder()
      : PageSizeScaledLog(scudo::getLog2(scudo::getPageSizeCached())) {}

  void releasePageRangeToOS(scudo::uptr From, scudo::uptr To) {
    From >>= PageSizeScaledLog;
    To >>= PageSizeScaledLog;
    EXPECT_LT(From, To);
    if (!ReportedPages.empty())
      EXPECT_LT(LastPageReported, From);
    ReportedPages.append(From - LastPageReported, '.');
    ReportedPages.append(To - From, 'x');
    LastPageReported = To;
  }

private:
  const scudo::uptr PageSizeScaledLog;
  scudo::uptr LastPageReported = 0;
};

TEST(ScudoReleaseTest, FreePagesRangeTracker) {
  // 'x' denotes a page to be released, '.' denotes a page to be kept around.
  const char *TestCases[] = {
      "",
      ".",
      "x",
      "........",
      "xxxxxxxxxxx",
      "..............xxxxx",
      "xxxxxxxxxxxxxxxxxx.....",
      "......xxxxxxxx........",
      "xxx..........xxxxxxxxxxxxxxx",
      "......xxxx....xxxx........",
      "xxx..........xxxxxxxx....xxxxxxx",
      "x.x.x.x.x.x.x.x.x.x.x.x.",
      ".x.x.x.x.x.x.x.x.x.x.x.x",
      ".x.x.x.x.x.x.x.x.x.x.x.x.",
      "x.x.x.x.x.x.x.x.x.x.x.x.x",
  };
  typedef scudo::FreePagesRangeTracker<StringRangeRecorder> RangeTracker;

  for (auto TestCase : TestCases) {
    StringRangeRecorder Recorder;
    RangeTracker Tracker(Recorder);
    for (scudo::uptr I = 0; TestCase[I] != 0; I++)
      Tracker.processNextPage(TestCase[I] == 'x');
    Tracker.finish();
    // Strip trailing '.'-pages before comparing the results as they are not
    // going to be reported to range_recorder anyway.
    const char *LastX = strrchr(TestCase, 'x');
    std::string Expected(
        TestCase,
        LastX == nullptr ? 0U : static_cast<size_t>(LastX - TestCase + 1));
    EXPECT_STREQ(Expected.c_str(), Recorder.ReportedPages.c_str());
  }
}

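// Records the address of every released page (offset by an optional Base) so
// tests can query exactly which pages were reported.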
class ReleasedPagesRecorder {
public:
  ReleasedPagesRecorder() = default;
  explicit ReleasedPagesRecorder(scudo::uptr Base) : Base(Base) {}
  std::set<scudo::uptr> ReportedPages;

  void releasePageRangeToOS(scudo::uptr From, scudo::uptr To) {
    const scudo::uptr PageSize = scudo::getPageSizeCached();
    for (scudo::uptr I = From; I < To; I += PageSize)
      ReportedPages.insert(I + getBase());
  }

  scudo::uptr getBase() const { return Base; }
  scudo::uptr Base = 0;
};

// Simplified version of a TransferBatch.
template <class SizeClassMap> struct FreeBatch {
  static const scudo::u16 MaxCount = SizeClassMap::MaxNumCachedHint;
  void clear() { Count = 0; }
  void add(scudo::uptr P) {
    DCHECK_LT(Count, MaxCount);
    Batch[Count++] = P;
  }
  scudo::u16 getCount() const { return Count; }
  scudo::uptr get(scudo::u16 I) const {
    DCHECK_LT(I, Count);
    return Batch[I];
  }
  FreeBatch *Next;

private:
  scudo::uptr Batch[MaxCount];
  scudo::u16 Count;
};

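// For each size class: build a random free list, run the release pipeline,
// then cross-check the recorder's output against a brute-force walk over the
// region's blocks.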
template <class SizeClassMap> void testReleaseFreeMemoryToOS() {
  typedef FreeBatch<SizeClassMap> Batch;
  const scudo::uptr PagesCount = 1024;
  const scudo::uptr PageSize = scudo::getPageSizeCached();
  const scudo::uptr PageSizeLog = scudo::getLog2(PageSize);
  std::mt19937 R;
  scudo::u32 RandState = 42;

  for (scudo::uptr I = 1; I <= SizeClassMap::LargestClassId; I++) {
    const scudo::uptr BlockSize = SizeClassMap::getSizeByClassId(I);
    const scudo::uptr MaxBlocks = PagesCount * PageSize / BlockSize;

    // Generate the random free list.
    std::vector<scudo::uptr> FreeArray;
    bool InFreeRange = false;
    scudo::uptr CurrentRangeEnd = 0;
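    // Walk the region in runs of 1 to 128 blocks; each run is randomly marked
    // free or in use.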
    for (scudo::uptr I = 0; I < MaxBlocks; I++) {
      if (I == CurrentRangeEnd) {
        InFreeRange = (scudo::getRandomU32(&RandState) & 1U) == 1;
        CurrentRangeEnd += (scudo::getRandomU32(&RandState) & 0x7f) + 1;
      }
      if (InFreeRange)
        FreeArray.push_back(I * BlockSize);
    }
    if (FreeArray.empty())
      continue;
    // Shuffle the array to ensure that the order is irrelevant.
    std::shuffle(FreeArray.begin(), FreeArray.end(), R);

    // Build the FreeList from the FreeArray.
    scudo::SinglyLinkedList<Batch> FreeList;
    FreeList.clear();
    Batch *CurrentBatch = nullptr;
    for (auto const &Block : FreeArray) {
      if (!CurrentBatch) {
        CurrentBatch = new Batch;
        CurrentBatch->clear();
        FreeList.push_back(CurrentBatch);
      }
      CurrentBatch->add(Block);
      if (CurrentBatch->getCount() == Batch::MaxCount)
        CurrentBatch = nullptr;
    }

    // Release the memory.
    auto SkipRegion = [](UNUSED scudo::uptr RegionIndex) { return false; };
    auto DecompactPtr = [](scudo::uptr P) { return P; };
    ReleasedPagesRecorder Recorder;
    scudo::PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
                                      /*ReleaseSize=*/MaxBlocks * BlockSize);
    ASSERT_FALSE(Context.hasBlockMarked());
    Context.markFreeBlocksInRegion(FreeList, DecompactPtr, Recorder.getBase(),
                                   /*RegionIndex=*/0, MaxBlocks * BlockSize,
                                   /*MayContainLastBlockInRegion=*/true);
    ASSERT_TRUE(Context.hasBlockMarked());
    releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
    scudo::RegionPageMap &PageMap = Context.PageMap;
    // Verify that no released page is touched by a used chunk, and that every
    // range of free chunks spanning one or more entire pages had those pages
    // released.
    scudo::uptr VerifiedReleasedPages = 0;
    std::set<scudo::uptr> FreeBlocks(FreeArray.begin(), FreeArray.end());

    scudo::uptr CurrentBlock = 0;
    InFreeRange = false;
    scudo::uptr CurrentFreeRangeStart = 0;
    for (scudo::uptr I = 0; I < MaxBlocks; I++) {
      const bool IsFreeBlock =
          FreeBlocks.find(CurrentBlock) != FreeBlocks.end();
      if (IsFreeBlock) {
        if (!InFreeRange) {
          InFreeRange = true;
          CurrentFreeRangeStart = CurrentBlock;
        }
      } else {
        // Verify that this used chunk does not touch any released page.
        const scudo::uptr StartPage = CurrentBlock / PageSize;
        const scudo::uptr EndPage = (CurrentBlock + BlockSize - 1) / PageSize;
        for (scudo::uptr J = StartPage; J <= EndPage; J++) {
          const bool PageReleased = Recorder.ReportedPages.find(J * PageSize) !=
                                    Recorder.ReportedPages.end();
          EXPECT_EQ(false, PageReleased);
          EXPECT_EQ(false,
                    PageMap.isAllCounted(0, (J * PageSize) >> PageSizeLog));
        }

        if (InFreeRange) {
          InFreeRange = false;
          // Verify that all entire memory pages covered by this range of free
          // chunks were released.
          scudo::uptr P = scudo::roundUp(CurrentFreeRangeStart, PageSize);
          while (P + PageSize <= CurrentBlock) {
            const bool PageReleased =
                Recorder.ReportedPages.find(P) != Recorder.ReportedPages.end();
            EXPECT_EQ(true, PageReleased);
            EXPECT_EQ(true, PageMap.isAllCounted(0, P >> PageSizeLog));
            VerifiedReleasedPages++;
            P += PageSize;
          }
        }
      }

      CurrentBlock += BlockSize;
    }

    if (InFreeRange) {
      scudo::uptr P = scudo::roundUp(CurrentFreeRangeStart, PageSize);
      const scudo::uptr EndPage =
          scudo::roundUp(MaxBlocks * BlockSize, PageSize);
      while (P + PageSize <= EndPage) {
        const bool PageReleased =
            Recorder.ReportedPages.find(P) != Recorder.ReportedPages.end();
        EXPECT_EQ(true, PageReleased);
        EXPECT_EQ(true, PageMap.isAllCounted(0, P >> PageSizeLog));
        VerifiedReleasedPages++;
        P += PageSize;
      }
    }

    EXPECT_EQ(Recorder.ReportedPages.size(), VerifiedReleasedPages);

    while (!FreeList.empty()) {
      CurrentBatch = FreeList.front();
      FreeList.pop_front();
      delete CurrentBatch;
    }
  }
}

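// Verifies markRangeAsAllCounted(): pages fully covered by a marked group
// must be "all counted", while pages shared with blocks straddling a group
// boundary only get partial counts.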
template <class SizeClassMap> void testPageMapMarkRange() {
  const scudo::uptr PageSize = scudo::getPageSizeCached();

  for (scudo::uptr I = 1; I <= SizeClassMap::LargestClassId; I++) {
    const scudo::uptr BlockSize = SizeClassMap::getSizeByClassId(I);

    const scudo::uptr GroupNum = 2;
    const scudo::uptr GroupSize = scudo::roundUp(BlockSize, PageSize) * 2;
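    // Round the total group span up to a block boundary so the region ends
    // exactly at the end of a block.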
    const scudo::uptr RegionSize =
        scudo::roundUpSlow(GroupSize * GroupNum, BlockSize);
    const scudo::uptr RoundedRegionSize = scudo::roundUp(RegionSize, PageSize);

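    // Compute the number of blocks that (fully or partially) sit on each page.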
    std::vector<scudo::uptr> Pages(RoundedRegionSize / PageSize, 0);
    for (scudo::uptr Block = 0; Block < RoundedRegionSize; Block += BlockSize) {
      for (scudo::uptr Page = Block / PageSize;
           Page <= (Block + BlockSize - 1) / PageSize &&
           Page < RoundedRegionSize / PageSize;
           ++Page) {
        ASSERT_LT(Page, Pages.size());
        ++Pages[Page];
      }
    }

    for (scudo::uptr GroupId = 0; GroupId < GroupNum; ++GroupId) {
      const scudo::uptr GroupBeg = GroupId * GroupSize;
      const scudo::uptr GroupEnd = GroupBeg + GroupSize;

      scudo::PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
                                        /*ReleaseSize=*/RegionSize);
      Context.markRangeAsAllCounted(GroupBeg, GroupEnd, /*Base=*/0U,
                                    /*RegionIndex=*/0, RegionSize);

      scudo::uptr FirstBlock =
          ((GroupBeg + BlockSize - 1) / BlockSize) * BlockSize;

      // All the pages before the first block's page are not supposed to be
      // marked.
      if (FirstBlock / PageSize > 0) {
        for (scudo::uptr Page = 0; Page <= FirstBlock / PageSize - 1; ++Page)
          EXPECT_EQ(Context.PageMap.get(/*Region=*/0, Page), 0U);
      }

      // Verify the pages used by the blocks in the group. If the end of the
      // last block is not aligned with `GroupEnd`, that block is verified
      // later.
      scudo::uptr Block;
      for (Block = FirstBlock; Block + BlockSize <= GroupEnd;
           Block += BlockSize) {
        for (scudo::uptr Page = Block / PageSize;
             Page <= (Block + BlockSize - 1) / PageSize; ++Page) {
          // The first used page in the group has two cases: with and without a
          // block straddling the group boundary.
          if (Page == FirstBlock / PageSize) {
            if (FirstBlock % PageSize == 0) {
              EXPECT_TRUE(Context.PageMap.isAllCounted(/*Region=*/0U, Page));
            } else {
              // A block straddles `GroupBeg`; it only increments the counter,
              // so we expect the count to be one less than the total number of
              // blocks on the page (excluding the straddling block).
              EXPECT_EQ(Context.PageMap.get(/*Region=*/0U, Page),
                        Pages[Page] - 1);
            }
          } else {
            EXPECT_TRUE(Context.PageMap.isAllCounted(/*Region=*/0, Page));
          }
        }
      }

      if (Block == GroupEnd)
        continue;

      // Examine the last block which sits across the group boundary.
      if (Block + BlockSize == RegionSize) {
        // This is the last block in the region; it's supposed to mark all the
        // pages as all counted.
        for (scudo::uptr Page = Block / PageSize;
             Page <= (Block + BlockSize - 1) / PageSize; ++Page) {
          EXPECT_TRUE(Context.PageMap.isAllCounted(/*Region=*/0, Page));
        }
      } else {
        for (scudo::uptr Page = Block / PageSize;
             Page <= (Block + BlockSize - 1) / PageSize; ++Page) {
          if (Page <= (GroupEnd - 1) / PageSize)
            EXPECT_TRUE(Context.PageMap.isAllCounted(/*Region=*/0, Page));
          else
            EXPECT_EQ(Context.PageMap.get(/*Region=*/0U, Page), 1U);
        }
      }

      const scudo::uptr FirstUncountedPage =
          scudo::roundUp(Block + BlockSize, PageSize) / PageSize;
      for (scudo::uptr Page = FirstUncountedPage;
           Page < RoundedRegionSize / PageSize; ++Page) {
        EXPECT_EQ(Context.PageMap.get(/*Region=*/0U, Page), 0U);
      }
    } // Iterate each Group

    // Release the entire region. This is to ensure the last page is counted.
    scudo::PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
                                      /*ReleaseSize=*/RegionSize);
    Context.markRangeAsAllCounted(/*From=*/0U, /*To=*/RegionSize, /*Base=*/0,
                                  /*RegionIndex=*/0, RegionSize);
    for (scudo::uptr Page = 0; Page < RoundedRegionSize / PageSize; ++Page)
      EXPECT_TRUE(Context.PageMap.isAllCounted(/*Region=*/0, Page));
  } // Iterate each size class
}

template <class SizeClassMap> void testReleasePartialRegion() {
  typedef FreeBatch<SizeClassMap> Batch;
  const scudo::uptr PageSize = scudo::getPageSizeCached();

  for (scudo::uptr I = 1; I <= SizeClassMap::LargestClassId; I++) {
    // In the following, we want to ensure the region includes at least 2 pages
    // and we will release all the pages except the first one. The handling of
    // the last block is tricky, so we always test the case that includes the
    // last block.
    const scudo::uptr BlockSize = SizeClassMap::getSizeByClassId(I);
    const scudo::uptr ReleaseBase = scudo::roundUp(BlockSize, PageSize);
    const scudo::uptr BasePageOffset = ReleaseBase / PageSize;
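    // The PageMap is indexed relative to ReleaseBase, hence the offset above.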
    const scudo::uptr RegionSize =
        scudo::roundUpSlow(scudo::roundUp(BlockSize, PageSize) + ReleaseBase,
                           BlockSize) +
        BlockSize;
    const scudo::uptr RoundedRegionSize = scudo::roundUp(RegionSize, PageSize);

    scudo::SinglyLinkedList<Batch> FreeList;
    FreeList.clear();

    // Skip the blocks in the first page and add the remaining.
    std::vector<scudo::uptr> Pages(RoundedRegionSize / PageSize, 0);
    for (scudo::uptr Block = scudo::roundUpSlow(ReleaseBase, BlockSize);
         Block + BlockSize <= RoundedRegionSize; Block += BlockSize) {
      for (scudo::uptr Page = Block / PageSize;
           Page <= (Block + BlockSize - 1) / PageSize; ++Page) {
        ASSERT_LT(Page, Pages.size());
        ++Pages[Page];
      }
    }

    // This follows the logic of how we count the last page. It should be
    // consistent with how markFreeBlocksInRegion() handles the last block.
    if (RoundedRegionSize % BlockSize != 0)
      ++Pages.back();

    Batch *CurrentBatch = nullptr;
    for (scudo::uptr Block = scudo::roundUpSlow(ReleaseBase, BlockSize);
         Block < RegionSize; Block += BlockSize) {
      if (CurrentBatch == nullptr ||
          CurrentBatch->getCount() == Batch::MaxCount) {
        CurrentBatch = new Batch;
        CurrentBatch->clear();
        FreeList.push_back(CurrentBatch);
      }
      CurrentBatch->add(Block);
    }

    auto VerifyReleaseToOs = [&](scudo::PageReleaseContext &Context) {
      auto SkipRegion = [](UNUSED scudo::uptr RegionIndex) { return false; };
      ReleasedPagesRecorder Recorder(ReleaseBase);
      releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
      const scudo::uptr FirstBlock = scudo::roundUpSlow(ReleaseBase, BlockSize);

      for (scudo::uptr P = 0; P < RoundedRegionSize; P += PageSize) {
        if (P < FirstBlock) {
          // If FirstBlock is not aligned to a page boundary, the first touched
          // page will not be released either.
          EXPECT_TRUE(Recorder.ReportedPages.find(P) ==
                      Recorder.ReportedPages.end());
        } else {
          EXPECT_TRUE(Recorder.ReportedPages.find(P) !=
                      Recorder.ReportedPages.end());
        }
      }
    };

    // Test marking by visiting each block.
    {
      auto DecompactPtr = [](scudo::uptr P) { return P; };
      scudo::PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
                                        /*ReleaseSize=*/RegionSize - PageSize,
                                        ReleaseBase);
      Context.markFreeBlocksInRegion(FreeList, DecompactPtr, /*Base=*/0U,
                                     /*RegionIndex=*/0, RegionSize,
                                     /*MayContainLastBlockInRegion=*/true);
      for (const Batch &It : FreeList) {
        for (scudo::u16 I = 0; I < It.getCount(); I++) {
          scudo::uptr Block = It.get(I);
          for (scudo::uptr Page = Block / PageSize;
               Page <= (Block + BlockSize - 1) / PageSize; ++Page) {
            EXPECT_EQ(Pages[Page], Context.PageMap.get(/*Region=*/0U,
                                                       Page - BasePageOffset));
          }
        }
      }

      VerifyReleaseToOs(Context);
    }

    // Test range marking.
    {
      scudo::PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
                                        /*ReleaseSize=*/RegionSize - PageSize,
                                        ReleaseBase);
      Context.markRangeAsAllCounted(ReleaseBase, RegionSize, /*Base=*/0U,
                                    /*RegionIndex=*/0, RegionSize);
      for (scudo::uptr Page = ReleaseBase / PageSize;
           Page < RoundedRegionSize / PageSize; ++Page) {
        if (Context.PageMap.get(/*Region=*/0, Page - BasePageOffset) !=
            Pages[Page]) {
          EXPECT_TRUE(Context.PageMap.isAllCounted(/*Region=*/0,
                                                   Page - BasePageOffset));
        }
      }

      VerifyReleaseToOs(Context);
    }

    // Check the buffer size of PageMap.
    {
      scudo::PageReleaseContext Full(BlockSize, /*NumberOfRegions=*/1U,
                                     /*ReleaseSize=*/RegionSize);
      Full.ensurePageMapAllocated();
      scudo::PageReleaseContext Partial(BlockSize, /*NumberOfRegions=*/1U,
                                        /*ReleaseSize=*/RegionSize - PageSize,
                                        ReleaseBase);
      Partial.ensurePageMapAllocated();

      EXPECT_GE(Full.PageMap.getBufferNumElements(),
                Partial.PageMap.getBufferNumElements());
    }

    while (!FreeList.empty()) {
      CurrentBatch = FreeList.front();
      FreeList.pop_front();
      delete CurrentBatch;
    }
  } // Iterate each size class
}

TEST(ScudoReleaseTest, ReleaseFreeMemoryToOSDefault) {
  testReleaseFreeMemoryToOS<scudo::DefaultSizeClassMap>();
}

TEST(ScudoReleaseTest, ReleaseFreeMemoryToOSAndroid) {
  testReleaseFreeMemoryToOS<scudo::AndroidSizeClassMap>();
}

TEST(ScudoReleaseTest, PageMapMarkRange) {
  testPageMapMarkRange<scudo::DefaultSizeClassMap>();
  testPageMapMarkRange<scudo::AndroidSizeClassMap>();
  testPageMapMarkRange<scudo::FuchsiaSizeClassMap>();
}

TEST(ScudoReleaseTest, ReleasePartialRegion) {
  testReleasePartialRegion<scudo::DefaultSizeClassMap>();
  testReleasePartialRegion<scudo::AndroidSizeClassMap>();
  testReleasePartialRegion<scudo::FuchsiaSizeClassMap>();
}

template <class SizeClassMap> void testReleaseRangeWithSingleBlock() {
  const scudo::uptr PageSize = scudo::getPageSizeCached();

  // We want to test that a memory group which contains only a single block is
  // handled properly. The case looks like:
  //
  //      From                 To
  //      +---------------------+
  //  +------------+------------+
  //  |            |            |
  //  +------------+------------+
  //                            ^
  //                       RegionSize
  //
  // Note that `From` will be page aligned.
  //
  // If the second-to-last block is aligned at `From`, then we expect all the
  // pages after `From` to be marked as can-be-released. Otherwise, only the
  // pages touched by the last block will be marked as can-be-released.
  for (scudo::uptr I = 1; I <= SizeClassMap::LargestClassId; I++) {
    const scudo::uptr BlockSize = SizeClassMap::getSizeByClassId(I);
    const scudo::uptr From = scudo::roundUp(BlockSize, PageSize);
    const scudo::uptr To =
        From % BlockSize == 0
            ? From + BlockSize
            : scudo::roundDownSlow(From + BlockSize, BlockSize) + BlockSize;
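    // `To` is the end of the first block starting at or after `From`, i.e. the
    // single block in the marked range.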
    const scudo::uptr RoundedRegionSize = scudo::roundUp(To, PageSize);

    std::vector<scudo::uptr> Pages(RoundedRegionSize / PageSize, 0);
    for (scudo::uptr Block = (To - BlockSize); Block < RoundedRegionSize;
         Block += BlockSize) {
      for (scudo::uptr Page = Block / PageSize;
           Page <= (Block + BlockSize - 1) / PageSize &&
           Page < RoundedRegionSize / PageSize;
           ++Page) {
        ASSERT_LT(Page, Pages.size());
        ++Pages[Page];
      }
    }

    scudo::PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
                                      /*ReleaseSize=*/To,
                                      /*ReleaseBase=*/0U);
    Context.markRangeAsAllCounted(From, To, /*Base=*/0U, /*RegionIndex=*/0,
                                  /*RegionSize=*/To);

    for (scudo::uptr Page = 0; Page < RoundedRegionSize; Page += PageSize) {
      if (Context.PageMap.get(/*Region=*/0U, Page / PageSize) !=
          Pages[Page / PageSize]) {
        EXPECT_TRUE(
            Context.PageMap.isAllCounted(/*Region=*/0U, Page / PageSize));
      }
    }
  } // for each size class
}

TEST(ScudoReleaseTest, RangeReleaseRegionWithSingleBlock) {
  testReleaseRangeWithSingleBlock<scudo::DefaultSizeClassMap>();
  testReleaseRangeWithSingleBlock<scudo::AndroidSizeClassMap>();
  testReleaseRangeWithSingleBlock<scudo::FuchsiaSizeClassMap>();
}

TEST(ScudoReleaseTest, BufferPool) {
  constexpr scudo::uptr StaticBufferCount = SCUDO_WORDSIZE - 1;
  constexpr scudo::uptr StaticBufferNumElements = 512U;

  // Allocate the buffer pool on the heap because it is quite large (slightly
  // more than StaticBufferCount * StaticBufferNumElements * sizeof(uptr)) and
  // it may not fit in the stack on some platforms.
  using BufferPool =
      scudo::BufferPool<StaticBufferCount, StaticBufferNumElements>;
  std::unique_ptr<BufferPool> Pool(new BufferPool());

  std::vector<BufferPool::Buffer> Buffers;
  for (scudo::uptr I = 0; I < StaticBufferCount; ++I) {
    BufferPool::Buffer Buffer = Pool->getBuffer(StaticBufferNumElements);
    EXPECT_TRUE(Pool->isStaticBufferTestOnly(Buffer));
    Buffers.push_back(Buffer);
  }

  // The static buffer is supposed to be used up.
  BufferPool::Buffer Buffer = Pool->getBuffer(StaticBufferNumElements);
  EXPECT_FALSE(Pool->isStaticBufferTestOnly(Buffer));

  Pool->releaseBuffer(Buffer);
  for (auto &Buffer : Buffers)
    Pool->releaseBuffer(Buffer);
}