//===-- sanitizer_quarantine_test.cpp -------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_quarantine.h"
#include "gtest/gtest.h"

#include <stdlib.h>

namespace __sanitizer {

20struct QuarantineCallback {
21 void Recycle(void *m) {}
22 void *Allocate(uptr size) {
23 return malloc(size: size);
24 }
25 void Deallocate(void *p) {
26 free(ptr: p);
27 }
28};
29
30typedef QuarantineCache<QuarantineCallback> Cache;
31
32static void* kFakePtr = reinterpret_cast<void*>(0xFA83FA83);
33static const size_t kBlockSize = 8;
34
35static QuarantineCallback cb;
36
37static void DeallocateCache(Cache *cache) {
38 while (QuarantineBatch *batch = cache->DequeueBatch())
39 cb.Deallocate(p: batch);
40}
41
42TEST(SanitizerCommon, QuarantineBatchMerge) {
43 // Verify the trivial case.
44 QuarantineBatch into;
45 into.init(ptr: kFakePtr, size: 4UL);
46 QuarantineBatch from;
47 from.init(ptr: kFakePtr, size: 8UL);
48
49 into.merge(from: &from);
50
51 ASSERT_EQ(into.count, 2UL);
52 ASSERT_EQ(into.batch[0], kFakePtr);
53 ASSERT_EQ(into.batch[1], kFakePtr);
54 ASSERT_EQ(into.size, 12UL + sizeof(QuarantineBatch));
55 ASSERT_EQ(into.quarantined_size(), 12UL);
56
57 ASSERT_EQ(from.count, 0UL);
58 ASSERT_EQ(from.size, sizeof(QuarantineBatch));
59 ASSERT_EQ(from.quarantined_size(), 0UL);
60
61 // Merge the batch to the limit.
62 for (uptr i = 2; i < QuarantineBatch::kSize; ++i)
63 from.push_back(ptr: kFakePtr, size: 8UL);
64 ASSERT_TRUE(into.count + from.count == QuarantineBatch::kSize);
65 ASSERT_TRUE(into.can_merge(from: &from));
66
67 into.merge(from: &from);
68 ASSERT_TRUE(into.count == QuarantineBatch::kSize);
69
70 // No more space, not even for one element.
71 from.init(ptr: kFakePtr, size: 8UL);
72
73 ASSERT_FALSE(into.can_merge(from: &from));
74}
75
76TEST(SanitizerCommon, QuarantineCacheMergeBatchesEmpty) {
77 Cache cache;
78 Cache to_deallocate;
79 cache.MergeBatches(to_deallocate: &to_deallocate);
80
81 ASSERT_EQ(to_deallocate.Size(), 0UL);
82 ASSERT_EQ(to_deallocate.DequeueBatch(), nullptr);
83}
84
85TEST(SanitizerCommon, QuarantineCacheMergeBatchesOneBatch) {
86 Cache cache;
87 cache.Enqueue(cb, ptr: kFakePtr, size: kBlockSize);
88 ASSERT_EQ(kBlockSize + sizeof(QuarantineBatch), cache.Size());
89
90 Cache to_deallocate;
91 cache.MergeBatches(to_deallocate: &to_deallocate);
92
93 // Nothing to merge, nothing to deallocate.
94 ASSERT_EQ(kBlockSize + sizeof(QuarantineBatch), cache.Size());
95
96 ASSERT_EQ(to_deallocate.Size(), 0UL);
97 ASSERT_EQ(to_deallocate.DequeueBatch(), nullptr);
98
99 DeallocateCache(cache: &cache);
100}
101
102TEST(SanitizerCommon, QuarantineCacheMergeBatchesSmallBatches) {
103 // Make a cache with two batches small enough to merge.
104 Cache from;
105 from.Enqueue(cb, ptr: kFakePtr, size: kBlockSize);
106 Cache cache;
107 cache.Enqueue(cb, ptr: kFakePtr, size: kBlockSize);
108
109 cache.Transfer(from_cache: &from);
110 ASSERT_EQ(kBlockSize * 2 + sizeof(QuarantineBatch) * 2, cache.Size());
111
112 Cache to_deallocate;
113 cache.MergeBatches(to_deallocate: &to_deallocate);
114
115 // Batches merged, one batch to deallocate.
116 ASSERT_EQ(kBlockSize * 2 + sizeof(QuarantineBatch), cache.Size());
117 ASSERT_EQ(to_deallocate.Size(), sizeof(QuarantineBatch));
118
119 DeallocateCache(cache: &cache);
120 DeallocateCache(cache: &to_deallocate);
121}
122
123TEST(SanitizerCommon, QuarantineCacheMergeBatchesTooBigToMerge) {
124 const uptr kNumBlocks = QuarantineBatch::kSize - 1;
125
126 // Make a cache with two batches small enough to merge.
127 Cache from;
128 Cache cache;
129 for (uptr i = 0; i < kNumBlocks; ++i) {
130 from.Enqueue(cb, ptr: kFakePtr, size: kBlockSize);
131 cache.Enqueue(cb, ptr: kFakePtr, size: kBlockSize);
132 }
133 cache.Transfer(from_cache: &from);
134 ASSERT_EQ(kBlockSize * kNumBlocks * 2 +
135 sizeof(QuarantineBatch) * 2, cache.Size());
136
137 Cache to_deallocate;
138 cache.MergeBatches(to_deallocate: &to_deallocate);
139
140 // Batches cannot be merged.
141 ASSERT_EQ(kBlockSize * kNumBlocks * 2 +
142 sizeof(QuarantineBatch) * 2, cache.Size());
143 ASSERT_EQ(to_deallocate.Size(), 0UL);
144
145 DeallocateCache(cache: &cache);
146}
147
148TEST(SanitizerCommon, QuarantineCacheMergeBatchesALotOfBatches) {
149 const uptr kNumBatchesAfterMerge = 3;
150 const uptr kNumBlocks = QuarantineBatch::kSize * kNumBatchesAfterMerge;
151 const uptr kNumBatchesBeforeMerge = kNumBlocks;
152
153 // Make a cache with many small batches.
154 Cache cache;
155 for (uptr i = 0; i < kNumBlocks; ++i) {
156 Cache from;
157 from.Enqueue(cb, ptr: kFakePtr, size: kBlockSize);
158 cache.Transfer(from_cache: &from);
159 }
160
161 ASSERT_EQ(kBlockSize * kNumBlocks +
162 sizeof(QuarantineBatch) * kNumBatchesBeforeMerge, cache.Size());
163
164 Cache to_deallocate;
165 cache.MergeBatches(to_deallocate: &to_deallocate);
166
167 // All blocks should fit into 3 batches.
168 ASSERT_EQ(kBlockSize * kNumBlocks +
169 sizeof(QuarantineBatch) * kNumBatchesAfterMerge, cache.Size());
170
171 ASSERT_EQ(to_deallocate.Size(),
172 sizeof(QuarantineBatch) *
173 (kNumBatchesBeforeMerge - kNumBatchesAfterMerge));
174
175 DeallocateCache(cache: &cache);
176 DeallocateCache(cache: &to_deallocate);
177}
178
} // namespace __sanitizer

// Source: compiler-rt/lib/sanitizer_common/tests/sanitizer_quarantine_test.cpp