//===-- tsan_shadow_test.cpp ----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "gtest/gtest.h"

namespace __tsan {

struct Region {
  uptr start;
  uptr end;
};

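// Reads the encoded access back out of the shadow value and verifies that
// every field (sid, epoch, in-cell address, size, access type) round-trips.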
void CheckShadow(const Shadow *s, Sid sid, Epoch epoch, uptr addr, uptr size,
                 AccessType typ) {
  uptr addr1 = 0;
  uptr size1 = 0;
  AccessType typ1 = 0;
  s->GetAccess(&addr1, &size1, &typ1);
  CHECK_EQ(s->sid(), sid);
  CHECK_EQ(s->epoch(), epoch);
  CHECK_EQ(addr1, addr);
  CHECK_EQ(size1, size);
  CHECK_EQ(typ1, typ);
}

TEST(Shadow, Shadow) {
  Sid sid = static_cast<Sid>(11);
  Epoch epoch = static_cast<Epoch>(22);
  FastState fs;
  fs.SetSid(sid);
  fs.SetEpoch(epoch);
  CHECK_EQ(fs.sid(), sid);
  CHECK_EQ(fs.epoch(), epoch);
  CHECK_EQ(fs.GetIgnoreBit(), false);
  fs.SetIgnoreBit();
  CHECK_EQ(fs.GetIgnoreBit(), true);
  fs.ClearIgnoreBit();
  CHECK_EQ(fs.GetIgnoreBit(), false);

  Shadow s0(fs, 1, 2, kAccessWrite);
  CheckShadow(&s0, sid, epoch, 1, 2, kAccessWrite);
  Shadow s1(fs, 2, 3, kAccessRead);
  CheckShadow(&s1, sid, epoch, 2, 3, kAccessRead);
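  // Only the offset within the kShadowCell-sized cell is encoded in the
  // shadow, so the high address bits are expected to be dropped on readback.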
  Shadow s2(fs, 0xfffff8 + 4, 1, kAccessWrite | kAccessAtomic);
  CheckShadow(&s2, sid, epoch, 4, 1, kAccessWrite | kAccessAtomic);
  Shadow s3(fs, 0xfffff8 + 0, 8, kAccessRead | kAccessAtomic);
  CheckShadow(&s3, sid, epoch, 0, 8, kAccessRead | kAccessAtomic);

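  // IsBothReadsOrAtomic(typ) holds iff the stored access and typ are both
  // reads, or both atomic; the combinations below exercise that.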
  CHECK(!s0.IsBothReadsOrAtomic(kAccessRead | kAccessAtomic));
  CHECK(!s1.IsBothReadsOrAtomic(kAccessAtomic));
  CHECK(!s1.IsBothReadsOrAtomic(kAccessWrite));
  CHECK(s1.IsBothReadsOrAtomic(kAccessRead));
  CHECK(s2.IsBothReadsOrAtomic(kAccessAtomic));
  CHECK(!s2.IsBothReadsOrAtomic(kAccessWrite));
  CHECK(!s2.IsBothReadsOrAtomic(kAccessRead));
  CHECK(s3.IsBothReadsOrAtomic(kAccessAtomic));
  CHECK(!s3.IsBothReadsOrAtomic(kAccessWrite));
  CHECK(s3.IsBothReadsOrAtomic(kAccessRead));

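  // IsRWWeakerOrEqual(typ) orders accesses by strength: a write is stronger
  // than a read, and a non-atomic access is stronger than an atomic one, as
  // the combinations below demonstrate.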
  CHECK(!s0.IsRWWeakerOrEqual(kAccessRead | kAccessAtomic));
  CHECK(s1.IsRWWeakerOrEqual(kAccessWrite));
  CHECK(s1.IsRWWeakerOrEqual(kAccessRead));
  CHECK(!s1.IsRWWeakerOrEqual(kAccessWrite | kAccessAtomic));

  CHECK(!s2.IsRWWeakerOrEqual(kAccessRead | kAccessAtomic));
  CHECK(s2.IsRWWeakerOrEqual(kAccessWrite | kAccessAtomic));
  CHECK(s2.IsRWWeakerOrEqual(kAccessRead));
  CHECK(s2.IsRWWeakerOrEqual(kAccessWrite));

  CHECK(s3.IsRWWeakerOrEqual(kAccessRead | kAccessAtomic));
  CHECK(s3.IsRWWeakerOrEqual(kAccessWrite | kAccessAtomic));
  CHECK(s3.IsRWWeakerOrEqual(kAccessRead));
  CHECK(s3.IsRWWeakerOrEqual(kAccessWrite));

  Shadow sro(Shadow::kRodata);
  CheckShadow(&sro, static_cast<Sid>(0), kEpochZero, 0, 0, kAccessRead);
}

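// Spot-checks the app-to-shadow mapping with one representative address each
// from the globals, the stack and the heap.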
TEST(Shadow, Mapping) {
  static int global;
  int stack;
  void *heap = malloc(0);
  free(heap);

  CHECK(IsAppMem((uptr)&global));
  CHECK(IsAppMem((uptr)&stack));
  CHECK(IsAppMem((uptr)heap));

  CHECK(IsShadowMem(MemToShadow((uptr)&global)));
  CHECK(IsShadowMem(MemToShadow((uptr)&stack)));
  CHECK(IsShadowMem(MemToShadow((uptr)heap)));
}

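// Checks shadow-cell granularity: every address within one kShadowCell-byte
// cell maps to the same shadow, and each subsequent cell maps kShadowCnt raw
// shadow slots further on.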
TEST(Shadow, Celling) {
  u64 aligned_data[4];
  char *data = (char *)aligned_data;
  CHECK(IsAligned(reinterpret_cast<uptr>(data), kShadowSize));
  RawShadow *s0 = MemToShadow((uptr)&data[0]);
  CHECK(IsAligned(reinterpret_cast<uptr>(s0), kShadowSize));
  for (unsigned i = 1; i < kShadowCell; i++)
    CHECK_EQ(s0, MemToShadow((uptr)&data[i]));
  for (unsigned i = kShadowCell; i < 2 * kShadowCell; i++)
    CHECK_EQ(s0 + kShadowCnt, MemToShadow((uptr)&data[i]));
  for (unsigned i = 2 * kShadowCell; i < 3 * kShadowCell; i++)
    CHECK_EQ(s0 + 2 * kShadowCnt, MemToShadow((uptr)&data[i]));
}

// Detects whether the Mapping has a kBroken field.
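// The variadic broken() overload below is the fallback and always reports
// false. The second overload drops out of the overload set via SFINAE unless
// Mapping::kBroken exists (Has<Mapping::kBroken> fails to instantiate
// otherwise); when it is viable, it wins because an ellipsis match ranks
// lowest in overload resolution. A minimal illustration (the names B and C
// are hypothetical, local to this comment):
//   struct B { static const uptr kBroken = kBrokenMapping; };
//   struct C {};
//   broken<B>(kBrokenMapping)  -> true  (second overload, SFINAE succeeds)
//   broken<C>(kBrokenMapping)  -> false (variadic fallback)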
template <uptr>
struct Has {
  typedef bool Result;
};

template <typename Mapping>
bool broken(...) {
  return false;
}

template <typename Mapping>
bool broken(uptr what, typename Has<Mapping::kBroken>::Result = false) {
  return Mapping::kBroken & what;
}

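// qsort() comparator: orders Regions by ascending start address.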
static int CompareRegion(const void *region_a, const void *region_b) {
  uptr start_a = ((const struct Region *)region_a)->start;
  uptr start_b = ((const struct Region *)region_b)->start;

  if (start_a < start_b) {
    return -1;
  } else if (start_a > start_b) {
    return 1;
  } else {
    return 0;
  }
}

template <typename Mapping>
static void AddMetaRegion(struct Region *metas, int *num_regions, uptr start,
                          uptr end) {
  // If the app region is not empty, add its meta to the array.
  if (start != end) {
    metas[*num_regions].start = (uptr)MemToMetaImpl::Apply<Mapping>(start);
    metas[*num_regions].end = (uptr)MemToMetaImpl::Apply<Mapping>(end - 1);
    *num_regions = (*num_regions) + 1;
  }
}

struct MappingTest {
  template <typename Mapping>
  static void Apply() {
    // Easy (but ugly) way to print the mapping name.
    Printf("%s\n", __PRETTY_FUNCTION__);
    TestRegion<Mapping>(Mapping::kLoAppMemBeg, Mapping::kLoAppMemEnd);
    TestRegion<Mapping>(Mapping::kMidAppMemBeg, Mapping::kMidAppMemEnd);
    TestRegion<Mapping>(Mapping::kHiAppMemBeg, Mapping::kHiAppMemEnd);
    TestRegion<Mapping>(Mapping::kHeapMemBeg, Mapping::kHeapMemEnd);

    TestDisjointMetas<Mapping>();

    // Not tested: the ordering of regions (low app vs. shadow vs. mid app
    // etc.). That is enforced at runtime by CheckAndProtect.
  }

  template <typename Mapping>
  static void TestRegion(uptr beg, uptr end) {
    if (beg == end)
      return;
    Printf("checking region [0x%zx-0x%zx)\n", beg, end);
    uptr prev = 0;
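    // Sample ~256 evenly spaced points in the region; for each sample also
    // probe the cell-aligned addresses one shadow cell below and above it.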
    for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 256) {
      for (int x = -(int)kShadowCell; x <= (int)kShadowCell; x += kShadowCell) {
        const uptr p = RoundDown(p0 + x, kShadowCell);
        if (p < beg || p >= end)
          continue;
        const uptr s = MemToShadowImpl::Apply<Mapping>(p);
        u32 *const m = MemToMetaImpl::Apply<Mapping>(p);
        const uptr r = ShadowToMemImpl::Apply<Mapping>(s);
        Printf(" addr=0x%zx: shadow=0x%zx meta=%p reverse=0x%zx\n", p, s, m,
               r);
        CHECK(IsAppMemImpl::Apply<Mapping>(p));
        if (!broken<Mapping>(kBrokenMapping))
          CHECK(IsShadowMemImpl::Apply<Mapping>(s));
        CHECK(IsMetaMemImpl::Apply<Mapping>(reinterpret_cast<uptr>(m)));
        CHECK_EQ(p, RestoreAddrImpl::Apply<Mapping>(CompressAddr(p)));
        if (!broken<Mapping>(kBrokenReverseMapping))
          CHECK_EQ(p, r);
        if (prev && !broken<Mapping>(kBrokenLinearity)) {
          // Ensure that shadow and meta mappings are linear within a single
          // user range. Lots of code that processes memory ranges assumes it.
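          // Units: s is a byte address, so shadow must advance
          // kShadowMultiplier bytes per app byte; m is a u32*, so meta must
          // advance one u32 slot per kMetaShadowCell app bytes.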
          const uptr prev_s = MemToShadowImpl::Apply<Mapping>(prev);
          u32 *const prev_m = MemToMetaImpl::Apply<Mapping>(prev);
          CHECK_EQ(s - prev_s, (p - prev) * kShadowMultiplier);
          CHECK_EQ(m - prev_m, (p - prev) / kMetaShadowCell);
        }
        prev = p;
      }
    }
  }

  template <typename Mapping>
  static void TestDisjointMetas() {
    // Checks that the meta for each app region does not overlap with
    // the meta for other app regions. For example, the meta for a high
    // app pointer shouldn't be aliased to the meta of a mid app pointer.
    // Notice that this is important even though there does not exist a
    // MetaToMem function.
    // (If a MetaToMem function did exist, we could simply
    // check in the TestRegion function that it inverts MemToMeta.)
    //
    // We don't try to be clever by allowing the non-PIE (low app)
    // and PIE (mid and high app) meta regions to overlap.
    struct Region metas[4];
    int num_regions = 0;
    AddMetaRegion<Mapping>(metas, &num_regions, Mapping::kLoAppMemBeg,
                           Mapping::kLoAppMemEnd);
    AddMetaRegion<Mapping>(metas, &num_regions, Mapping::kMidAppMemBeg,
                           Mapping::kMidAppMemEnd);
    AddMetaRegion<Mapping>(metas, &num_regions, Mapping::kHiAppMemBeg,
                           Mapping::kHiAppMemEnd);
    AddMetaRegion<Mapping>(metas, &num_regions, Mapping::kHeapMemBeg,
                           Mapping::kHeapMemEnd);

    // It is not required that the low app meta region is below the mid app
    // meta region etc., hence we sort the regions before checking.
    qsort(metas, num_regions, sizeof(struct Region), CompareRegion);

    for (int i = 0; i < num_regions; i++)
      Printf("[0x%zx, 0x%zx]\n", metas[i].start, metas[i].end);

    if (!broken<Mapping>(kBrokenAliasedMetas))
      for (int i = 1; i < num_regions; i++)
        CHECK(metas[i - 1].end <= metas[i].start);
  }
};

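// ForEachMapping instantiates MappingTest::Apply for every address-space
// mapping compiled into the runtime, so each platform layout gets the full
// battery of checks above.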
TEST(Shadow, AllMappings) { ForEachMapping<MappingTest>(); }

}  // namespace __tsan