//===- ThreadSafety.cpp ---------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// An intra-procedural analysis for thread safety (e.g. deadlocks and race
// conditions), based on an annotation system.
//
// See http://clang.llvm.org/docs/ThreadSafetyAnalysis.html
// for more information.
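//
// As a rough sketch (the 'Mutex' class and the GUARDED_BY macro follow the
// annotation macros from the documentation above), the analysis checks code
// such as:
//
//   Mutex Mu;
//   int Count GUARDED_BY(Mu);
//   void inc() { Mu.Lock(); ++Count; Mu.Unlock(); }  // OK: Mu is held.
//   void bad() { ++Count; }  // Warning: writing Count requires holding Mu.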
//
//===----------------------------------------------------------------------===//

#include "clang/Analysis/Analyses/ThreadSafety.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclGroup.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/Type.h"
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
#include "clang/Analysis/Analyses/ThreadSafetyCommon.h"
#include "clang/Analysis/Analyses/ThreadSafetyTIL.h"
#include "clang/Analysis/Analyses/ThreadSafetyUtil.h"
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/Specifiers.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>

using namespace clang;
using namespace threadSafety;

// Key method definition
ThreadSafetyHandler::~ThreadSafetyHandler() = default;

/// Issue a warning about an invalid lock expression
static void warnInvalidLock(ThreadSafetyHandler &Handler,
                            const Expr *MutexExp, const NamedDecl *D,
                            const Expr *DeclExp, StringRef Kind) {
  SourceLocation Loc;
  if (DeclExp)
    Loc = DeclExp->getExprLoc();

  // FIXME: add a note about the attribute location in MutexExp or D
  if (Loc.isValid())
    Handler.handleInvalidLockExp(Loc);
}

namespace {

/// A set of CapabilityExpr objects, which are compiled from thread safety
/// attributes on a function.
class CapExprSet : public SmallVector<CapabilityExpr, 4> {
public:
  /// Push M onto list, but discard duplicates.
  void push_back_nodup(const CapabilityExpr &CapE) {
    if (llvm::none_of(*this, [=](const CapabilityExpr &CapE2) {
          return CapE.equals(CapE2);
        }))
      push_back(CapE);
  }
};

class FactManager;
class FactSet;

/// This is a helper class that stores a fact that is known at a
/// particular point in program execution. Currently, a fact is a capability,
/// along with additional information, such as where it was acquired, whether
/// it is exclusive or shared, etc.
class FactEntry : public CapabilityExpr {
public:
  enum FactEntryKind { Lockable, ScopedLockable };

  /// Where a fact comes from.
  enum SourceKind {
    Acquired, ///< The fact has been directly acquired.
    Asserted, ///< The fact has been asserted to be held.
    Declared, ///< The fact is assumed to be held by callers.
    Managed,  ///< The fact has been acquired through a scoped capability.
  };

private:
  const FactEntryKind Kind : 8;

  /// Exclusive or shared.
  LockKind LKind : 8;

  /// How it was acquired.
  SourceKind Source : 8;

  /// Where it was acquired.
  SourceLocation AcquireLoc;

public:
  FactEntry(FactEntryKind FK, const CapabilityExpr &CE, LockKind LK,
            SourceLocation Loc, SourceKind Src)
      : CapabilityExpr(CE), Kind(FK), LKind(LK), Source(Src), AcquireLoc(Loc) {}
  virtual ~FactEntry() = default;

  LockKind kind() const { return LKind; }
  SourceLocation loc() const { return AcquireLoc; }
  FactEntryKind getFactEntryKind() const { return Kind; }

  bool asserted() const { return Source == Asserted; }
  bool declared() const { return Source == Declared; }
  bool managed() const { return Source == Managed; }

  virtual void
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
                                SourceLocation JoinLoc, LockErrorKind LEK,
                                ThreadSafetyHandler &Handler) const = 0;
  virtual void handleLock(FactSet &FSet, FactManager &FactMan,
                          const FactEntry &entry,
                          ThreadSafetyHandler &Handler) const = 0;
  virtual void handleUnlock(FactSet &FSet, FactManager &FactMan,
                            const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                            bool FullyRemove,
                            ThreadSafetyHandler &Handler) const = 0;

  // Return true if LKind >= LK, where exclusive > shared
  bool isAtLeast(LockKind LK) const {
    return (LKind == LK_Exclusive) || (LK == LK_Shared);
  }
};

using FactID = unsigned short;

/// FactManager manages the memory for all facts that are created during
/// the analysis of a single routine.
class FactManager {
private:
  std::vector<std::unique_ptr<const FactEntry>> Facts;

public:
  FactID newFact(std::unique_ptr<FactEntry> Entry) {
    Facts.push_back(std::move(Entry));
    assert(Facts.size() - 1 <= std::numeric_limits<FactID>::max() &&
           "FactID space exhausted");
    return static_cast<unsigned short>(Facts.size() - 1);
  }

  const FactEntry &operator[](FactID F) const { return *Facts[F]; }
};

/// A FactSet is the set of facts that are known to be true at a
/// particular program point. FactSets must be small, because they are
/// frequently copied, and are thus implemented as a set of indices into a
/// table maintained by a FactManager. A typical FactSet only holds 1 or 2
/// locks, so we can get away with doing a linear search for lookup. Note
/// that a hashtable or map is inappropriate in this case, because lookups
/// may involve partial pattern matches, rather than exact matches.
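/// For example (informally), facts 'a.mu' and 'b.mu' refer to the same member
/// 'mu' and thus constitute a partial match, while the universal capability
/// '*' should match any specific mutex (see findLockUniv below).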
class FactSet {
private:
  using FactVec = SmallVector<FactID, 4>;

  FactVec FactIDs;

public:
  using iterator = FactVec::iterator;
  using const_iterator = FactVec::const_iterator;

  iterator begin() { return FactIDs.begin(); }
  const_iterator begin() const { return FactIDs.begin(); }

  iterator end() { return FactIDs.end(); }
  const_iterator end() const { return FactIDs.end(); }

  bool isEmpty() const { return FactIDs.size() == 0; }

  // Return true if the set contains only negative facts
  bool isEmpty(FactManager &FactMan) const {
    for (const auto FID : *this) {
      if (!FactMan[FID].negative())
        return false;
    }
    return true;
  }

  void addLockByID(FactID ID) { FactIDs.push_back(ID); }

  FactID addLock(FactManager &FM, std::unique_ptr<FactEntry> Entry) {
    FactID F = FM.newFact(std::move(Entry));
    FactIDs.push_back(F);
    return F;
  }

  bool removeLock(FactManager &FM, const CapabilityExpr &CapE) {
    unsigned n = FactIDs.size();
    if (n == 0)
      return false;

    for (unsigned i = 0; i < n-1; ++i) {
      if (FM[FactIDs[i]].matches(CapE)) {
        FactIDs[i] = FactIDs[n-1];
        FactIDs.pop_back();
        return true;
      }
    }
    if (FM[FactIDs[n-1]].matches(CapE)) {
      FactIDs.pop_back();
      return true;
    }
    return false;
  }

  std::optional<FactID> replaceLock(FactManager &FM, iterator It,
                                    std::unique_ptr<FactEntry> Entry) {
    if (It == end())
      return std::nullopt;
    FactID F = FM.newFact(std::move(Entry));
    *It = F;
    return F;
  }

  std::optional<FactID> replaceLock(FactManager &FM, const CapabilityExpr &CapE,
                                    std::unique_ptr<FactEntry> Entry) {
    return replaceLock(FM, findLockIter(FM, CapE), std::move(Entry));
  }

  iterator findLockIter(FactManager &FM, const CapabilityExpr &CapE) {
    return llvm::find_if(*this,
                         [&](FactID ID) { return FM[ID].matches(CapE); });
  }

  const FactEntry *findLock(FactManager &FM, const CapabilityExpr &CapE) const {
    auto I =
        llvm::find_if(*this, [&](FactID ID) { return FM[ID].matches(CapE); });
    return I != end() ? &FM[*I] : nullptr;
  }

  const FactEntry *findLockUniv(FactManager &FM,
                                const CapabilityExpr &CapE) const {
    auto I = llvm::find_if(
        *this, [&](FactID ID) -> bool { return FM[ID].matchesUniv(CapE); });
    return I != end() ? &FM[*I] : nullptr;
  }

  const FactEntry *findPartialMatch(FactManager &FM,
                                    const CapabilityExpr &CapE) const {
    auto I = llvm::find_if(*this, [&](FactID ID) -> bool {
      return FM[ID].partiallyMatches(CapE);
    });
    return I != end() ? &FM[*I] : nullptr;
  }

  bool containsMutexDecl(FactManager &FM, const ValueDecl* Vd) const {
    auto I = llvm::find_if(
        *this, [&](FactID ID) -> bool { return FM[ID].valueDecl() == Vd; });
    return I != end();
  }
};

class ThreadSafetyAnalyzer;

} // namespace

namespace clang {
namespace threadSafety {

class BeforeSet {
private:
  using BeforeVect = SmallVector<const ValueDecl *, 4>;

  struct BeforeInfo {
    BeforeVect Vect;
    int Visited = 0;

    BeforeInfo() = default;
    BeforeInfo(BeforeInfo &&) = default;
  };

  using BeforeMap =
      llvm::DenseMap<const ValueDecl *, std::unique_ptr<BeforeInfo>>;
  using CycleMap = llvm::DenseMap<const ValueDecl *, bool>;

public:
  BeforeSet() = default;

  BeforeInfo* insertAttrExprs(const ValueDecl* Vd,
                              ThreadSafetyAnalyzer& Analyzer);

  BeforeInfo *getBeforeInfoForDecl(const ValueDecl *Vd,
                                   ThreadSafetyAnalyzer &Analyzer);

  void checkBeforeAfter(const ValueDecl* Vd,
                        const FactSet& FSet,
                        ThreadSafetyAnalyzer& Analyzer,
                        SourceLocation Loc, StringRef CapKind);

private:
  BeforeMap BMap;
  CycleMap CycMap;
};

} // namespace threadSafety
} // namespace clang

namespace {

class LocalVariableMap;

using LocalVarContext = llvm::ImmutableMap<const NamedDecl *, unsigned>;

/// A side (entry or exit) of a CFG node.
enum CFGBlockSide { CBS_Entry, CBS_Exit };

/// CFGBlockInfo is a struct which contains all the information that is
/// maintained for each block in the CFG. See LocalVariableMap for more
/// information about the contexts.
struct CFGBlockInfo {
  // Lockset held at entry to block
  FactSet EntrySet;

  // Lockset held at exit from block
  FactSet ExitSet;

  // Context held at entry to block
  LocalVarContext EntryContext;

  // Context held at exit from block
  LocalVarContext ExitContext;

  // Location of first statement in block
  SourceLocation EntryLoc;

  // Location of last statement in block.
  SourceLocation ExitLoc;

  // Used to replay contexts later
  unsigned EntryIndex;

  // Is this block reachable?
  bool Reachable = false;

  const FactSet &getSet(CFGBlockSide Side) const {
    return Side == CBS_Entry ? EntrySet : ExitSet;
  }

  SourceLocation getLocation(CFGBlockSide Side) const {
    return Side == CBS_Entry ? EntryLoc : ExitLoc;
  }

private:
  CFGBlockInfo(LocalVarContext EmptyCtx)
      : EntryContext(EmptyCtx), ExitContext(EmptyCtx) {}

public:
  static CFGBlockInfo getEmptyBlockInfo(LocalVariableMap &M);
};

// A LocalVariableMap maintains a map from local variables to their currently
// valid definitions. It provides SSA-like functionality when traversing the
// CFG. Like SSA, each definition or assignment to a variable is assigned a
// unique name (an integer), which acts as the SSA name for that definition.
// The total set of names is shared among all CFG basic blocks.
// Unlike SSA, we do not rewrite expressions to replace DeclRefs to local
// variables with their SSA names. Instead, we compute a Context for each point
// in the code, which maps local variables to the appropriate SSA name. This
// map changes with each assignment.
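//
// As a small sketch of the idea (the SSA names x1 and x2 are illustrative):
//
//   int x = 1;   // Context: { x -> x1 },  VarDefinitions: x1 = 1
//   x = 2;       // Context: { x -> x2 },  VarDefinitions: x2 = 2, x1 = 1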
//
// The map is computed in a single pass over the CFG. Subsequent analyses can
// then query the map to find the appropriate Context for a statement, and use
// that Context to look up the definitions of variables.
class LocalVariableMap {
public:
  using Context = LocalVarContext;

  /// A VarDefinition consists of an expression, representing the value of the
  /// variable, along with the context in which that expression should be
  /// interpreted. A reference VarDefinition does not itself contain this
  /// information, but instead contains a pointer to a previous VarDefinition.
  struct VarDefinition {
  public:
    friend class LocalVariableMap;

    // The original declaration for this variable.
    const NamedDecl *Dec;

    // The expression for this variable, OR
    const Expr *Exp = nullptr;

    // Reference to another VarDefinition
    unsigned Ref = 0;

    // The map with which Exp should be interpreted.
    Context Ctx;

    bool isReference() const { return !Exp; }

  private:
    // Create ordinary variable definition
    VarDefinition(const NamedDecl *D, const Expr *E, Context C)
        : Dec(D), Exp(E), Ctx(C) {}

    // Create reference to previous definition
    VarDefinition(const NamedDecl *D, unsigned R, Context C)
        : Dec(D), Ref(R), Ctx(C) {}
  };

private:
  Context::Factory ContextFactory;
  std::vector<VarDefinition> VarDefinitions;
  std::vector<std::pair<const Stmt *, Context>> SavedContexts;

public:
  LocalVariableMap() {
    // index 0 is a placeholder for undefined variables (aka phi-nodes).
    VarDefinitions.push_back(VarDefinition(nullptr, 0u, getEmptyContext()));
  }

  /// Look up a definition, within the given context.
  const VarDefinition* lookup(const NamedDecl *D, Context Ctx) {
    const unsigned *i = Ctx.lookup(D);
    if (!i)
      return nullptr;
    assert(*i < VarDefinitions.size());
    return &VarDefinitions[*i];
  }

  /// Look up the definition for D within the given context. Returns
  /// NULL if the expression is not statically known. If successful, also
  /// modifies Ctx to hold the context of the returned Expr.
  const Expr* lookupExpr(const NamedDecl *D, Context &Ctx) {
    const unsigned *P = Ctx.lookup(D);
    if (!P)
      return nullptr;

    unsigned i = *P;
    while (i > 0) {
      if (VarDefinitions[i].Exp) {
        Ctx = VarDefinitions[i].Ctx;
        return VarDefinitions[i].Exp;
      }
      i = VarDefinitions[i].Ref;
    }
    return nullptr;
  }

  Context getEmptyContext() { return ContextFactory.getEmptyMap(); }

  /// Return the next context after processing S. This function is used by
  /// clients of the class to get the appropriate context when traversing the
  /// CFG. It must be called for every assignment or DeclStmt.
  Context getNextContext(unsigned &CtxIndex, const Stmt *S, Context C) {
    if (SavedContexts[CtxIndex+1].first == S) {
      CtxIndex++;
      Context Result = SavedContexts[CtxIndex].second;
      return Result;
    }
    return C;
  }

  void dumpVarDefinitionName(unsigned i) {
    if (i == 0) {
      llvm::errs() << "Undefined";
      return;
    }
    const NamedDecl *Dec = VarDefinitions[i].Dec;
    if (!Dec) {
      llvm::errs() << "<<NULL>>";
      return;
    }
    Dec->printName(llvm::errs());
    llvm::errs() << "." << i << " " << ((const void*) Dec);
  }

  /// Dumps an ASCII representation of the variable map to llvm::errs()
  void dump() {
    for (unsigned i = 1, e = VarDefinitions.size(); i < e; ++i) {
      const Expr *Exp = VarDefinitions[i].Exp;
      unsigned Ref = VarDefinitions[i].Ref;

      dumpVarDefinitionName(i);
      llvm::errs() << " = ";
      if (Exp) Exp->dump();
      else {
        dumpVarDefinitionName(Ref);
        llvm::errs() << "\n";
      }
    }
  }

  /// Dumps an ASCII representation of a Context to llvm::errs()
  void dumpContext(Context C) {
    for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) {
      const NamedDecl *D = I.getKey();
      D->printName(llvm::errs());
      llvm::errs() << " -> ";
      dumpVarDefinitionName(I.getData());
      llvm::errs() << "\n";
    }
  }

  /// Builds the variable map.
  void traverseCFG(CFG *CFGraph, const PostOrderCFGView *SortedGraph,
                   std::vector<CFGBlockInfo> &BlockInfo);

protected:
  friend class VarMapBuilder;

  // Get the current context index
  unsigned getContextIndex() { return SavedContexts.size()-1; }

  // Save the current context for later replay
  void saveContext(const Stmt *S, Context C) {
    SavedContexts.push_back(std::make_pair(S, C));
  }

  // Adds a new definition to the given context, and returns a new context.
  // This method should be called when declaring a new variable.
  Context addDefinition(const NamedDecl *D, const Expr *Exp, Context Ctx) {
    assert(!Ctx.contains(D));
    unsigned newID = VarDefinitions.size();
    Context NewCtx = ContextFactory.add(Ctx, D, newID);
    VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
    return NewCtx;
  }

  // Add a new reference to an existing definition.
  Context addReference(const NamedDecl *D, unsigned i, Context Ctx) {
    unsigned newID = VarDefinitions.size();
    Context NewCtx = ContextFactory.add(Ctx, D, newID);
    VarDefinitions.push_back(VarDefinition(D, i, Ctx));
    return NewCtx;
  }

  // Updates a definition only if that definition is already in the map.
  // This method should be called when assigning to an existing variable.
  Context updateDefinition(const NamedDecl *D, Expr *Exp, Context Ctx) {
    if (Ctx.contains(D)) {
      unsigned newID = VarDefinitions.size();
      Context NewCtx = ContextFactory.remove(Ctx, D);
      NewCtx = ContextFactory.add(NewCtx, D, newID);
      VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
      return NewCtx;
    }
    return Ctx;
  }

  // Removes a definition from the context, but keeps the variable name
  // as a valid variable. The index 0 is a placeholder for cleared definitions.
  Context clearDefinition(const NamedDecl *D, Context Ctx) {
    Context NewCtx = Ctx;
    if (NewCtx.contains(D)) {
      NewCtx = ContextFactory.remove(NewCtx, D);
      NewCtx = ContextFactory.add(NewCtx, D, 0);
    }
    return NewCtx;
  }

  // Remove a definition entirely from the context.
  Context removeDefinition(const NamedDecl *D, Context Ctx) {
    Context NewCtx = Ctx;
    if (NewCtx.contains(D)) {
      NewCtx = ContextFactory.remove(NewCtx, D);
    }
    return NewCtx;
  }

  Context intersectContexts(Context C1, Context C2);
  Context createReferenceContext(Context C);
  void intersectBackEdge(Context C1, Context C2);
};

} // namespace

// This has to be defined after LocalVariableMap.
CFGBlockInfo CFGBlockInfo::getEmptyBlockInfo(LocalVariableMap &M) {
  return CFGBlockInfo(M.getEmptyContext());
}

namespace {

/// Visitor which builds a LocalVariableMap
class VarMapBuilder : public ConstStmtVisitor<VarMapBuilder> {
public:
  LocalVariableMap* VMap;
  LocalVariableMap::Context Ctx;

  VarMapBuilder(LocalVariableMap *VM, LocalVariableMap::Context C)
      : VMap(VM), Ctx(C) {}

  void VisitDeclStmt(const DeclStmt *S);
  void VisitBinaryOperator(const BinaryOperator *BO);
};

} // namespace

// Add new local variables to the variable map
void VarMapBuilder::VisitDeclStmt(const DeclStmt *S) {
  bool modifiedCtx = false;
  const DeclGroupRef DGrp = S->getDeclGroup();
  for (const auto *D : DGrp) {
    if (const auto *VD = dyn_cast_or_null<VarDecl>(D)) {
      const Expr *E = VD->getInit();

      // Add local variables with trivial type to the variable map
      QualType T = VD->getType();
      if (T.isTrivialType(VD->getASTContext())) {
        Ctx = VMap->addDefinition(VD, E, Ctx);
        modifiedCtx = true;
      }
    }
  }
  if (modifiedCtx)
    VMap->saveContext(S, Ctx);
}

// Update local variable definitions in variable map
void VarMapBuilder::VisitBinaryOperator(const BinaryOperator *BO) {
  if (!BO->isAssignmentOp())
    return;

  Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();

  // Update the variable map and current context.
  if (const auto *DRE = dyn_cast<DeclRefExpr>(LHSExp)) {
    const ValueDecl *VDec = DRE->getDecl();
    if (Ctx.lookup(VDec)) {
      if (BO->getOpcode() == BO_Assign)
        Ctx = VMap->updateDefinition(VDec, BO->getRHS(), Ctx);
      else
        // FIXME -- handle compound assignment operators
        Ctx = VMap->clearDefinition(VDec, Ctx);
      VMap->saveContext(BO, Ctx);
    }
  }
}

// Computes the intersection of two contexts. The intersection is the
// set of variables which have the same definition in both contexts;
// variables with different definitions are discarded.
LocalVariableMap::Context
LocalVariableMap::intersectContexts(Context C1, Context C2) {
  Context Result = C1;
  for (const auto &P : C1) {
    const NamedDecl *Dec = P.first;
    const unsigned *i2 = C2.lookup(Dec);
    if (!i2)                   // variable doesn't exist on second path
      Result = removeDefinition(Dec, Result);
    else if (*i2 != P.second)  // variable exists, but has different definition
      Result = clearDefinition(Dec, Result);
  }
  return Result;
}

// For every variable in C, create a new variable that refers to the
// definition in C. Return a new context that contains these new variables.
// (We use this for a naive implementation of SSA on loop back-edges.)
LocalVariableMap::Context LocalVariableMap::createReferenceContext(Context C) {
  Context Result = getEmptyContext();
  for (const auto &P : C)
    Result = addReference(P.first, P.second, Result);
  return Result;
}

// This routine also takes the intersection of C1 and C2, but it does so by
// altering the VarDefinitions. C1 must be the result of an earlier call to
// createReferenceContext.
void LocalVariableMap::intersectBackEdge(Context C1, Context C2) {
  for (const auto &P : C1) {
    unsigned i1 = P.second;
    VarDefinition *VDef = &VarDefinitions[i1];
    assert(VDef->isReference());

    const unsigned *i2 = C2.lookup(P.first);
    if (!i2 || (*i2 != i1))
      VDef->Ref = 0; // Mark this variable as undefined
  }
}

// Traverse the CFG in topological order, so all predecessors of a block
// (excluding back-edges) are visited before the block itself. At
// each point in the code, we calculate a Context, which holds the set of
// variable definitions which are visible at that point in execution.
// Visible variables are mapped to their definitions using an array that
// contains all definitions.
//
// At join points in the CFG, the set is computed as the intersection of
// the incoming sets along each edge, e.g.
//
//                       { Context                 | VarDefinitions }
// int x = 0;            { x -> x1                 | x1 = 0 }
// int y = 0;            { x -> x1, y -> y1        | y1 = 0, x1 = 0 }
// if (b) x = 1;         { x -> x2, y -> y1        | x2 = 1, y1 = 0, ... }
// else   x = 2;         { x -> x3, y -> y1        | x3 = 2, x2 = 1, ... }
// ...                   { y -> y1  (x is unknown) | x3 = 2, x2 = 1, ... }
//
// This is essentially a simpler and more naive version of the standard SSA
// algorithm. Those definitions that remain in the intersection are from blocks
// that strictly dominate the current block. We do not bother to insert proper
// phi nodes, because they are not used in our analysis; instead, wherever
// a phi node would be required, we simply remove that definition from the
// context (e.g. x above).
//
// The initial traversal does not capture back-edges, so those need to be
// handled on a separate pass. Whenever the first pass encounters an
// incoming back edge, it duplicates the context, creating new definitions
// that refer back to the originals. (These correspond to places where SSA
// might have to insert a phi node.) On the second pass, these definitions are
// set to NULL if the variable has changed on the back-edge (i.e. a phi
// node was actually required.) E.g.
//
//                       { Context          | VarDefinitions }
// int x = 0, y = 0;     { x -> x1, y -> y1 | y1 = 0, x1 = 0 }
// while (b)             { x -> x2, y -> y1 | [1st:] x2=x1; [2nd:] x2=NULL; }
//   x = x+1;            { x -> x3, y -> y1 | x3 = x2 + 1, ... }
// ...                   { y -> y1          | x3 = 2, x2 = 1, ... }
void LocalVariableMap::traverseCFG(CFG *CFGraph,
                                   const PostOrderCFGView *SortedGraph,
                                   std::vector<CFGBlockInfo> &BlockInfo) {
  PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);

  for (const auto *CurrBlock : *SortedGraph) {
    unsigned CurrBlockID = CurrBlock->getBlockID();
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];

    VisitedBlocks.insert(CurrBlock);

    // Calculate the entry context for the current block
    bool HasBackEdges = false;
    bool CtxInit = true;
    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
         PE = CurrBlock->pred_end(); PI != PE; ++PI) {
      // If *PI -> CurrBlock is a back edge, skip it
      if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI)) {
        HasBackEdges = true;
        continue;
      }

      unsigned PrevBlockID = (*PI)->getBlockID();
      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];

      if (CtxInit) {
        CurrBlockInfo->EntryContext = PrevBlockInfo->ExitContext;
        CtxInit = false;
      }
      else {
        CurrBlockInfo->EntryContext =
          intersectContexts(CurrBlockInfo->EntryContext,
                            PrevBlockInfo->ExitContext);
      }
    }

    // Duplicate the context if we have back-edges, so we can call
    // intersectBackEdges later.
    if (HasBackEdges)
      CurrBlockInfo->EntryContext =
        createReferenceContext(CurrBlockInfo->EntryContext);

    // Create a starting context index for the current block
    saveContext(nullptr, CurrBlockInfo->EntryContext);
    CurrBlockInfo->EntryIndex = getContextIndex();

    // Visit all the statements in the basic block.
    VarMapBuilder VMapBuilder(this, CurrBlockInfo->EntryContext);
    for (const auto &BI : *CurrBlock) {
      switch (BI.getKind()) {
      case CFGElement::Statement: {
        CFGStmt CS = BI.castAs<CFGStmt>();
        VMapBuilder.Visit(CS.getStmt());
        break;
      }
      default:
        break;
      }
    }
    CurrBlockInfo->ExitContext = VMapBuilder.Ctx;

    // Mark variables on back edges as "unknown" if they've been changed.
    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
         SE = CurrBlock->succ_end(); SI != SE; ++SI) {
      // If CurrBlock -> *SI is *not* a back edge, skip it
      if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI))
        continue;

      CFGBlock *FirstLoopBlock = *SI;
      Context LoopBegin = BlockInfo[FirstLoopBlock->getBlockID()].EntryContext;
      Context LoopEnd = CurrBlockInfo->ExitContext;
      intersectBackEdge(LoopBegin, LoopEnd);
    }
  }

  // Put an extra entry at the end of the indexed context array
  unsigned exitID = CFGraph->getExit().getBlockID();
  saveContext(nullptr, BlockInfo[exitID].ExitContext);
}

/// Find the appropriate source locations to use when producing diagnostics for
/// each block in the CFG.
static void findBlockLocations(CFG *CFGraph,
                               const PostOrderCFGView *SortedGraph,
                               std::vector<CFGBlockInfo> &BlockInfo) {
  for (const auto *CurrBlock : *SortedGraph) {
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlock->getBlockID()];

    // Find the source location of the last statement in the block, if the
    // block is not empty.
    if (const Stmt *S = CurrBlock->getTerminatorStmt()) {
      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc = S->getBeginLoc();
    } else {
      for (CFGBlock::const_reverse_iterator BI = CurrBlock->rbegin(),
           BE = CurrBlock->rend(); BI != BE; ++BI) {
        // FIXME: Handle other CFGElement kinds.
        if (std::optional<CFGStmt> CS = BI->getAs<CFGStmt>()) {
          CurrBlockInfo->ExitLoc = CS->getStmt()->getBeginLoc();
          break;
        }
      }
    }

    if (CurrBlockInfo->ExitLoc.isValid()) {
      // This block contains at least one statement. Find the source location
      // of the first statement in the block.
      for (const auto &BI : *CurrBlock) {
        // FIXME: Handle other CFGElement kinds.
        if (std::optional<CFGStmt> CS = BI.getAs<CFGStmt>()) {
          CurrBlockInfo->EntryLoc = CS->getStmt()->getBeginLoc();
          break;
        }
      }
    } else if (CurrBlock->pred_size() == 1 && *CurrBlock->pred_begin() &&
               CurrBlock != &CFGraph->getExit()) {
      // The block is empty, and has a single predecessor. Use its exit
      // location.
      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
          BlockInfo[(*CurrBlock->pred_begin())->getBlockID()].ExitLoc;
    } else if (CurrBlock->succ_size() == 1 && *CurrBlock->succ_begin()) {
      // The block is empty, and has a single successor. Use its entry
      // location.
      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
          BlockInfo[(*CurrBlock->succ_begin())->getBlockID()].EntryLoc;
    }
  }
}

namespace {

class LockableFactEntry : public FactEntry {
private:
  /// Reentrancy depth: incremented when a capability has been acquired
  /// reentrantly (after initial acquisition). Always 0 for non-reentrant
  /// capabilities.
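  ///
  /// For example (a sketch, assuming a mutex M declared with a reentrant
  /// capability annotation):
  ///   M.Lock();    // initial acquisition,   depth 0
  ///   M.Lock();    // reentrant acquisition, depth 1
  ///   M.Unlock();  // back to depth 0
  ///   M.Unlock();  // fact removed from the lockset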
  unsigned int ReentrancyDepth = 0;

public:
  LockableFactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
                    SourceKind Src = Acquired)
      : FactEntry(Lockable, CE, LK, Loc, Src) {}

  unsigned int getReentrancyDepth() const { return ReentrancyDepth; }

  void
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
                                SourceLocation JoinLoc, LockErrorKind LEK,
                                ThreadSafetyHandler &Handler) const override {
    if (!asserted() && !negative() && !isUniversal()) {
      Handler.handleMutexHeldEndOfScope(getKind(), toString(), loc(), JoinLoc,
                                        LEK);
    }
  }

  void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
                  ThreadSafetyHandler &Handler) const override {
    if (std::unique_ptr<FactEntry> RFact = tryReenter(entry.kind())) {
      // This capability has been reentrantly acquired.
      FSet.replaceLock(FactMan, entry, std::move(RFact));
    } else {
      Handler.handleDoubleLock(entry.getKind(), entry.toString(), loc(),
                               entry.loc());
    }
  }

  void handleUnlock(FactSet &FSet, FactManager &FactMan,
                    const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                    bool FullyRemove,
                    ThreadSafetyHandler &Handler) const override {
    FSet.removeLock(FactMan, Cp);

    if (std::unique_ptr<FactEntry> RFact = leaveReentrant()) {
      // This capability remains reentrantly acquired.
      FSet.addLock(FactMan, std::move(RFact));
    } else if (!Cp.negative()) {
      FSet.addLock(FactMan, std::make_unique<LockableFactEntry>(
                                !Cp, LK_Exclusive, UnlockLoc));
    }
  }

  // Return an updated FactEntry if we can acquire this capability
  // reentrantly, nullptr otherwise.
  std::unique_ptr<LockableFactEntry> tryReenter(LockKind ReenterKind) const {
    if (!reentrant())
      return nullptr;
    if (kind() != ReenterKind)
      return nullptr;
    auto NewFact = std::make_unique<LockableFactEntry>(*this);
    NewFact->ReentrancyDepth++;
    return NewFact;
  }

  // Return an updated FactEntry if we are releasing a capability previously
  // acquired reentrantly, nullptr otherwise.
  std::unique_ptr<LockableFactEntry> leaveReentrant() const {
    if (!ReentrancyDepth)
      return nullptr;
    assert(reentrant());
    auto NewFact = std::make_unique<LockableFactEntry>(*this);
    NewFact->ReentrancyDepth--;
    return NewFact;
  }

  static bool classof(const FactEntry *A) {
    return A->getFactEntryKind() == Lockable;
  }
};

class ScopedLockableFactEntry : public FactEntry {
private:
  enum UnderlyingCapabilityKind {
    UCK_Acquired,          ///< Any kind of acquired capability.
    UCK_ReleasedShared,    ///< Shared capability that was released.
    UCK_ReleasedExclusive, ///< Exclusive capability that was released.
  };

  struct UnderlyingCapability {
    CapabilityExpr Cap;
    UnderlyingCapabilityKind Kind;
  };

  SmallVector<UnderlyingCapability, 2> UnderlyingMutexes;

public:
  ScopedLockableFactEntry(const CapabilityExpr &CE, SourceLocation Loc,
                          SourceKind Src)
      : FactEntry(ScopedLockable, CE, LK_Exclusive, Loc, Src) {}

  CapExprSet getUnderlyingMutexes() const {
    CapExprSet UnderlyingMutexesSet;
    for (const UnderlyingCapability &UnderlyingMutex : UnderlyingMutexes)
      UnderlyingMutexesSet.push_back(UnderlyingMutex.Cap);
    return UnderlyingMutexesSet;
  }

  void addLock(const CapabilityExpr &M) {
    UnderlyingMutexes.push_back(UnderlyingCapability{M, UCK_Acquired});
  }

  void addExclusiveUnlock(const CapabilityExpr &M) {
    UnderlyingMutexes.push_back(UnderlyingCapability{M, UCK_ReleasedExclusive});
  }

  void addSharedUnlock(const CapabilityExpr &M) {
    UnderlyingMutexes.push_back(UnderlyingCapability{M, UCK_ReleasedShared});
  }

  void
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
                                SourceLocation JoinLoc, LockErrorKind LEK,
                                ThreadSafetyHandler &Handler) const override {
    if (LEK == LEK_LockedAtEndOfFunction || LEK == LEK_NotLockedAtEndOfFunction)
      return;

    for (const auto &UnderlyingMutex : UnderlyingMutexes) {
      const auto *Entry = FSet.findLock(FactMan, UnderlyingMutex.Cap);
      if ((UnderlyingMutex.Kind == UCK_Acquired && Entry) ||
          (UnderlyingMutex.Kind != UCK_Acquired && !Entry)) {
        // If this scoped lock manages another mutex, and if the underlying
        // mutex is still/not held, then warn about the underlying mutex.
        Handler.handleMutexHeldEndOfScope(UnderlyingMutex.Cap.getKind(),
                                          UnderlyingMutex.Cap.toString(), loc(),
                                          JoinLoc, LEK);
      }
    }
  }

  void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
                  ThreadSafetyHandler &Handler) const override {
    for (const auto &UnderlyingMutex : UnderlyingMutexes) {
      if (UnderlyingMutex.Kind == UCK_Acquired)
        lock(FSet, FactMan, UnderlyingMutex.Cap, entry.kind(), entry.loc(),
             &Handler);
      else
        unlock(FSet, FactMan, UnderlyingMutex.Cap, entry.loc(), &Handler);
    }
  }

  void handleUnlock(FactSet &FSet, FactManager &FactMan,
                    const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                    bool FullyRemove,
                    ThreadSafetyHandler &Handler) const override {
    assert(!Cp.negative() && "Managing object cannot be negative.");
    for (const auto &UnderlyingMutex : UnderlyingMutexes) {
      // Remove/lock the underlying mutex if it exists/is still unlocked; warn
      // on double unlocking/locking if we're not destroying the scoped object.
      ThreadSafetyHandler *TSHandler = FullyRemove ? nullptr : &Handler;
      if (UnderlyingMutex.Kind == UCK_Acquired) {
        unlock(FSet, FactMan, UnderlyingMutex.Cap, UnlockLoc, TSHandler);
      } else {
        LockKind kind = UnderlyingMutex.Kind == UCK_ReleasedShared
                            ? LK_Shared
                            : LK_Exclusive;
        lock(FSet, FactMan, UnderlyingMutex.Cap, kind, UnlockLoc, TSHandler);
      }
    }
    if (FullyRemove)
      FSet.removeLock(FactMan, Cp);
  }

  static bool classof(const FactEntry *A) {
    return A->getFactEntryKind() == ScopedLockable;
  }

private:
  void lock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp,
            LockKind kind, SourceLocation loc,
            ThreadSafetyHandler *Handler) const {
    if (const auto It = FSet.findLockIter(FactMan, Cp); It != FSet.end()) {
      const auto &Fact = cast<LockableFactEntry>(FactMan[*It]);
      if (std::unique_ptr<FactEntry> RFact = Fact.tryReenter(kind)) {
        // This capability has been reentrantly acquired.
        FSet.replaceLock(FactMan, It, std::move(RFact));
      } else if (Handler) {
        Handler->handleDoubleLock(Cp.getKind(), Cp.toString(), Fact.loc(), loc);
      }
    } else {
      FSet.removeLock(FactMan, !Cp);
      FSet.addLock(FactMan,
                   std::make_unique<LockableFactEntry>(Cp, kind, loc, Managed));
    }
  }

  void unlock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp,
              SourceLocation loc, ThreadSafetyHandler *Handler) const {
    if (const auto It = FSet.findLockIter(FactMan, Cp); It != FSet.end()) {
      const auto &Fact = cast<LockableFactEntry>(FactMan[*It]);
      if (std::unique_ptr<FactEntry> RFact = Fact.leaveReentrant()) {
        // This capability remains reentrantly acquired.
        FSet.replaceLock(FactMan, It, std::move(RFact));
        return;
      }

      FSet.replaceLock(
          FactMan, It,
          std::make_unique<LockableFactEntry>(!Cp, LK_Exclusive, loc));
    } else if (Handler) {
      SourceLocation PrevLoc;
      if (const FactEntry *Neg = FSet.findLock(FactMan, !Cp))
        PrevLoc = Neg->loc();
      Handler->handleUnmatchedUnlock(Cp.getKind(), Cp.toString(), loc, PrevLoc);
    }
  }
};

/// Class which implements the core thread safety analysis routines.
class ThreadSafetyAnalyzer {
  friend class BuildLockset;
  friend class threadSafety::BeforeSet;

  llvm::BumpPtrAllocator Bpa;
  threadSafety::til::MemRegionRef Arena;
  threadSafety::SExprBuilder SxBuilder;

  ThreadSafetyHandler &Handler;
  const FunctionDecl *CurrentFunction;
  LocalVariableMap LocalVarMap;
  // Maps constructed objects to `this` placeholder prior to initialization.
  llvm::SmallDenseMap<const Expr *, til::LiteralPtr *> ConstructedObjects;
  FactManager FactMan;
  std::vector<CFGBlockInfo> BlockInfo;

  BeforeSet *GlobalBeforeSet;

public:
  ThreadSafetyAnalyzer(ThreadSafetyHandler &H, BeforeSet* Bset)
      : Arena(&Bpa), SxBuilder(Arena), Handler(H), GlobalBeforeSet(Bset) {}

  bool inCurrentScope(const CapabilityExpr &CapE);

  void addLock(FactSet &FSet, std::unique_ptr<FactEntry> Entry,
               bool ReqAttr = false);
  void removeLock(FactSet &FSet, const CapabilityExpr &CapE,
                  SourceLocation UnlockLoc, bool FullyRemove, LockKind Kind);

  template <typename AttrType>
  void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp,
                   const NamedDecl *D, til::SExpr *Self = nullptr);

  template <class AttrType>
  void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp,
                   const NamedDecl *D,
                   const CFGBlock *PredBlock, const CFGBlock *CurrBlock,
                   Expr *BrE, bool Neg);

  const CallExpr* getTrylockCallExpr(const Stmt *Cond, LocalVarContext C,
                                     bool &Negate);

  void getEdgeLockset(FactSet &Result, const FactSet &ExitSet,
                      const CFGBlock* PredBlock,
                      const CFGBlock *CurrBlock);

  bool join(const FactEntry &A, const FactEntry &B, SourceLocation JoinLoc,
            LockErrorKind EntryLEK);

  void intersectAndWarn(FactSet &EntrySet, const FactSet &ExitSet,
                        SourceLocation JoinLoc, LockErrorKind EntryLEK,
                        LockErrorKind ExitLEK);

  void intersectAndWarn(FactSet &EntrySet, const FactSet &ExitSet,
                        SourceLocation JoinLoc, LockErrorKind LEK) {
    intersectAndWarn(EntrySet, ExitSet, JoinLoc, LEK, LEK);
  }

  void runAnalysis(AnalysisDeclContext &AC);

  void warnIfMutexNotHeld(const FactSet &FSet, const NamedDecl *D,
                          const Expr *Exp, AccessKind AK, Expr *MutexExp,
                          ProtectedOperationKind POK, til::LiteralPtr *Self,
                          SourceLocation Loc);
  void warnIfMutexHeld(const FactSet &FSet, const NamedDecl *D, const Expr *Exp,
                       Expr *MutexExp, til::LiteralPtr *Self,
                       SourceLocation Loc);

  void checkAccess(const FactSet &FSet, const Expr *Exp, AccessKind AK,
                   ProtectedOperationKind POK);
  void checkPtAccess(const FactSet &FSet, const Expr *Exp, AccessKind AK,
                     ProtectedOperationKind POK);
};

} // namespace

/// Process acquired_before and acquired_after attributes on Vd.
BeforeSet::BeforeInfo* BeforeSet::insertAttrExprs(const ValueDecl* Vd,
    ThreadSafetyAnalyzer& Analyzer) {
  // Create a new entry for Vd.
  BeforeInfo *Info = nullptr;
  {
    // Keep InfoPtr in its own scope in case BMap is modified later and the
    // reference becomes invalid.
    std::unique_ptr<BeforeInfo> &InfoPtr = BMap[Vd];
    if (!InfoPtr)
      InfoPtr.reset(new BeforeInfo());
    Info = InfoPtr.get();
  }

  for (const auto *At : Vd->attrs()) {
    switch (At->getKind()) {
    case attr::AcquiredBefore: {
      const auto *A = cast<AcquiredBeforeAttr>(At);

      // Read exprs from the attribute, and add them to BeforeVect.
      for (const auto *Arg : A->args()) {
        CapabilityExpr Cp =
            Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr);
        if (const ValueDecl *Cpvd = Cp.valueDecl()) {
          Info->Vect.push_back(Cpvd);
          const auto It = BMap.find(Cpvd);
          if (It == BMap.end())
            insertAttrExprs(Cpvd, Analyzer);
        }
      }
      break;
    }
    case attr::AcquiredAfter: {
      const auto *A = cast<AcquiredAfterAttr>(At);

      // Read exprs from the attribute, and add them to BeforeVect.
      for (const auto *Arg : A->args()) {
        CapabilityExpr Cp =
            Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr);
        if (const ValueDecl *ArgVd = Cp.valueDecl()) {
          // Get entry for mutex listed in attribute
          BeforeInfo *ArgInfo = getBeforeInfoForDecl(ArgVd, Analyzer);
          ArgInfo->Vect.push_back(Vd);
        }
      }
      break;
    }
    default:
      break;
    }
  }

  return Info;
}

BeforeSet::BeforeInfo *
BeforeSet::getBeforeInfoForDecl(const ValueDecl *Vd,
                                ThreadSafetyAnalyzer &Analyzer) {
  auto It = BMap.find(Vd);
  BeforeInfo *Info = nullptr;
  if (It == BMap.end())
    Info = insertAttrExprs(Vd, Analyzer);
  else
    Info = It->second.get();
  assert(Info && "BMap contained nullptr?");
  return Info;
}

/// Warn if any mutexes in FSet are in the acquired_before set of Vd.
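/// For example (a sketch using the documentation's annotation macros), given
/// 'Mutex mu1; Mutex mu2 ACQUIRED_AFTER(mu1);', acquiring mu1 while mu2 is
/// already held triggers a lock-ordering warning.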
void BeforeSet::checkBeforeAfter(const ValueDecl* StartVd,
                                 const FactSet& FSet,
                                 ThreadSafetyAnalyzer& Analyzer,
                                 SourceLocation Loc, StringRef CapKind) {
  SmallVector<BeforeInfo*, 8> InfoVect;

  // Do a depth-first traversal of Vd.
  // Return true if there are cycles.
  std::function<bool (const ValueDecl*)> traverse = [&](const ValueDecl* Vd) {
    if (!Vd)
      return false;

    BeforeSet::BeforeInfo *Info = getBeforeInfoForDecl(Vd, Analyzer);

    if (Info->Visited == 1)
      return true;

    if (Info->Visited == 2)
      return false;

    if (Info->Vect.empty())
      return false;

    InfoVect.push_back(Info);
    Info->Visited = 1;
    for (const auto *Vdb : Info->Vect) {
      // Exclude mutexes in our immediate before set.
      if (FSet.containsMutexDecl(Analyzer.FactMan, Vdb)) {
        StringRef L1 = StartVd->getName();
        StringRef L2 = Vdb->getName();
        Analyzer.Handler.handleLockAcquiredBefore(CapKind, L1, L2, Loc);
      }
      // Transitively search other before sets, and warn on cycles.
      if (traverse(Vdb)) {
        if (CycMap.try_emplace(Vd, true).second) {
          StringRef L1 = Vd->getName();
          Analyzer.Handler.handleBeforeAfterCycle(L1, Vd->getLocation());
        }
      }
    }
    Info->Visited = 2;
    return false;
  };

  traverse(StartVd);

  for (auto *Info : InfoVect)
    Info->Visited = 0;
}

/// Gets the value decl pointer from DeclRefExprs or MemberExprs.
static const ValueDecl *getValueDecl(const Expr *Exp) {
  if (const auto *CE = dyn_cast<ImplicitCastExpr>(Exp))
    return getValueDecl(CE->getSubExpr());

  if (const auto *DR = dyn_cast<DeclRefExpr>(Exp))
    return DR->getDecl();

  if (const auto *ME = dyn_cast<MemberExpr>(Exp))
    return ME->getMemberDecl();

  return nullptr;
}

bool ThreadSafetyAnalyzer::inCurrentScope(const CapabilityExpr &CapE) {
  const threadSafety::til::SExpr *SExp = CapE.sexpr();
  assert(SExp && "Null expressions should be ignored");

  if (const auto *LP = dyn_cast<til::LiteralPtr>(SExp)) {
    const ValueDecl *VD = LP->clangDecl();
    // Variables defined in a function are always inaccessible.
    if (!VD || !VD->isDefinedOutsideFunctionOrMethod())
      return false;
    // For now we consider static class members to be inaccessible.
    if (isa<CXXRecordDecl>(VD->getDeclContext()))
      return false;
    // Global variables are always in scope.
    return true;
  }

  // Members are in scope from methods of the same class.
  if (const auto *P = dyn_cast<til::Project>(SExp)) {
    if (!isa_and_nonnull<CXXMethodDecl>(CurrentFunction))
      return false;
    const ValueDecl *VD = P->clangDecl();
    return VD->getDeclContext() == CurrentFunction->getDeclContext();
  }

  return false;
}

/// Add a new lock to the lockset, warning if the lock is already there.
/// \param ReqAttr -- true if this is part of an initial Requires attribute.
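/// For example (a sketch): acquiring 'mu' removes the negative capability
/// '!mu' from the lockset; if '!mu' was not held, is in scope, and the lock
/// is neither asserted nor part of a Requires attribute, we warn that the
/// negative capability was not held.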
void ThreadSafetyAnalyzer::addLock(FactSet &FSet,
                                   std::unique_ptr<FactEntry> Entry,
                                   bool ReqAttr) {
  if (Entry->shouldIgnore())
    return;

  if (!ReqAttr && !Entry->negative()) {
    // look for the negative capability, and remove it from the fact set.
    CapabilityExpr NegC = !*Entry;
    const FactEntry *Nen = FSet.findLock(FactMan, NegC);
    if (Nen) {
      FSet.removeLock(FactMan, NegC);
    }
    else {
      if (inCurrentScope(*Entry) && !Entry->asserted())
        Handler.handleNegativeNotHeld(Entry->getKind(), Entry->toString(),
                                      NegC.toString(), Entry->loc());
    }
  }

  // Check before/after constraints
  if (Handler.issueBetaWarnings() &&
      !Entry->asserted() && !Entry->declared()) {
    GlobalBeforeSet->checkBeforeAfter(Entry->valueDecl(), FSet, *this,
                                      Entry->loc(), Entry->getKind());
  }

  if (const FactEntry *Cp = FSet.findLock(FactMan, *Entry)) {
    if (!Entry->asserted())
      Cp->handleLock(FSet, FactMan, *Entry, Handler);
  } else {
    FSet.addLock(FactMan, std::move(Entry));
  }
}

/// Remove a lock from the lockset, warning if the lock is not there.
/// \param UnlockLoc The source location of the unlock (only used in error msg)
void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const CapabilityExpr &Cp,
                                      SourceLocation UnlockLoc,
                                      bool FullyRemove, LockKind ReceivedKind) {
  if (Cp.shouldIgnore())
    return;

  const FactEntry *LDat = FSet.findLock(FactMan, Cp);
  if (!LDat) {
    SourceLocation PrevLoc;
    if (const FactEntry *Neg = FSet.findLock(FactMan, !Cp))
      PrevLoc = Neg->loc();
    Handler.handleUnmatchedUnlock(Cp.getKind(), Cp.toString(), UnlockLoc,
                                  PrevLoc);
    return;
  }

  // Generic lock removal doesn't care about lock kind mismatches, but
  // otherwise diagnose when the lock kinds are mismatched.
  if (ReceivedKind != LK_Generic && LDat->kind() != ReceivedKind) {
    Handler.handleIncorrectUnlockKind(Cp.getKind(), Cp.toString(), LDat->kind(),
                                      ReceivedKind, LDat->loc(), UnlockLoc);
  }

  LDat->handleUnlock(FSet, FactMan, Cp, UnlockLoc, FullyRemove, Handler);
}

/// Extract the list of mutexIDs from the attribute on an expression,
/// and push them onto Mtxs, discarding any duplicates.
template <typename AttrType>
void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
                                       const Expr *Exp, const NamedDecl *D,
                                       til::SExpr *Self) {
  if (Attr->args_size() == 0) {
    // The mutex held is the "this" object.
    CapabilityExpr Cp = SxBuilder.translateAttrExpr(nullptr, D, Exp, Self);
    if (Cp.isInvalid()) {
      warnInvalidLock(Handler, nullptr, D, Exp, Cp.getKind());
      return;
    }
    //else
    if (!Cp.shouldIgnore())
      Mtxs.push_back_nodup(Cp);
    return;
  }

  for (const auto *Arg : Attr->args()) {
    CapabilityExpr Cp = SxBuilder.translateAttrExpr(Arg, D, Exp, Self);
    if (Cp.isInvalid()) {
      warnInvalidLock(Handler, nullptr, D, Exp, Cp.getKind());
      continue;
    }
    //else
    if (!Cp.shouldIgnore())
      Mtxs.push_back_nodup(Cp);
  }
}

/// Extract the list of mutexIDs from a trylock attribute. If the
/// trylock applies to the given edge, then push them onto Mtxs, discarding
/// any duplicates.
template <class AttrType>
void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
                                       const Expr *Exp, const NamedDecl *D,
                                       const CFGBlock *PredBlock,
                                       const CFGBlock *CurrBlock,
                                       Expr *BrE, bool Neg) {
  // Find out which branch has the lock
  bool branch = false;
  if (const auto *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(BrE))
    branch = BLE->getValue();
  else if (const auto *ILE = dyn_cast_or_null<IntegerLiteral>(BrE))
    branch = ILE->getValue().getBoolValue();

  int branchnum = branch ? 0 : 1;
  if (Neg)
    branchnum = !branchnum;

  // If we've taken the trylock branch, then add the lock
  int i = 0;
  for (CFGBlock::const_succ_iterator SI = PredBlock->succ_begin(),
       SE = PredBlock->succ_end(); SI != SE && i < 2; ++SI, ++i) {
    if (*SI == CurrBlock && i == branchnum)
      getMutexIDs(Mtxs, Attr, Exp, D);
  }
}

static bool getStaticBooleanValue(Expr *E, bool &TCond) {
  if (isa<CXXNullPtrLiteralExpr>(E) || isa<GNUNullExpr>(E)) {
    TCond = false;
    return true;
  } else if (const auto *BLE = dyn_cast<CXXBoolLiteralExpr>(E)) {
    TCond = BLE->getValue();
    return true;
  } else if (const auto *ILE = dyn_cast<IntegerLiteral>(E)) {
    TCond = ILE->getValue().getBoolValue();
    return true;
  } else if (auto *CE = dyn_cast<ImplicitCastExpr>(E))
    return getStaticBooleanValue(CE->getSubExpr(), TCond);
  return false;
}

// If Cond can be traced back to a function call, return the call expression.
// Negate should be initialized to false by the caller; it will be set to true
// if the function call is negated, e.g. if (!mu.tryLock(...))
const CallExpr* ThreadSafetyAnalyzer::getTrylockCallExpr(const Stmt *Cond,
                                                         LocalVarContext C,
                                                         bool &Negate) {
  if (!Cond)
    return nullptr;

  if (const auto *CallExp = dyn_cast<CallExpr>(Cond)) {
    if (CallExp->getBuiltinCallee() == Builtin::BI__builtin_expect)
      return getTrylockCallExpr(CallExp->getArg(0), C, Negate);
    return CallExp;
  }
  else if (const auto *PE = dyn_cast<ParenExpr>(Cond))
    return getTrylockCallExpr(PE->getSubExpr(), C, Negate);
  else if (const auto *CE = dyn_cast<ImplicitCastExpr>(Cond))
    return getTrylockCallExpr(CE->getSubExpr(), C, Negate);
  else if (const auto *FE = dyn_cast<FullExpr>(Cond))
    return getTrylockCallExpr(FE->getSubExpr(), C, Negate);
  else if (const auto *DRE = dyn_cast<DeclRefExpr>(Cond)) {
    const Expr *E = LocalVarMap.lookupExpr(DRE->getDecl(), C);
    return getTrylockCallExpr(E, C, Negate);
  }
  else if (const auto *UOP = dyn_cast<UnaryOperator>(Cond)) {
    if (UOP->getOpcode() == UO_LNot) {
      Negate = !Negate;
      return getTrylockCallExpr(UOP->getSubExpr(), C, Negate);
    }
    return nullptr;
  }
  else if (const auto *BOP = dyn_cast<BinaryOperator>(Cond)) {
    if (BOP->getOpcode() == BO_EQ || BOP->getOpcode() == BO_NE) {
      if (BOP->getOpcode() == BO_NE)
        Negate = !Negate;

      bool TCond = false;
      if (getStaticBooleanValue(BOP->getRHS(), TCond)) {
        if (!TCond) Negate = !Negate;
        return getTrylockCallExpr(BOP->getLHS(), C, Negate);
      }
      TCond = false;
      if (getStaticBooleanValue(BOP->getLHS(), TCond)) {
        if (!TCond) Negate = !Negate;
        return getTrylockCallExpr(BOP->getRHS(), C, Negate);
      }
      return nullptr;
    }
    if (BOP->getOpcode() == BO_LAnd) {
      // LHS must have been evaluated in a different block.
      return getTrylockCallExpr(BOP->getRHS(), C, Negate);
    }
    if (BOP->getOpcode() == BO_LOr)
      return getTrylockCallExpr(BOP->getRHS(), C, Negate);
    return nullptr;
  } else if (const auto *COP = dyn_cast<ConditionalOperator>(Cond)) {
    bool TCond, FCond;
    if (getStaticBooleanValue(COP->getTrueExpr(), TCond) &&
        getStaticBooleanValue(COP->getFalseExpr(), FCond)) {
      if (TCond && !FCond)
        return getTrylockCallExpr(COP->getCond(), C, Negate);
      if (!TCond && FCond) {
        Negate = !Negate;
        return getTrylockCallExpr(COP->getCond(), C, Negate);
      }
    }
  }
  return nullptr;
}

/// Find the lockset that holds on the edge between PredBlock
/// and CurrBlock. The edge set is the exit set of PredBlock (passed
/// as the ExitSet parameter) plus any trylocks, which are conditionally held.
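/// For example (a sketch, assuming TryLock is annotated with the
/// documentation's TRY_ACQUIRE(true, mu) macro):
///
///   if (mu.TryLock()) {
///     ...  // the edge into this block adds 'mu' to the lockset
///   } else {
///     ...  // the edge into this block leaves the lockset unchanged
///   }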
void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
                                          const FactSet &ExitSet,
                                          const CFGBlock *PredBlock,
                                          const CFGBlock *CurrBlock) {
  Result = ExitSet;

  const Stmt *Cond = PredBlock->getTerminatorCondition();
  // We don't acquire try-locks on ?: branches, only when the result is used.
  if (!Cond || isa<ConditionalOperator>(PredBlock->getTerminatorStmt()))
    return;

  bool Negate = false;
  const CFGBlockInfo *PredBlockInfo = &BlockInfo[PredBlock->getBlockID()];
  const LocalVarContext &LVarCtx = PredBlockInfo->ExitContext;

  const auto *Exp = getTrylockCallExpr(Cond, LVarCtx, Negate);
  if (!Exp)
    return;

  auto *FunDecl = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
  if (!FunDecl || !FunDecl->hasAttr<TryAcquireCapabilityAttr>())
    return;

  CapExprSet ExclusiveLocksToAdd;
  CapExprSet SharedLocksToAdd;

  // If the condition is a call to a Trylock function, then grab the attributes
  for (const auto *Attr : FunDecl->specific_attrs<TryAcquireCapabilityAttr>())
    getMutexIDs(Attr->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, Attr,
                Exp, FunDecl, PredBlock, CurrBlock, Attr->getSuccessValue(),
                Negate);

  // Add and remove locks.
  SourceLocation Loc = Exp->getExprLoc();
  for (const auto &ExclusiveLockToAdd : ExclusiveLocksToAdd)
    addLock(Result, std::make_unique<LockableFactEntry>(ExclusiveLockToAdd,
                                                        LK_Exclusive, Loc));
  for (const auto &SharedLockToAdd : SharedLocksToAdd)
    addLock(Result, std::make_unique<LockableFactEntry>(SharedLockToAdd,
                                                        LK_Shared, Loc));
}

namespace {

/// We use this class to visit different types of expressions in
/// CFGBlocks, and build up the lockset.
/// An expression may cause us to add or remove locks from the lockset, or else
/// output error messages related to missing locks.
/// FIXME: In future, we may be able to not inherit from a visitor.
class BuildLockset : public ConstStmtVisitor<BuildLockset> {
  friend class ThreadSafetyAnalyzer;

  ThreadSafetyAnalyzer *Analyzer;
  FactSet FSet;
  // The fact set for the function on exit.
  const FactSet &FunctionExitFSet;
  LocalVariableMap::Context LVarCtx;
  unsigned CtxIndex;

  // helper functions

  void checkAccess(const Expr *Exp, AccessKind AK,
                   ProtectedOperationKind POK = POK_VarAccess) {
    Analyzer->checkAccess(FSet, Exp, AK, POK);
  }
  void checkPtAccess(const Expr *Exp, AccessKind AK,
                     ProtectedOperationKind POK = POK_VarAccess) {
    Analyzer->checkPtAccess(FSet, Exp, AK, POK);
  }

  void handleCall(const Expr *Exp, const NamedDecl *D,
                  til::LiteralPtr *Self = nullptr,
                  SourceLocation Loc = SourceLocation());
  void examineArguments(const FunctionDecl *FD,
                        CallExpr::const_arg_iterator ArgBegin,
                        CallExpr::const_arg_iterator ArgEnd,
                        bool SkipFirstParam = false);

public:
  BuildLockset(ThreadSafetyAnalyzer *Anlzr, CFGBlockInfo &Info,
               const FactSet &FunctionExitFSet)
      : ConstStmtVisitor<BuildLockset>(), Analyzer(Anlzr), FSet(Info.EntrySet),
        FunctionExitFSet(FunctionExitFSet), LVarCtx(Info.EntryContext),
        CtxIndex(Info.EntryIndex) {}

  void VisitUnaryOperator(const UnaryOperator *UO);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitCastExpr(const CastExpr *CE);
  void VisitCallExpr(const CallExpr *Exp);
  void VisitCXXConstructExpr(const CXXConstructExpr *Exp);
  void VisitDeclStmt(const DeclStmt *S);
  void VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *Exp);
  void VisitReturnStmt(const ReturnStmt *S);
};

} // namespace

/// Warn if the LSet does not contain a lock sufficient to protect access
/// of at least the passed in AccessKind.
void ThreadSafetyAnalyzer::warnIfMutexNotHeld(
    const FactSet &FSet, const NamedDecl *D, const Expr *Exp, AccessKind AK,
    Expr *MutexExp, ProtectedOperationKind POK, til::LiteralPtr *Self,
    SourceLocation Loc) {
  LockKind LK = getLockKindFromAccessKind(AK);
  CapabilityExpr Cp = SxBuilder.translateAttrExpr(MutexExp, D, Exp, Self);
  if (Cp.isInvalid()) {
    warnInvalidLock(Handler, MutexExp, D, Exp, Cp.getKind());
    return;
  } else if (Cp.shouldIgnore()) {
    return;
  }

  if (Cp.negative()) {
    // Negative capabilities act like excluded locks.
    const FactEntry *LDat = FSet.findLock(FactMan, !Cp);
    if (LDat) {
      Handler.handleFunExcludesLock(Cp.getKind(), D->getNameAsString(),
                                    (!Cp).toString(), Loc);
      return;
    }

    // If this does not refer to a negative capability in the same class,
    // then stop here.
    if (!inCurrentScope(Cp))
      return;

    // Otherwise the negative requirement must be propagated to the caller.
    LDat = FSet.findLock(FactMan, Cp);
    if (!LDat) {
      Handler.handleNegativeNotHeld(D, Cp.toString(), Loc);
    }
    return;
  }

  const FactEntry *LDat = FSet.findLockUniv(FactMan, Cp);
  bool NoError = true;
  if (!LDat) {
    // No exact match found. Look for a partial match.
    LDat = FSet.findPartialMatch(FactMan, Cp);
    if (LDat) {
      // Warn that there's no precise match.
      std::string PartMatchStr = LDat->toString();
      StringRef PartMatchName(PartMatchStr);
      Handler.handleMutexNotHeld(Cp.getKind(), D, POK, Cp.toString(), LK, Loc,
                                 &PartMatchName);
    } else {
      // Warn that there's no match at all.
      Handler.handleMutexNotHeld(Cp.getKind(), D, POK, Cp.toString(), LK, Loc);
    }
    NoError = false;
  }
  // Make sure the mutex we found is the right kind.
  if (NoError && LDat && !LDat->isAtLeast(LK)) {
    Handler.handleMutexNotHeld(Cp.getKind(), D, POK, Cp.toString(), LK, Loc);
  }
}
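
// Illustrative warnings this produces (assuming the GUARDED_BY/REQUIRES
// macros from the Clang thread-safety docs; the negative-capability part is
// reported under -Wthread-safety-negative):
//
//   class Foo {
//     Mutex mu;
//     int a GUARDED_BY(mu);
//     void f() { a = 1; }             // warning: writing 'a' requires 'mu'
//     void g() REQUIRES(!mu);
//     void h() { g(); }               // warning: calling 'g' requires '!mu'
//     void k() REQUIRES(!mu) { g(); } // OK: '!mu' held and propagated
//   };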

/// Warn if the LSet contains the given lock.
void ThreadSafetyAnalyzer::warnIfMutexHeld(const FactSet &FSet,
                                           const NamedDecl *D, const Expr *Exp,
                                           Expr *MutexExp,
                                           til::LiteralPtr *Self,
                                           SourceLocation Loc) {
  CapabilityExpr Cp = SxBuilder.translateAttrExpr(MutexExp, D, Exp, Self);
  if (Cp.isInvalid()) {
    warnInvalidLock(Handler, MutexExp, D, Exp, Cp.getKind());
    return;
  } else if (Cp.shouldIgnore()) {
    return;
  }

  const FactEntry *LDat = FSet.findLock(FactMan, Cp);
  if (LDat) {
    Handler.handleFunExcludesLock(Cp.getKind(), D->getNameAsString(),
                                  Cp.toString(), Loc);
  }
}
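
// Illustrative example (assuming the EXCLUDES macro from the docs):
//
//   Mutex mu;
//   void f() EXCLUDES(mu);
//   void g() {
//     mu.Lock();
//     f();          // warning: cannot call 'f' while 'mu' is held
//     mu.Unlock();
//   }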

/// Checks guarded_by and pt_guarded_by attributes.
/// Whenever we identify an access (read or write) to a DeclRefExpr that is
/// marked with guarded_by, we must ensure the appropriate mutexes are held.
/// Similarly, we check if the access is to an expression that dereferences
/// a pointer marked with pt_guarded_by.
void ThreadSafetyAnalyzer::checkAccess(const FactSet &FSet, const Expr *Exp,
                                       AccessKind AK,
                                       ProtectedOperationKind POK) {
  Exp = Exp->IgnoreImplicit()->IgnoreParenCasts();

  SourceLocation Loc = Exp->getExprLoc();

  // Local variables of reference type cannot be re-assigned;
  // map them to their initializer.
  while (const auto *DRE = dyn_cast<DeclRefExpr>(Exp)) {
    const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()->getCanonicalDecl());
    if (VD && VD->isLocalVarDecl() && VD->getType()->isReferenceType()) {
      if (const auto *E = VD->getInit()) {
        // Guard against self-initialization, e.g. int &i = i;
        if (E == Exp)
          break;
        Exp = E->IgnoreImplicit()->IgnoreParenCasts();
        continue;
      }
    }
    break;
  }

  if (const auto *UO = dyn_cast<UnaryOperator>(Exp)) {
    // For dereferences
    if (UO->getOpcode() == UO_Deref)
      checkPtAccess(FSet, UO->getSubExpr(), AK, POK);
    return;
  }

  if (const auto *BO = dyn_cast<BinaryOperator>(Exp)) {
    switch (BO->getOpcode()) {
    case BO_PtrMemD: // .*
      return checkAccess(FSet, BO->getLHS(), AK, POK);
    case BO_PtrMemI: // ->*
      return checkPtAccess(FSet, BO->getLHS(), AK, POK);
    default:
      return;
    }
  }

  if (const auto *AE = dyn_cast<ArraySubscriptExpr>(Exp)) {
    checkPtAccess(FSet, AE->getLHS(), AK, POK);
    return;
  }

  if (const auto *ME = dyn_cast<MemberExpr>(Exp)) {
    if (ME->isArrow())
      checkPtAccess(FSet, ME->getBase(), AK, POK);
    else
      checkAccess(FSet, ME->getBase(), AK, POK);
  }

  const ValueDecl *D = getValueDecl(Exp);
  if (!D || !D->hasAttrs())
    return;

  if (D->hasAttr<GuardedVarAttr>() && FSet.isEmpty(FactMan)) {
    Handler.handleNoMutexHeld(D, POK, AK, Loc);
  }

  for (const auto *I : D->specific_attrs<GuardedByAttr>())
    warnIfMutexNotHeld(FSet, D, Exp, AK, I->getArg(), POK, nullptr, Loc);
}
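
// Illustrative accesses routed through here (macros per the docs):
//
//   Mutex mu;
//   int a GUARDED_BY(mu);
//   int *p PT_GUARDED_BY(mu);
//   void f() {
//     a = 0;      // guarded_by write: requires 'mu' held exclusively
//     int b = a;  // guarded_by read: holding 'mu' shared suffices
//     *p = 0;     // dereference: delegated to checkPtAccess
//   }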

/// Checks pt_guarded_by and pt_guarded_var attributes.
/// POK is the same operationKind that was passed to checkAccess.
void ThreadSafetyAnalyzer::checkPtAccess(const FactSet &FSet, const Expr *Exp,
                                         AccessKind AK,
                                         ProtectedOperationKind POK) {
  // Strip off paren- and cast-expressions, checking if we encounter any other
  // operator that should be delegated to checkAccess() instead.
  while (true) {
    if (const auto *PE = dyn_cast<ParenExpr>(Exp)) {
      Exp = PE->getSubExpr();
      continue;
    }
    if (const auto *CE = dyn_cast<CastExpr>(Exp)) {
      if (CE->getCastKind() == CK_ArrayToPointerDecay) {
        // If it's an actual array, and not a pointer, then its elements
        // are protected by GUARDED_BY, not PT_GUARDED_BY.
        checkAccess(FSet, CE->getSubExpr(), AK, POK);
        return;
      }
      Exp = CE->getSubExpr();
      continue;
    }
    break;
  }

  if (const auto *UO = dyn_cast<UnaryOperator>(Exp)) {
    if (UO->getOpcode() == UO_AddrOf) {
      // Pointer access via the address of a variable, so the dereferenced
      // entity is not actually a pointer.
      checkAccess(FSet, UO->getSubExpr(), AK, POK);
      return;
    }
  }

  // Pass by reference/pointer warnings are under a different flag.
  ProtectedOperationKind PtPOK = POK_VarDereference;
  switch (POK) {
  case POK_PassByRef:
    PtPOK = POK_PtPassByRef;
    break;
  case POK_ReturnByRef:
    PtPOK = POK_PtReturnByRef;
    break;
  case POK_PassPointer:
    PtPOK = POK_PtPassPointer;
    break;
  case POK_ReturnPointer:
    PtPOK = POK_PtReturnPointer;
    break;
  default:
    break;
  }

  const ValueDecl *D = getValueDecl(Exp);
  if (!D || !D->hasAttrs())
    return;

  if (D->hasAttr<PtGuardedVarAttr>() && FSet.isEmpty(FactMan))
    Handler.handleNoMutexHeld(D, PtPOK, AK, Exp->getExprLoc());

  for (const auto *I : D->specific_attrs<PtGuardedByAttr>())
    warnIfMutexNotHeld(FSet, D, Exp, AK, I->getArg(), PtPOK, nullptr,
                       Exp->getExprLoc());
}
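
// Illustrative corner cases handled above (macros per the docs):
//
//   Mutex mu;
//   int arr[8] GUARDED_BY(mu);
//   int *p PT_GUARDED_BY(mu);
//   void f() {
//     arr[0] = 1;  // array-to-pointer decay: still a GUARDED_BY access
//     (*p) = 1;    // pt_guarded_by: parens are stripped, requires 'mu'
//   }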

/// Process a function call, method call, constructor call,
/// or destructor call. This involves looking at the attributes on the
/// corresponding function/method/constructor/destructor, issuing warnings,
/// and updating the locksets accordingly.
///
/// FIXME: For classes annotated with one of the guarded annotations, we need
/// to treat const method calls as reads and non-const method calls as writes,
/// and check that the appropriate locks are held. Non-const method calls with
/// the same signature as const method calls can be also treated as reads.
///
/// \param Exp The call expression.
/// \param D The callee declaration.
/// \param Self If \p Exp = nullptr, the implicit this argument or the argument
/// of an implicitly called cleanup function.
/// \param Loc If \p Exp = nullptr, the location.
void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
                              til::LiteralPtr *Self, SourceLocation Loc) {
  CapExprSet ExclusiveLocksToAdd, SharedLocksToAdd;
  CapExprSet ExclusiveLocksToRemove, SharedLocksToRemove, GenericLocksToRemove;
  CapExprSet ScopedReqsAndExcludes;

  // Figure out if we're constructing an object of a scoped lockable class.
  CapabilityExpr Scp;
  if (Exp) {
    assert(!Self);
    const auto *TagT = Exp->getType()->getAs<TagType>();
    if (D->hasAttrs() && TagT && Exp->isPRValue()) {
      til::LiteralPtr *Placeholder =
          Analyzer->SxBuilder.createVariable(nullptr);
      [[maybe_unused]] auto inserted =
          Analyzer->ConstructedObjects.insert({Exp, Placeholder});
      assert(inserted.second && "Are we visiting the same expression again?");
      if (isa<CXXConstructExpr>(Exp))
        Self = Placeholder;
      if (TagT->getDecl()->hasAttr<ScopedLockableAttr>())
        Scp = CapabilityExpr(Placeholder, Exp->getType(), /*Neg=*/false);
    }

    assert(Loc.isInvalid());
    Loc = Exp->getExprLoc();
  }

  for (const Attr *At : D->attrs()) {
    switch (At->getKind()) {
    // When we encounter a lock function, we need to add the lock to our
    // lockset.
    case attr::AcquireCapability: {
      const auto *A = cast<AcquireCapabilityAttr>(At);
      Analyzer->getMutexIDs(A->isShared() ? SharedLocksToAdd
                                          : ExclusiveLocksToAdd,
                            A, Exp, D, Self);
      break;
    }

    // An assert will add a lock to the lockset, but will not generate
    // a warning if it is already there, and will not generate a warning
    // if it is not removed.
    case attr::AssertCapability: {
      const auto *A = cast<AssertCapabilityAttr>(At);
      CapExprSet AssertLocks;
      Analyzer->getMutexIDs(AssertLocks, A, Exp, D, Self);
      for (const auto &AssertLock : AssertLocks)
        Analyzer->addLock(FSet, std::make_unique<LockableFactEntry>(
                                    AssertLock,
                                    A->isShared() ? LK_Shared : LK_Exclusive,
                                    Loc, FactEntry::Asserted));
      break;
    }

    // When we encounter an unlock function, we need to remove unlocked
    // mutexes from the lockset, and flag a warning if they are not there.
    case attr::ReleaseCapability: {
      const auto *A = cast<ReleaseCapabilityAttr>(At);
      if (A->isGeneric())
        Analyzer->getMutexIDs(GenericLocksToRemove, A, Exp, D, Self);
      else if (A->isShared())
        Analyzer->getMutexIDs(SharedLocksToRemove, A, Exp, D, Self);
      else
        Analyzer->getMutexIDs(ExclusiveLocksToRemove, A, Exp, D, Self);
      break;
    }

    case attr::RequiresCapability: {
      const auto *A = cast<RequiresCapabilityAttr>(At);
      for (auto *Arg : A->args()) {
        Analyzer->warnIfMutexNotHeld(FSet, D, Exp,
                                     A->isShared() ? AK_Read : AK_Written,
                                     Arg, POK_FunctionCall, Self, Loc);
        // use for adopting a lock
        if (!Scp.shouldIgnore())
          Analyzer->getMutexIDs(ScopedReqsAndExcludes, A, Exp, D, Self);
      }
      break;
    }

    case attr::LocksExcluded: {
      const auto *A = cast<LocksExcludedAttr>(At);
      for (auto *Arg : A->args()) {
        Analyzer->warnIfMutexHeld(FSet, D, Exp, Arg, Self, Loc);
        // use for deferring a lock
        if (!Scp.shouldIgnore())
          Analyzer->getMutexIDs(ScopedReqsAndExcludes, A, Exp, D, Self);
      }
      break;
    }

    // Ignore attributes unrelated to thread-safety
    default:
      break;
    }
  }

  std::optional<CallExpr::const_arg_range> Args;
  if (Exp) {
    if (const auto *CE = dyn_cast<CallExpr>(Exp))
      Args = CE->arguments();
    else if (const auto *CE = dyn_cast<CXXConstructExpr>(Exp))
      Args = CE->arguments();
    else
      llvm_unreachable("Unknown call kind");
  }
  const auto *CalledFunction = dyn_cast<FunctionDecl>(D);
  if (CalledFunction && Args.has_value()) {
    for (auto [Param, Arg] : zip(CalledFunction->parameters(), *Args)) {
      CapExprSet DeclaredLocks;
      for (const Attr *At : Param->attrs()) {
        switch (At->getKind()) {
        case attr::AcquireCapability: {
          const auto *A = cast<AcquireCapabilityAttr>(At);
          Analyzer->getMutexIDs(A->isShared() ? SharedLocksToAdd
                                              : ExclusiveLocksToAdd,
                                A, Exp, D, Self);
          Analyzer->getMutexIDs(DeclaredLocks, A, Exp, D, Self);
          break;
        }

        case attr::ReleaseCapability: {
          const auto *A = cast<ReleaseCapabilityAttr>(At);
          if (A->isGeneric())
            Analyzer->getMutexIDs(GenericLocksToRemove, A, Exp, D, Self);
          else if (A->isShared())
            Analyzer->getMutexIDs(SharedLocksToRemove, A, Exp, D, Self);
          else
            Analyzer->getMutexIDs(ExclusiveLocksToRemove, A, Exp, D, Self);
          Analyzer->getMutexIDs(DeclaredLocks, A, Exp, D, Self);
          break;
        }

        case attr::RequiresCapability: {
          const auto *A = cast<RequiresCapabilityAttr>(At);
          for (auto *Arg : A->args())
            Analyzer->warnIfMutexNotHeld(FSet, D, Exp,
                                         A->isShared() ? AK_Read : AK_Written,
                                         Arg, POK_FunctionCall, Self, Loc);
          Analyzer->getMutexIDs(DeclaredLocks, A, Exp, D, Self);
          break;
        }

        case attr::LocksExcluded: {
          const auto *A = cast<LocksExcludedAttr>(At);
          for (auto *Arg : A->args())
            Analyzer->warnIfMutexHeld(FSet, D, Exp, Arg, Self, Loc);
          Analyzer->getMutexIDs(DeclaredLocks, A, Exp, D, Self);
          break;
        }

        default:
          break;
        }
      }
      if (DeclaredLocks.empty())
        continue;
      CapabilityExpr Cp(Analyzer->SxBuilder.translate(Arg, nullptr),
                        StringRef("mutex"), /*Neg=*/false, /*Reentrant=*/false);
      if (const auto *CBTE = dyn_cast<CXXBindTemporaryExpr>(Arg->IgnoreCasts());
          Cp.isInvalid() && CBTE) {
        if (auto Object = Analyzer->ConstructedObjects.find(CBTE->getSubExpr());
            Object != Analyzer->ConstructedObjects.end())
          Cp = CapabilityExpr(Object->second, StringRef("mutex"), /*Neg=*/false,
                              /*Reentrant=*/false);
      }
      const FactEntry *Fact = FSet.findLock(Analyzer->FactMan, Cp);
      if (!Fact) {
        Analyzer->Handler.handleMutexNotHeld(Cp.getKind(), D, POK_FunctionCall,
                                             Cp.toString(), LK_Exclusive,
                                             Exp->getExprLoc());
        continue;
      }
      const auto *Scope = cast<ScopedLockableFactEntry>(Fact);
      for (const auto &[a, b] :
           zip_longest(DeclaredLocks, Scope->getUnderlyingMutexes())) {
        if (!a.has_value()) {
          Analyzer->Handler.handleExpectFewerUnderlyingMutexes(
              Exp->getExprLoc(), D->getLocation(), Scope->toString(),
              b.value().getKind(), b.value().toString());
        } else if (!b.has_value()) {
          Analyzer->Handler.handleExpectMoreUnderlyingMutexes(
              Exp->getExprLoc(), D->getLocation(), Scope->toString(),
              a.value().getKind(), a.value().toString());
        } else if (!a.value().equals(b.value())) {
          Analyzer->Handler.handleUnmatchedUnderlyingMutexes(
              Exp->getExprLoc(), D->getLocation(), Scope->toString(),
              a.value().getKind(), a.value().toString(), b.value().toString());
          break;
        }
      }
    }
  }
  // Remove locks first to allow lock upgrading/downgrading.
  // FIXME -- should only fully remove if the attribute refers to 'this'.
  bool Dtor = isa<CXXDestructorDecl>(D);
  for (const auto &M : ExclusiveLocksToRemove)
    Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Exclusive);
  for (const auto &M : SharedLocksToRemove)
    Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Shared);
  for (const auto &M : GenericLocksToRemove)
    Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Generic);

  // Add locks.
  FactEntry::SourceKind Source =
      !Scp.shouldIgnore() ? FactEntry::Managed : FactEntry::Acquired;
  for (const auto &M : ExclusiveLocksToAdd)
    Analyzer->addLock(FSet, std::make_unique<LockableFactEntry>(M, LK_Exclusive,
                                                                Loc, Source));
  for (const auto &M : SharedLocksToAdd)
    Analyzer->addLock(
        FSet, std::make_unique<LockableFactEntry>(M, LK_Shared, Loc, Source));

  if (!Scp.shouldIgnore()) {
    // Add the managing object as a dummy mutex, mapped to the underlying mutex.
    auto ScopedEntry = std::make_unique<ScopedLockableFactEntry>(
        Scp, Loc, FactEntry::Acquired);
    for (const auto &M : ExclusiveLocksToAdd)
      ScopedEntry->addLock(M);
    for (const auto &M : SharedLocksToAdd)
      ScopedEntry->addLock(M);
    for (const auto &M : ScopedReqsAndExcludes)
      ScopedEntry->addLock(M);
    for (const auto &M : ExclusiveLocksToRemove)
      ScopedEntry->addExclusiveUnlock(M);
    for (const auto &M : SharedLocksToRemove)
      ScopedEntry->addSharedUnlock(M);
    Analyzer->addLock(FSet, std::move(ScopedEntry));
  }
}
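
// Illustrative scoped-lockable flow handled above (per the docs' MutexLocker
// pattern):
//
//   Mutex mu;
//   int a GUARDED_BY(mu);
//   void f() {
//     MutexLocker lock(&mu); // ctor ACQUIREs 'mu'; the scope object becomes
//                            // a fact mapped to the underlying mutex
//     a = 0;                 // OK: 'mu' is in the lockset
//   }                        // dtor releases 'mu' via the scoped fact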

/// For unary operations which read and write a variable, we need to
/// check whether we hold any required mutexes. Reads are checked in
/// VisitCastExpr.
void BuildLockset::VisitUnaryOperator(const UnaryOperator *UO) {
  switch (UO->getOpcode()) {
  case UO_PostDec:
  case UO_PostInc:
  case UO_PreDec:
  case UO_PreInc:
    checkAccess(UO->getSubExpr(), AK_Written);
    break;
  default:
    break;
  }
}

/// For binary operations which assign to a variable (writes), we need to check
/// whether we hold any required mutexes.
/// FIXME: Deal with non-primitive types.
void BuildLockset::VisitBinaryOperator(const BinaryOperator *BO) {
  if (!BO->isAssignmentOp())
    return;

  // adjust the context
  LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, BO, LVarCtx);

  checkAccess(BO->getLHS(), AK_Written);
}

/// Whenever we do an lvalue-to-rvalue cast, we are reading a variable and
/// need to ensure we hold any required mutexes.
/// FIXME: Deal with non-primitive types.
void BuildLockset::VisitCastExpr(const CastExpr *CE) {
  if (CE->getCastKind() != CK_LValueToRValue)
    return;
  checkAccess(CE->getSubExpr(), AK_Read);
}
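
// Illustrative classification by the three visitors above:
//
//   Mutex mu;
//   int a GUARDED_BY(mu);
//   void f() {
//     ++a;        // VisitUnaryOperator: write
//     a = 1;      // VisitBinaryOperator: write
//     int b = a;  // VisitCastExpr (lvalue-to-rvalue): read
//   }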

void BuildLockset::examineArguments(const FunctionDecl *FD,
                                    CallExpr::const_arg_iterator ArgBegin,
                                    CallExpr::const_arg_iterator ArgEnd,
                                    bool SkipFirstParam) {
  // Currently we can't do anything if we don't know the function declaration.
  if (!FD)
    return;

  // NO_THREAD_SAFETY_ANALYSIS does double duty here. Normally it
  // only turns off checking within the body of a function, but we also
  // use it to turn off checking in arguments to the function. This
  // could result in some false negatives, but the alternative is to
  // create yet another attribute.
  if (FD->hasAttr<NoThreadSafetyAnalysisAttr>())
    return;

  const ArrayRef<ParmVarDecl *> Params = FD->parameters();
  auto Param = Params.begin();
  if (SkipFirstParam)
    ++Param;

  // There can be default arguments, so we stop when one iterator is at end().
  for (auto Arg = ArgBegin; Param != Params.end() && Arg != ArgEnd;
       ++Param, ++Arg) {
    QualType Qt = (*Param)->getType();
    if (Qt->isReferenceType())
      checkAccess(*Arg, AK_Read, POK_PassByRef);
    else if (Qt->isPointerType())
      checkPtAccess(*Arg, AK_Read, POK_PassPointer);
  }
}
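
// Illustrative argument checks (reported under the pass-by-reference/pointer
// warning groups):
//
//   Mutex mu;
//   int a GUARDED_BY(mu);
//   void byRef(const int &x);
//   void byPtr(const int *x);
//   void f() {
//     byRef(a);   // reference parameter: pass-by-reference check on 'a'
//     byPtr(&a);  // pointer parameter: pass-by-pointer check on 'a'
//   }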

void BuildLockset::VisitCallExpr(const CallExpr *Exp) {
  if (const auto *CE = dyn_cast<CXXMemberCallExpr>(Exp)) {
    const auto *ME = dyn_cast<MemberExpr>(CE->getCallee());
    // ME can be null when calling a method pointer.
    const CXXMethodDecl *MD = CE->getMethodDecl();

    if (ME && MD) {
      if (ME->isArrow()) {
        // Should perhaps be AK_Written if !MD->isConst().
        checkPtAccess(CE->getImplicitObjectArgument(), AK_Read);
      } else {
        // Should perhaps be AK_Written if !MD->isConst().
        checkAccess(CE->getImplicitObjectArgument(), AK_Read);
      }
    }

    examineArguments(CE->getDirectCallee(), CE->arg_begin(), CE->arg_end());
  } else if (const auto *OE = dyn_cast<CXXOperatorCallExpr>(Exp)) {
    OverloadedOperatorKind OEop = OE->getOperator();
    switch (OEop) {
    case OO_Equal:
    case OO_PlusEqual:
    case OO_MinusEqual:
    case OO_StarEqual:
    case OO_SlashEqual:
    case OO_PercentEqual:
    case OO_CaretEqual:
    case OO_AmpEqual:
    case OO_PipeEqual:
    case OO_LessLessEqual:
    case OO_GreaterGreaterEqual:
      checkAccess(OE->getArg(1), AK_Read);
      [[fallthrough]];
    case OO_PlusPlus:
    case OO_MinusMinus:
      checkAccess(OE->getArg(0), AK_Written);
      break;
    case OO_Star:
    case OO_ArrowStar:
    case OO_Arrow:
    case OO_Subscript:
      if (!(OEop == OO_Star && OE->getNumArgs() > 1)) {
        // Grrr. operator* can be multiplication...
        checkPtAccess(OE->getArg(0), AK_Read);
      }
      [[fallthrough]];
    default: {
      // TODO: get rid of this, and rely on pass-by-ref instead.
      const Expr *Obj = OE->getArg(0);
      checkAccess(Obj, AK_Read);
      // Check the remaining arguments. For method operators, the first
      // argument is the implicit self argument, and doesn't appear in the
      // FunctionDecl, but for non-methods it does.
      const FunctionDecl *FD = OE->getDirectCallee();
      examineArguments(FD, std::next(OE->arg_begin()), OE->arg_end(),
                       /*SkipFirstParam=*/!isa<CXXMethodDecl>(FD));
      break;
    }
    }
  } else {
    examineArguments(Exp->getDirectCallee(), Exp->arg_begin(), Exp->arg_end());
  }

  auto *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
  if (!D)
    return;
  handleCall(Exp, D);
}

void BuildLockset::VisitCXXConstructExpr(const CXXConstructExpr *Exp) {
  const CXXConstructorDecl *D = Exp->getConstructor();
  if (D && D->isCopyConstructor()) {
    const Expr *Source = Exp->getArg(0);
    checkAccess(Source, AK_Read);
  } else {
    examineArguments(D, Exp->arg_begin(), Exp->arg_end());
  }
  if (D && D->hasAttrs())
    handleCall(Exp, D);
}

static const Expr *UnpackConstruction(const Expr *E) {
  if (auto *CE = dyn_cast<CastExpr>(E))
    if (CE->getCastKind() == CK_NoOp)
      E = CE->getSubExpr()->IgnoreParens();
  if (auto *CE = dyn_cast<CastExpr>(E))
    if (CE->getCastKind() == CK_ConstructorConversion ||
        CE->getCastKind() == CK_UserDefinedConversion)
      E = CE->getSubExpr();
  if (auto *BTE = dyn_cast<CXXBindTemporaryExpr>(E))
    E = BTE->getSubExpr();
  return E;
}
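
// Illustrative initializer shapes this unwraps down to the underlying
// construction (sketch, assuming a scoped-lockable type Lock):
//
//   Lock l1(&mu);         // plain CXXConstructExpr
//   Lock l2 = Lock(&mu);  // NoOp cast / CXXBindTemporaryExpr around the ctor
//   Lock l3 = &mu;        // CK_ConstructorConversion via a converting ctor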

void BuildLockset::VisitDeclStmt(const DeclStmt *S) {
  // adjust the context
  LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, S, LVarCtx);

  for (auto *D : S->getDeclGroup()) {
    if (auto *VD = dyn_cast_or_null<VarDecl>(D)) {
      const Expr *E = VD->getInit();
      if (!E)
        continue;
      E = E->IgnoreParens();

      // handle constructors that involve temporaries
      if (auto *EWC = dyn_cast<ExprWithCleanups>(E))
        E = EWC->getSubExpr()->IgnoreParens();
      E = UnpackConstruction(E);

      if (auto Object = Analyzer->ConstructedObjects.find(E);
          Object != Analyzer->ConstructedObjects.end()) {
        Object->second->setClangDecl(VD);
        Analyzer->ConstructedObjects.erase(Object);
      }
    }
  }
}

void BuildLockset::VisitMaterializeTemporaryExpr(
    const MaterializeTemporaryExpr *Exp) {
  if (const ValueDecl *ExtD = Exp->getExtendingDecl()) {
    if (auto Object = Analyzer->ConstructedObjects.find(
            UnpackConstruction(Exp->getSubExpr()));
        Object != Analyzer->ConstructedObjects.end()) {
      Object->second->setClangDecl(ExtD);
      Analyzer->ConstructedObjects.erase(Object);
    }
  }
}

void BuildLockset::VisitReturnStmt(const ReturnStmt *S) {
  if (Analyzer->CurrentFunction == nullptr)
    return;
  const Expr *RetVal = S->getRetValue();
  if (!RetVal)
    return;

  // If returning by reference or pointer, check that the function requires the
  // appropriate capabilities.
  const QualType ReturnType =
      Analyzer->CurrentFunction->getReturnType().getCanonicalType();
  if (ReturnType->isLValueReferenceType()) {
    Analyzer->checkAccess(
        FunctionExitFSet, RetVal,
        ReturnType->getPointeeType().isConstQualified() ? AK_Read : AK_Written,
        POK_ReturnByRef);
  } else if (ReturnType->isPointerType()) {
    Analyzer->checkPtAccess(
        FunctionExitFSet, RetVal,
        ReturnType->getPointeeType().isConstQualified() ? AK_Read : AK_Written,
        POK_ReturnPointer);
  }
}
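
// Illustrative return checks (macros per the docs):
//
//   Mutex mu;
//   int a GUARDED_BY(mu);
//   int &bad() { return a; }               // warning: returning a reference
//                                          // to 'a' without holding 'mu'
//   int &good() REQUIRES(mu) { return a; } // OK: checked against the
//                                          // expected function exit set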

/// Given two facts merging on a join point, possibly warn and decide whether to
/// keep or replace.
///
/// \return false if we should keep \p A, true if we should take \p B.
bool ThreadSafetyAnalyzer::join(const FactEntry &A, const FactEntry &B,
                                SourceLocation JoinLoc,
                                LockErrorKind EntryLEK) {
  // Whether we can replace \p A by \p B.
  const bool CanModify = EntryLEK != LEK_LockedSomeLoopIterations;
  unsigned int ReentrancyDepthA = 0;
  unsigned int ReentrancyDepthB = 0;

  if (const auto *LFE = dyn_cast<LockableFactEntry>(&A))
    ReentrancyDepthA = LFE->getReentrancyDepth();
  if (const auto *LFE = dyn_cast<LockableFactEntry>(&B))
    ReentrancyDepthB = LFE->getReentrancyDepth();

  if (ReentrancyDepthA != ReentrancyDepthB) {
    Handler.handleMutexHeldEndOfScope(B.getKind(), B.toString(), B.loc(),
                                      JoinLoc, EntryLEK,
                                      /*ReentrancyMismatch=*/true);
    // Pick the FactEntry with the greater reentrancy depth as the "good"
    // fact to reduce potential later warnings.
    return CanModify && ReentrancyDepthA < ReentrancyDepthB;
  } else if (A.kind() != B.kind()) {
    // For managed capabilities, the destructor should unlock in the right mode
    // anyway. For asserted capabilities no unlocking is needed.
    if ((A.managed() || A.asserted()) && (B.managed() || B.asserted())) {
      // The shared capability subsumes the exclusive capability, if possible.
      bool ShouldTakeB = B.kind() == LK_Shared;
      if (CanModify || !ShouldTakeB)
        return ShouldTakeB;
    }
    Handler.handleExclusiveAndShared(B.getKind(), B.toString(), B.loc(),
                                     A.loc());
    // Take the exclusive capability to reduce further warnings.
    return CanModify && B.kind() == LK_Exclusive;
  } else {
    // The non-asserted capability is the one we want to track.
    return CanModify && A.asserted() && !B.asserted();
  }
}
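
// Illustrative join of conflicting lock kinds:
//
//   Mutex mu;
//   void f(bool b) {
//     if (b)
//       mu.Lock();        // exclusive on one branch
//     else
//       mu.ReaderLock();  // shared on the other
//     // join: exclusive/shared mismatch is reported; the exclusive fact
//     // is kept to limit follow-on warnings
//     mu.Unlock();
//   }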

/// Compute the intersection of two locksets and issue warnings for any
/// locks in the symmetric difference.
///
/// This function is used at a merge point in the CFG when comparing the lockset
/// of each branch being merged. For example, given the following sequence:
/// A; if () then B; else C; D; we need to check that the locksets after B and
/// C are the same. In the event of a difference, we use the intersection of
/// these two locksets at the start of D.
///
/// \param EntrySet A lockset for entry into a (possibly new) block.
/// \param ExitSet The lockset on exiting a preceding block.
/// \param JoinLoc The location of the join point for error reporting.
/// \param EntryLEK The warning if a mutex is missing from \p EntrySet.
/// \param ExitLEK The warning if a mutex is missing from \p ExitSet.
void ThreadSafetyAnalyzer::intersectAndWarn(FactSet &EntrySet,
                                            const FactSet &ExitSet,
                                            SourceLocation JoinLoc,
                                            LockErrorKind EntryLEK,
                                            LockErrorKind ExitLEK) {
  FactSet EntrySetOrig = EntrySet;

  // Find locks in ExitSet that conflict or are not in EntrySet, and warn.
  for (const auto &Fact : ExitSet) {
    const FactEntry &ExitFact = FactMan[Fact];

    FactSet::iterator EntryIt = EntrySet.findLockIter(FactMan, ExitFact);
    if (EntryIt != EntrySet.end()) {
      if (join(FactMan[*EntryIt], ExitFact, JoinLoc, EntryLEK))
        *EntryIt = Fact;
    } else if (!ExitFact.managed() || EntryLEK == LEK_LockedAtEndOfFunction) {
      ExitFact.handleRemovalFromIntersection(ExitSet, FactMan, JoinLoc,
                                             EntryLEK, Handler);
    }
  }

  // Find locks in EntrySet that are not in ExitSet, and remove them.
  for (const auto &Fact : EntrySetOrig) {
    const FactEntry *EntryFact = &FactMan[Fact];
    const FactEntry *ExitFact = ExitSet.findLock(FactMan, *EntryFact);

    if (!ExitFact) {
      if (!EntryFact->managed() || ExitLEK == LEK_LockedSomeLoopIterations ||
          ExitLEK == LEK_NotLockedAtEndOfFunction)
        EntryFact->handleRemovalFromIntersection(EntrySetOrig, FactMan, JoinLoc,
                                                 ExitLEK, Handler);
      if (ExitLEK == LEK_LockedSomePredecessors)
        EntrySet.removeLock(FactMan, *EntryFact);
    }
  }
}
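
// Illustrative merge that trips the symmetric-difference warning:
//
//   Mutex mu;
//   void f(bool b) {
//     if (b)
//       mu.Lock();
//     // join: 'mu' is held on only one predecessor, so it is reported
//     // and dropped from the intersected entry set
//     if (b)
//       mu.Unlock();
//   }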

// Return true if block B never continues to its successors.
static bool neverReturns(const CFGBlock *B) {
  if (B->hasNoReturnElement())
    return true;
  if (B->empty())
    return false;

  CFGElement Last = B->back();
  if (std::optional<CFGStmt> S = Last.getAs<CFGStmt>()) {
    if (isa<CXXThrowExpr>(S->getStmt()))
      return true;
  }
  return false;
}

/// Check a function's CFG for thread-safety violations.
///
/// We traverse the blocks in the CFG, compute the set of mutexes that are held
/// at the end of each block, and issue warnings for thread safety violations.
/// Each block in the CFG is traversed exactly once.
void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
  // TODO: this whole function needs to be rewritten as a visitor for CFGWalker.
  // For now, we just use the walker to set things up.
  threadSafety::CFGWalker walker;
  if (!walker.init(AC))
    return;

  // AC.dumpCFG(true);
  // threadSafety::printSCFG(walker);

  CFG *CFGraph = walker.getGraph();
  const NamedDecl *D = walker.getDecl();
  CurrentFunction = dyn_cast<FunctionDecl>(D);

  if (D->hasAttr<NoThreadSafetyAnalysisAttr>())
    return;

  // FIXME: Do something a bit more intelligent inside constructor and
  // destructor code. Constructors and destructors must assume unique access
  // to 'this', so checks on member variable access are disabled, but we should
  // still enable checks on other objects.
  if (isa<CXXConstructorDecl>(D))
    return; // Don't check inside constructors.
  if (isa<CXXDestructorDecl>(D))
    return; // Don't check inside destructors.

  Handler.enterFunction(CurrentFunction);

  BlockInfo.resize(CFGraph->getNumBlockIDs(),
                   CFGBlockInfo::getEmptyBlockInfo(LocalVarMap));

  // We need to explore the CFG via a "topological" ordering.
  // That way, we will be guaranteed to have information about required
  // predecessor locksets when exploring a new block.
  const PostOrderCFGView *SortedGraph = walker.getSortedGraph();
  PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);

  CFGBlockInfo &Initial = BlockInfo[CFGraph->getEntry().getBlockID()];
  CFGBlockInfo &Final = BlockInfo[CFGraph->getExit().getBlockID()];

  // Mark entry block as reachable.
  Initial.Reachable = true;

  // Compute SSA names for local variables.
  LocalVarMap.traverseCFG(CFGraph, SortedGraph, BlockInfo);

  // Fill in source locations for all CFGBlocks.
  findBlockLocations(CFGraph, SortedGraph, BlockInfo);

  CapExprSet ExclusiveLocksAcquired;
  CapExprSet SharedLocksAcquired;
  CapExprSet LocksReleased;

  // Add locks from exclusive_locks_required and shared_locks_required
  // to initial lockset. Also turn off checking for lock and unlock functions.
  // FIXME: is there a more intelligent way to check lock/unlock functions?
  if (!SortedGraph->empty()) {
    assert(*SortedGraph->begin() == &CFGraph->getEntry());
    FactSet &InitialLockset = Initial.EntrySet;

    CapExprSet ExclusiveLocksToAdd;
    CapExprSet SharedLocksToAdd;

    SourceLocation Loc = D->getLocation();
    for (const auto *Attr : D->attrs()) {
      Loc = Attr->getLocation();
      if (const auto *A = dyn_cast<RequiresCapabilityAttr>(Attr)) {
        getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
                    nullptr, D);
      } else if (const auto *A = dyn_cast<ReleaseCapabilityAttr>(Attr)) {
        // UNLOCK_FUNCTION() is used to hide the underlying lock implementation.
        // We must ignore such methods.
        if (A->args_size() == 0)
          return;
        getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
                    nullptr, D);
        getMutexIDs(LocksReleased, A, nullptr, D);
      } else if (const auto *A = dyn_cast<AcquireCapabilityAttr>(Attr)) {
        if (A->args_size() == 0)
          return;
        getMutexIDs(A->isShared() ? SharedLocksAcquired
                                  : ExclusiveLocksAcquired,
                    A, nullptr, D);
      } else if (isa<TryAcquireCapabilityAttr>(Attr)) {
        // Don't try to check trylock functions for now.
        return;
      }
    }
    ArrayRef<ParmVarDecl *> Params;
    if (CurrentFunction)
      Params = CurrentFunction->getCanonicalDecl()->parameters();
    else if (auto CurrentMethod = dyn_cast<ObjCMethodDecl>(D))
      Params = CurrentMethod->getCanonicalDecl()->parameters();
    else
      llvm_unreachable("Unknown function kind");
    for (const ParmVarDecl *Param : Params) {
      CapExprSet UnderlyingLocks;
      for (const auto *Attr : Param->attrs()) {
        Loc = Attr->getLocation();
        if (const auto *A = dyn_cast<ReleaseCapabilityAttr>(Attr)) {
          getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
                      nullptr, Param);
          getMutexIDs(LocksReleased, A, nullptr, Param);
          getMutexIDs(UnderlyingLocks, A, nullptr, Param);
        } else if (const auto *A = dyn_cast<RequiresCapabilityAttr>(Attr)) {
          getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
                      nullptr, Param);
          getMutexIDs(UnderlyingLocks, A, nullptr, Param);
        } else if (const auto *A = dyn_cast<AcquireCapabilityAttr>(Attr)) {
          getMutexIDs(A->isShared() ? SharedLocksAcquired
                                    : ExclusiveLocksAcquired,
                      A, nullptr, Param);
          getMutexIDs(UnderlyingLocks, A, nullptr, Param);
        } else if (const auto *A = dyn_cast<LocksExcludedAttr>(Attr)) {
          getMutexIDs(UnderlyingLocks, A, nullptr, Param);
        }
      }
      if (UnderlyingLocks.empty())
        continue;
      CapabilityExpr Cp(SxBuilder.createVariable(Param), StringRef(),
                        /*Neg=*/false, /*Reentrant=*/false);
      auto ScopedEntry = std::make_unique<ScopedLockableFactEntry>(
          Cp, Param->getLocation(), FactEntry::Declared);
      for (const CapabilityExpr &M : UnderlyingLocks)
        ScopedEntry->addLock(M);
      addLock(InitialLockset, std::move(ScopedEntry), /*ReqAttr=*/true);
    }

    // FIXME -- Loc can be wrong here.
    for (const auto &Mu : ExclusiveLocksToAdd) {
      auto Entry = std::make_unique<LockableFactEntry>(Mu, LK_Exclusive, Loc,
                                                       FactEntry::Declared);
      addLock(InitialLockset, std::move(Entry), /*ReqAttr=*/true);
    }
    for (const auto &Mu : SharedLocksToAdd) {
      auto Entry = std::make_unique<LockableFactEntry>(Mu, LK_Shared, Loc,
                                                       FactEntry::Declared);
      addLock(InitialLockset, std::move(Entry), /*ReqAttr=*/true);
    }
  }

  // Compute the expected exit set.
  // By default, we expect all locks held on entry to be held on exit.
  FactSet ExpectedFunctionExitSet = Initial.EntrySet;

  // Adjust the expected exit set by adding or removing locks, as declared
  // by *-LOCK_FUNCTION and UNLOCK_FUNCTION. The intersect below will then
  // issue the appropriate warning.
  // FIXME: the location here is not quite right.
  for (const auto &Lock : ExclusiveLocksAcquired)
    ExpectedFunctionExitSet.addLock(
        FactMan, std::make_unique<LockableFactEntry>(Lock, LK_Exclusive,
                                                     D->getLocation()));
  for (const auto &Lock : SharedLocksAcquired)
    ExpectedFunctionExitSet.addLock(
        FactMan,
        std::make_unique<LockableFactEntry>(Lock, LK_Shared, D->getLocation()));
  for (const auto &Lock : LocksReleased)
    ExpectedFunctionExitSet.removeLock(FactMan, Lock);

  for (const auto *CurrBlock : *SortedGraph) {
    unsigned CurrBlockID = CurrBlock->getBlockID();
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];

    // Use the default initial lockset in case there are no predecessors.
    VisitedBlocks.insert(CurrBlock);

    // Iterate through the predecessor blocks and warn if the lockset for all
    // predecessors is not the same. We take the entry lockset of the current
    // block to be the intersection of all previous locksets.
    // FIXME: By keeping the intersection, we may output more errors in future
    // for a lock which is not in the intersection, but was in the union. We
    // may want to also keep the union in future. As an example, let's say
    // the intersection contains Mutex L, and the union contains L and M.
    // Later we unlock M. At this point, we would output an error because we
    // never locked M; although the real error is probably that we forgot to
    // lock M on all code paths. Conversely, let's say that later we lock M.
    // In this case, we should compare against the intersection instead of the
    // union because the real error is probably that we forgot to unlock M on
    // all code paths.
    bool LocksetInitialized = false;
    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
         PE = CurrBlock->pred_end(); PI != PE; ++PI) {
      // if *PI -> CurrBlock is a back edge
      if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI))
        continue;

      unsigned PrevBlockID = (*PI)->getBlockID();
      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];

      // Ignore edges from blocks that can't return.
      if (neverReturns(*PI) || !PrevBlockInfo->Reachable)
        continue;

      // Okay, we can reach this block from the entry.
      CurrBlockInfo->Reachable = true;

      FactSet PrevLockset;
      getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet, *PI, CurrBlock);

      if (!LocksetInitialized) {
        CurrBlockInfo->EntrySet = PrevLockset;
        LocksetInitialized = true;
      } else {
        // Surprisingly 'continue' doesn't always produce back edges, because
        // the CFG has empty "transition" blocks where they meet with the end
        // of the regular loop body. We still want to diagnose them as a loop.
        intersectAndWarn(
            CurrBlockInfo->EntrySet, PrevLockset, CurrBlockInfo->EntryLoc,
            isa_and_nonnull<ContinueStmt>((*PI)->getTerminatorStmt())
                ? LEK_LockedSomeLoopIterations
                : LEK_LockedSomePredecessors);
      }
    }

    // Skip the rest of the block if it's not reachable.
    if (!CurrBlockInfo->Reachable)
      continue;

    BuildLockset LocksetBuilder(this, *CurrBlockInfo, ExpectedFunctionExitSet);

    // Visit all the statements in the basic block.
    for (const auto &BI : *CurrBlock) {
      switch (BI.getKind()) {
      case CFGElement::Statement: {
        CFGStmt CS = BI.castAs<CFGStmt>();
        LocksetBuilder.Visit(CS.getStmt());
        break;
      }
      // Ignore BaseDtor and MemberDtor for now.
      case CFGElement::AutomaticObjectDtor: {
        CFGAutomaticObjDtor AD = BI.castAs<CFGAutomaticObjDtor>();
        const auto *DD = AD.getDestructorDecl(AC.getASTContext());
        if (!DD->hasAttrs())
          break;

        LocksetBuilder.handleCall(nullptr, DD,
                                  SxBuilder.createVariable(AD.getVarDecl()),
                                  AD.getTriggerStmt()->getEndLoc());
        break;
      }

      case CFGElement::CleanupFunction: {
        const CFGCleanupFunction &CF = BI.castAs<CFGCleanupFunction>();
        LocksetBuilder.handleCall(/*Exp=*/nullptr, CF.getFunctionDecl(),
                                  SxBuilder.createVariable(CF.getVarDecl()),
                                  CF.getVarDecl()->getLocation());
        break;
      }

      case CFGElement::TemporaryDtor: {
        auto TD = BI.castAs<CFGTemporaryDtor>();

        // Clean up the constructed object even if there are no attributes, to
        // keep the number of objects in limbo as small as possible.
        if (auto Object = ConstructedObjects.find(
                TD.getBindTemporaryExpr()->getSubExpr());
            Object != ConstructedObjects.end()) {
          const auto *DD = TD.getDestructorDecl(AC.getASTContext());
          if (DD->hasAttrs())
            // TODO: the location here isn't quite correct.
            LocksetBuilder.handleCall(nullptr, DD, Object->second,
                                      TD.getBindTemporaryExpr()->getEndLoc());
          ConstructedObjects.erase(Object);
        }
        break;
      }
      default:
        break;
      }
    }
    CurrBlockInfo->ExitSet = LocksetBuilder.FSet;

    // For every back edge from CurrBlock (the end of the loop) to another block
    // (FirstLoopBlock) we need to check that the lockset of CurrBlock is equal
    // to the one held at the beginning of FirstLoopBlock. We can look up the
    // lockset held at the beginning of FirstLoopBlock in the EntryLockSets map.
    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
         SE = CurrBlock->succ_end(); SI != SE; ++SI) {
      // if CurrBlock -> *SI is *not* a back edge
      if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI))
        continue;

      CFGBlock *FirstLoopBlock = *SI;
      CFGBlockInfo *PreLoop = &BlockInfo[FirstLoopBlock->getBlockID()];
      CFGBlockInfo *LoopEnd = &BlockInfo[CurrBlockID];
      intersectAndWarn(PreLoop->EntrySet, LoopEnd->ExitSet, PreLoop->EntryLoc,
                       LEK_LockedSomeLoopIterations);
    }
  }

  // Skip the final check if the exit block is unreachable.
  if (!Final.Reachable)
    return;

  // FIXME: Should we call this function for all blocks which exit the function?
  intersectAndWarn(ExpectedFunctionExitSet, Final.ExitSet, Final.ExitLoc,
                   LEK_LockedAtEndOfFunction, LEK_NotLockedAtEndOfFunction);

  Handler.leaveFunction(CurrentFunction);
}
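
// Illustrative loop diagnosis via the back-edge comparison above:
//
//   Mutex mu;
//   void f(int n) {
//     for (int i = 0; i < n; ++i)
//       mu.Lock();  // warning: 'mu' held at the loop back edge but not on
//                   // loop entry (LEK_LockedSomeLoopIterations)
//     mu.Unlock();
//   }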

/// Check a function's CFG for thread-safety violations.
///
/// We traverse the blocks in the CFG, compute the set of mutexes that are held
/// at the end of each block, and issue warnings for thread safety violations.
/// Each block in the CFG is traversed exactly once.
void threadSafety::runThreadSafetyAnalysis(AnalysisDeclContext &AC,
                                           ThreadSafetyHandler &Handler,
                                           BeforeSet **BSet) {
  if (!*BSet)
    *BSet = new BeforeSet;
  ThreadSafetyAnalyzer Analyzer(Handler, *BSet);
  Analyzer.runAnalysis(AC);
}

void threadSafety::threadSafetyCleanup(BeforeSet *Cache) { delete Cache; }

/// Helper function that returns a LockKind required for the given level
/// of access.
LockKind threadSafety::getLockKindFromAccessKind(AccessKind AK) {
  switch (AK) {
  case AK_Read:
    return LK_Shared;
  case AK_Written:
    return LK_Exclusive;
  }
  llvm_unreachable("Unknown AccessKind");
}