//===--- PPCaching.cpp - Handle caching lexed tokens ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements pieces of the Preprocessor interface that manage the
// caching of lexed tokens.
//
//===----------------------------------------------------------------------===//

#include "clang/Lex/Preprocessor.h"
using namespace clang;

std::pair<Preprocessor::CachedTokensTy::size_type, bool>
Preprocessor::LastBacktrackPos() {
  assert(isBacktrackEnabled());
  auto BacktrackPos = BacktrackPositions.back();
  bool Unannotated =
      static_cast<CachedTokensTy::difference_type>(BacktrackPos) < 0;
  return {Unannotated ? ~BacktrackPos : BacktrackPos, Unannotated};
}

// EnableBacktrackAtThisPos - From the point that this method is called, and
// until CommitBacktrackedTokens() or Backtrack() is called, the Preprocessor
// keeps track of the lexed tokens so that a subsequent Backtrack() call will
// make the Preprocessor re-lex the same tokens.
//
// Nested backtracks are allowed, meaning that EnableBacktrackAtThisPos can
// be called multiple times and CommitBacktrackedTokens/Backtrack calls will
// be combined with the EnableBacktrackAtThisPos calls in reverse order.
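//
// A minimal usage sketch from a hypothetical caller; LooksLikeCast is an
// illustrative helper, not part of the Preprocessor API:
//
//   PP.EnableBacktrackAtThisPos(/*Unannotated=*/false);
//   Token Tok;
//   PP.Lex(Tok);                      // speculatively lex ahead
//   if (LooksLikeCast(Tok))
//     PP.CommitBacktrackedTokens();   // keep the tokens, stop tracking them
//   else
//     PP.Backtrack();                 // the same tokens will be re-lexed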
void Preprocessor::EnableBacktrackAtThisPos(bool Unannotated) {
  assert(LexLevel == 0 && "cannot use lookahead while lexing");
  BacktrackPositions.push_back(Unannotated ? ~CachedLexPos : CachedLexPos);
  if (Unannotated)
    UnannotatedBacktrackTokens.emplace_back(CachedTokens, CachedTokens.size());
  EnterCachingLexMode();
}

Preprocessor::CachedTokensTy Preprocessor::PopUnannotatedBacktrackTokens() {
  assert(isUnannotatedBacktrackEnabled() && "missing unannotated tokens?");
  auto [UnannotatedTokens, NumCachedToks] =
      std::move(UnannotatedBacktrackTokens.back());
  UnannotatedBacktrackTokens.pop_back();
  // If another unannotated backtrack is active, propagate any tokens that were
  // lexed (not cached) since EnableBacktrackAtThisPos was last called.
  if (isUnannotatedBacktrackEnabled())
    UnannotatedBacktrackTokens.back().first.append(
        UnannotatedTokens.begin() + NumCachedToks, UnannotatedTokens.end());
  return std::move(UnannotatedTokens);
}

// Disable the last EnableBacktrackAtThisPos call.
void Preprocessor::CommitBacktrackedTokens() {
  assert(isBacktrackEnabled() && "EnableBacktrackAtThisPos was not called!");
  auto [BacktrackPos, Unannotated] = LastBacktrackPos();
  BacktrackPositions.pop_back();
  if (Unannotated)
    PopUnannotatedBacktrackTokens();
}

// Make Preprocessor re-lex the tokens that were lexed since
// EnableBacktrackAtThisPos() was previously called.
void Preprocessor::Backtrack() {
  assert(isBacktrackEnabled() && "EnableBacktrackAtThisPos was not called!");
  auto [BacktrackPos, Unannotated] = LastBacktrackPos();
  BacktrackPositions.pop_back();
  CachedLexPos = BacktrackPos;
  if (Unannotated)
    CachedTokens = PopUnannotatedBacktrackTokens();
  recomputeCurLexerKind();
}

void Preprocessor::CachingLex(Token &Result) {
  if (!InCachingLexMode())
    return;

  // The assert in EnterCachingLexMode should prevent this from happening.
  assert(LexLevel == 1 &&
         "should not use token caching within the preprocessor");

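  // Serve the next token straight from the cache when one is available; it is
  // flagged as reinjected so downstream clients can tell it has already been
  // lexed once before.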
  if (CachedLexPos < CachedTokens.size()) {
    Result = CachedTokens[CachedLexPos++];
    Result.setFlag(Token::IsReinjected);
    return;
  }

  ExitCachingLexMode();
  Lex(Result);

  if (isBacktrackEnabled()) {
    // Cache the lexed token.
    EnterCachingLexModeUnchecked();
    CachedTokens.push_back(Result);
    ++CachedLexPos;
    if (isUnannotatedBacktrackEnabled())
      UnannotatedBacktrackTokens.back().first.push_back(Result);
    return;
  }

  if (CachedLexPos < CachedTokens.size()) {
    EnterCachingLexModeUnchecked();
  } else {
    // All cached tokens were consumed.
    CachedTokens.clear();
    CachedLexPos = 0;
  }
}

void Preprocessor::EnterCachingLexMode() {
  // The caching layer sits on top of all the other lexers, so it's incorrect
  // to cache tokens while inside a nested lex action. The cached tokens would
  // be retained after returning to the enclosing lex action and, at best,
  // would appear at the wrong position in the token stream.
  assert(LexLevel == 0 &&
         "entered caching lex mode while lexing something else");

  if (InCachingLexMode()) {
    assert(CurLexerCallback == CLK_CachingLexer && "Unexpected lexer kind");
    return;
  }

  EnterCachingLexModeUnchecked();
}

void Preprocessor::EnterCachingLexModeUnchecked() {
  assert(CurLexerCallback != CLK_CachingLexer && "already in caching lex mode");
  PushIncludeMacroStack();
  CurLexerCallback = CLK_CachingLexer;
}

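// PeekAhead - Lex and cache enough additional tokens so that the cache holds
// N tokens past CachedLexPos, and return the last one lexed. This is normally
// reached through Preprocessor::LookAhead() when the requested token is not
// already in the cache.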
const Token &Preprocessor::PeekAhead(unsigned N) {
  assert(CachedLexPos + N > CachedTokens.size() && "Confused caching.");
  ExitCachingLexMode();
  for (size_t C = CachedLexPos + N - CachedTokens.size(); C > 0; --C) {
    CachedTokens.push_back(Token());
    Lex(CachedTokens.back());
    if (isUnannotatedBacktrackEnabled())
      UnannotatedBacktrackTokens.back().first.push_back(CachedTokens.back());
  }
  EnterCachingLexMode();
  return CachedTokens.back();
}

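// AnnotatePreviousCachedTokens - Replace the cached tokens spanning the source
// range of the given annotation token with that single token.
//
// Illustrative example (the exact annotation kind depends on the parser): if
// the cache holds the tokens for "foo::bar" and the parser forms one
// annot_typename token covering that range, the cached "foo", "::", "bar"
// tokens collapse into that single annotation token, so a later Backtrack()
// replays the annotation rather than the raw tokens (unless an unannotated
// backtrack position is active).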
void Preprocessor::AnnotatePreviousCachedTokens(const Token &Tok) {
  assert(Tok.isAnnotation() && "Expected annotation token");
  assert(CachedLexPos != 0 && "Expected to have some cached tokens");
  assert(CachedTokens[CachedLexPos-1].getLastLoc() == Tok.getAnnotationEndLoc()
         && "The annotation should be until the most recent cached token");

  // Start from the end of the cached tokens list and look for the token
  // that is the beginning of the annotation token.
  for (CachedTokensTy::size_type i = CachedLexPos; i != 0; --i) {
    CachedTokensTy::iterator AnnotBegin = CachedTokens.begin() + i-1;
    if (AnnotBegin->getLocation() == Tok.getLocation()) {
      assert((!isBacktrackEnabled() || LastBacktrackPos().first <= i) &&
             "The backtrack pos points inside the annotated tokens!");
      // Replace the cached tokens with the single annotation token.
      if (i < CachedLexPos)
        CachedTokens.erase(AnnotBegin + 1, CachedTokens.begin() + CachedLexPos);
      *AnnotBegin = Tok;
      CachedLexPos = i;
      return;
    }
  }
}

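// IsPreviousCachedToken - Whether Tok matches the most recently cached token,
// i.e. it has the same kind and the same location as the token at
// CachedLexPos - 1.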
bool Preprocessor::IsPreviousCachedToken(const Token &Tok) const {
  // There's currently no cached token...
  if (!CachedLexPos)
    return false;

  const Token LastCachedTok = CachedTokens[CachedLexPos - 1];
  if (LastCachedTok.getKind() != Tok.getKind())
    return false;

  SourceLocation::IntTy RelOffset = 0;
  if ((!getSourceManager().isInSameSLocAddrSpace(
          Tok.getLocation(), getLastCachedTokenLocation(), &RelOffset)) ||
      RelOffset)
    return false;

  return true;
}

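// ReplacePreviousCachedToken - Replace the single token just before the
// current cached position with the given sequence of tokens, leaving
// CachedLexPos pointing just past the newly inserted tokens.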
void Preprocessor::ReplacePreviousCachedToken(ArrayRef<Token> NewToks) {
  assert(CachedLexPos != 0 && "Expected to have some cached tokens");
  CachedTokens.insert(CachedTokens.begin() + CachedLexPos - 1, NewToks.begin(),
                      NewToks.end());
  CachedTokens.erase(CachedTokens.begin() + CachedLexPos - 1 + NewToks.size());
  CachedLexPos += NewToks.size() - 1;
}