#![allow(
    clippy::assertions_on_result_states,
    clippy::items_after_statements,
    clippy::non_ascii_literal,
    clippy::octal_escapes
)]

use proc_macro2::{Ident, Literal, Punct, Spacing, Span, TokenStream, TokenTree};
use std::iter;
use std::str::{self, FromStr};

#[test]
fn idents() {
    assert_eq!(
        Ident::new("String", Span::call_site()).to_string(),
        "String"
    );
    assert_eq!(Ident::new("fn", Span::call_site()).to_string(), "fn");
    assert_eq!(Ident::new("_", Span::call_site()).to_string(), "_");
}
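
// A hedged addition, not part of the original suite: Ident compares equal to
// string types (the byte_order_mark test at the bottom of this file relies on
// the same impl), so a minimal sketch of that behavior:
#[test]
fn ident_eq_str_sketch() {
    let ident = Ident::new("String", Span::call_site());
    assert_eq!(ident, "String");
    assert_ne!(ident, "Str");
}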

#[test]
fn raw_idents() {
    assert_eq!(
        Ident::new_raw("String", Span::call_site()).to_string(),
        "r#String"
    );
    assert_eq!(Ident::new_raw("fn", Span::call_site()).to_string(), "r#fn");
}

#[test]
#[should_panic(expected = "`r#_` cannot be a raw identifier")]
fn ident_raw_underscore() {
    Ident::new_raw("_", Span::call_site());
}

#[test]
#[should_panic(expected = "`r#super` cannot be a raw identifier")]
fn ident_raw_reserved() {
    Ident::new_raw("super", Span::call_site());
}

#[test]
#[should_panic(expected = "Ident is not allowed to be empty; use Option<Ident>")]
fn ident_empty() {
    Ident::new("", Span::call_site());
}

#[test]
#[should_panic(expected = "Ident cannot be a number; use Literal instead")]
fn ident_number() {
    Ident::new("255", Span::call_site());
}

#[test]
#[should_panic(expected = "\"a#\" is not a valid Ident")]
fn ident_invalid() {
    Ident::new("a#", Span::call_site());
}

#[test]
#[should_panic(expected = "not a valid Ident")]
fn raw_ident_empty() {
    Ident::new("r#", Span::call_site());
}

#[test]
#[should_panic(expected = "not a valid Ident")]
fn raw_ident_number() {
    Ident::new("r#255", Span::call_site());
}

#[test]
#[should_panic(expected = "\"r#a#\" is not a valid Ident")]
fn raw_ident_invalid() {
    Ident::new("r#a#", Span::call_site());
}

#[test]
#[should_panic(expected = "not a valid Ident")]
fn lifetime_empty() {
    Ident::new("'", Span::call_site());
}

#[test]
#[should_panic(expected = "not a valid Ident")]
fn lifetime_number() {
    Ident::new("'255", Span::call_site());
}

#[test]
#[should_panic(expected = r#""'a#" is not a valid Ident"#)]
fn lifetime_invalid() {
    Ident::new("'a#", Span::call_site());
}

#[test]
fn literal_string() {
    assert_eq!(Literal::string("foo").to_string(), "\"foo\"");
    assert_eq!(Literal::string("\"").to_string(), "\"\\\"\"");
    assert_eq!(Literal::string("didn't").to_string(), "\"didn't\"");
    assert_eq!(
        Literal::string("a\00b\07c\08d\0e\0").to_string(),
        "\"a\\x000b\\x007c\\08d\\0e\\0\"",
    );

    "\"\\\r\n x\"".parse::<TokenStream>().unwrap();
    "\"\\\r\n \rx\"".parse::<TokenStream>().unwrap_err();
}
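
// A hedged companion sketch, not in the original suite: Literal::string also
// escapes backslashes, so a single backslash in the input renders as `\\`.
#[test]
fn literal_string_backslash_sketch() {
    assert_eq!(Literal::string("\\").to_string(), "\"\\\\\"");
}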

#[test]
fn literal_raw_string() {
    "r\"\r\n\"".parse::<TokenStream>().unwrap();

    fn raw_string_literal_with_hashes(n: usize) -> String {
        let mut literal = String::new();
        literal.push('r');
        literal.extend(iter::repeat('#').take(n));
        literal.push('"');
        literal.push('"');
        literal.extend(iter::repeat('#').take(n));
        literal
    }

    raw_string_literal_with_hashes(255)
        .parse::<TokenStream>()
        .unwrap();

    // https://github.com/rust-lang/rust/pull/95251
    raw_string_literal_with_hashes(256)
        .parse::<TokenStream>()
        .unwrap_err();
}
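
// A hedged sketch, assuming (as the roundtrip test further down also does)
// that parsed literals print back as their exact source text: a raw string
// keeps its raw form.
#[test]
fn raw_string_preserved_sketch() {
    let tokens = "r#\"ab\"#".parse::<TokenStream>().unwrap();
    assert_eq!(tokens.to_string(), "r#\"ab\"#");
}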

#[test]
fn literal_byte_string() {
    assert_eq!(Literal::byte_string(b"").to_string(), "b\"\"");
    assert_eq!(
        Literal::byte_string(b"\0\t\n\r\"\\2\x10").to_string(),
        "b\"\\0\\t\\n\\r\\\"\\\\2\\x10\"",
    );
    assert_eq!(
        Literal::byte_string(b"a\00b\07c\08d\0e\0").to_string(),
        "b\"a\\x000b\\x007c\\08d\\0e\\0\"",
    );

    "b\"\\\r\n x\"".parse::<TokenStream>().unwrap();
    "b\"\\\r\n \rx\"".parse::<TokenStream>().unwrap_err();
    "b\"\\\r\n \u{a0}x\"".parse::<TokenStream>().unwrap_err();
    "br\"\u{a0}\"".parse::<TokenStream>().unwrap_err();
}

#[test]
fn literal_c_string() {
    let strings = r###"
        c"hello\x80我叫\u{1F980}" // from the RFC
        cr"\"
        cr##"Hello "world"!"##
        c"\t\n\r\"\\"
    "###;

    let mut tokens = strings.parse::<TokenStream>().unwrap().into_iter();

    for expected in &[
        r#"c"hello\x80我叫\u{1F980}""#,
        r#"cr"\""#,
        r###"cr##"Hello "world"!"##"###,
        r#"c"\t\n\r\"\\""#,
    ] {
        match tokens.next().unwrap() {
            TokenTree::Literal(literal) => {
                assert_eq!(literal.to_string(), *expected);
            }
            unexpected => panic!("unexpected token: {:?}", unexpected),
        }
    }

    if let Some(unexpected) = tokens.next() {
        panic!("unexpected token: {:?}", unexpected);
    }

    for invalid in &[r#"c"\0""#, r#"c"\x00""#, r#"c"\u{0}""#, "c\"\0\""] {
        if let Ok(unexpected) = invalid.parse::<TokenStream>() {
            panic!("unexpected token: {:?}", unexpected);
        }
    }
}

#[test]
fn literal_character() {
    assert_eq!(Literal::character('x').to_string(), "'x'");
    assert_eq!(Literal::character('\'').to_string(), "'\\''");
    assert_eq!(Literal::character('"').to_string(), "'\"'");
}
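
// A hedged sketch, on the assumption that control characters are escaped in
// the rendered literal the same way the quote is escaped above:
#[test]
fn literal_character_escape_sketch() {
    assert_eq!(Literal::character('\n').to_string(), "'\\n'");
    assert_eq!(Literal::character('\t').to_string(), "'\\t'");
}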

#[test]
fn literal_integer() {
    assert_eq!(Literal::u8_suffixed(10).to_string(), "10u8");
    assert_eq!(Literal::u16_suffixed(10).to_string(), "10u16");
    assert_eq!(Literal::u32_suffixed(10).to_string(), "10u32");
    assert_eq!(Literal::u64_suffixed(10).to_string(), "10u64");
    assert_eq!(Literal::u128_suffixed(10).to_string(), "10u128");
    assert_eq!(Literal::usize_suffixed(10).to_string(), "10usize");

    assert_eq!(Literal::i8_suffixed(10).to_string(), "10i8");
    assert_eq!(Literal::i16_suffixed(10).to_string(), "10i16");
    assert_eq!(Literal::i32_suffixed(10).to_string(), "10i32");
    assert_eq!(Literal::i64_suffixed(10).to_string(), "10i64");
    assert_eq!(Literal::i128_suffixed(10).to_string(), "10i128");
    assert_eq!(Literal::isize_suffixed(10).to_string(), "10isize");

    assert_eq!(Literal::u8_unsuffixed(10).to_string(), "10");
    assert_eq!(Literal::u16_unsuffixed(10).to_string(), "10");
    assert_eq!(Literal::u32_unsuffixed(10).to_string(), "10");
    assert_eq!(Literal::u64_unsuffixed(10).to_string(), "10");
    assert_eq!(Literal::u128_unsuffixed(10).to_string(), "10");
    assert_eq!(Literal::usize_unsuffixed(10).to_string(), "10");

    assert_eq!(Literal::i8_unsuffixed(10).to_string(), "10");
    assert_eq!(Literal::i16_unsuffixed(10).to_string(), "10");
    assert_eq!(Literal::i32_unsuffixed(10).to_string(), "10");
    assert_eq!(Literal::i64_unsuffixed(10).to_string(), "10");
    assert_eq!(Literal::i128_unsuffixed(10).to_string(), "10");
    assert_eq!(Literal::isize_unsuffixed(10).to_string(), "10");
}

#[test]
fn literal_float() {
    assert_eq!(Literal::f32_suffixed(10.0).to_string(), "10f32");
    assert_eq!(Literal::f64_suffixed(10.0).to_string(), "10f64");

    assert_eq!(Literal::f32_unsuffixed(10.0).to_string(), "10.0");
    assert_eq!(Literal::f64_unsuffixed(10.0).to_string(), "10.0");
}
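
// A hedged sketch, not in the original suite: a non-integral value should
// keep its fractional digits, in contrast to the 10.0 cases above where the
// suffixed form drops the ".0".
#[test]
fn literal_float_fractional_sketch() {
    assert_eq!(Literal::f64_unsuffixed(1.5).to_string(), "1.5");
    assert_eq!(Literal::f64_suffixed(1.5).to_string(), "1.5f64");
}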

#[test]
fn literal_suffix() {
    fn token_count(p: &str) -> usize {
        p.parse::<TokenStream>().unwrap().into_iter().count()
    }

    assert_eq!(token_count("999u256"), 1);
    assert_eq!(token_count("999r#u256"), 3);
    assert_eq!(token_count("1."), 1);
    assert_eq!(token_count("1.f32"), 3);
    assert_eq!(token_count("1.0_0"), 1);
    assert_eq!(token_count("1._0"), 3);
    assert_eq!(token_count("1._m"), 3);
    assert_eq!(token_count("\"\"s"), 1);
    assert_eq!(token_count("r\"\"r"), 1);
    assert_eq!(token_count("b\"\"b"), 1);
    assert_eq!(token_count("br\"\"br"), 1);
    assert_eq!(token_count("r#\"\"#r"), 1);
    assert_eq!(token_count("'c'c"), 1);
    assert_eq!(token_count("b'b'b"), 1);
    assert_eq!(token_count("0E"), 1);
    assert_eq!(token_count("0o0A"), 1);
    assert_eq!(token_count("0E--0"), 4);
    assert_eq!(token_count("0.0ECMA"), 1);
}

#[test]
fn literal_iter_negative() {
    let negative_literal = Literal::i32_suffixed(-3);
    let tokens = TokenStream::from(TokenTree::Literal(negative_literal));
    let mut iter = tokens.into_iter();
    match iter.next().unwrap() {
        TokenTree::Punct(punct) => {
            assert_eq!(punct.as_char(), '-');
            assert_eq!(punct.spacing(), Spacing::Alone);
        }
        unexpected => panic!("unexpected token {:?}", unexpected),
    }
    match iter.next().unwrap() {
        TokenTree::Literal(literal) => {
            assert_eq!(literal.to_string(), "3i32");
        }
        unexpected => panic!("unexpected token {:?}", unexpected),
    }
    assert!(iter.next().is_none());
}
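
// A hedged counterpart sketch: a non-negative literal stays a single token
// and does not get split into a Punct plus a Literal.
#[test]
fn literal_iter_positive_sketch() {
    let positive_literal = Literal::i32_suffixed(3);
    let tokens = TokenStream::from(TokenTree::Literal(positive_literal));
    let mut iter = tokens.into_iter();
    match iter.next().unwrap() {
        TokenTree::Literal(literal) => assert_eq!(literal.to_string(), "3i32"),
        unexpected => panic!("unexpected token {:?}", unexpected),
    }
    assert!(iter.next().is_none());
}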

#[test]
fn literal_parse() {
    assert!("1".parse::<Literal>().is_ok());
    assert!("-1".parse::<Literal>().is_ok());
    assert!("-1u12".parse::<Literal>().is_ok());
    assert!("1.0".parse::<Literal>().is_ok());
    assert!("-1.0".parse::<Literal>().is_ok());
    assert!("-1.0f12".parse::<Literal>().is_ok());
    assert!("'a'".parse::<Literal>().is_ok());
    assert!("\"\n\"".parse::<Literal>().is_ok());
    assert!("0 1".parse::<Literal>().is_err());
    assert!(" 0".parse::<Literal>().is_err());
    assert!("0 ".parse::<Literal>().is_err());
    assert!("/* comment */0".parse::<Literal>().is_err());
    assert!("0/* comment */".parse::<Literal>().is_err());
    assert!("0// comment".parse::<Literal>().is_err());
    assert!("- 1".parse::<Literal>().is_err());
    assert!("- 1.0".parse::<Literal>().is_err());
    assert!("-\"\"".parse::<Literal>().is_err());
}
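
// A hedged sketch, assuming the parser records the original text of the
// literal: a successfully parsed Literal prints back as the exact input.
#[test]
fn literal_parse_to_string_sketch() {
    let lit = "1.0".parse::<Literal>().unwrap();
    assert_eq!(lit.to_string(), "1.0");
    let neg = "-1.0f32".parse::<Literal>().unwrap();
    assert_eq!(neg.to_string(), "-1.0f32");
}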

#[test]
fn literal_span() {
    let positive = "0.1".parse::<Literal>().unwrap();
    let negative = "-0.1".parse::<Literal>().unwrap();
    let subspan = positive.subspan(1..2);

    #[cfg(not(span_locations))]
    {
        let _ = negative;
        assert!(subspan.is_none());
    }

    #[cfg(span_locations)]
    {
        assert_eq!(positive.span().start().column, 0);
        assert_eq!(positive.span().end().column, 3);
        assert_eq!(negative.span().start().column, 0);
        assert_eq!(negative.span().end().column, 4);
        assert_eq!(subspan.unwrap().source_text().unwrap(), ".");
    }

    assert!(positive.subspan(1..4).is_none());
}

#[cfg(span_locations)]
#[test]
fn source_text() {
    let input = " 𓀕 a z ";
    let mut tokens = input
        .parse::<proc_macro2::TokenStream>()
        .unwrap()
        .into_iter();

    let first = tokens.next().unwrap();
    assert_eq!("𓀕", first.span().source_text().unwrap());

    let second = tokens.next().unwrap();
    let third = tokens.next().unwrap();
    assert_eq!("z", third.span().source_text().unwrap());
    assert_eq!("a", second.span().source_text().unwrap());
}

#[test]
fn roundtrip() {
    fn roundtrip(p: &str) {
        println!("parse: {}", p);
        let s = p.parse::<TokenStream>().unwrap().to_string();
        println!("first: {}", s);
        let s2 = s.parse::<TokenStream>().unwrap().to_string();
        assert_eq!(s, s2);
    }
    roundtrip("a");
    roundtrip("<<");
    roundtrip("<<=");
    roundtrip(
        "
        1
        1.0
        1f32
        2f64
        1usize
        4isize
        4e10
        1_000
        1_0i32
        8u8
        9
        0
        0xffffffffffffffffffffffffffffffff
        1x
        1u80
        1f320
        ",
    );
    roundtrip("'a");
    roundtrip("'_");
    roundtrip("'static");
    roundtrip("'\\u{10__FFFF}'");
    roundtrip("\"\\u{10_F0FF__}foo\\u{1_0_0_0__}\"");
}

#[test]
fn fail() {
    fn fail(p: &str) {
        if let Ok(s) = p.parse::<TokenStream>() {
            panic!("should have failed to parse: {}\n{:#?}", p, s);
        }
    }
    fail("' static");
    fail("r#1");
    fail("r#_");
    fail("\"\\u{0000000}\""); // overlong unicode escape (rust allows at most 6 hex digits)
    fail("\"\\u{999999}\""); // outside of valid range of char
    fail("\"\\u{_0}\""); // leading underscore
    fail("\"\\u{}\""); // empty
    fail("b\"\r\""); // bare carriage return in byte string
    fail("r\"\r\""); // bare carriage return in raw string
    fail("\"\\\r \""); // backslash carriage return
    fail("'aa'aa");
    fail("br##\"\"#");
    fail("\"\\\n\u{85}\r\"");
}

#[cfg(span_locations)]
#[test]
fn span_test() {
    check_spans(
        "\
/// This is a document comment
testing 123
{
  testing 234
}",
        &[
            (1, 0, 1, 30),  // #
            (1, 0, 1, 30),  // [ ... ]
            (1, 0, 1, 30),  // doc
            (1, 0, 1, 30),  // =
            (1, 0, 1, 30),  // "This is..."
            (2, 0, 2, 7),   // testing
            (2, 8, 2, 11),  // 123
            (3, 0, 5, 1),   // { ... }
            (4, 2, 4, 9),   // testing
            (4, 10, 4, 13), // 234
        ],
    );
}

#[cfg(procmacro2_semver_exempt)]
#[cfg(not(nightly))]
#[test]
fn default_span() {
    let start = Span::call_site().start();
    assert_eq!(start.line, 1);
    assert_eq!(start.column, 0);
    let end = Span::call_site().end();
    assert_eq!(end.line, 1);
    assert_eq!(end.column, 0);
    let source_file = Span::call_site().source_file();
    assert_eq!(source_file.path().to_string_lossy(), "<unspecified>");
    assert!(!source_file.is_real());
}

#[cfg(procmacro2_semver_exempt)]
#[test]
fn span_join() {
    let source1 = "aaa\nbbb"
        .parse::<TokenStream>()
        .unwrap()
        .into_iter()
        .collect::<Vec<_>>();
    let source2 = "ccc\nddd"
        .parse::<TokenStream>()
        .unwrap()
        .into_iter()
        .collect::<Vec<_>>();

    assert!(source1[0].span().source_file() != source2[0].span().source_file());
    assert_eq!(
        source1[0].span().source_file(),
        source1[1].span().source_file()
    );

    let joined1 = source1[0].span().join(source1[1].span());
    let joined2 = source1[0].span().join(source2[0].span());
    assert!(joined1.is_some());
    assert!(joined2.is_none());

    let start = joined1.unwrap().start();
    let end = joined1.unwrap().end();
    assert_eq!(start.line, 1);
    assert_eq!(start.column, 0);
    assert_eq!(end.line, 2);
    assert_eq!(end.column, 3);

    assert_eq!(
        joined1.unwrap().source_file(),
        source1[0].span().source_file()
    );
}

#[test]
fn no_panic() {
    let s = str::from_utf8(b"b\'\xc2\x86 \x00\x00\x00^\"").unwrap();
    assert!(s.parse::<TokenStream>().is_err());
}

#[test]
fn punct_before_comment() {
    let mut tts = TokenStream::from_str("~// comment").unwrap().into_iter();
    match tts.next().unwrap() {
        TokenTree::Punct(tt) => {
            assert_eq!(tt.as_char(), '~');
            assert_eq!(tt.spacing(), Spacing::Alone);
        }
        wrong => panic!("wrong token {:?}", wrong),
    }
}

#[test]
fn joint_last_token() {
    // This test verifies that we match the behavior of libproc_macro *not* in
    // the range nightly-2020-09-06 through nightly-2020-09-10, in which this
    // behavior was temporarily broken.
    // See https://github.com/rust-lang/rust/issues/76399

    let joint_punct = Punct::new(':', Spacing::Joint);
    let stream = TokenStream::from(TokenTree::Punct(joint_punct));
    let punct = match stream.into_iter().next().unwrap() {
        TokenTree::Punct(punct) => punct,
        _ => unreachable!(),
    };
    assert_eq!(punct.spacing(), Spacing::Joint);
}
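
// A hedged companion sketch: Spacing::Alone should likewise survive a trip
// through a TokenStream.
#[test]
fn alone_last_token_sketch() {
    let alone_punct = Punct::new(':', Spacing::Alone);
    let stream = TokenStream::from(TokenTree::Punct(alone_punct));
    let punct = match stream.into_iter().next().unwrap() {
        TokenTree::Punct(punct) => punct,
        _ => unreachable!(),
    };
    assert_eq!(punct.spacing(), Spacing::Alone);
}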

#[test]
fn raw_identifier() {
    let mut tts = TokenStream::from_str("r#dyn").unwrap().into_iter();
    match tts.next().unwrap() {
        TokenTree::Ident(raw) => assert_eq!("r#dyn", raw.to_string()),
        wrong => panic!("wrong token {:?}", wrong),
    }
    assert!(tts.next().is_none());
}

#[test]
fn test_debug_ident() {
    let ident = Ident::new("proc_macro", Span::call_site());

    #[cfg(not(span_locations))]
    let expected = "Ident(proc_macro)";

    #[cfg(span_locations)]
    let expected = "Ident { sym: proc_macro }";

    assert_eq!(expected, format!("{:?}", ident));
}

#[test]
fn test_debug_tokenstream() {
    let tts = TokenStream::from_str("[a + 1]").unwrap();

    #[cfg(not(span_locations))]
    let expected = "\
TokenStream [
    Group {
        delimiter: Bracket,
        stream: TokenStream [
            Ident {
                sym: a,
            },
            Punct {
                char: '+',
                spacing: Alone,
            },
            Literal {
                lit: 1,
            },
        ],
    },
]\
";

    #[cfg(not(span_locations))]
    let expected_before_trailing_commas = "\
TokenStream [
    Group {
        delimiter: Bracket,
        stream: TokenStream [
            Ident {
                sym: a
            },
            Punct {
                char: '+',
                spacing: Alone
            },
            Literal {
                lit: 1
            }
        ]
    }
]\
";

    #[cfg(span_locations)]
    let expected = "\
TokenStream [
    Group {
        delimiter: Bracket,
        stream: TokenStream [
            Ident {
                sym: a,
                span: bytes(2..3),
            },
            Punct {
                char: '+',
                spacing: Alone,
                span: bytes(4..5),
            },
            Literal {
                lit: 1,
                span: bytes(6..7),
            },
        ],
        span: bytes(1..8),
    },
]\
";

    #[cfg(span_locations)]
    let expected_before_trailing_commas = "\
TokenStream [
    Group {
        delimiter: Bracket,
        stream: TokenStream [
            Ident {
                sym: a,
                span: bytes(2..3)
            },
            Punct {
                char: '+',
                spacing: Alone,
                span: bytes(4..5)
            },
            Literal {
                lit: 1,
                span: bytes(6..7)
            }
        ],
        span: bytes(1..8)
    }
]\
";

    let actual = format!("{:#?}", tts);
    if actual.ends_with(",\n]") {
        assert_eq!(expected, actual);
    } else {
        assert_eq!(expected_before_trailing_commas, actual);
    }
}

#[test]
fn default_tokenstream_is_empty() {
    let default_token_stream = <TokenStream as Default>::default();

    assert!(default_token_stream.is_empty());
}
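
// A hedged sketch: TokenStream::new() creates an empty stream, so it should
// agree with the Default impl checked above.
#[test]
fn new_tokenstream_is_empty_sketch() {
    assert!(TokenStream::new().is_empty());
}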

#[test]
fn tokenstream_size_hint() {
    let tokens = "a b (c d) e".parse::<TokenStream>().unwrap();

    assert_eq!(tokens.into_iter().size_hint(), (4, Some(4)));
}
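
// A hedged sketch illustrating why the size hint above is 4: a parenthesized
// group counts as a single token tree at the top level.
#[test]
fn group_is_one_token_sketch() {
    let tokens = "(c d)".parse::<TokenStream>().unwrap();
    assert_eq!(tokens.into_iter().count(), 1);
}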

#[test]
fn tuple_indexing() {
    // This behavior may change depending on https://github.com/rust-lang/rust/pull/71322
    let mut tokens = "tuple.0.0".parse::<TokenStream>().unwrap().into_iter();
    assert_eq!("tuple", tokens.next().unwrap().to_string());
    assert_eq!(".", tokens.next().unwrap().to_string());
    assert_eq!("0.0", tokens.next().unwrap().to_string());
    assert!(tokens.next().is_none());
}
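
// A hedged sketch: single-level field access tokenizes the index as its own
// integer literal, unlike the nested `.0.0` case above which yields a single
// float-shaped literal.
#[test]
fn single_indexing_sketch() {
    let mut tokens = "tuple.0".parse::<TokenStream>().unwrap().into_iter();
    assert_eq!("tuple", tokens.next().unwrap().to_string());
    assert_eq!(".", tokens.next().unwrap().to_string());
    assert_eq!("0", tokens.next().unwrap().to_string());
    assert!(tokens.next().is_none());
}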

#[cfg(span_locations)]
#[test]
fn non_ascii_tokens() {
    check_spans("// abc", &[]);
    check_spans("// ábc", &[]);
    check_spans("// abc x", &[]);
    check_spans("// ábc x", &[]);
    check_spans("/* abc */ x", &[(1, 10, 1, 11)]);
    check_spans("/* ábc */ x", &[(1, 10, 1, 11)]);
    check_spans("/* ab\nc */ x", &[(2, 5, 2, 6)]);
    check_spans("/* áb\nc */ x", &[(2, 5, 2, 6)]);
    check_spans("/*** abc */ x", &[(1, 12, 1, 13)]);
    check_spans("/*** ábc */ x", &[(1, 12, 1, 13)]);
    check_spans(r#""abc""#, &[(1, 0, 1, 5)]);
    check_spans(r#""ábc""#, &[(1, 0, 1, 5)]);
    check_spans(r##"r#"abc"#"##, &[(1, 0, 1, 8)]);
    check_spans(r##"r#"ábc"#"##, &[(1, 0, 1, 8)]);
    check_spans("r#\"a\nc\"#", &[(1, 0, 2, 3)]);
    check_spans("r#\"á\nc\"#", &[(1, 0, 2, 3)]);
    check_spans("'a'", &[(1, 0, 1, 3)]);
    check_spans("'á'", &[(1, 0, 1, 3)]);
    check_spans("//! abc", &[(1, 0, 1, 7), (1, 0, 1, 7), (1, 0, 1, 7)]);
    check_spans("//! ábc", &[(1, 0, 1, 7), (1, 0, 1, 7), (1, 0, 1, 7)]);
    check_spans("//! abc\n", &[(1, 0, 1, 7), (1, 0, 1, 7), (1, 0, 1, 7)]);
    check_spans("//! ábc\n", &[(1, 0, 1, 7), (1, 0, 1, 7), (1, 0, 1, 7)]);
    check_spans("/*! abc */", &[(1, 0, 1, 10), (1, 0, 1, 10), (1, 0, 1, 10)]);
    check_spans("/*! ábc */", &[(1, 0, 1, 10), (1, 0, 1, 10), (1, 0, 1, 10)]);
    check_spans("/*! a\nc */", &[(1, 0, 2, 4), (1, 0, 2, 4), (1, 0, 2, 4)]);
    check_spans("/*! á\nc */", &[(1, 0, 2, 4), (1, 0, 2, 4), (1, 0, 2, 4)]);
    check_spans("abc", &[(1, 0, 1, 3)]);
    check_spans("ábc", &[(1, 0, 1, 3)]);
    check_spans("ábć", &[(1, 0, 1, 3)]);
    check_spans("abc// foo", &[(1, 0, 1, 3)]);
    check_spans("ábc// foo", &[(1, 0, 1, 3)]);
    check_spans("ábć// foo", &[(1, 0, 1, 3)]);
    check_spans("b\"a\\\n c\"", &[(1, 0, 2, 3)]);
}

#[cfg(span_locations)]
fn check_spans(p: &str, mut lines: &[(usize, usize, usize, usize)]) {
    let ts = p.parse::<TokenStream>().unwrap();
    check_spans_internal(ts, &mut lines);
    assert!(lines.is_empty(), "leftover ranges: {:?}", lines);
}

#[cfg(span_locations)]
fn check_spans_internal(ts: TokenStream, lines: &mut &[(usize, usize, usize, usize)]) {
    for i in ts {
        if let Some((&(sline, scol, eline, ecol), rest)) = lines.split_first() {
            *lines = rest;

            let start = i.span().start();
            assert_eq!(start.line, sline, "sline did not match for {}", i);
            assert_eq!(start.column, scol, "scol did not match for {}", i);

            let end = i.span().end();
            assert_eq!(end.line, eline, "eline did not match for {}", i);
            assert_eq!(end.column, ecol, "ecol did not match for {}", i);

            if let TokenTree::Group(g) = i {
                check_spans_internal(g.stream().clone(), lines);
            }
        }
    }
}

#[test]
fn whitespace() {
    // space, horizontal tab, vertical tab, form feed, carriage return, line
    // feed, non-breaking space, left-to-right mark, right-to-left mark
    let various_spaces = " \t\u{b}\u{c}\r\n\u{a0}\u{200e}\u{200f}";
    let tokens = various_spaces.parse::<TokenStream>().unwrap();
    assert_eq!(tokens.into_iter().count(), 0);

    let lone_carriage_returns = " \r \r\r\n ";
    lone_carriage_returns.parse::<TokenStream>().unwrap();
}

#[test]
fn byte_order_mark() {
    let string = "\u{feff}foo";
    let tokens = string.parse::<TokenStream>().unwrap();
    match tokens.into_iter().next().unwrap() {
        TokenTree::Ident(ident) => assert_eq!(ident, "foo"),
        _ => unreachable!(),
    }

    let string = "foo\u{feff}";
    string.parse::<TokenStream>().unwrap_err();
}