/* Classes for modeling the state of memory.
   Copyright (C) 2020-2024 Free Software Foundation, Inc.
   Contributed by David Malcolm <dmalcolm@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#define INCLUDE_MEMORY
#include "system.h"
#include "coretypes.h"
#include "tree.h"
#include "function.h"
#include "basic-block.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "diagnostic-core.h"
#include "graphviz.h"
#include "options.h"
#include "cgraph.h"
#include "tree-dfa.h"
#include "stringpool.h"
#include "convert.h"
#include "target.h"
#include "fold-const.h"
#include "tree-pretty-print.h"
#include "diagnostic-color.h"
#include "bitmap.h"
#include "selftest.h"
#include "analyzer/analyzer.h"
#include "analyzer/analyzer-logging.h"
#include "ordered-hash-map.h"
#include "options.h"
#include "cfg.h"
#include "analyzer/supergraph.h"
#include "sbitmap.h"
#include "analyzer/call-string.h"
#include "analyzer/program-point.h"
#include "analyzer/store.h"
#include "analyzer/region-model.h"
#include "analyzer/call-summary.h"
#include "analyzer/analyzer-selftests.h"
#include "stor-layout.h"

#if ENABLE_ANALYZER

namespace ana {

/* Dump SVALS to PP, sorting them to ensure determinism. */

static void
dump_svalue_set (const hash_set <const svalue *> &svals,
                 pretty_printer *pp, bool simple)
{
  auto_vec <const svalue *> v;
  for (hash_set<const svalue *>::iterator iter = svals.begin ();
       iter != svals.end (); ++iter)
    {
      v.safe_push (*iter);
    }
  v.qsort (svalue::cmp_ptr_ptr);

  pp_character (pp, '{');
  const svalue *sval;
  unsigned i;
  FOR_EACH_VEC_ELT (v, i, sval)
    {
      if (i > 0)
        pp_string (pp, ", ");
      sval->dump_to_pp (pp, simple);
    }
  pp_character (pp, '}');
}

/* class uncertainty_t. */

/* Dump this object to PP. */

void
uncertainty_t::dump_to_pp (pretty_printer *pp, bool simple) const
{
  pp_string (pp, "{m_maybe_bound_svals: ");
  dump_svalue_set (m_maybe_bound_svals, pp, simple);

  pp_string (pp, ", m_mutable_at_unknown_call_svals: ");
  dump_svalue_set (m_mutable_at_unknown_call_svals, pp, simple);
  pp_string (pp, "}");
}

/* Dump this object to stderr. */

DEBUG_FUNCTION void
uncertainty_t::dump (bool simple) const
{
  pretty_printer pp;
  pp_format_decoder (&pp) = default_tree_printer;
  pp_show_color (&pp) = pp_show_color (global_dc->printer);
  pp.buffer->stream = stderr;
  dump_to_pp (&pp, simple);
  pp_newline (&pp);
  pp_flush (&pp);
}

/* class binding_key. */

const binding_key *
binding_key::make (store_manager *mgr, const region *r)
{
  region_offset offset = r->get_offset (mgr->get_svalue_manager ());
  if (offset.symbolic_p ())
    return mgr->get_symbolic_binding (r);
  else
    {
      bit_size_t bit_size;
      if (r->get_bit_size (&bit_size))
        {
          /* Must be non-empty. */
          gcc_assert (bit_size > 0);
          return mgr->get_concrete_binding (offset.get_bit_offset (),
                                            bit_size);
        }
      else
        return mgr->get_symbolic_binding (r);
    }
}
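
/* For example (illustrative): a decl such as "int32_t i;" has a concrete
   offset of 0 bits and a concrete size of 32 bits within its base region,
   so binding_key::make yields the concrete binding {start: 0, size: 32,
   next: 32}; an access whose offset can't be resolved to a constant
   (such as arr[i] with symbolic i) gets a symbolic_binding instead.  */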

/* Dump this binding_key to stderr. */

DEBUG_FUNCTION void
binding_key::dump (bool simple) const
{
  pretty_printer pp;
  pp_format_decoder (&pp) = default_tree_printer;
  pp_show_color (&pp) = pp_show_color (global_dc->printer);
  pp.buffer->stream = stderr;
  dump_to_pp (&pp, simple);
  pp_newline (&pp);
  pp_flush (&pp);
}

/* Get a description of this binding_key. */

label_text
binding_key::get_desc (bool simple) const
{
  pretty_printer pp;
  pp_format_decoder (&pp) = default_tree_printer;
  dump_to_pp (&pp, simple);
  return label_text::take (xstrdup (pp_formatted_text (&pp)));
}

/* qsort callback. */

int
binding_key::cmp_ptrs (const void *p1, const void *p2)
{
  const binding_key * const *pk1 = (const binding_key * const *)p1;
  const binding_key * const *pk2 = (const binding_key * const *)p2;
  return cmp (*pk1, *pk2);
}

/* Comparator for binding_keys. */

int
binding_key::cmp (const binding_key *k1, const binding_key *k2)
{
  int concrete1 = k1->concrete_p ();
  int concrete2 = k2->concrete_p ();
  if (int concrete_cmp = concrete1 - concrete2)
    return concrete_cmp;
  if (concrete1)
    {
      const concrete_binding *b1 = (const concrete_binding *)k1;
      const concrete_binding *b2 = (const concrete_binding *)k2;
      if (int start_cmp = wi::cmp (b1->get_start_bit_offset (),
                                   b2->get_start_bit_offset (),
                                   SIGNED))
        return start_cmp;
      return wi::cmp (b1->get_next_bit_offset (), b2->get_next_bit_offset (),
                      SIGNED);
    }
  else
    {
      const symbolic_binding *s1 = (const symbolic_binding *)k1;
      const symbolic_binding *s2 = (const symbolic_binding *)k2;
      if (s1 > s2)
        return 1;
      if (s1 < s2)
        return -1;
      return 0;
    }
}

/* struct bit_range. */

void
bit_range::dump_to_pp (pretty_printer *pp) const
{
  byte_range bytes (0, 0);
  if (as_byte_range (&bytes))
    bytes.dump_to_pp (pp);
  else
    {
      pp_string (pp, "start: ");
      pp_wide_int (pp, m_start_bit_offset, SIGNED);
      pp_string (pp, ", size: ");
      pp_wide_int (pp, m_size_in_bits, SIGNED);
      pp_string (pp, ", next: ");
      pp_wide_int (pp, get_next_bit_offset (), SIGNED);
    }
}

/* Dump this object to stderr. */

DEBUG_FUNCTION void
bit_range::dump () const
{
  pretty_printer pp;
  pp.buffer->stream = stderr;
  dump_to_pp (&pp);
  pp_newline (&pp);
  pp_flush (&pp);
}

/* Generate a JSON value for this bit_range.
   This is intended for debugging the analyzer rather
   than serialization. */

json::object *
bit_range::to_json () const
{
  json::object *obj = new json::object ();
  obj->set ("start_bit_offset",
            bit_offset_to_json (m_start_bit_offset));
  obj->set ("size_in_bits",
            bit_offset_to_json (m_size_in_bits));
  return obj;
}

/* If OTHER is a subset of this, return true and, if OUT is
   non-null, write to *OUT the relative range of OTHER within this.
   Otherwise return false. */

bool
bit_range::contains_p (const bit_range &other, bit_range *out) const
{
  if (contains_p (other.get_start_bit_offset ())
      && contains_p (other.get_last_bit_offset ()))
    {
      if (out)
        {
          out->m_start_bit_offset = other.m_start_bit_offset - m_start_bit_offset;
          out->m_size_in_bits = other.m_size_in_bits;
        }
      return true;
    }
  else
    return false;
}
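
/* For example (illustrative): with this == {start: 0, size: 32} and
   OTHER == {start: 8, size: 8}, OTHER is contained, and *OUT is written
   with {start: 8, size: 8} (OTHER's position relative to the start of
   this); an OTHER of {start: 24, size: 16} extends past the end of this
   and yields false.  */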

/* If OTHER intersects this, return true and write
   the relative range of OTHER within THIS to *OUT_THIS,
   and the relative range of THIS within OTHER to *OUT_OTHER.
   Otherwise return false. */

bool
bit_range::intersects_p (const bit_range &other,
                         bit_range *out_this,
                         bit_range *out_other) const
{
  if (get_start_bit_offset () < other.get_next_bit_offset ()
      && other.get_start_bit_offset () < get_next_bit_offset ())
    {
      bit_offset_t overlap_start
        = MAX (get_start_bit_offset (),
               other.get_start_bit_offset ());
      bit_offset_t overlap_next
        = MIN (get_next_bit_offset (),
               other.get_next_bit_offset ());
      if (overlap_next <= overlap_start)
        /* If this has happened, some kind of overflow has happened in
           our arithmetic.  For now, reject such cases. */
        return false;
      bit_range abs_overlap_bits (overlap_start, overlap_next - overlap_start);
      *out_this = abs_overlap_bits - get_start_bit_offset ();
      *out_other = abs_overlap_bits - other.get_start_bit_offset ();
      return true;
    }
  else
    return false;
}
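
/* For example (illustrative): {start: 0, size: 8} and {start: 4, size: 8}
   overlap in bits 4-7, so *OUT_THIS is written with {start: 4, size: 4}
   (the overlap relative to the first range) and *OUT_OTHER with
   {start: 0, size: 4} (the overlap relative to OTHER).  */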

/* Return true if THIS and OTHER intersect and write the number
   of bits both buffers overlap to *OUT_NUM_OVERLAP_BITS.

   Otherwise return false. */

bool
bit_range::intersects_p (const bit_range &other,
                         bit_size_t *out_num_overlap_bits) const
{
  if (get_start_bit_offset () < other.get_next_bit_offset ()
      && other.get_start_bit_offset () < get_next_bit_offset ())
    {
      bit_offset_t overlap_start = MAX (get_start_bit_offset (),
                                        other.get_start_bit_offset ());
      bit_offset_t overlap_next = MIN (get_next_bit_offset (),
                                       other.get_next_bit_offset ());
      if (overlap_next <= overlap_start)
        /* If this has happened, some kind of overflow has happened in
           our arithmetic.  For now, reject such cases. */
        return false;
      *out_num_overlap_bits = overlap_next - overlap_start;
      return true;
    }
  else
    return false;
}

/* Return true if THIS exceeds OTHER and write the overhanging
   bit range to OUT_OVERHANGING_BIT_RANGE. */

bool
bit_range::exceeds_p (const bit_range &other,
                      bit_range *out_overhanging_bit_range) const
{
  gcc_assert (!empty_p ());

  if (other.get_next_bit_offset () < get_next_bit_offset ())
    {
      /* THIS definitely exceeds OTHER. */
      bit_offset_t start = MAX (get_start_bit_offset (),
                                other.get_next_bit_offset ());
      bit_offset_t size = get_next_bit_offset () - start;
      if (size <= 0)
        /* If this has happened, some kind of overflow has happened in
           our arithmetic.  For now, reject such cases. */
        return false;
      out_overhanging_bit_range->m_start_bit_offset = start;
      out_overhanging_bit_range->m_size_in_bits = size;
      return true;
    }
  else
    return false;
}

/* Return true if THIS falls short of OFFSET and write the
   bit range fallen short to OUT_FALL_SHORT_BITS. */

bool
bit_range::falls_short_of_p (bit_offset_t offset,
                             bit_range *out_fall_short_bits) const
{
  gcc_assert (!empty_p ());

  if (get_start_bit_offset () < offset)
    {
      /* THIS falls short of OFFSET. */
      bit_offset_t start = get_start_bit_offset ();
      bit_offset_t size = MIN (offset, get_next_bit_offset ()) - start;
      if (size <= 0)
        /* If this has happened, some kind of overflow has happened in
           our arithmetic.  For now, reject such cases. */
        return false;
      out_fall_short_bits->m_start_bit_offset = start;
      out_fall_short_bits->m_size_in_bits = size;
      return true;
    }
  else
    return false;
}

int
bit_range::cmp (const bit_range &br1, const bit_range &br2)
{
  if (int start_cmp = wi::cmps (br1.m_start_bit_offset,
                                br2.m_start_bit_offset))
    return start_cmp;

  return wi::cmpu (br1.m_size_in_bits, br2.m_size_in_bits);
}

/* Offset this range by OFFSET. */

bit_range
bit_range::operator- (bit_offset_t offset) const
{
  return bit_range (m_start_bit_offset - offset, m_size_in_bits);
}

/* If MASK is a contiguous range of set bits, write them
   to *OUT and return true.
   Otherwise return false. */
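
/* For example (illustrative): a MASK of 0x3c (binary 111100) is a single
   run of four set bits starting at bit 2, so *OUT is written with
   bit_range (2, 4) and true is returned; a MASK of 0x5 (binary 101)
   contains two separate runs of set bits, so false is returned, as it is
   for a MASK of zero.  */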

bool
bit_range::from_mask (unsigned HOST_WIDE_INT mask, bit_range *out)
{
  unsigned iter_bit_idx = 0;
  unsigned HOST_WIDE_INT iter_bit_mask = 1;

  /* Find the first contiguous run of set bits in MASK. */

  /* Find first set bit in MASK. */
  while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
    {
      if (mask & iter_bit_mask)
        break;
      iter_bit_idx++;
      iter_bit_mask <<= 1;
    }
  if (iter_bit_idx == HOST_BITS_PER_WIDE_INT)
    /* MASK is zero. */
    return false;

  unsigned first_set_iter_bit_idx = iter_bit_idx;
  unsigned num_set_bits = 1;
  iter_bit_idx++;
  iter_bit_mask <<= 1;

  /* Find next unset bit in MASK. */
  while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
    {
      if (!(mask & iter_bit_mask))
        break;
      num_set_bits++;
      iter_bit_idx++;
      iter_bit_mask <<= 1;
    }
  if (iter_bit_idx == HOST_BITS_PER_WIDE_INT)
    {
      *out = bit_range (first_set_iter_bit_idx, num_set_bits);
      return true;
    }

  /* We now have the first contiguous run of set bits in MASK.
     Fail if any other bits are set. */
  while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
    {
      if (mask & iter_bit_mask)
        return false;
      iter_bit_idx++;
      iter_bit_mask <<= 1;
    }

  *out = bit_range (first_set_iter_bit_idx, num_set_bits);
  return true;
}

/* Attempt to convert this bit_range to a byte_range.
   Return true if it is possible, writing the result to *OUT.
   Otherwise return false. */
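
/* For example (illustrative): on a target with BITS_PER_UNIT == 8,
   bit_range (16, 32) converts to the byte_range starting at byte 2 with
   size 4, whereas bit_range (3, 8) is not byte-aligned and yields
   false.  */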

bool
bit_range::as_byte_range (byte_range *out) const
{
  if (m_start_bit_offset % BITS_PER_UNIT == 0
      && m_size_in_bits % BITS_PER_UNIT == 0)
    {
      out->m_start_byte_offset = m_start_bit_offset / BITS_PER_UNIT;
      out->m_size_in_bytes = m_size_in_bits / BITS_PER_UNIT;
      return true;
    }
  return false;
}

/* Dump this object to PP. */

void
byte_range::dump_to_pp (pretty_printer *pp) const
{
  if (m_size_in_bytes == 0)
    {
      pp_string (pp, "empty");
    }
  else if (m_size_in_bytes == 1)
    {
      pp_string (pp, "byte ");
      pp_wide_int (pp, m_start_byte_offset, SIGNED);
    }
  else
    {
      pp_string (pp, "bytes ");
      pp_wide_int (pp, m_start_byte_offset, SIGNED);
      pp_string (pp, "-");
      pp_wide_int (pp, get_last_byte_offset (), SIGNED);
    }
}

/* Dump this object to stderr. */

DEBUG_FUNCTION void
byte_range::dump () const
{
  pretty_printer pp;
  pp.buffer->stream = stderr;
  dump_to_pp (&pp);
  pp_newline (&pp);
  pp_flush (&pp);
}

/* Generate a JSON value for this byte_range.
   This is intended for debugging the analyzer rather
   than serialization. */

json::object *
byte_range::to_json () const
{
  json::object *obj = new json::object ();
  obj->set ("start_byte_offset",
            byte_offset_to_json (m_start_byte_offset));
  obj->set ("size_in_bytes",
            byte_offset_to_json (m_size_in_bytes));
  return obj;
}

/* If OTHER is a subset of this, return true and write
   to *OUT the relative range of OTHER within this.
   Otherwise return false. */

bool
byte_range::contains_p (const byte_range &other, byte_range *out) const
{
  if (contains_p (other.get_start_byte_offset ())
      && contains_p (other.get_last_byte_offset ()))
    {
      out->m_start_byte_offset = other.m_start_byte_offset - m_start_byte_offset;
      out->m_size_in_bytes = other.m_size_in_bytes;
      return true;
    }
  else
    return false;
}

/* qsort comparator for byte ranges. */
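
/* For example (illustrative): this orders {bytes 0-3} before {bytes 0-7}
   (same start, so the smaller size comes first), and {bytes 0-7} before
   {bytes 4-7} (the smaller start offset comes first).  */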

int
byte_range::cmp (const byte_range &br1, const byte_range &br2)
{
  /* Order first by offset. */
  if (int start_cmp = wi::cmps (br1.m_start_byte_offset,
                                br2.m_start_byte_offset))
    return start_cmp;

  /* ...then by size. */
  return wi::cmpu (br1.m_size_in_bytes, br2.m_size_in_bytes);
}

/* class concrete_binding : public binding_key. */

/* Implementation of binding_key::dump_to_pp vfunc for concrete_binding. */

void
concrete_binding::dump_to_pp (pretty_printer *pp, bool) const
{
  m_bit_range.dump_to_pp (pp);
}

/* Return true if this binding overlaps with OTHER. */

bool
concrete_binding::overlaps_p (const concrete_binding &other) const
{
  if (get_start_bit_offset () < other.get_next_bit_offset ()
      && get_next_bit_offset () > other.get_start_bit_offset ())
    return true;
  return false;
}

/* If this is expressible as a concrete byte range, return true
   and write it to *OUT.  Otherwise return false. */

bool
concrete_binding::get_byte_range (byte_range *out) const
{
  return m_bit_range.as_byte_range (out);
}

/* Comparator for use by vec<const concrete_binding *>::qsort. */

int
concrete_binding::cmp_ptr_ptr (const void *p1, const void *p2)
{
  const concrete_binding *b1 = *(const concrete_binding * const *)p1;
  const concrete_binding *b2 = *(const concrete_binding * const *)p2;

  return bit_range::cmp (b1->m_bit_range, b2->m_bit_range);
}

/* class symbolic_binding : public binding_key. */

void
symbolic_binding::dump_to_pp (pretty_printer *pp, bool simple) const
{
  //binding_key::dump_to_pp (pp, simple);
  pp_string (pp, "region: ");
  m_region->dump_to_pp (pp, simple);
}

/* Comparator for use by vec<const symbolic_binding *>::qsort. */

int
symbolic_binding::cmp_ptr_ptr (const void *p1, const void *p2)
{
  const symbolic_binding *b1 = *(const symbolic_binding * const *)p1;
  const symbolic_binding *b2 = *(const symbolic_binding * const *)p2;

  return region::cmp_ids (b1->get_region (), b2->get_region ());
}

/* The store is oblivious to the types of the svalues bound within
   it: any type can get bound at any location.
   Simplify any casts before binding.

   For example, if we have:
     struct big { int ia[1024]; };
     struct big src, dst;
     memcpy (&dst, &src, sizeof (struct big));
   this reaches us in gimple form as:
     MEM <unsigned char[4096]> [(char * {ref-all})&dst]
       = MEM <unsigned char[4096]> [(char * {ref-all})&src];
   Using cast_region when handling the MEM_REF would give us:
     INIT_VAL(CAST_REG(unsigned char[4096], src))
   as rhs_sval, but we can fold that into a cast svalue:
     CAST(unsigned char[4096], INIT_VAL(src))
   We can discard that cast from the svalue when binding it in
   the store for "dst", and simply store:
     cluster for: dst
       key:   {kind: direct, start: 0, size: 32768, next: 32768}
       value: ‘struct big’ {INIT_VAL(src)}.  */

static const svalue *
simplify_for_binding (const svalue *sval)
{
  if (const svalue *cast_sval = sval->maybe_undo_cast ())
    sval = cast_sval;
  return sval;
}

/* class binding_map. */

/* binding_map's copy ctor. */

binding_map::binding_map (const binding_map &other)
: m_map (other.m_map)
{
}

/* binding_map's assignment operator. */

binding_map&
binding_map::operator=(const binding_map &other)
{
  /* For now, assume we only ever copy to an empty cluster. */
  gcc_assert (m_map.elements () == 0);
  for (map_t::iterator iter = other.m_map.begin (); iter != other.m_map.end ();
       ++iter)
    {
      const binding_key *key = (*iter).first;
      const svalue *sval = (*iter).second;
      m_map.put (key, sval);
    }
  return *this;
}

/* binding_map's equality operator. */

bool
binding_map::operator== (const binding_map &other) const
{
  if (m_map.elements () != other.m_map.elements ())
    return false;

  for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
    {
      const binding_key *key = (*iter).first;
      const svalue *sval = (*iter).second;
      const svalue **other_slot
        = const_cast <map_t &> (other.m_map).get (key);
      if (other_slot == NULL)
        return false;
      if (sval != *other_slot)
        return false;
    }
  gcc_checking_assert (hash () == other.hash ());
  return true;
}

/* Generate a hash value for this binding_map. */

hashval_t
binding_map::hash () const
{
  hashval_t result = 0;
  for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
    {
      /* Use a new hasher for each key to avoid depending on the ordering
         of keys when accumulating the result. */
      inchash::hash hstate;
      hstate.add_ptr ((*iter).first);
      hstate.add_ptr ((*iter).second);
      result ^= hstate.end ();
    }
  return result;
}

/* Dump a representation of this binding_map to PP.
   SIMPLE controls how values and regions are to be printed.
   If MULTILINE, then split the dump over multiple lines and
   use whitespace for readability, otherwise put all on one line. */

void
binding_map::dump_to_pp (pretty_printer *pp, bool simple,
                         bool multiline) const
{
  auto_vec <const binding_key *> binding_keys;
  for (map_t::iterator iter = m_map.begin ();
       iter != m_map.end (); ++iter)
    {
      const binding_key *key = (*iter).first;
      binding_keys.safe_push (key);
    }
  binding_keys.qsort (binding_key::cmp_ptrs);

  const binding_key *key;
  unsigned i;
  FOR_EACH_VEC_ELT (binding_keys, i, key)
    {
      const svalue *value = *const_cast <map_t &> (m_map).get (key);
      if (multiline)
        {
          pp_string (pp, "    key:   {");
          key->dump_to_pp (pp, simple);
          pp_string (pp, "}");
          pp_newline (pp);
          pp_string (pp, "    value: ");
          if (tree t = value->get_type ())
            dump_quoted_tree (pp, t);
          pp_string (pp, " {");
          value->dump_to_pp (pp, simple);
          pp_string (pp, "}");
          pp_newline (pp);
        }
      else
        {
          if (i > 0)
            pp_string (pp, ", ");
          pp_string (pp, "binding key: {");
          key->dump_to_pp (pp, simple);
          pp_string (pp, "}, value: {");
          value->dump_to_pp (pp, simple);
          pp_string (pp, "}");
        }
    }
}

/* Dump a multiline representation of this binding_map to stderr. */

DEBUG_FUNCTION void
binding_map::dump (bool simple) const
{
  pretty_printer pp;
  pp_format_decoder (&pp) = default_tree_printer;
  pp_show_color (&pp) = pp_show_color (global_dc->printer);
  pp.buffer->stream = stderr;
  dump_to_pp (&pp, simple, true);
  pp_newline (&pp);
  pp_flush (&pp);
}

/* Return a new json::object of the form
   {KEY_DESC : SVALUE_DESC,
    ...for the various key/value pairs in this binding_map}. */

json::object *
binding_map::to_json () const
{
  json::object *map_obj = new json::object ();

  auto_vec <const binding_key *> binding_keys;
  for (map_t::iterator iter = m_map.begin ();
       iter != m_map.end (); ++iter)
    {
      const binding_key *key = (*iter).first;
      binding_keys.safe_push (key);
    }
  binding_keys.qsort (binding_key::cmp_ptrs);

  const binding_key *key;
  unsigned i;
  FOR_EACH_VEC_ELT (binding_keys, i, key)
    {
      const svalue *value = *const_cast <map_t &> (m_map).get (key);
      label_text key_desc = key->get_desc ();
      map_obj->set (key_desc.get (), value->to_json ());
    }

  return map_obj;
}

/* Comparator for imposing an order on binding_maps. */

int
binding_map::cmp (const binding_map &map1, const binding_map &map2)
{
  if (int count_cmp = map1.elements () - map2.elements ())
    return count_cmp;

  auto_vec <const binding_key *> keys1 (map1.elements ());
  for (map_t::iterator iter = map1.begin ();
       iter != map1.end (); ++iter)
    keys1.quick_push ((*iter).first);
  keys1.qsort (binding_key::cmp_ptrs);

  auto_vec <const binding_key *> keys2 (map2.elements ());
  for (map_t::iterator iter = map2.begin ();
       iter != map2.end (); ++iter)
    keys2.quick_push ((*iter).first);
  keys2.qsort (binding_key::cmp_ptrs);

  for (size_t i = 0; i < keys1.length (); i++)
    {
      const binding_key *k1 = keys1[i];
      const binding_key *k2 = keys2[i];
      if (int key_cmp = binding_key::cmp (k1, k2))
        return key_cmp;
      gcc_assert (k1 == k2);
      if (int sval_cmp = svalue::cmp_ptr (map1.get (k1), map2.get (k2)))
        return sval_cmp;
    }

  return 0;
}

/* Get the child region of PARENT_REG based upon INDEX within a
   CONSTRUCTOR. */

static const region *
get_subregion_within_ctor (const region *parent_reg, tree index,
                           region_model_manager *mgr)
{
  switch (TREE_CODE (index))
    {
    default:
      gcc_unreachable ();
    case INTEGER_CST:
      {
        const svalue *index_sval
          = mgr->get_or_create_constant_svalue (index);
        return mgr->get_element_region (parent_reg,
                                        TREE_TYPE (parent_reg->get_type ()),
                                        index_sval);
      }
      break;
    case FIELD_DECL:
      return mgr->get_field_region (parent_reg, index);
    }
}

/* Get the svalue for VAL, a non-CONSTRUCTOR value within a CONSTRUCTOR. */

static const svalue *
get_svalue_for_ctor_val (tree val, region_model_manager *mgr)
{
  /* Reuse the get_rvalue logic from region_model. */
  region_model m (mgr);
  return m.get_rvalue (path_var (val, 0), NULL);
}

/* Bind values from CONSTRUCTOR to this map, relative to
   PARENT_REG's relationship to its base region.
   Return true if successful, false if there was a problem (e.g. due
   to hitting a complexity limit). */
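
/* For example (illustrative): a GNU C designated range initializer
     int arr[8] = { [2 ... 5] = 42 };
   gives a CONSTRUCTOR whose index for that entry is a RANGE_EXPR from 2
   to 5, handled via apply_ctor_val_to_range below, whereas an ordinary
   struct or array initializer visits one (index, value) pair at a time
   via apply_ctor_pair_to_child_region.  */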

bool
binding_map::apply_ctor_to_region (const region *parent_reg, tree ctor,
                                   region_model_manager *mgr)
{
  gcc_assert (parent_reg);
  gcc_assert (TREE_CODE (ctor) == CONSTRUCTOR);

  unsigned ix;
  tree index;
  tree val;
  tree parent_type = parent_reg->get_type ();
  tree field;
  if (TREE_CODE (parent_type) == RECORD_TYPE)
    field = TYPE_FIELDS (parent_type);
  else
    field = NULL_TREE;
  FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor), ix, index, val)
    {
      if (!index)
        {
          /* If index is NULL, then iterate through the fields for
             a RECORD_TYPE, or use an INTEGER_CST otherwise.
             Compare with similar logic in output_constructor. */
          if (field)
            {
              index = field;
              field = DECL_CHAIN (field);
            }
          else
            index = build_int_cst (integer_type_node, ix);
        }
      else if (TREE_CODE (index) == RANGE_EXPR)
        {
          tree min_index = TREE_OPERAND (index, 0);
          tree max_index = TREE_OPERAND (index, 1);
          if (min_index == max_index)
            {
              if (!apply_ctor_pair_to_child_region (parent_reg, mgr,
                                                    min_index, val))
                return false;
            }
          else
            {
              if (!apply_ctor_val_to_range (parent_reg, mgr,
                                            min_index, max_index, val))
                return false;
            }
          continue;
        }
      if (!apply_ctor_pair_to_child_region (parent_reg, mgr, index, val))
        return false;
    }
  return true;
}

/* Bind the value VAL into the range of elements within PARENT_REG
   from MIN_INDEX to MAX_INDEX (including endpoints).
   For use in handling RANGE_EXPR within a CONSTRUCTOR.
   Return true if successful, false if there was a problem (e.g. due
   to hitting a complexity limit). */

bool
binding_map::apply_ctor_val_to_range (const region *parent_reg,
                                      region_model_manager *mgr,
                                      tree min_index, tree max_index,
                                      tree val)
{
  gcc_assert (TREE_CODE (min_index) == INTEGER_CST);
  gcc_assert (TREE_CODE (max_index) == INTEGER_CST);

  /* Generate a binding key for the range. */
  const region *min_element
    = get_subregion_within_ctor (parent_reg, min_index, mgr);
  const region *max_element
    = get_subregion_within_ctor (parent_reg, max_index, mgr);
  region_offset min_offset = min_element->get_offset (mgr);
  if (min_offset.symbolic_p ())
    return false;
  bit_offset_t start_bit_offset = min_offset.get_bit_offset ();
  store_manager *smgr = mgr->get_store_manager ();
  if (max_element->empty_p ())
    return false;
  const binding_key *max_element_key = binding_key::make (smgr, max_element);
  if (max_element_key->symbolic_p ())
    return false;
  const concrete_binding *max_element_ckey
    = max_element_key->dyn_cast_concrete_binding ();
  bit_size_t range_size_in_bits
    = max_element_ckey->get_next_bit_offset () - start_bit_offset;
  const concrete_binding *range_key
    = smgr->get_concrete_binding (start_bit_offset, range_size_in_bits);
  if (range_key->symbolic_p ())
    return false;

  /* Get the value. */
  if (TREE_CODE (val) == CONSTRUCTOR)
    return false;
  const svalue *sval = get_svalue_for_ctor_val (val, mgr);

  /* Bind the value to the range. */
  put (range_key, sval);
  return true;
}

/* Bind the value VAL into INDEX within PARENT_REG.
   For use in handling a pair of entries within a CONSTRUCTOR.
   Return true if successful, false if there was a problem (e.g. due
   to hitting a complexity limit). */

bool
binding_map::apply_ctor_pair_to_child_region (const region *parent_reg,
                                              region_model_manager *mgr,
                                              tree index, tree val)
{
  const region *child_reg
    = get_subregion_within_ctor (parent_reg, index, mgr);
  if (TREE_CODE (val) == CONSTRUCTOR)
    return apply_ctor_to_region (child_reg, val, mgr);
  else
    {
      const svalue *sval = get_svalue_for_ctor_val (val, mgr);
      if (child_reg->empty_p ())
        return false;
      const binding_key *k
        = binding_key::make (mgr->get_store_manager (), child_reg);
      /* Handle the case where we have an unknown size for child_reg
         (e.g. due to it being a trailing field with incomplete array
         type). */
      if (!k->concrete_p ())
        {
          /* Assume that sval has a well-defined size for this case. */
          tree sval_type = sval->get_type ();
          gcc_assert (sval_type);
          HOST_WIDE_INT sval_byte_size = int_size_in_bytes (sval_type);
          gcc_assert (sval_byte_size != -1);
          bit_size_t sval_bit_size = sval_byte_size * BITS_PER_UNIT;
          /* Get offset of child relative to base region. */
          region_offset child_base_offset = child_reg->get_offset (mgr);
          if (child_base_offset.symbolic_p ())
            return false;
          /* Convert to an offset relative to the parent region. */
          region_offset parent_base_offset = parent_reg->get_offset (mgr);
          gcc_assert (!parent_base_offset.symbolic_p ());
          bit_offset_t child_parent_offset
            = (child_base_offset.get_bit_offset ()
               - parent_base_offset.get_bit_offset ());
          /* Create a concrete key for the child within the parent. */
          k = mgr->get_store_manager ()->get_concrete_binding
            (child_parent_offset, sval_bit_size);
        }
      gcc_assert (k->concrete_p ());
      put (k, sval);
      return true;
    }
}

/* Populate OUT with all bindings within this map that overlap KEY. */

void
binding_map::get_overlapping_bindings (const binding_key *key,
                                       auto_vec<const binding_key *> *out)
{
  for (auto iter : *this)
    {
      const binding_key *iter_key = iter.first;
      if (const concrete_binding *ckey
            = key->dyn_cast_concrete_binding ())
        {
          if (const concrete_binding *iter_ckey
                = iter_key->dyn_cast_concrete_binding ())
            {
              if (ckey->overlaps_p (*iter_ckey))
                out->safe_push (iter_key);
            }
          else
            {
              /* Assume overlap. */
              out->safe_push (iter_key);
            }
        }
      else
        {
          /* Assume overlap. */
          out->safe_push (iter_key);
        }
    }
}

/* Remove, truncate, and/or split any bindings within this map that
   overlap DROP_KEY.

   For example, if we have:

     +------------------------------------+
     |             old binding            |
     +------------------------------------+

   which is to be overwritten with:

     .......+----------------------+.......
     .......|      new binding     |.......
     .......+----------------------+.......

   this function "cuts a hole" out of the old binding:

     +------+......................+------+
     |prefix| hole for new binding |suffix|
     +------+......................+------+

   into which the new binding can be added without
   overlapping the prefix or suffix.

   The prefix and suffix (if added) will be bound to the pertinent
   parts of the value of the old binding.

   For example, given:
     struct s5
     {
       char arr[8];
     };
     void test_5 (struct s5 *p)
     {
       struct s5 f = *p;
       f.arr[3] = 42;
     }
   then after the "f = *p;" we have:
     cluster for: f: INIT_VAL((*INIT_VAL(p_33(D))))
   and at the "f.arr[3] = 42;" we remove the bindings overlapping
   "f.arr[3]", replacing it with a prefix (bytes 0-2) and suffix (bytes 4-7)
   giving:
     cluster for: f
       key:   {bytes 0-2}
       value: {BITS_WITHIN(bytes 0-2, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
       key:   {bytes 4-7}
       value: {BITS_WITHIN(bytes 4-7, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
   punching a hole into which the new value can be written at byte 3:
     cluster for: f
       key:   {bytes 0-2}
       value: {BITS_WITHIN(bytes 0-2, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
       key:   {byte 3}
       value: 'char' {(char)42}
       key:   {bytes 4-7}
       value: {BITS_WITHIN(bytes 4-7, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}

   If UNCERTAINTY is non-NULL, use it to record any svalues that
   were removed, as being maybe-bound.

   If MAYBE_LIVE_VALUES is non-NULL, then use it to record any svalues that
   were removed as being maybe-live.

   If ALWAYS_OVERLAP, then assume that DROP_KEY can overlap anything
   in the map, due to one or both of the underlying clusters being
   symbolic (but not the same symbolic region).  Hence even if DROP_KEY is a
   concrete binding it could actually be referring to the same memory as
   distinct concrete bindings in the map.  Remove all bindings, but
   register any svalues with *UNCERTAINTY. */

void
binding_map::remove_overlapping_bindings (store_manager *mgr,
                                          const binding_key *drop_key,
                                          uncertainty_t *uncertainty,
                                          svalue_set *maybe_live_values,
                                          bool always_overlap)
{
  /* Get the bindings of interest within this map. */
  auto_vec<const binding_key *> bindings;
  if (always_overlap)
    for (auto iter : *this)
      bindings.safe_push (iter.first); /* Add all bindings. */
  else
    /* Just add overlapping bindings. */
    get_overlapping_bindings (drop_key, &bindings);

  unsigned i;
  const binding_key *iter_binding;
  FOR_EACH_VEC_ELT (bindings, i, iter_binding)
    {
      /* Record any svalues that were removed to *UNCERTAINTY as being
         maybe-bound, provided at least some part of the binding is symbolic.

         Specifically, if at least one of the bindings is symbolic, or we
         have ALWAYS_OVERLAP for the case where we have possibly aliasing
         regions, then we don't know that the svalue has been overwritten,
         and should record that to *UNCERTAINTY.

         However, if we have concrete keys accessing within the same symbolic
         region, then we *know* that the symbolic region has been overwritten,
         so we don't record it to *UNCERTAINTY, as this could be a genuine
         leak. */
      const svalue *old_sval = get (iter_binding);
      if (uncertainty
          && (drop_key->symbolic_p ()
              || iter_binding->symbolic_p ()
              || always_overlap))
        uncertainty->on_maybe_bound_sval (old_sval);

      /* Record any svalues that were removed to *MAYBE_LIVE_VALUES as being
         maybe-live. */
      if (maybe_live_values)
        maybe_live_values->add (old_sval);

      /* Begin by removing the old binding. */
      m_map.remove (iter_binding);

      /* Don't attempt to handle prefixes/suffixes for the
         "always_overlap" case; everything's being removed. */
      if (always_overlap)
        continue;

      /* Now potentially add the prefix and suffix. */
      if (const concrete_binding *drop_ckey
            = drop_key->dyn_cast_concrete_binding ())
        if (const concrete_binding *iter_ckey
              = iter_binding->dyn_cast_concrete_binding ())
          {
            gcc_assert (drop_ckey->overlaps_p (*iter_ckey));

            const bit_range &drop_bits = drop_ckey->get_bit_range ();
            const bit_range &iter_bits = iter_ckey->get_bit_range ();

            if (iter_bits.get_start_bit_offset ()
                  < drop_bits.get_start_bit_offset ())
              {
                /* We have a truncated prefix. */
                bit_range prefix_bits (iter_bits.get_start_bit_offset (),
                                       (drop_bits.get_start_bit_offset ()
                                        - iter_bits.get_start_bit_offset ()));
                const concrete_binding *prefix_key
                  = mgr->get_concrete_binding (prefix_bits);
                bit_range rel_prefix (0, prefix_bits.m_size_in_bits);
                const svalue *prefix_sval
                  = old_sval->extract_bit_range (NULL_TREE,
                                                 rel_prefix,
                                                 mgr->get_svalue_manager ());
                m_map.put (prefix_key, prefix_sval);
              }

            if (iter_bits.get_next_bit_offset ()
                  > drop_bits.get_next_bit_offset ())
              {
                /* We have a truncated suffix. */
                bit_range suffix_bits (drop_bits.get_next_bit_offset (),
                                       (iter_bits.get_next_bit_offset ()
                                        - drop_bits.get_next_bit_offset ()));
                const concrete_binding *suffix_key
                  = mgr->get_concrete_binding (suffix_bits);
                bit_range rel_suffix (drop_bits.get_next_bit_offset ()
                                        - iter_bits.get_start_bit_offset (),
                                      suffix_bits.m_size_in_bits);
                const svalue *suffix_sval
                  = old_sval->extract_bit_range (NULL_TREE,
                                                 rel_suffix,
                                                 mgr->get_svalue_manager ());
                m_map.put (suffix_key, suffix_sval);
              }
          }
    }
}

/* class binding_cluster. */

binding_cluster::binding_cluster (const region *base_region)
: m_base_region (base_region), m_map (),
  m_escaped (false), m_touched (false)
{
}

/* binding_cluster's copy ctor. */

binding_cluster::binding_cluster (const binding_cluster &other)
: m_base_region (other.m_base_region), m_map (other.m_map),
  m_escaped (other.m_escaped), m_touched (other.m_touched)
{
}

/* binding_cluster's assignment operator. */

binding_cluster&
binding_cluster::operator= (const binding_cluster &other)
{
  gcc_assert (m_base_region == other.m_base_region);
  m_map = other.m_map;
  m_escaped = other.m_escaped;
  m_touched = other.m_touched;
  return *this;
}

/* binding_cluster's equality operator. */

bool
binding_cluster::operator== (const binding_cluster &other) const
{
  if (m_map != other.m_map)
    return false;

  if (m_base_region != other.m_base_region)
    return false;

  if (m_escaped != other.m_escaped)
    return false;

  if (m_touched != other.m_touched)
    return false;

  gcc_checking_assert (hash () == other.hash ());

  return true;
}

/* Generate a hash value for this binding_cluster. */

hashval_t
binding_cluster::hash () const
{
  return m_map.hash ();
}

/* Return true if this binding_cluster is symbolic,
   i.e. its base region is symbolic. */

bool
binding_cluster::symbolic_p () const
{
  return m_base_region->get_kind () == RK_SYMBOLIC;
}

/* Dump a representation of this binding_cluster to PP.
   SIMPLE controls how values and regions are to be printed.
   If MULTILINE, then split the dump over multiple lines and
   use whitespace for readability, otherwise put all on one line. */

void
binding_cluster::dump_to_pp (pretty_printer *pp, bool simple,
                             bool multiline) const
{
  if (m_escaped)
    {
      if (multiline)
        {
          pp_string (pp, "    ESCAPED");
          pp_newline (pp);
        }
      else
        pp_string (pp, "(ESCAPED)");
    }
  if (m_touched)
    {
      if (multiline)
        {
          pp_string (pp, "    TOUCHED");
          pp_newline (pp);
        }
      else
        pp_string (pp, "(TOUCHED)");
    }

  m_map.dump_to_pp (pp, simple, multiline);
}

/* Dump a multiline representation of this binding_cluster to stderr. */

DEBUG_FUNCTION void
binding_cluster::dump (bool simple) const
{
  pretty_printer pp;
  pp_format_decoder (&pp) = default_tree_printer;
  pp_show_color (&pp) = pp_show_color (global_dc->printer);
  pp.buffer->stream = stderr;
  pp_string (&pp, "  cluster for: ");
  m_base_region->dump_to_pp (&pp, simple);
  pp_string (&pp, ": ");
  pp_newline (&pp);
  dump_to_pp (&pp, simple, true);
  pp_newline (&pp);
  pp_flush (&pp);
}

/* Assert that this object is valid. */

void
binding_cluster::validate () const
{
  int num_symbolic = 0;
  int num_concrete = 0;
  for (auto iter : m_map)
    {
      if (iter.first->symbolic_p ())
        num_symbolic++;
      else
        num_concrete++;
    }
  /* We shouldn't have more than one symbolic key per cluster
     (or one would have clobbered the other). */
  gcc_assert (num_symbolic < 2);
  /* We can't have both concrete and symbolic keys. */
  gcc_assert (num_concrete == 0 || num_symbolic == 0);
}

/* Return a new json::object of the form
   {"escaped": true/false,
    "touched": true/false,
    "map" : object for the binding_map}. */

json::object *
binding_cluster::to_json () const
{
  json::object *cluster_obj = new json::object ();

  cluster_obj->set ("escaped", new json::literal (m_escaped));
  cluster_obj->set ("touched", new json::literal (m_touched));
  cluster_obj->set ("map", m_map.to_json ());

  return cluster_obj;
}

/* Add a binding of SVAL to REG, unpacking SVAL if it is a
   compound_sval. */

void
binding_cluster::bind (store_manager *mgr,
                       const region *reg, const svalue *sval)
{
  if (const compound_svalue *compound_sval
        = sval->dyn_cast_compound_svalue ())
    {
      bind_compound_sval (mgr, reg, compound_sval);
      return;
    }

  if (reg->empty_p ())
    return;
  const binding_key *binding = binding_key::make (mgr, reg);
  bind_key (binding, sval);
}

/* Bind SVAL to KEY.
   Unpacking of compound_svalues should already have been done by the
   time this is called. */

void
binding_cluster::bind_key (const binding_key *key, const svalue *sval)
{
  gcc_assert (sval->get_kind () != SK_COMPOUND);

  m_map.put (key, sval);
  if (key->symbolic_p ())
    m_touched = true;
}

/* Subroutine of binding_cluster::bind.
   Unpack compound_svals when binding them, so that we bind them
   element-wise. */

void
binding_cluster::bind_compound_sval (store_manager *mgr,
                                     const region *reg,
                                     const compound_svalue *compound_sval)
{
  region_offset reg_offset
    = reg->get_offset (mgr->get_svalue_manager ());
  if (reg_offset.symbolic_p ())
    {
      m_touched = true;
      clobber_region (mgr, reg);
      return;
    }

  for (map_t::iterator iter = compound_sval->begin ();
       iter != compound_sval->end (); ++iter)
    {
      const binding_key *iter_key = (*iter).first;
      const svalue *iter_sval = (*iter).second;

      if (const concrete_binding *concrete_key
            = iter_key->dyn_cast_concrete_binding ())
        {
          bit_offset_t effective_start
            = (concrete_key->get_start_bit_offset ()
               + reg_offset.get_bit_offset ());
          const concrete_binding *effective_concrete_key
            = mgr->get_concrete_binding (effective_start,
                                         concrete_key->get_size_in_bits ());
          bind_key (effective_concrete_key, iter_sval);
        }
      else
        gcc_unreachable ();
    }
}

/* Remove all bindings overlapping REG within this cluster. */

void
binding_cluster::clobber_region (store_manager *mgr, const region *reg)
{
  remove_overlapping_bindings (mgr, reg, NULL, NULL);
}

/* Remove any bindings for REG within this cluster. */

void
binding_cluster::purge_region (store_manager *mgr, const region *reg)
{
  gcc_assert (reg->get_kind () == RK_DECL);
  if (reg->empty_p ())
    return;
  const binding_key *binding
    = binding_key::make (mgr, const_cast<region *> (reg));
  m_map.remove (binding);
}

/* Clobber REG and fill it with repeated copies of SVAL. */
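
/* (Illustrative note: this is the kind of operation used when modeling
   memset-like writes, where REG covers the destination bytes and SVAL is
   the fill value; the region's contents become a single repeated_svalue
   of REG's byte size.) */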
1498 | |
1499 | void |
1500 | binding_cluster::fill_region (store_manager *mgr, |
1501 | const region *reg, |
1502 | const svalue *sval) |
1503 | { |
1504 | clobber_region (mgr, reg); |
1505 | |
1506 | region_model_manager *sval_mgr = mgr->get_svalue_manager (); |
1507 | const svalue *byte_size_sval = reg->get_byte_size_sval (mgr: sval_mgr); |
1508 | const svalue *fill_sval |
1509 | = sval_mgr->get_or_create_repeated_svalue (type: reg->get_type (), |
1510 | outer_size: byte_size_sval, inner_svalue: sval); |
1511 | bind (mgr, reg, sval: fill_sval); |
1512 | } |
1513 | |
1514 | /* Clobber REG within this cluster and fill it with zeroes. */ |
1515 | |
1516 | void |
1517 | binding_cluster::zero_fill_region (store_manager *mgr, const region *reg) |
1518 | { |
1519 | region_model_manager *sval_mgr = mgr->get_svalue_manager (); |
1520 | const svalue *zero_sval = sval_mgr->get_or_create_int_cst (char_type_node, cst: 0); |
1521 | fill_region (mgr, reg, sval: zero_sval); |
1522 | } |
1523 | |
1524 | /* Mark REG_TO_BIND within this cluster as being unknown. |
1525 | |
1526 | Remove any bindings overlapping REG_FOR_OVERLAP. |
1527 | If UNCERTAINTY is non-NULL, use it to record any svalues that |
1528 | had bindings to them removed, as being maybe-bound. |
1529 | If MAYBE_LIVE_VALUES is non-NULL, use it to record any svalues that |
1530 | had bindings to them removed, as being maybe-live. |
1531 | |
1532 | REG_TO_BIND and REG_FOR_OVERLAP are the same for |
1533 | store::mark_region_as_unknown, but are different in |
1534 | store::set_value's alias handling, for handling the case where |
1535 | we have a write to a symbolic REG_FOR_OVERLAP. */ |
1536 | |
1537 | void |
1538 | binding_cluster::mark_region_as_unknown (store_manager *mgr, |
1539 | const region *reg_to_bind, |
1540 | const region *reg_for_overlap, |
1541 | uncertainty_t *uncertainty, |
1542 | svalue_set *maybe_live_values) |
1543 | { |
1544 | if (reg_to_bind->empty_p ()) |
1545 | return; |
1546 | |
1547 | remove_overlapping_bindings (mgr, reg: reg_for_overlap, uncertainty, |
1548 | maybe_live_values); |
1549 | |
1550 | /* Add a default binding to "unknown". */ |
1551 | region_model_manager *sval_mgr = mgr->get_svalue_manager (); |
1552 | const svalue *sval |
1553 | = sval_mgr->get_or_create_unknown_svalue (type: reg_to_bind->get_type ()); |
1554 | bind (mgr, reg: reg_to_bind, sval); |
1555 | } |
1556 | |
1557 | /* Purge state involving SVAL. */ |
1558 | |
1559 | void |
1560 | binding_cluster::purge_state_involving (const svalue *sval, |
1561 | region_model_manager *sval_mgr) |
1562 | { |
1563 | auto_vec<const binding_key *> to_remove; |
1564 | auto_vec<std::pair<const binding_key *, tree> > to_make_unknown; |
1565 | for (auto iter : m_map) |
1566 | { |
1567 | const binding_key *iter_key = iter.first; |
1568 | if (const symbolic_binding *symbolic_key |
1569 | = iter_key->dyn_cast_symbolic_binding ()) |
1570 | { |
1571 | const region *reg = symbolic_key->get_region (); |
1572 | if (reg->involves_p (sval)) |
1573 | to_remove.safe_push (obj: iter_key); |
1574 | } |
1575 | const svalue *iter_sval = iter.second; |
1576 | if (iter_sval->involves_p (other: sval)) |
1577 | to_make_unknown.safe_push (obj: std::make_pair(x&: iter_key, |
1578 | y: iter_sval->get_type ())); |
1579 | } |
1580 | for (auto iter : to_remove) |
1581 | { |
1582 | m_map.remove (k: iter); |
1583 | m_touched = true; |
1584 | } |
1585 | for (auto iter : to_make_unknown) |
1586 | { |
1587 | const svalue *new_sval |
1588 | = sval_mgr->get_or_create_unknown_svalue (type: iter.second); |
1589 | m_map.put (k: iter.first, v: new_sval); |
1590 | } |
1591 | } |
1592 | |
1593 | /* Get any SVAL bound to REG within this cluster via kind KIND, |
1594 | without checking parent regions of REG. */ |
1595 | |
1596 | const svalue * |
1597 | binding_cluster::get_binding (store_manager *mgr, |
1598 | const region *reg) const |
1599 | { |
1600 | if (reg->empty_p ()) |
1601 | return NULL; |
1602 | const binding_key *reg_binding = binding_key::make (mgr, r: reg); |
1603 | const svalue *sval = m_map.get (key: reg_binding); |
1604 | if (sval) |
1605 | { |
1606 | /* If we have a struct with a single field, then the binding of |
1607 | the field will equal that of the struct, and looking up e.g. |
1608 | PARENT_REG.field within: |
1609 | cluster for PARENT_REG: INIT_VAL(OTHER_REG) |
1610 | will erroneously return INIT_VAL(OTHER_REG), rather than |
1611 | SUB_VALUE(INIT_VAL(OTHER_REG), FIELD) == INIT_VAL(OTHER_REG.FIELD). |
1612 | Fix this issue by iterating upwards whilst the bindings are equal, |
1613 | expressing the lookups as subvalues. |
1614 | We have to gather a list of subregion accesses, then walk it |
1615 | in reverse to get the subvalues. */ |
1616 | auto_vec<const region *> regions; |
1617 | while (const region *parent_reg = reg->get_parent_region ()) |
1618 | { |
1619 | const binding_key *parent_reg_binding |
1620 | = binding_key::make (mgr, r: parent_reg); |
1621 | if (parent_reg_binding == reg_binding |
1622 | && sval->get_type () |
1623 | && reg->get_type () |
1624 | && sval->get_type () != reg->get_type ()) |
1625 | { |
1626 | regions.safe_push (obj: reg); |
1627 | reg = parent_reg; |
1628 | } |
1629 | else |
1630 | break; |
1631 | } |
1632 | if (sval->get_type () |
1633 | && reg->get_type () |
1634 | && sval->get_type () == reg->get_type ()) |
1635 | { |
1636 | unsigned i; |
1637 | const region *iter_reg; |
1638 | FOR_EACH_VEC_ELT_REVERSE (regions, i, iter_reg) |
1639 | { |
1640 | region_model_manager *rmm_mgr = mgr->get_svalue_manager (); |
1641 | sval = rmm_mgr->get_or_create_sub_svalue (type: iter_reg->get_type (), |
1642 | parent_svalue: sval, subregion: iter_reg); |
1643 | } |
1644 | } |
1645 | } |
1646 | return sval; |
1647 | } |
1648 | |
1649 | /* Get any SVAL bound to REG within this cluster, |
1650 | either directly for REG, or recursively checking for bindings within |
1651 | parent regions and extracting subvalues if need be. */ |
1652 | |
1653 | const svalue * |
1654 | binding_cluster::get_binding_recursive (store_manager *mgr, |
1655 | const region *reg) const |
1656 | { |
1657 | if (const svalue *sval = get_binding (mgr, reg)) |
1658 | return sval; |
1659 | if (reg != m_base_region) |
1660 | if (const region *parent_reg = reg->get_parent_region ()) |
1661 | if (const svalue *parent_sval |
	  = get_binding_recursive (mgr, parent_reg))
1663 | { |
1664 | /* Extract child svalue from parent svalue. */ |
1665 | region_model_manager *rmm_mgr = mgr->get_svalue_manager (); |
	  return rmm_mgr->get_or_create_sub_svalue (reg->get_type (),
						    parent_sval, reg);
1668 | } |
1669 | return NULL; |
1670 | } |
1671 | |
1672 | /* Get any value bound for REG within this cluster. */ |
1673 | |
1674 | const svalue * |
1675 | binding_cluster::get_any_binding (store_manager *mgr, |
1676 | const region *reg) const |
1677 | { |
1678 | /* Look for a direct binding. */ |
1679 | if (const svalue *direct_sval |
1680 | = get_binding_recursive (mgr, reg)) |
1681 | return direct_sval; |
1682 | |
1683 | /* If we had a write to a cluster of unknown size, we might |
1684 | have a self-binding of the whole base region with an svalue, |
1685 | where the base region is symbolic. |
1686 | Handle such cases by returning sub_svalue instances. */ |
1687 | if (const svalue *cluster_sval = maybe_get_simple_value (mgr)) |
1688 | { |
1689 | /* Extract child svalue from parent svalue. */ |
1690 | region_model_manager *rmm_mgr = mgr->get_svalue_manager (); |
      return rmm_mgr->get_or_create_sub_svalue (reg->get_type (),
						cluster_sval, reg);
1693 | } |
1694 | |
1695 | /* If this cluster has been touched by a symbolic write, then the content |
1696 | of any subregion not currently specifically bound is "UNKNOWN". */ |
1697 | if (m_touched) |
1698 | { |
1699 | region_model_manager *rmm_mgr = mgr->get_svalue_manager (); |
      return rmm_mgr->get_or_create_unknown_svalue (reg->get_type ());
1701 | } |
1702 | |
1703 | /* Alternatively, if this is a symbolic read and the cluster has any bindings, |
1704 | then we don't know if we're reading those values or not, so the result |
1705 | is also "UNKNOWN". */ |
  if (reg->get_offset (mgr->get_svalue_manager ()).symbolic_p ()
      && m_map.elements () > 0)
    {
      region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
      return rmm_mgr->get_or_create_unknown_svalue (reg->get_type ());
1711 | } |
1712 | |
1713 | if (const svalue *compound_sval = maybe_get_compound_binding (mgr, reg)) |
1714 | return compound_sval; |
1715 | |
1716 | /* Otherwise, the initial value, or uninitialized. */ |
1717 | return NULL; |
1718 | } |
1719 | |
1720 | /* Attempt to get a compound_svalue for the bindings within the cluster |
1721 | affecting REG (which could be the base region itself). |
1722 | |
   Create a compound_svalue with the subset of bindings that affect REG,
1724 | offsetting them so that the offsets are relative to the start of REG |
1725 | within the cluster. |
1726 | |
1727 | For example, REG could be one element within an array of structs. |
1728 | |
1729 | Return the resulting compound_svalue, or NULL if there's a problem. */ |
1730 | |
1731 | const svalue * |
1732 | binding_cluster::maybe_get_compound_binding (store_manager *mgr, |
1733 | const region *reg) const |
1734 | { |
1735 | region_offset cluster_offset |
    = m_base_region->get_offset (mgr->get_svalue_manager ());
  if (cluster_offset.symbolic_p ())
    return NULL;
  region_offset reg_offset = reg->get_offset (mgr->get_svalue_manager ());
1740 | if (reg_offset.symbolic_p ()) |
1741 | return NULL; |
1742 | |
1743 | if (reg->empty_p ()) |
1744 | return NULL; |
1745 | |
1746 | region_model_manager *sval_mgr = mgr->get_svalue_manager (); |
1747 | |
  /* We will build the result map in two parts:
     (a) result_map, holding the concrete keys from this cluster,

     (b) default_map, holding the initial values for the region
     (e.g. uninitialized, initializer values, or zero), unless this
     cluster has been touched.

     We will populate (a), and as we do, clobber (b), trimming and
     splitting its bindings as necessary.
     Finally, we will merge (b) into (a), giving a concrete map
     that merges both the initial values and the bound values from
     the binding_cluster.
     Doing it this way reduces N for the O(N^2) intersection-finding;
     perhaps we should have a spatially-organized data structure for
     concrete keys, though. */
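  /* For example (a hypothetical sketch): if REG occupies bits 32-95 of
     the cluster, default_map starts out as
       {bits 0-63 of REG: INIT_VAL(REG)}
     and a cluster binding of bits 64-79 lands in result_map as
       {bits 32-47 of REG: SVAL},
     splitting the default binding into bits 0-31 and bits 48-63. */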
1763 | |
1764 | binding_map result_map; |
1765 | binding_map default_map; |
1766 | |
1767 | /* Set up default values in default_map. */ |
1768 | const svalue *default_sval; |
1769 | if (m_touched) |
    default_sval = sval_mgr->get_or_create_unknown_svalue (reg->get_type ());
  else
    default_sval = sval_mgr->get_or_create_initial_value (reg);
  const binding_key *default_key = binding_key::make (mgr, reg);

  /* Express the bit-range of the default key for REG relative to REG,
     rather than to the base region. */
  const concrete_binding *concrete_default_key
    = default_key->dyn_cast_concrete_binding ();
  if (!concrete_default_key)
    return nullptr;
  const concrete_binding *default_key_relative_to_reg
    = mgr->get_concrete_binding (0, concrete_default_key->get_size_in_bits ());
  default_map.put (default_key_relative_to_reg, default_sval);
1784 | |
1785 | for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter) |
1786 | { |
1787 | const binding_key *key = (*iter).first; |
1788 | const svalue *sval = (*iter).second; |
1789 | |
1790 | if (const concrete_binding *concrete_key |
1791 | = key->dyn_cast_concrete_binding ()) |
1792 | { |
1793 | const bit_range &bound_range = concrete_key->get_bit_range (); |
1794 | |
1795 | bit_size_t reg_bit_size; |
	  if (!reg->get_bit_size (&reg_bit_size))
1797 | return NULL; |
1798 | |
1799 | bit_range reg_range (reg_offset.get_bit_offset (), |
1800 | reg_bit_size); |
1801 | |
1802 | /* Skip bindings that are outside the bit range of REG. */ |
	  if (!bound_range.intersects_p (reg_range))
1804 | continue; |
1805 | |
1806 | /* We shouldn't have an exact match; that should have been |
1807 | handled already. */ |
1808 | gcc_assert (!(reg_range == bound_range)); |
1809 | |
1810 | bit_range subrange (0, 0); |
	  if (reg_range.contains_p (bound_range, &subrange))
1812 | { |
1813 | /* We have a bound range fully within REG. |
1814 | Add it to map, offsetting accordingly. */ |
1815 | |
1816 | /* Get offset of KEY relative to REG, rather than to |
1817 | the cluster. */ |
	      const concrete_binding *offset_concrete_key
		= mgr->get_concrete_binding (subrange);
	      result_map.put (offset_concrete_key, sval);

	      /* Clobber default_map, removing/trimming/splitting where
		 it overlaps with offset_concrete_key. */
	      default_map.remove_overlapping_bindings (mgr,
						       offset_concrete_key,
						       NULL, NULL, false);
1827 | } |
	  else if (bound_range.contains_p (reg_range, &subrange))
1829 | { |
1830 | /* REG is fully within the bound range, but |
1831 | is not equal to it; we're extracting a subvalue. */ |
	      return sval->extract_bit_range (reg->get_type (),
					      subrange,
					      mgr->get_svalue_manager ());
1835 | } |
1836 | else |
1837 | { |
1838 | /* REG and the bound range partially overlap. */ |
1839 | bit_range reg_subrange (0, 0); |
1840 | bit_range bound_subrange (0, 0); |
	      reg_range.intersects_p (bound_range,
				      &reg_subrange, &bound_subrange);
1843 | |
1844 | /* Get the bits from the bound value for the bits at the |
1845 | intersection (relative to the bound value). */ |
1846 | const svalue *overlap_sval |
		= sval->extract_bit_range (NULL_TREE,
					   bound_subrange,
					   mgr->get_svalue_manager ());

	      /* Get key for overlap, relative to the REG. */
	      const concrete_binding *overlap_concrete_key
		= mgr->get_concrete_binding (reg_subrange);
	      result_map.put (overlap_concrete_key, overlap_sval);

	      /* Clobber default_map, removing/trimming/splitting where
		 it overlaps with overlap_concrete_key. */
	      default_map.remove_overlapping_bindings (mgr,
						       overlap_concrete_key,
						       NULL, NULL, false);
1861 | } |
1862 | } |
1863 | else |
1864 | /* Can't handle symbolic bindings. */ |
1865 | return NULL; |
1866 | } |
1867 | |
1868 | if (result_map.elements () == 0) |
1869 | return NULL; |
1870 | |
1871 | /* Merge any bindings from default_map into result_map. */ |
1872 | for (auto iter : default_map) |
1873 | { |
1874 | const binding_key *key = iter.first; |
1875 | const svalue *sval = iter.second; |
      result_map.put (key, sval);
1877 | } |
1878 | |
  return sval_mgr->get_or_create_compound_svalue (reg->get_type (),
						  result_map);
1880 | } |
1881 | |
1882 | /* Remove, truncate, and/or split any bindings within this map that |
1883 | could overlap REG. |
1884 | |
1885 | If REG's base region or this cluster is symbolic and they're different |
1886 | base regions, then remove everything in this cluster's map, on the |
1887 | grounds that REG could be referring to the same memory as anything |
1888 | in the map. |
1889 | |
1890 | If UNCERTAINTY is non-NULL, use it to record any svalues that |
1891 | were removed, as being maybe-bound. |
1892 | |
1893 | If MAYBE_LIVE_VALUES is non-NULL, use it to record any svalues that |
1894 | were removed, as being maybe-live. */ |
1895 | |
1896 | void |
1897 | binding_cluster::remove_overlapping_bindings (store_manager *mgr, |
1898 | const region *reg, |
1899 | uncertainty_t *uncertainty, |
1900 | svalue_set *maybe_live_values) |
1901 | { |
1902 | if (reg->empty_p ()) |
1903 | return; |
  const binding_key *reg_binding = binding_key::make (mgr, reg);
1905 | |
1906 | const region *cluster_base_reg = get_base_region (); |
1907 | const region *other_base_reg = reg->get_base_region (); |
1908 | /* If at least one of the base regions involved is symbolic, and they're |
1909 | not the same base region, then consider everything in the map as |
1910 | potentially overlapping with reg_binding (even if it's a concrete |
1911 | binding and things in the map are concrete - they could be referring |
1912 | to the same memory when the symbolic base regions are taken into |
1913 | account). */ |
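  /* For example (a hypothetical sketch): if this is the cluster for a
     concrete global "g" and REG is "*p" for an unknown pointer "p",
     then "p" might point at "g", so every binding of "g" is treated
     as overlapping and removed. */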
1914 | bool always_overlap = (cluster_base_reg != other_base_reg |
1915 | && (cluster_base_reg->get_kind () == RK_SYMBOLIC |
1916 | || other_base_reg->get_kind () == RK_SYMBOLIC)); |
  m_map.remove_overlapping_bindings (mgr, reg_binding, uncertainty,
				     maybe_live_values,
				     always_overlap);
1920 | } |
1921 | |
1922 | /* Attempt to merge CLUSTER_A and CLUSTER_B into OUT_CLUSTER, using |
1923 | MGR and MERGER. |
1924 | Return true if they can be merged, false otherwise. */ |
1925 | |
1926 | bool |
1927 | binding_cluster::can_merge_p (const binding_cluster *cluster_a, |
1928 | const binding_cluster *cluster_b, |
1929 | binding_cluster *out_cluster, |
1930 | store *out_store, |
1931 | store_manager *mgr, |
1932 | model_merger *merger) |
1933 | { |
1934 | gcc_assert (out_cluster); |
1935 | |
1936 | /* Merge flags ("ESCAPED" and "TOUCHED") by setting the merged flag to |
1937 | true if either of the inputs is true. */ |
1938 | if ((cluster_a && cluster_a->m_escaped) |
1939 | || (cluster_b && cluster_b->m_escaped)) |
1940 | out_cluster->m_escaped = true; |
1941 | if ((cluster_a && cluster_a->m_touched) |
1942 | || (cluster_b && cluster_b->m_touched)) |
1943 | out_cluster->m_touched = true; |
1944 | |
  /* At least one of CLUSTER_A and CLUSTER_B is non-NULL, but either
1946 | could be NULL. Handle these cases. */ |
1947 | if (cluster_a == NULL) |
1948 | { |
1949 | gcc_assert (cluster_b != NULL); |
1950 | gcc_assert (cluster_b->m_base_region == out_cluster->m_base_region); |
      out_cluster->make_unknown_relative_to (cluster_b, out_store, mgr);
1952 | return true; |
1953 | } |
1954 | if (cluster_b == NULL) |
1955 | { |
1956 | gcc_assert (cluster_a != NULL); |
1957 | gcc_assert (cluster_a->m_base_region == out_cluster->m_base_region); |
      out_cluster->make_unknown_relative_to (cluster_a, out_store, mgr);
1959 | return true; |
1960 | } |
1961 | |
1962 | /* The "both inputs are non-NULL" case. */ |
1963 | gcc_assert (cluster_a != NULL && cluster_b != NULL); |
1964 | gcc_assert (cluster_a->m_base_region == out_cluster->m_base_region); |
1965 | gcc_assert (cluster_b->m_base_region == out_cluster->m_base_region); |
1966 | |
1967 | hash_set<const binding_key *> keys; |
1968 | for (map_t::iterator iter_a = cluster_a->m_map.begin (); |
1969 | iter_a != cluster_a->m_map.end (); ++iter_a) |
1970 | { |
1971 | const binding_key *key_a = (*iter_a).first; |
      keys.add (key_a);
1973 | } |
1974 | for (map_t::iterator iter_b = cluster_b->m_map.begin (); |
1975 | iter_b != cluster_b->m_map.end (); ++iter_b) |
1976 | { |
1977 | const binding_key *key_b = (*iter_b).first; |
      keys.add (key_b);
1979 | } |
1980 | int num_symbolic_keys = 0; |
1981 | int num_concrete_keys = 0; |
1982 | for (hash_set<const binding_key *>::iterator iter = keys.begin (); |
1983 | iter != keys.end (); ++iter) |
1984 | { |
1985 | region_model_manager *sval_mgr = mgr->get_svalue_manager (); |
1986 | const binding_key *key = *iter; |
1987 | const svalue *sval_a = cluster_a->get_any_value (key); |
1988 | const svalue *sval_b = cluster_b->get_any_value (key); |
1989 | |
1990 | if (key->symbolic_p ()) |
1991 | num_symbolic_keys++; |
1992 | else |
1993 | num_concrete_keys++; |
1994 | |
1995 | if (sval_a == sval_b) |
1996 | { |
1997 | gcc_assert (sval_a); |
	  out_cluster->m_map.put (key, sval_a);
1999 | continue; |
2000 | } |
2001 | else if (sval_a && sval_b) |
2002 | { |
2003 | if (const svalue *merged_sval |
	      = sval_a->can_merge_p (sval_b, sval_mgr, merger))
	    {
	      out_cluster->m_map.put (key, merged_sval);
2007 | continue; |
2008 | } |
2009 | /* Merger of the svalues failed. Reject merger of the cluster. */ |
2010 | return false; |
2011 | } |
2012 | |
2013 | /* If we get here, then one cluster binds this key and the other |
2014 | doesn't; merge them as "UNKNOWN". */ |
2015 | gcc_assert (sval_a || sval_b); |
2016 | |
2017 | const svalue *bound_sval = sval_a ? sval_a : sval_b; |
2018 | tree type = bound_sval->get_type (); |
2019 | const svalue *unknown_sval |
2020 | = mgr->get_svalue_manager ()->get_or_create_unknown_svalue (type); |
2021 | |
2022 | /* ...but reject the merger if this sval shouldn't be mergeable |
2023 | (e.g. reject merging svalues that have non-purgable sm-state, |
2024 | to avoid falsely reporting memory leaks by merging them |
2025 | with something else). */ |
      if (!bound_sval->can_merge_p (unknown_sval, sval_mgr, merger))
2027 | return false; |
2028 | |
      out_cluster->m_map.put (key, unknown_sval);
2030 | } |
2031 | |
  /* A cluster can have at most one symbolic key, and if it has one
     it can't have any concrete keys.
     If the merged cluster would violate that, mark it as touched,
     with no keys. */
2035 | if (num_symbolic_keys >= 2 |
2036 | || (num_concrete_keys > 0 && num_symbolic_keys > 0)) |
2037 | { |
2038 | out_cluster->m_touched = true; |
2039 | out_cluster->m_map.empty (); |
2040 | } |
2041 | |
2042 | /* We don't handle other kinds of overlaps yet. */ |
2043 | |
2044 | return true; |
2045 | } |
2046 | |
2047 | /* Update this cluster to reflect an attempt to merge OTHER where there |
2048 | is no other cluster to merge with, and so we're notionally merging the |
2049 | bound values in OTHER with the initial value of the relevant regions. |
2050 | |
2051 | Any bound keys in OTHER should be bound to unknown in this. */ |
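/* For example (a hypothetical sketch): if OTHER binds bytes 0-3 of its
   base region to &HEAP_ALLOCATED_REGION(12), then in this cluster those
   bytes become UNKNOWN(int *), and the cluster for
   HEAP_ALLOCATED_REGION(12) in OUT_STORE is marked as having escaped,
   so that the merged state doesn't misreport that buffer as leaking. */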
2052 | |
2053 | void |
2054 | binding_cluster::make_unknown_relative_to (const binding_cluster *other, |
2055 | store *out_store, |
2056 | store_manager *mgr) |
2057 | { |
2058 | for (map_t::iterator iter = other->m_map.begin (); |
2059 | iter != other->m_map.end (); ++iter) |
2060 | { |
2061 | const binding_key *iter_key = (*iter).first; |
2062 | const svalue *iter_sval = (*iter).second; |
2063 | const svalue *unknown_sval |
2064 | = mgr->get_svalue_manager ()->get_or_create_unknown_svalue |
	  (iter_sval->get_type ());
      m_map.put (iter_key, unknown_sval);
2067 | |
2068 | /* For any pointers in OTHER, the merger means that the |
2069 | concrete pointer becomes an unknown value, which could |
2070 | show up as a false report of a leak when considering what |
2071 | pointers are live before vs after. |
2072 | Avoid this by marking the base regions they point to as having |
2073 | escaped. */ |
2074 | if (const region_svalue *region_sval |
2075 | = iter_sval->dyn_cast_region_svalue ()) |
2076 | { |
2077 | const region *base_reg |
2078 | = region_sval->get_pointee ()->get_base_region (); |
2079 | if (base_reg->tracked_p () |
2080 | && !base_reg->symbolic_for_unknown_ptr_p ()) |
2081 | { |
2082 | binding_cluster *c = out_store->get_or_create_cluster (base_reg); |
2083 | c->mark_as_escaped (); |
2084 | } |
2085 | } |
2086 | } |
2087 | } |
2088 | |
2089 | /* Mark this cluster as having escaped. */ |
2090 | |
2091 | void |
2092 | binding_cluster::mark_as_escaped () |
2093 | { |
2094 | m_escaped = true; |
2095 | } |
2096 | |
2097 | /* If this cluster has escaped (by this call, or by an earlier one, or |
2098 | by being an external param), then unbind all values and mark it |
2099 | as "touched", so that it has a conjured value, rather than an |
2100 | initial_svalue. |
2101 | Use P to purge state involving conjured_svalues. */ |
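/* For example (a hypothetical sketch):
     extern void unknown_fn (int *);
     int i = 42;
     unknown_fn (&i);   // i's cluster is marked as having escaped
     unknown_fn (NULL); // ...so this call could have written to i via
			// the captured pointer, and i is rebound to a
			// conjured value for this call site. */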
2102 | |
2103 | void |
2104 | binding_cluster::on_unknown_fncall (const gcall *call, |
2105 | store_manager *mgr, |
2106 | const conjured_purge &p) |
2107 | { |
2108 | if (m_escaped) |
2109 | { |
2110 | m_map.empty (); |
2111 | |
2112 | if (!m_base_region->empty_p ()) |
2113 | { |
2114 | /* Bind it to a new "conjured" value using CALL. */ |
2115 | const svalue *sval |
2116 | = mgr->get_svalue_manager ()->get_or_create_conjured_svalue |
	    (m_base_region->get_type (), call, m_base_region, p);
	  bind (mgr, m_base_region, sval);
2119 | } |
2120 | |
2121 | m_touched = true; |
2122 | } |
2123 | } |
2124 | |
2125 | /* Mark this cluster as having been clobbered by STMT. |
2126 | Use P to purge state involving conjured_svalues. */ |
2127 | |
2128 | void |
2129 | binding_cluster::on_asm (const gasm *stmt, |
2130 | store_manager *mgr, |
2131 | const conjured_purge &p) |
2132 | { |
2133 | m_map.empty (); |
2134 | |
  /* Bind it to a new "conjured" value using STMT. */
  const svalue *sval
    = mgr->get_svalue_manager ()->get_or_create_conjured_svalue
	(m_base_region->get_type (), stmt, m_base_region, p);
  bind (mgr, m_base_region, sval);
2140 | |
2141 | m_touched = true; |
2142 | } |
2143 | |
2144 | /* Return true if this cluster has escaped. */ |
2145 | |
2146 | bool |
2147 | binding_cluster::escaped_p () const |
2148 | { |
2149 | /* Consider the "errno" region to always have escaped. */ |
2150 | if (m_base_region->get_kind () == RK_ERRNO) |
2151 | return true; |
2152 | return m_escaped; |
2153 | } |
2154 | |
2155 | /* Return true if this binding_cluster has no information |
2156 | i.e. if there are no bindings, and it hasn't been marked as having |
2157 | escaped, or touched symbolically. */ |
2158 | |
2159 | bool |
2160 | binding_cluster::redundant_p () const |
2161 | { |
2162 | return (m_map.elements () == 0 |
2163 | && !m_escaped |
2164 | && !m_touched); |
2165 | } |
2166 | |
2167 | /* Add PV to OUT_PVS, casting it to TYPE if it is not already of that type. */ |
2168 | |
2169 | static void |
2170 | append_pathvar_with_type (path_var pv, |
2171 | tree type, |
2172 | auto_vec<path_var> *out_pvs) |
2173 | { |
2174 | gcc_assert (pv.m_tree); |
2175 | |
2176 | if (TREE_TYPE (pv.m_tree) != type) |
2177 | pv.m_tree = build1 (NOP_EXPR, type, pv.m_tree); |
2178 | |
  out_pvs->safe_push (pv);
2180 | } |
2181 | |
2182 | /* Find representative path_vars for SVAL within this binding of BASE_REG, |
2183 | appending the results to OUT_PVS. */ |
2184 | |
2185 | void |
2186 | binding_cluster::get_representative_path_vars (const region_model *model, |
2187 | svalue_set *visited, |
2188 | const region *base_reg, |
2189 | const svalue *sval, |
2190 | auto_vec<path_var> *out_pvs) |
2191 | const |
2192 | { |
2193 | sval = simplify_for_binding (sval); |
2194 | |
2195 | for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter) |
2196 | { |
2197 | const binding_key *key = (*iter).first; |
2198 | const svalue *bound_sval = (*iter).second; |
2199 | if (bound_sval == sval) |
2200 | { |
2201 | if (const concrete_binding *ckey |
2202 | = key->dyn_cast_concrete_binding ()) |
2203 | { |
2204 | auto_vec <const region *> subregions; |
2205 | base_reg->get_subregions_for_binding |
		(model->get_manager (),
		 ckey->get_start_bit_offset (),
		 ckey->get_size_in_bits (),
		 sval->get_type (),
		 &subregions);
2211 | unsigned i; |
2212 | const region *subregion; |
2213 | FOR_EACH_VEC_ELT (subregions, i, subregion) |
2214 | { |
2215 | if (path_var pv |
		    = model->get_representative_path_var (subregion,
							  visited))
		  append_pathvar_with_type (pv, sval->get_type (), out_pvs);
2219 | } |
2220 | } |
2221 | else |
2222 | { |
2223 | const symbolic_binding *skey = (const symbolic_binding *)key; |
2224 | if (path_var pv |
		= model->get_representative_path_var (skey->get_region (),
						      visited))
	    append_pathvar_with_type (pv, sval->get_type (), out_pvs);
2228 | } |
2229 | } |
2230 | } |
2231 | } |
2232 | |
2233 | /* Get any svalue bound to KEY, or NULL. */ |
2234 | |
2235 | const svalue * |
2236 | binding_cluster::get_any_value (const binding_key *key) const |
2237 | { |
2238 | return m_map.get (key); |
2239 | } |
2240 | |
2241 | /* If this cluster has a single direct binding for the whole of the region, |
2242 | return it. |
2243 | For use in simplifying dumps. */ |
2244 | |
2245 | const svalue * |
2246 | binding_cluster::maybe_get_simple_value (store_manager *mgr) const |
2247 | { |
2248 | /* Fail gracefully if MGR is NULL to make it easier to dump store |
2249 | instances in the debugger. */ |
2250 | if (mgr == NULL) |
2251 | return NULL; |
2252 | |
2253 | if (m_map.elements () != 1) |
2254 | return NULL; |
2255 | |
2256 | if (m_base_region->empty_p ()) |
2257 | return NULL; |
2258 | |
  const binding_key *key = binding_key::make (mgr, m_base_region);
2260 | return get_any_value (key); |
2261 | } |
2262 | |
2263 | /* class store_manager. */ |
2264 | |
2265 | logger * |
2266 | store_manager::get_logger () const |
2267 | { |
2268 | return m_mgr->get_logger (); |
2269 | } |
2270 | |
2271 | /* binding consolidation. */ |
2272 | |
2273 | const concrete_binding * |
2274 | store_manager::get_concrete_binding (bit_offset_t start_bit_offset, |
2275 | bit_offset_t size_in_bits) |
2276 | { |
2277 | concrete_binding b (start_bit_offset, size_in_bits); |
  if (concrete_binding *existing = m_concrete_binding_key_mgr.get (b))
    return existing;

  concrete_binding *to_save = new concrete_binding (b);
  m_concrete_binding_key_mgr.put (b, to_save);
2283 | return to_save; |
2284 | } |
2285 | |
2286 | const symbolic_binding * |
2287 | store_manager::get_symbolic_binding (const region *reg) |
2288 | { |
2289 | symbolic_binding b (reg); |
  if (symbolic_binding *existing = m_symbolic_binding_key_mgr.get (b))
    return existing;

  symbolic_binding *to_save = new symbolic_binding (b);
  m_symbolic_binding_key_mgr.put (b, to_save);
2295 | return to_save; |
2296 | } |
2297 | |
2298 | /* class store. */ |
2299 | |
2300 | /* store's default ctor. */ |
2301 | |
2302 | store::store () |
2303 | : m_called_unknown_fn (false) |
2304 | { |
2305 | } |
2306 | |
2307 | /* store's copy ctor. */ |
2308 | |
2309 | store::store (const store &other) |
2310 | : m_cluster_map (other.m_cluster_map.elements ()), |
2311 | m_called_unknown_fn (other.m_called_unknown_fn) |
2312 | { |
2313 | for (cluster_map_t::iterator iter = other.m_cluster_map.begin (); |
2314 | iter != other.m_cluster_map.end (); |
2315 | ++iter) |
2316 | { |
2317 | const region *reg = (*iter).first; |
2318 | gcc_assert (reg); |
2319 | binding_cluster *c = (*iter).second; |
2320 | gcc_assert (c); |
      m_cluster_map.put (reg, new binding_cluster (*c));
2322 | } |
2323 | } |
2324 | |
2325 | /* store's dtor. */ |
2326 | |
2327 | store::~store () |
2328 | { |
2329 | for (cluster_map_t::iterator iter = m_cluster_map.begin (); |
2330 | iter != m_cluster_map.end (); |
2331 | ++iter) |
2332 | delete (*iter).second; |
2333 | } |
2334 | |
2335 | /* store's assignment operator. */ |
2336 | |
2337 | store & |
2338 | store::operator= (const store &other) |
2339 | { |
2340 | /* Delete existing cluster map. */ |
2341 | for (cluster_map_t::iterator iter = m_cluster_map.begin (); |
2342 | iter != m_cluster_map.end (); |
2343 | ++iter) |
2344 | delete (*iter).second; |
2345 | m_cluster_map.empty (); |
2346 | |
2347 | m_called_unknown_fn = other.m_called_unknown_fn; |
2348 | |
2349 | for (cluster_map_t::iterator iter = other.m_cluster_map.begin (); |
2350 | iter != other.m_cluster_map.end (); |
2351 | ++iter) |
2352 | { |
2353 | const region *reg = (*iter).first; |
2354 | gcc_assert (reg); |
2355 | binding_cluster *c = (*iter).second; |
2356 | gcc_assert (c); |
      m_cluster_map.put (reg, new binding_cluster (*c));
2358 | } |
2359 | return *this; |
2360 | } |
2361 | |
2362 | /* store's equality operator. */ |
2363 | |
2364 | bool |
2365 | store::operator== (const store &other) const |
2366 | { |
2367 | if (m_called_unknown_fn != other.m_called_unknown_fn) |
2368 | return false; |
2369 | |
2370 | if (m_cluster_map.elements () != other.m_cluster_map.elements ()) |
2371 | return false; |
2372 | |
2373 | for (cluster_map_t::iterator iter = m_cluster_map.begin (); |
2374 | iter != m_cluster_map.end (); |
2375 | ++iter) |
2376 | { |
2377 | const region *reg = (*iter).first; |
2378 | binding_cluster *c = (*iter).second; |
2379 | binding_cluster **other_slot |
	= const_cast <cluster_map_t &> (other.m_cluster_map).get (reg);
2381 | if (other_slot == NULL) |
2382 | return false; |
2383 | if (*c != **other_slot) |
2384 | return false; |
2385 | } |
2386 | |
2387 | gcc_checking_assert (hash () == other.hash ()); |
2388 | |
2389 | return true; |
2390 | } |
2391 | |
2392 | /* Get a hash value for this store. */ |
2393 | |
2394 | hashval_t |
2395 | store::hash () const |
2396 | { |
2397 | hashval_t result = 0; |
2398 | for (cluster_map_t::iterator iter = m_cluster_map.begin (); |
2399 | iter != m_cluster_map.end (); |
2400 | ++iter) |
2401 | result ^= (*iter).second->hash (); |
2402 | return result; |
2403 | } |
2404 | |
2405 | /* Populate OUT with a sorted list of parent regions for the regions in IN, |
2406 | removing duplicate parents. */ |
2407 | |
2408 | static void |
2409 | get_sorted_parent_regions (auto_vec<const region *> *out, |
2410 | auto_vec<const region *> &in) |
2411 | { |
2412 | /* Get the set of parent regions. */ |
2413 | hash_set<const region *> parent_regions; |
2414 | const region *iter_reg; |
2415 | unsigned i; |
2416 | FOR_EACH_VEC_ELT (in, i, iter_reg) |
2417 | { |
2418 | const region *parent_reg = iter_reg->get_parent_region (); |
2419 | gcc_assert (parent_reg); |
      parent_regions.add (parent_reg);
2421 | } |
2422 | |
2423 | /* Write to OUT. */ |
2424 | for (hash_set<const region *>::iterator iter = parent_regions.begin(); |
2425 | iter != parent_regions.end(); ++iter) |
    out->safe_push (*iter);
2427 | |
2428 | /* Sort OUT. */ |
2429 | out->qsort (region::cmp_ptr_ptr); |
2430 | } |
2431 | |
2432 | /* Dump a representation of this store to PP, using SIMPLE to control how |
2433 | svalues and regions are printed. |
2434 | MGR is used for simplifying dumps if non-NULL, but can also be NULL |
2435 | (to make it easier to use from the debugger). */ |
2436 | |
2437 | void |
2438 | store::dump_to_pp (pretty_printer *pp, bool simple, bool multiline, |
2439 | store_manager *mgr) const |
2440 | { |
2441 | /* Sort into some deterministic order. */ |
2442 | auto_vec<const region *> base_regions; |
2443 | for (cluster_map_t::iterator iter = m_cluster_map.begin (); |
2444 | iter != m_cluster_map.end (); ++iter) |
2445 | { |
2446 | const region *base_reg = (*iter).first; |
      base_regions.safe_push (base_reg);
2448 | } |
2449 | base_regions.qsort (region::cmp_ptr_ptr); |
2450 | |
2451 | /* Gather clusters, organize by parent region, so that we can group |
2452 | together locals, globals, etc. */ |
2453 | auto_vec<const region *> parent_regions; |
  get_sorted_parent_regions (&parent_regions, base_regions);
2455 | |
2456 | const region *parent_reg; |
2457 | unsigned i; |
2458 | FOR_EACH_VEC_ELT (parent_regions, i, parent_reg) |
2459 | { |
2460 | gcc_assert (parent_reg); |
      pp_string (pp, "clusters within ");
      parent_reg->dump_to_pp (pp, simple);
      if (multiline)
	pp_newline (pp);
      else
	pp_string (pp, " {");
2467 | |
2468 | const region *base_reg; |
2469 | unsigned j; |
2470 | FOR_EACH_VEC_ELT (base_regions, j, base_reg) |
2471 | { |
2472 | /* This is O(N * M), but N ought to be small. */ |
2473 | if (base_reg->get_parent_region () != parent_reg) |
2474 | continue; |
2475 | binding_cluster *cluster |
	    = *const_cast<cluster_map_t &> (m_cluster_map).get (base_reg);
	  if (!multiline)
	    {
	      if (j > 0)
		pp_string (pp, ", ");
2481 | } |
2482 | if (const svalue *sval = cluster->maybe_get_simple_value (mgr)) |
2483 | { |
2484 | /* Special-case to simplify dumps for the common case where |
2485 | we just have one value directly bound to the whole of a |
2486 | region. */ |
2487 | if (multiline) |
2488 | { |
		  pp_string (pp, " cluster for: ");
		  base_reg->dump_to_pp (pp, simple);
		  pp_string (pp, ": ");
		  sval->dump_to_pp (pp, simple);
		  if (cluster->escaped_p ())
		    pp_string (pp, " (ESCAPED)");
		  if (cluster->touched_p ())
		    pp_string (pp, " (TOUCHED)");
		  pp_newline (pp);
		}
	      else
		{
		  pp_string (pp, "region: {");
		  base_reg->dump_to_pp (pp, simple);
		  pp_string (pp, ", value: ");
		  sval->dump_to_pp (pp, simple);
		  if (cluster->escaped_p ())
		    pp_string (pp, " (ESCAPED)");
		  if (cluster->touched_p ())
		    pp_string (pp, " (TOUCHED)");
		  pp_string (pp, "}");
2510 | } |
2511 | } |
2512 | else if (multiline) |
2513 | { |
	      pp_string (pp, " cluster for: ");
2515 | base_reg->dump_to_pp (pp, simple); |
2516 | pp_newline (pp); |
2517 | cluster->dump_to_pp (pp, simple, multiline); |
2518 | } |
2519 | else |
2520 | { |
	      pp_string (pp, "base region: {");
	      base_reg->dump_to_pp (pp, simple);
	      pp_string (pp, "} has cluster: {");
	      cluster->dump_to_pp (pp, simple, multiline);
	      pp_string (pp, "}");
	    }
	}
      if (!multiline)
	pp_string (pp, "}");
    }
  pp_printf (pp, "m_called_unknown_fn: %s",
	     m_called_unknown_fn ? "TRUE" : "FALSE");
2533 | if (multiline) |
2534 | pp_newline (pp); |
2535 | } |
2536 | |
2537 | /* Dump a multiline representation of this store to stderr. */ |
2538 | |
2539 | DEBUG_FUNCTION void |
2540 | store::dump (bool simple) const |
2541 | { |
2542 | pretty_printer pp; |
2543 | pp_format_decoder (&pp) = default_tree_printer; |
2544 | pp_show_color (&pp) = pp_show_color (global_dc->printer); |
2545 | pp.buffer->stream = stderr; |
  dump_to_pp (&pp, simple, true, NULL);
2547 | pp_newline (&pp); |
2548 | pp_flush (&pp); |
2549 | } |
2550 | |
2551 | /* Assert that this object is valid. */ |
2552 | |
2553 | void |
2554 | store::validate () const |
2555 | { |
2556 | for (auto iter : m_cluster_map) |
2557 | iter.second->validate (); |
2558 | } |
2559 | |
2560 | /* Return a new json::object of the form |
2561 | {PARENT_REGION_DESC: {BASE_REGION_DESC: object for binding_map, |
2562 | ... for each cluster within parent region}, |
2563 | ...for each parent region, |
2564 | "called_unknown_fn": true/false}. */ |
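/* For example (hypothetical region descriptions, with the per-cluster
   binding_map objects elided):
     {"frame: 'main'": {"i": {...}},
      "globals": {"g": {...}},
      "called_unknown_fn": false} */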
2565 | |
2566 | json::object * |
2567 | store::to_json () const |
2568 | { |
2569 | json::object *store_obj = new json::object (); |
2570 | |
2571 | /* Sort into some deterministic order. */ |
2572 | auto_vec<const region *> base_regions; |
2573 | for (cluster_map_t::iterator iter = m_cluster_map.begin (); |
2574 | iter != m_cluster_map.end (); ++iter) |
2575 | { |
2576 | const region *base_reg = (*iter).first; |
      base_regions.safe_push (base_reg);
2578 | } |
2579 | base_regions.qsort (region::cmp_ptr_ptr); |
2580 | |
2581 | /* Gather clusters, organize by parent region, so that we can group |
2582 | together locals, globals, etc. */ |
2583 | auto_vec<const region *> parent_regions; |
  get_sorted_parent_regions (&parent_regions, base_regions);
2585 | |
2586 | const region *parent_reg; |
2587 | unsigned i; |
2588 | FOR_EACH_VEC_ELT (parent_regions, i, parent_reg) |
2589 | { |
2590 | gcc_assert (parent_reg); |
2591 | |
2592 | json::object *clusters_in_parent_reg_obj = new json::object (); |
2593 | |
2594 | const region *base_reg; |
2595 | unsigned j; |
2596 | FOR_EACH_VEC_ELT (base_regions, j, base_reg) |
2597 | { |
2598 | /* This is O(N * M), but N ought to be small. */ |
2599 | if (base_reg->get_parent_region () != parent_reg) |
2600 | continue; |
2601 | binding_cluster *cluster |
	    = *const_cast<cluster_map_t &> (m_cluster_map).get (base_reg);
	  label_text base_reg_desc = base_reg->get_desc ();
	  clusters_in_parent_reg_obj->set (base_reg_desc.get (),
					   cluster->to_json ());
	}
      label_text parent_reg_desc = parent_reg->get_desc ();
      store_obj->set (parent_reg_desc.get (), clusters_in_parent_reg_obj);
2609 | } |
2610 | |
  store_obj->set ("called_unknown_fn",
		  new json::literal (m_called_unknown_fn));
2612 | |
2613 | return store_obj; |
2614 | } |
2615 | |
2616 | /* Get any svalue bound to REG, or NULL. */ |
2617 | |
2618 | const svalue * |
2619 | store::get_any_binding (store_manager *mgr, const region *reg) const |
2620 | { |
2621 | const region *base_reg = reg->get_base_region (); |
2622 | binding_cluster **cluster_slot |
    = const_cast <cluster_map_t &> (m_cluster_map).get (base_reg);
2624 | if (!cluster_slot) |
2625 | return NULL; |
2626 | return (*cluster_slot)->get_any_binding (mgr, reg); |
2627 | } |
2628 | |
2629 | /* Set the value of LHS_REG to RHS_SVAL. */ |
2630 | |
2631 | void |
2632 | store::set_value (store_manager *mgr, const region *lhs_reg, |
2633 | const svalue *rhs_sval, |
2634 | uncertainty_t *uncertainty) |
2635 | { |
2636 | logger *logger = mgr->get_logger (); |
2637 | LOG_SCOPE (logger); |
2638 | |
  remove_overlapping_bindings (mgr, lhs_reg, uncertainty);
2640 | |
2641 | if (lhs_reg->get_type ()) |
    rhs_sval = simplify_for_binding (rhs_sval);
2643 | /* ...but if we have no type for the region, retain any cast. */ |
2644 | |
2645 | const region *lhs_base_reg = lhs_reg->get_base_region (); |
2646 | binding_cluster *lhs_cluster; |
2647 | if (lhs_base_reg->symbolic_for_unknown_ptr_p ()) |
2648 | { |
2649 | /* Reject attempting to bind values into a symbolic region |
2650 | for an unknown ptr; merely invalidate values below. */ |
2651 | lhs_cluster = NULL; |
2652 | |
2653 | /* The LHS of the write is *UNKNOWN. If the RHS is a pointer, |
2654 | then treat the region being pointed to as having escaped. */ |
2655 | if (const region_svalue *ptr_sval = rhs_sval->dyn_cast_region_svalue ()) |
2656 | { |
2657 | const region *ptr_dst = ptr_sval->get_pointee (); |
2658 | const region *ptr_base_reg = ptr_dst->get_base_region (); |
	  mark_as_escaped (ptr_base_reg);
	}
      if (uncertainty)
	uncertainty->on_maybe_bound_sval (rhs_sval);
2663 | } |
2664 | else if (lhs_base_reg->tracked_p ()) |
2665 | { |
      lhs_cluster = get_or_create_cluster (lhs_base_reg);
      lhs_cluster->bind (mgr, lhs_reg, rhs_sval);
2668 | } |
2669 | else |
2670 | { |
2671 | /* Reject attempting to bind values into an untracked region; |
2672 | merely invalidate values below. */ |
2673 | lhs_cluster = NULL; |
2674 | } |
2675 | |
2676 | /* Bindings to a cluster can affect other clusters if a symbolic |
2677 | base region is involved. |
2678 | Writes to concrete clusters can't affect other concrete clusters, |
2679 | but can affect symbolic clusters. |
2680 | Writes to symbolic clusters can affect both concrete and symbolic |
2681 | clusters. |
2682 | Invalidate our knowledge of other clusters that might have been |
2683 | affected by the write. |
2684 | Gather the set of all svalues that might still be live even if |
2685 | the store doesn't refer to them. */ |
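  /* For example (a hypothetical sketch): after a write through an
     unknown pointer, "*p = 42;", the cluster for any global "g" must be
     invalidated, since "p" might point at "g"; a direct write to "g",
     by contrast, can't affect the cluster for another concrete
     global "h". */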
2686 | svalue_set maybe_live_values; |
2687 | for (cluster_map_t::iterator iter = m_cluster_map.begin (); |
2688 | iter != m_cluster_map.end (); ++iter) |
2689 | { |
2690 | const region *iter_base_reg = (*iter).first; |
2691 | binding_cluster *iter_cluster = (*iter).second; |
2692 | if (iter_base_reg != lhs_base_reg |
2693 | && (lhs_cluster == NULL |
2694 | || lhs_cluster->symbolic_p () |
2695 | || iter_cluster->symbolic_p ())) |
2696 | { |
	  tristate t_alias = eval_alias (lhs_base_reg, iter_base_reg);
2698 | switch (t_alias.get_value ()) |
2699 | { |
2700 | default: |
2701 | gcc_unreachable (); |
2702 | |
2703 | case tristate::TS_UNKNOWN: |
2704 | if (logger) |
2705 | { |
2706 | pretty_printer *pp = logger->get_printer (); |
2707 | logger->start_log_line (); |
		  logger->log_partial ("possible aliasing of ");
		  iter_base_reg->dump_to_pp (pp, true);
		  logger->log_partial (" when writing SVAL: ");
		  rhs_sval->dump_to_pp (pp, true);
		  logger->log_partial (" to LHS_REG: ");
		  lhs_reg->dump_to_pp (pp, true);
2714 | logger->end_log_line (); |
2715 | } |
2716 | /* Mark all of iter_cluster's iter_base_reg as unknown, |
2717 | using LHS_REG when considering overlaps, to handle |
2718 | symbolic vs concrete issues. */ |
	      iter_cluster->mark_region_as_unknown
		(mgr,
		 iter_base_reg, /* reg_to_bind */
		 lhs_reg, /* reg_for_overlap */
		 uncertainty,
		 &maybe_live_values);
2725 | break; |
2726 | |
2727 | case tristate::TS_TRUE: |
2728 | gcc_unreachable (); |
2729 | break; |
2730 | |
2731 | case tristate::TS_FALSE: |
2732 | /* If they can't be aliases, then don't invalidate this |
2733 | cluster. */ |
2734 | break; |
2735 | } |
2736 | } |
2737 | } |
2738 | /* Given the set of svalues that might still be live, process them |
2739 | (e.g. marking regions as escaped). |
2740 | We do this after the iteration to avoid potentially changing |
2741 | m_cluster_map whilst iterating over it. */ |
2742 | on_maybe_live_values (maybe_live_values); |
2743 | } |
2744 | |
2745 | /* Determine if BASE_REG_A could be an alias of BASE_REG_B. */ |
2746 | |
2747 | tristate |
2748 | store::eval_alias (const region *base_reg_a, |
2749 | const region *base_reg_b) const |
2750 | { |
2751 | /* SSA names can't alias. */ |
2752 | tree decl_a = base_reg_a->maybe_get_decl (); |
2753 | if (decl_a && TREE_CODE (decl_a) == SSA_NAME) |
2754 | return tristate::TS_FALSE; |
2755 | tree decl_b = base_reg_b->maybe_get_decl (); |
2756 | if (decl_b && TREE_CODE (decl_b) == SSA_NAME) |
2757 | return tristate::TS_FALSE; |
2758 | |
2759 | /* Try both ways, for symmetry. */ |
2760 | tristate ts_ab = eval_alias_1 (base_reg_a, base_reg_b); |
2761 | if (ts_ab.is_false ()) |
2762 | return tristate::TS_FALSE; |
  tristate ts_ba = eval_alias_1 (base_reg_b, base_reg_a);
2764 | if (ts_ba.is_false ()) |
2765 | return tristate::TS_FALSE; |
2766 | return tristate::TS_UNKNOWN; |
2767 | } |
2768 | |
2769 | /* Half of store::eval_alias; called twice for symmetry. */ |
2770 | |
2771 | tristate |
2772 | store::eval_alias_1 (const region *base_reg_a, |
2773 | const region *base_reg_b) const |
2774 | { |
2775 | /* If they're in different memory spaces, they can't alias. */ |
2776 | { |
2777 | enum memory_space memspace_a = base_reg_a->get_memory_space (); |
2778 | if (memspace_a != MEMSPACE_UNKNOWN) |
2779 | { |
2780 | enum memory_space memspace_b = base_reg_b->get_memory_space (); |
2781 | if (memspace_b != MEMSPACE_UNKNOWN |
2782 | && memspace_a != memspace_b) |
2783 | return tristate::TS_FALSE; |
2784 | } |
2785 | } |
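  /* For example (a hypothetical sketch): a frame-local "int i" and a
     heap allocation are in different memory spaces (MEMSPACE_STACK vs
     MEMSPACE_HEAP), so a write to one can never clobber the other. */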
2786 | |
2787 | if (const symbolic_region *sym_reg_a |
2788 | = base_reg_a->dyn_cast_symbolic_region ()) |
2789 | { |
2790 | const svalue *sval_a = sym_reg_a->get_pointer (); |
2791 | if (tree decl_b = base_reg_b->maybe_get_decl ()) |
2792 | { |
	  if (!may_be_aliased (decl_b))
	    return tristate::TS_FALSE;
	  if (sval_a->get_kind () == SK_INITIAL)
	    if (!is_global_var (decl_b))
2797 | { |
2798 | /* The initial value of a pointer can't point to a local. */ |
2799 | return tristate::TS_FALSE; |
2800 | } |
2801 | } |
2802 | if (sval_a->get_kind () == SK_INITIAL |
2803 | && base_reg_b->get_kind () == RK_HEAP_ALLOCATED) |
2804 | { |
2805 | /* The initial value of a pointer can't point to a |
2806 | region that was allocated on the heap after the beginning of the |
2807 | path. */ |
2808 | return tristate::TS_FALSE; |
2809 | } |
2810 | if (const widening_svalue *widening_sval_a |
2811 | = sval_a->dyn_cast_widening_svalue ()) |
2812 | { |
2813 | const svalue *base = widening_sval_a->get_base_svalue (); |
2814 | if (const region_svalue *region_sval |
2815 | = base->dyn_cast_region_svalue ()) |
2816 | { |
2817 | const region *pointee = region_sval->get_pointee (); |
	      /* If sval_a is WIDENING(&REGION, OP), and
2819 | B can't alias REGION, then B can't alias A either. |
2820 | For example, A might arise from |
2821 | for (ptr = ®ION; ...; ptr++) |
2822 | where sval_a is ptr in the 2nd iteration of the loop. |
2823 | We want to ensure that "*ptr" can only clobber things |
2824 | within REGION's base region. */ |
	      tristate ts = eval_alias (pointee->get_base_region (),
					base_reg_b);
2827 | if (ts.is_false ()) |
2828 | return tristate::TS_FALSE; |
2829 | } |
2830 | } |
2831 | } |
2832 | return tristate::TS_UNKNOWN; |
2833 | } |
2834 | |
2835 | /* Record all of the values in MAYBE_LIVE_VALUES as being possibly live. */ |
2836 | |
2837 | void |
2838 | store::on_maybe_live_values (const svalue_set &maybe_live_values) |
2839 | { |
2840 | for (auto sval : maybe_live_values) |
2841 | { |
2842 | if (const region_svalue *ptr_sval = sval->dyn_cast_region_svalue ()) |
2843 | { |
2844 | const region *base_reg = ptr_sval->get_pointee ()->get_base_region (); |
2845 | mark_as_escaped (base_reg); |
2846 | } |
2847 | } |
2848 | } |
2849 | |
2850 | /* Remove all bindings overlapping REG within this store. */ |
2851 | |
2852 | void |
2853 | store::clobber_region (store_manager *mgr, const region *reg) |
2854 | { |
2855 | const region *base_reg = reg->get_base_region (); |
  binding_cluster **slot = m_cluster_map.get (base_reg);
2857 | if (!slot) |
2858 | return; |
2859 | binding_cluster *cluster = *slot; |
2860 | cluster->clobber_region (mgr, reg); |
2861 | if (cluster->redundant_p ()) |
2862 | { |
2863 | delete cluster; |
      m_cluster_map.remove (base_reg);
2865 | } |
2866 | } |
2867 | |
2868 | /* Remove any bindings for REG within this store. */ |
2869 | |
2870 | void |
2871 | store::purge_region (store_manager *mgr, const region *reg) |
2872 | { |
2873 | const region *base_reg = reg->get_base_region (); |
  binding_cluster **slot = m_cluster_map.get (base_reg);
2875 | if (!slot) |
2876 | return; |
2877 | binding_cluster *cluster = *slot; |
2878 | cluster->purge_region (mgr, reg); |
2879 | if (cluster->redundant_p ()) |
2880 | { |
2881 | delete cluster; |
      m_cluster_map.remove (base_reg);
2883 | } |
2884 | } |
2885 | |
2886 | /* Fill REG with SVAL. */ |
2887 | |
2888 | void |
2889 | store::fill_region (store_manager *mgr, const region *reg, const svalue *sval) |
2890 | { |
2891 | /* Filling an empty region is a no-op. */ |
2892 | if (reg->empty_p ()) |
2893 | return; |
2894 | |
2895 | const region *base_reg = reg->get_base_region (); |
2896 | if (base_reg->symbolic_for_unknown_ptr_p () |
2897 | || !base_reg->tracked_p ()) |
2898 | return; |
2899 | binding_cluster *cluster = get_or_create_cluster (base_reg); |
2900 | cluster->fill_region (mgr, reg, sval); |
2901 | } |
2902 | |
2903 | /* Zero-fill REG. */ |
2904 | |
2905 | void |
2906 | store::zero_fill_region (store_manager *mgr, const region *reg) |
2907 | { |
2908 | region_model_manager *sval_mgr = mgr->get_svalue_manager (); |
  const svalue *zero_sval = sval_mgr->get_or_create_int_cst (char_type_node, 0);
  fill_region (mgr, reg, zero_sval);
2911 | } |
2912 | |
2913 | /* Mark REG as having unknown content. */ |
2914 | |
2915 | void |
2916 | store::mark_region_as_unknown (store_manager *mgr, const region *reg, |
2917 | uncertainty_t *uncertainty, |
2918 | svalue_set *maybe_live_values) |
2919 | { |
2920 | const region *base_reg = reg->get_base_region (); |
2921 | if (base_reg->symbolic_for_unknown_ptr_p () |
2922 | || !base_reg->tracked_p ()) |
2923 | return; |
2924 | binding_cluster *cluster = get_or_create_cluster (base_reg); |
  cluster->mark_region_as_unknown (mgr, reg, reg, uncertainty,
				   maybe_live_values);
2927 | } |
2928 | |
2929 | /* Purge state involving SVAL. */ |
2930 | |
2931 | void |
2932 | store::purge_state_involving (const svalue *sval, |
2933 | region_model_manager *sval_mgr) |
2934 | { |
2935 | auto_vec <const region *> base_regs_to_purge; |
2936 | for (auto iter : m_cluster_map) |
2937 | { |
2938 | const region *base_reg = iter.first; |
2939 | if (base_reg->involves_p (sval)) |
	base_regs_to_purge.safe_push (base_reg);
2941 | else |
2942 | { |
2943 | binding_cluster *cluster = iter.second; |
2944 | cluster->purge_state_involving (sval, sval_mgr); |
2945 | } |
2946 | } |
2947 | |
2948 | for (auto iter : base_regs_to_purge) |
    purge_cluster (iter);
2950 | } |
2951 | |
2952 | /* Get the cluster for BASE_REG, or NULL (const version). */ |
2953 | |
2954 | const binding_cluster * |
2955 | store::get_cluster (const region *base_reg) const |
2956 | { |
2957 | gcc_assert (base_reg); |
2958 | gcc_assert (base_reg->get_base_region () == base_reg); |
2959 | if (binding_cluster **slot |
      = const_cast <cluster_map_t &> (m_cluster_map).get (base_reg))
2961 | return *slot; |
2962 | else |
2963 | return NULL; |
2964 | } |
2965 | |
2966 | /* Get the cluster for BASE_REG, or NULL (non-const version). */ |
2967 | |
2968 | binding_cluster * |
2969 | store::get_cluster (const region *base_reg) |
2970 | { |
2971 | gcc_assert (base_reg); |
2972 | gcc_assert (base_reg->get_base_region () == base_reg); |
  if (binding_cluster **slot = m_cluster_map.get (base_reg))
2974 | return *slot; |
2975 | else |
2976 | return NULL; |
2977 | } |
2978 | |
/* Get the cluster for BASE_REG, creating it if it doesn't already exist. */
2980 | |
2981 | binding_cluster * |
2982 | store::get_or_create_cluster (const region *base_reg) |
2983 | { |
2984 | gcc_assert (base_reg); |
2985 | gcc_assert (base_reg->get_base_region () == base_reg); |
2986 | |
2987 | /* We shouldn't create clusters for dereferencing an UNKNOWN ptr. */ |
2988 | gcc_assert (!base_reg->symbolic_for_unknown_ptr_p ()); |
2989 | |
2990 | /* We shouldn't create clusters for base regions that aren't trackable. */ |
2991 | gcc_assert (base_reg->tracked_p ()); |
2992 | |
  if (binding_cluster **slot = m_cluster_map.get (base_reg))
2994 | return *slot; |
2995 | |
2996 | binding_cluster *cluster = new binding_cluster (base_reg); |
  m_cluster_map.put (base_reg, cluster);
2998 | |
2999 | return cluster; |
3000 | } |
3001 | |
3002 | /* Remove any cluster for BASE_REG, for use by |
3003 | region_model::unbind_region_and_descendents |
3004 | when popping stack frames and handling deleted heap regions. */ |
3005 | |
3006 | void |
3007 | store::purge_cluster (const region *base_reg) |
3008 | { |
3009 | gcc_assert (base_reg->get_base_region () == base_reg); |
  binding_cluster **slot = m_cluster_map.get (base_reg);
3011 | if (!slot) |
3012 | return; |
3013 | binding_cluster *cluster = *slot; |
3014 | delete cluster; |
  m_cluster_map.remove (base_reg);
3016 | } |
3017 | |
3018 | /* Attempt to merge STORE_A and STORE_B into OUT_STORE. |
3019 | Return true if successful, or false if the stores can't be merged. */ |
3020 | |
3021 | bool |
3022 | store::can_merge_p (const store *store_a, const store *store_b, |
3023 | store *out_store, store_manager *mgr, |
3024 | model_merger *merger) |
3025 | { |
3026 | if (store_a->m_called_unknown_fn || store_b->m_called_unknown_fn) |
3027 | out_store->m_called_unknown_fn = true; |
3028 | |
3029 | /* Get the union of all base regions for STORE_A and STORE_B. */ |
3030 | hash_set<const region *> base_regions; |
3031 | for (cluster_map_t::iterator iter_a = store_a->m_cluster_map.begin (); |
3032 | iter_a != store_a->m_cluster_map.end (); ++iter_a) |
3033 | { |
3034 | const region *base_reg_a = (*iter_a).first; |
      base_regions.add (base_reg_a);
3036 | } |
3037 | for (cluster_map_t::iterator iter_b = store_b->m_cluster_map.begin (); |
3038 | iter_b != store_b->m_cluster_map.end (); ++iter_b) |
3039 | { |
3040 | const region *base_reg_b = (*iter_b).first; |
      base_regions.add (base_reg_b);
3042 | } |
3043 | |
3044 | /* Sort the base regions before considering them. This ought not to |
3045 | affect the results, but can affect which types UNKNOWN_REGIONs are |
3046 | created for in a run; sorting them thus avoids minor differences |
3047 | in logfiles. */ |
3048 | auto_vec<const region *> vec_base_regions (base_regions.elements ()); |
3049 | for (hash_set<const region *>::iterator iter = base_regions.begin (); |
3050 | iter != base_regions.end (); ++iter) |
    vec_base_regions.quick_push (*iter);
3052 | vec_base_regions.qsort (region::cmp_ptr_ptr); |
3053 | unsigned i; |
3054 | const region *base_reg; |
3055 | FOR_EACH_VEC_ELT (vec_base_regions, i, base_reg) |
3056 | { |
3057 | const binding_cluster *cluster_a = store_a->get_cluster (base_reg); |
3058 | const binding_cluster *cluster_b = store_b->get_cluster (base_reg); |
3059 | /* At least one of cluster_a and cluster_b must be non-NULL. */ |
3060 | binding_cluster *out_cluster |
3061 | = out_store->get_or_create_cluster (base_reg); |
3062 | if (!binding_cluster::can_merge_p (cluster_a, cluster_b, |
3063 | out_cluster, out_store, mgr, merger)) |
3064 | return false; |
3065 | } |
3066 | return true; |
3067 | } |
3068 | |
3069 | /* Mark the cluster for BASE_REG as having escaped. |
3070 | For use when handling an unrecognized function call, and |
3071 | for params to "top-level" calls. |
3072 | Further unknown function calls could touch it, even if the cluster |
3073 | isn't reachable from args of those calls. */ |
3074 | |
3075 | void |
3076 | store::mark_as_escaped (const region *base_reg) |
3077 | { |
3078 | gcc_assert (base_reg); |
3079 | gcc_assert (base_reg->get_base_region () == base_reg); |
3080 | |
3081 | if (base_reg->symbolic_for_unknown_ptr_p () |
3082 | || !base_reg->tracked_p ()) |
3083 | return; |
3084 | |
3085 | binding_cluster *cluster = get_or_create_cluster (base_reg); |
3086 | cluster->mark_as_escaped (); |
3087 | } |
3088 | |
3089 | /* Handle an unknown fncall by updating any clusters that have escaped |
3090 | (either in this fncall, or in a prior one). */ |
3091 | |
3092 | void |
3093 | store::on_unknown_fncall (const gcall *call, store_manager *mgr, |
3094 | const conjured_purge &p) |
3095 | { |
3096 | m_called_unknown_fn = true; |
3097 | |
3098 | for (cluster_map_t::iterator iter = m_cluster_map.begin (); |
3099 | iter != m_cluster_map.end (); ++iter) |
3100 | (*iter).second->on_unknown_fncall (call, mgr, p); |
3101 | } |
3102 | |
3103 | /* Return true if a non-const pointer to BASE_REG (or something within it) |
3104 | has escaped to code outside of the TU being analyzed. */ |
3105 | |
3106 | bool |
3107 | store::escaped_p (const region *base_reg) const |
3108 | { |
3109 | gcc_assert (base_reg); |
3110 | gcc_assert (base_reg->get_base_region () == base_reg); |
3111 | |
3112 | /* "errno" can always be modified by external code. */ |
3113 | if (base_reg->get_kind () == RK_ERRNO) |
3114 | return true; |
3115 | |
3116 | if (binding_cluster **cluster_slot |
      = const_cast <cluster_map_t &>(m_cluster_map).get (base_reg))
3118 | return (*cluster_slot)->escaped_p (); |
3119 | return false; |
3120 | } |
3121 | |
3122 | /* Populate OUT_PVS with a list of path_vars for describing SVAL based on |
3123 | this store, using VISITED to ensure the traversal terminates. */ |
3124 | |
3125 | void |
3126 | store::get_representative_path_vars (const region_model *model, |
3127 | svalue_set *visited, |
3128 | const svalue *sval, |
3129 | auto_vec<path_var> *out_pvs) const |
3130 | { |
3131 | gcc_assert (sval); |
3132 | |
3133 | /* Find all bindings that reference SVAL. */ |
3134 | for (cluster_map_t::iterator iter = m_cluster_map.begin (); |
3135 | iter != m_cluster_map.end (); ++iter) |
3136 | { |
3137 | const region *base_reg = (*iter).first; |
3138 | binding_cluster *cluster = (*iter).second; |
3139 | cluster->get_representative_path_vars (model, visited, base_reg, sval, |
3140 | out_pvs); |
3141 | } |
3142 | |
3143 | if (const initial_svalue *init_sval = sval->dyn_cast_initial_svalue ()) |
3144 | { |
3145 | const region *reg = init_sval->get_region (); |
3146 | if (path_var pv = model->get_representative_path_var (reg, |
3147 | visited)) |
      out_pvs->safe_push (pv);
3149 | } |
3150 | } |
3151 | |
3152 | /* Remove all bindings overlapping REG within this store, removing |
3153 | any clusters that become redundant. |
3154 | |
3155 | If UNCERTAINTY is non-NULL, use it to record any svalues that |
3156 | were removed, as being maybe-bound. */ |
3157 | |
3158 | void |
3159 | store::remove_overlapping_bindings (store_manager *mgr, const region *reg, |
3160 | uncertainty_t *uncertainty) |
3161 | { |
3162 | const region *base_reg = reg->get_base_region (); |
  if (binding_cluster **cluster_slot = m_cluster_map.get (base_reg))
3164 | { |
3165 | binding_cluster *cluster = *cluster_slot; |
3166 | if (reg == base_reg && !escaped_p (base_reg)) |
3167 | { |
3168 | /* Remove whole cluster. */ |
	  m_cluster_map.remove (base_reg);
3170 | delete cluster; |
3171 | return; |
3172 | } |
3173 | /* Pass NULL for the maybe_live_values here, as we don't want to |
3174 | record the old svalues as being maybe-bound. */ |
3175 | cluster->remove_overlapping_bindings (mgr, reg, uncertainty, NULL); |
3176 | } |
3177 | } |
3178 | |
3179 | /* Subclass of visitor that accumulates a hash_set of the regions that |
3180 | were visited. */ |
3181 | |
3182 | struct region_finder : public visitor |
3183 | { |
3184 | void visit_region (const region *reg) final override |
3185 | { |
    m_regs.add (reg);
3187 | } |
3188 | |
3189 | hash_set<const region *> m_regs; |
3190 | }; |
3191 | |
3192 | /* Canonicalize this store, to maximize the chance of equality between |
3193 | instances. */ |
3194 | |
3195 | void |
3196 | store::canonicalize (store_manager *mgr) |
3197 | { |
3198 | /* If we have e.g.: |
3199 | cluster for: HEAP_ALLOCATED_REGION(543) |
3200 | ESCAPED |
3201 | TOUCHED |
3202 | where the heap region is empty and unreferenced, then purge that |
3203 | cluster, to avoid unbounded state chains involving these. */ |
3204 | |
3205 | /* Find regions that are referenced by bound values in the store. */ |
3206 | region_finder s; |
3207 | for (cluster_map_t::iterator iter = m_cluster_map.begin (); |
3208 | iter != m_cluster_map.end (); ++iter) |
3209 | { |
3210 | binding_cluster *cluster = (*iter).second; |
3211 | for (binding_cluster::iterator_t bind_iter = cluster->m_map.begin (); |
3212 | bind_iter != cluster->m_map.end (); ++bind_iter) |
3213 | (*bind_iter).second->accept (v: &s); |
3214 | } |
3215 | |
3216 | /* Locate heap-allocated regions that have empty bindings that weren't |
3217 | found above. */ |
3218 | hash_set<const region *> purgeable_regions; |
3219 | for (cluster_map_t::iterator iter = m_cluster_map.begin (); |
3220 | iter != m_cluster_map.end (); ++iter) |
3221 | { |
3222 | const region *base_reg = (*iter).first; |
3223 | binding_cluster *cluster = (*iter).second; |
3224 | if (base_reg->get_kind () == RK_HEAP_ALLOCATED) |
3225 | { |
3226 | /* Don't purge a heap-allocated region that's been marked as |
3227 | escaping, since this could be recording that a ptr to it |
3228 | was written to an unknown symbolic region along this |
3229 | path, and so we don't know whether it's referenced or |
3230 | not, and hence should report it as leaking |
3231 | (PR analyzer/106473). */ |
3232 | if (cluster->escaped_p ()) |
3233 | continue; |
3234 | |
3235 | if (cluster->empty_p ()) |
3236 | if (!s.m_regs.contains (k: base_reg)) |
3237 | purgeable_regions.add (k: base_reg); |
3238 | |
3239 | /* Also cover the UNKNOWN case. */ |
3240 | if (const svalue *sval = cluster->maybe_get_simple_value (mgr)) |
3241 | if (sval->get_kind () == SK_UNKNOWN) |
3242 | if (!s.m_regs.contains (k: base_reg)) |
3243 | purgeable_regions.add (k: base_reg); |
3244 | } |
3245 | } |
3246 | |
3247 | /* Purge them. */ |
3248 | for (hash_set<const region *>::iterator iter = purgeable_regions.begin (); |
3249 | iter != purgeable_regions.end (); ++iter) |
3250 | { |
3251 | const region *base_reg = *iter; |
3252 | purge_cluster (base_reg); |
3253 | } |
3254 | } |
3255 | |
3256 | /* Subroutine for use by exploded_path::feasible_p. |
3257 | |
3258 | We need to deal with state differences between: |
3259 | (a) when the exploded_graph is being initially constructed and |
3260 | (b) when replaying the state changes along a specific path in |
3261 | in exploded_path::feasible_p. |
3262 | |
3263 | In (a), state merging happens, so when exploring a loop |
3264 | for (i = 0; i < 1024; i++) |
3265 | on successive iterations we have i == 0, then i == WIDENING. |
3266 | |
3267 | In (b), no state merging happens, so naively replaying the path |
3268 | that goes twice through the loop then exits it |
3269 | would lead to i == 0, then i == 1, and then a (i >= 1024) eedge |
3270 | that exits the loop, which would be found to be infeasible as i == 1, |
3271 | and the path would be rejected. |
3272 | |
3273 | We need to fix up state during replay. This subroutine is |
3274 | called whenever we enter a supernode that we've already |
3275 | visited along this exploded_path, passing in OTHER_STORE |
3276 | from the destination enode's state. |
3277 | |
3278 | Find bindings to widening values in OTHER_STORE. |
3279 | For all that are found, update the binding in this store to UNKNOWN. */ |
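
/* For instance (illustrative): if OTHER_STORE binds "i" to a widening
   svalue, the binding for "i" in this store is replaced by an unknown
   svalue of the same type, so that the loop-exiting (i >= 1024) eedge
   described above is no longer rejected as infeasible during replay.  */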

void
store::loop_replay_fixup (const store *other_store,
                          region_model_manager *mgr)
{
  gcc_assert (other_store);
  for (cluster_map_t::iterator iter = other_store->m_cluster_map.begin ();
       iter != other_store->m_cluster_map.end (); ++iter)
    {
      const region *base_reg = (*iter).first;
      binding_cluster *cluster = (*iter).second;
      for (binding_cluster::iterator_t bind_iter = cluster->m_map.begin ();
           bind_iter != cluster->m_map.end (); ++bind_iter)
        {
          const binding_key *key = (*bind_iter).first;
          const svalue *sval = (*bind_iter).second;
          if (sval->get_kind () == SK_WIDENING)
            {
              binding_cluster *this_cluster
                = get_or_create_cluster (base_reg);
              const svalue *unknown
                = mgr->get_or_create_unknown_svalue (sval->get_type ());
              this_cluster->bind_key (key, unknown);
            }
        }
    }
}

/* Use R to replay the bindings from SUMMARY into this object.  */

void
store::replay_call_summary (call_summary_replay &r,
                            const store &summary)
{
  if (summary.m_called_unknown_fn)
    {
      /* A call to an external function occurred in the summary.
         Hence we need to invalidate our knowledge of globals,
         escaped regions, etc.  */
      on_unknown_fncall (r.get_call_stmt (),
                         r.get_store_manager (),
                         conjured_purge (r.get_caller_model (),
                                         r.get_ctxt ()));
    }
3324 | |
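  /* Replay the clusters in a deterministic order, sorting their base
     regions with region::cmp_ptr_ptr rather than relying on hash-map
     iteration order.  */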
  auto_vec<const region *> keys (summary.m_cluster_map.elements ());
  for (auto kv : summary.m_cluster_map)
    keys.quick_push (kv.first);
  keys.qsort (region::cmp_ptr_ptr);
  for (auto base_reg : keys)
    replay_call_summary_cluster (r, summary, base_reg);
}

/* Use R and SUMMARY to replay the bindings of the cluster for
   SUMMARY_BASE_REG in SUMMARY into this object.  */

void
store::replay_call_summary_cluster (call_summary_replay &r,
                                    const store &summary,
                                    const region *summary_base_reg)
{
  const call_details &cd = r.get_call_details ();
  region_model_manager *reg_mgr = r.get_manager ();
  store_manager *mgr = reg_mgr->get_store_manager ();
  const binding_cluster *summary_cluster
    = summary.get_cluster (summary_base_reg);

  /* Handle "ESCAPED" and "TOUCHED" flags.  */
  if (summary_cluster->escaped_p () || summary_cluster->touched_p ())
    if (const region *caller_reg
          = r.convert_region_from_summary (summary_base_reg))
      {
        const region *caller_base_reg = caller_reg->get_base_region ();
        if (caller_base_reg->tracked_p ()
            && !caller_base_reg->symbolic_for_unknown_ptr_p ())
          {
            binding_cluster *caller_cluster
              = get_or_create_cluster (caller_base_reg);
            if (summary_cluster->escaped_p ())
              caller_cluster->mark_as_escaped ();
            if (summary_cluster->touched_p ())
              caller_cluster->m_touched = true;
          }
      }

  switch (summary_base_reg->get_kind ())
    {
    /* Top-level regions.  */
    case RK_FRAME:
    case RK_GLOBALS:
    case RK_CODE:
    case RK_STACK:
    case RK_HEAP:
    case RK_THREAD_LOCAL:
    case RK_ROOT:
    /* Child regions.  */
    case RK_FIELD:
    case RK_ELEMENT:
    case RK_OFFSET:
    case RK_SIZED:
    case RK_CAST:
    case RK_BIT_RANGE:
    /* Other regions.  */
    case RK_VAR_ARG:
    case RK_UNKNOWN:
      /* These should never be the base region of a binding cluster.  */
      gcc_unreachable ();
      break;

    case RK_FUNCTION:
    case RK_LABEL:
    case RK_STRING:
      /* These can be marked as escaping.  */
      break;

    case RK_SYMBOLIC:
      {
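        /* The summary cluster is keyed by a symbolic region.  Convert
           its pointer value back into the caller's context, then replay
           the summary's binding at whatever region that pointer refers
           to in the caller.  */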
        const symbolic_region *summary_symbolic_reg
          = as_a <const symbolic_region *> (summary_base_reg);
        const svalue *summary_ptr_sval = summary_symbolic_reg->get_pointer ();
        const svalue *caller_ptr_sval
          = r.convert_svalue_from_summary (summary_ptr_sval);
        if (!caller_ptr_sval)
          return;
        const region *caller_dest_reg
          = cd.get_model ()->deref_rvalue (caller_ptr_sval,
                                           NULL_TREE,
                                           cd.get_ctxt ());
        const svalue *summary_sval
          = summary.get_any_binding (mgr, summary_base_reg);
        if (!summary_sval)
          return;
        const svalue *caller_sval
          = r.convert_svalue_from_summary (summary_sval);
        if (!caller_sval)
          caller_sval =
            reg_mgr->get_or_create_unknown_svalue (summary_sval->get_type ());
        set_value (mgr, caller_dest_reg,
                   caller_sval, NULL /* uncertainty_t * */);
      }
      break;

    case RK_HEAP_ALLOCATED:
    case RK_DECL:
    case RK_ERRNO:
    case RK_PRIVATE:
      {
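        /* Replay the summary cluster's binding at the corresponding
           region in the caller, falling back to a compound svalue
           capturing all of the cluster's bindings if there is no single
           binding for the base region.  */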
        const region *caller_dest_reg
          = r.convert_region_from_summary (summary_base_reg);
        if (!caller_dest_reg)
          return;
        const svalue *summary_sval
          = summary.get_any_binding (mgr, summary_base_reg);
        if (!summary_sval)
          summary_sval = reg_mgr->get_or_create_compound_svalue
            (summary_base_reg->get_type (),
             summary_cluster->get_map ());
        const svalue *caller_sval
          = r.convert_svalue_from_summary (summary_sval);
        if (!caller_sval)
          caller_sval =
            reg_mgr->get_or_create_unknown_svalue (summary_sval->get_type ());
        set_value (mgr, caller_dest_reg,
                   caller_sval, NULL /* uncertainty_t * */);
      }
      break;

    case RK_ALLOCA:
      /* Ignore bindings of alloca regions in the summary.  */
      break;
    }
}

#if CHECKING_P

namespace selftest {

/* Verify that bit_range::intersects_p works as expected.  */

static void
test_bit_range_intersects_p ()
{
  bit_range b0 (0, 1);
  bit_range b1 (1, 1);
  bit_range b2 (2, 1);
  bit_range b3 (3, 1);
  bit_range b4 (4, 1);
  bit_range b5 (5, 1);
  bit_range b6 (6, 1);
  bit_range b7 (7, 1);
  bit_range b1_to_6 (1, 6);
  bit_range b0_to_7 (0, 8);
  bit_range b3_to_5 (3, 3);
  bit_range b6_to_7 (6, 2);

  /* Self-intersection is true.  */
  ASSERT_TRUE (b0.intersects_p (b0));
  ASSERT_TRUE (b7.intersects_p (b7));
  ASSERT_TRUE (b1_to_6.intersects_p (b1_to_6));
  ASSERT_TRUE (b0_to_7.intersects_p (b0_to_7));

  ASSERT_FALSE (b0.intersects_p (b1));
  ASSERT_FALSE (b1.intersects_p (b0));
  ASSERT_FALSE (b0.intersects_p (b7));
  ASSERT_FALSE (b7.intersects_p (b0));

  ASSERT_TRUE (b0_to_7.intersects_p (b0));
  ASSERT_TRUE (b0_to_7.intersects_p (b7));
  ASSERT_TRUE (b0.intersects_p (b0_to_7));
  ASSERT_TRUE (b7.intersects_p (b0_to_7));

  ASSERT_FALSE (b0.intersects_p (b1_to_6));
  ASSERT_FALSE (b1_to_6.intersects_p (b0));
  ASSERT_TRUE (b1.intersects_p (b1_to_6));
  ASSERT_TRUE (b1_to_6.intersects_p (b1));
  ASSERT_TRUE (b1_to_6.intersects_p (b6));
  ASSERT_FALSE (b1_to_6.intersects_p (b7));

  ASSERT_TRUE (b1_to_6.intersects_p (b0_to_7));
  ASSERT_TRUE (b0_to_7.intersects_p (b1_to_6));

  ASSERT_FALSE (b3_to_5.intersects_p (b6_to_7));
  ASSERT_FALSE (b6_to_7.intersects_p (b3_to_5));
3503 | |
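  /* Check the overload of intersects_p that also computes the
     overlapping subrange, expressed relative to the start of each
     bit_range.  */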
  bit_range r1 (0, 0);
  bit_range r2 (0, 0);
  ASSERT_TRUE (b1_to_6.intersects_p (b0_to_7, &r1, &r2));
  ASSERT_EQ (r1.get_start_bit_offset (), 0);
  ASSERT_EQ (r1.m_size_in_bits, 6);
  ASSERT_EQ (r2.get_start_bit_offset (), 1);
  ASSERT_EQ (r2.m_size_in_bits, 6);

  ASSERT_TRUE (b0_to_7.intersects_p (b1_to_6, &r1, &r2));
  ASSERT_EQ (r1.get_start_bit_offset (), 1);
  ASSERT_EQ (r1.m_size_in_bits, 6);
  ASSERT_EQ (r2.get_start_bit_offset (), 0);
  ASSERT_EQ (r2.m_size_in_bits, 6);
}

/* Implementation detail of ASSERT_BIT_RANGE_FROM_MASK_EQ.  */

static void
assert_bit_range_from_mask_eq (const location &loc,
                               unsigned HOST_WIDE_INT mask,
                               const bit_range &expected)
{
  bit_range actual (0, 0);
  bool ok = bit_range::from_mask (mask, &actual);
  ASSERT_TRUE_AT (loc, ok);
  ASSERT_EQ_AT (loc, actual, expected);
}

/* Assert that bit_range::from_mask (MASK) returns true, and writes
   out EXPECTED_BIT_RANGE.  */

#define ASSERT_BIT_RANGE_FROM_MASK_EQ(MASK, EXPECTED_BIT_RANGE) \
  SELFTEST_BEGIN_STMT \
  assert_bit_range_from_mask_eq (SELFTEST_LOCATION, MASK, \
                                 EXPECTED_BIT_RANGE); \
  SELFTEST_END_STMT

/* Implementation detail of ASSERT_NO_BIT_RANGE_FROM_MASK.  */

static void
assert_no_bit_range_from_mask_eq (const location &loc,
                                  unsigned HOST_WIDE_INT mask)
{
  bit_range actual (0, 0);
  bool ok = bit_range::from_mask (mask, &actual);
  ASSERT_FALSE_AT (loc, ok);
}

/* Assert that bit_range::from_mask (MASK) returns false.  */

#define ASSERT_NO_BIT_RANGE_FROM_MASK(MASK) \
  SELFTEST_BEGIN_STMT \
  assert_no_bit_range_from_mask_eq (SELFTEST_LOCATION, MASK); \
  SELFTEST_END_STMT

/* Verify that bit_range::from_mask works as expected.  */

static void
test_bit_range_from_mask ()
{
  /* Should fail on zero.  */
  ASSERT_NO_BIT_RANGE_FROM_MASK (0);

  /* Verify 1-bit masks.  */
  ASSERT_BIT_RANGE_FROM_MASK_EQ (1, bit_range (0, 1));
  ASSERT_BIT_RANGE_FROM_MASK_EQ (2, bit_range (1, 1));
  ASSERT_BIT_RANGE_FROM_MASK_EQ (4, bit_range (2, 1));
  ASSERT_BIT_RANGE_FROM_MASK_EQ (8, bit_range (3, 1));
  ASSERT_BIT_RANGE_FROM_MASK_EQ (16, bit_range (4, 1));
  ASSERT_BIT_RANGE_FROM_MASK_EQ (32, bit_range (5, 1));
  ASSERT_BIT_RANGE_FROM_MASK_EQ (64, bit_range (6, 1));
  ASSERT_BIT_RANGE_FROM_MASK_EQ (128, bit_range (7, 1));

  /* Verify N-bit masks starting at bit 0.  */
  ASSERT_BIT_RANGE_FROM_MASK_EQ (3, bit_range (0, 2));
  ASSERT_BIT_RANGE_FROM_MASK_EQ (7, bit_range (0, 3));
  ASSERT_BIT_RANGE_FROM_MASK_EQ (15, bit_range (0, 4));
  ASSERT_BIT_RANGE_FROM_MASK_EQ (31, bit_range (0, 5));
  ASSERT_BIT_RANGE_FROM_MASK_EQ (63, bit_range (0, 6));
  ASSERT_BIT_RANGE_FROM_MASK_EQ (127, bit_range (0, 7));
  ASSERT_BIT_RANGE_FROM_MASK_EQ (255, bit_range (0, 8));
  ASSERT_BIT_RANGE_FROM_MASK_EQ (0xffff, bit_range (0, 16));

  /* Various other tests.  */
  ASSERT_BIT_RANGE_FROM_MASK_EQ (0x30, bit_range (4, 2));
  ASSERT_BIT_RANGE_FROM_MASK_EQ (0x700, bit_range (8, 3));
  ASSERT_BIT_RANGE_FROM_MASK_EQ (0x600, bit_range (9, 2));

  /* Multiple ranges of set bits should fail.  */
  ASSERT_NO_BIT_RANGE_FROM_MASK (0x101);
  ASSERT_NO_BIT_RANGE_FROM_MASK (0xf0f0f0f0);
}

/* Implementation detail of ASSERT_OVERLAP.  */

static void
assert_overlap (const location &loc,
                const concrete_binding *b1,
                const concrete_binding *b2)
{
  ASSERT_TRUE_AT (loc, b1->overlaps_p (*b2));
  ASSERT_TRUE_AT (loc, b2->overlaps_p (*b1));
}

/* Implementation detail of ASSERT_DISJOINT.  */

static void
assert_disjoint (const location &loc,
                 const concrete_binding *b1,
                 const concrete_binding *b2)
{
  ASSERT_FALSE_AT (loc, b1->overlaps_p (*b2));
  ASSERT_FALSE_AT (loc, b2->overlaps_p (*b1));
}

/* Assert that B1 and B2 overlap, checking both ways.  */

#define ASSERT_OVERLAP(B1, B2) \
  SELFTEST_BEGIN_STMT \
  assert_overlap (SELFTEST_LOCATION, B1, B2); \
  SELFTEST_END_STMT

/* Assert that B1 and B2 do not overlap, checking both ways.  */

#define ASSERT_DISJOINT(B1, B2) \
  SELFTEST_BEGIN_STMT \
  assert_disjoint (SELFTEST_LOCATION, B1, B2); \
  SELFTEST_END_STMT

/* Verify that concrete_binding::overlaps_p works as expected.  */

static void
test_binding_key_overlap ()
{
  store_manager mgr (NULL);

  /* Various 8-bit bindings.  */
  const concrete_binding *cb_0_7 = mgr.get_concrete_binding (0, 8);
  const concrete_binding *cb_8_15 = mgr.get_concrete_binding (8, 8);
  const concrete_binding *cb_16_23 = mgr.get_concrete_binding (16, 8);
  const concrete_binding *cb_24_31 = mgr.get_concrete_binding (24, 8);

  /* 16-bit bindings.  */
  const concrete_binding *cb_0_15 = mgr.get_concrete_binding (0, 16);
  const concrete_binding *cb_8_23 = mgr.get_concrete_binding (8, 16);
  const concrete_binding *cb_16_31 = mgr.get_concrete_binding (16, 16);

  /* 32-bit binding.  */
  const concrete_binding *cb_0_31 = mgr.get_concrete_binding (0, 32);

  /* Everything should self-overlap.  */
  ASSERT_OVERLAP (cb_0_7, cb_0_7);
  ASSERT_OVERLAP (cb_8_15, cb_8_15);
  ASSERT_OVERLAP (cb_16_23, cb_16_23);
  ASSERT_OVERLAP (cb_24_31, cb_24_31);
  ASSERT_OVERLAP (cb_0_15, cb_0_15);
  ASSERT_OVERLAP (cb_8_23, cb_8_23);
  ASSERT_OVERLAP (cb_16_31, cb_16_31);
  ASSERT_OVERLAP (cb_0_31, cb_0_31);

  /* Verify the 8-bit bindings that don't overlap each other.  */
  ASSERT_DISJOINT (cb_0_7, cb_8_15);
  ASSERT_DISJOINT (cb_8_15, cb_16_23);

  /* Check for overlap of differently-sized bindings.  */
  ASSERT_OVERLAP (cb_0_7, cb_0_31);
  /* ...and with differing start points.  */
  ASSERT_OVERLAP (cb_8_15, cb_0_31);
  ASSERT_DISJOINT (cb_8_15, cb_16_31);
  ASSERT_OVERLAP (cb_16_23, cb_0_31);
  ASSERT_OVERLAP (cb_16_31, cb_0_31);

  ASSERT_DISJOINT (cb_0_7, cb_8_23);
  ASSERT_OVERLAP (cb_8_23, cb_16_23);
  ASSERT_OVERLAP (cb_8_23, cb_16_31);
  ASSERT_DISJOINT (cb_8_23, cb_24_31);
}

/* Run all of the selftests within this file.  */

void
analyzer_store_cc_tests ()
{
  test_bit_range_intersects_p ();
  test_bit_range_from_mask ();
  test_binding_key_overlap ();
}

} // namespace selftest

#endif /* CHECKING_P */

} // namespace ana

#endif /* #if ENABLE_ANALYZER */