/* Classes for saving, deduplicating, and emitting analyzer diagnostics.
   Copyright (C) 2019-2023 Free Software Foundation, Inc.
   Contributed by David Malcolm <dmalcolm@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
20
21#include "config.h"
22#define INCLUDE_MEMORY
23#include "system.h"
24#include "coretypes.h"
25#include "tree.h"
26#include "input.h"
27#include "pretty-print.h"
28#include "gcc-rich-location.h"
29#include "gimple-pretty-print.h"
30#include "function.h"
31#include "diagnostic-core.h"
32#include "diagnostic-event-id.h"
33#include "diagnostic-path.h"
34#include "bitmap.h"
35#include "ordered-hash-map.h"
36#include "analyzer/analyzer.h"
37#include "analyzer/analyzer-logging.h"
38#include "analyzer/sm.h"
39#include "analyzer/pending-diagnostic.h"
40#include "analyzer/diagnostic-manager.h"
41#include "analyzer/call-string.h"
42#include "analyzer/program-point.h"
43#include "analyzer/store.h"
44#include "analyzer/region-model.h"
45#include "analyzer/constraint-manager.h"
46#include "cfg.h"
47#include "basic-block.h"
48#include "gimple.h"
49#include "gimple-iterator.h"
50#include "inlining-iterator.h"
51#include "cgraph.h"
52#include "digraph.h"
53#include "analyzer/supergraph.h"
54#include "analyzer/program-state.h"
55#include "analyzer/exploded-graph.h"
56#include "analyzer/trimmed-graph.h"
57#include "analyzer/feasible-graph.h"
58#include "analyzer/checker-path.h"
59#include "analyzer/reachability.h"
60#include "make-unique.h"
61
62#if ENABLE_ANALYZER
63
64namespace ana {
65
66class feasible_worklist;
67
/* State for finding the shortest feasible exploded_path for a
   saved_diagnostic.
   This is shared between all diagnostics, so that we avoid repeating work.  */

class epath_finder
{
public:
  /* EG is the exploded graph to search; it must outlive this object.  */
  epath_finder (const exploded_graph &eg)
  : m_eg (eg),
    m_sep (NULL)
  {
    /* This is shared by all diagnostics, but only needed if
       !flag_analyzer_feasibility.  */
    if (!flag_analyzer_feasibility)
      m_sep = new shortest_exploded_paths (eg, eg.get_origin (),
					   SPS_FROM_GIVEN_ORIGIN);
  }

  ~epath_finder () { delete m_sep; }

  logger *get_logger () const { return m_eg.get_logger (); }

  /* Find the "best" exploded_path from the origin to TARGET_ENODE
     (the shortest feasible path, if feasibility checking is enabled);
     see the comment on the definition for the full contract.  */
  std::unique_ptr<exploded_path>
  get_best_epath (const exploded_node *target_enode,
		  const gimple *target_stmt,
		  const pending_diagnostic &pd,
		  const char *desc, unsigned diag_idx,
		  std::unique_ptr<feasibility_problem> *out_problem);

private:
  DISABLE_COPY_AND_ASSIGN(epath_finder);

  /* Worker for get_best_epath when feasibility checking is enabled:
     incrementally build a feasible_graph until TARGET_ENODE is reached
     or a limit is hit.  */
  std::unique_ptr<exploded_path>
  explore_feasible_paths (const exploded_node *target_enode,
			  const gimple *target_stmt,
			  const pending_diagnostic &pd,
			  const char *desc, unsigned diag_idx);
  /* Process one item from WORKLIST; return true to keep iterating.  */
  bool
  process_worklist_item (feasible_worklist *worklist,
			 const trimmed_graph &tg,
			 feasible_graph *fg,
			 const exploded_node *target_enode,
			 const gimple *target_stmt,
			 const pending_diagnostic &pd,
			 unsigned diag_idx,
			 std::unique_ptr<exploded_path> *out_best_path) const;
  /* Debug-dump helpers, used when -fdump-analyzer-feasibility.  */
  void dump_trimmed_graph (const exploded_node *target_enode,
			   const char *desc, unsigned diag_idx,
			   const trimmed_graph &tg,
			   const shortest_paths<eg_traits, exploded_path> &sep);
  void dump_feasible_graph (const exploded_node *target_enode,
			    const char *desc, unsigned diag_idx,
			    const feasible_graph &fg);
  void dump_feasible_path (const exploded_node *target_enode,
			   unsigned diag_idx,
			   const feasible_graph &fg,
			   const feasible_node &fnode) const;

  /* The graph being searched.  */
  const exploded_graph &m_eg;
  /* Lazily-created shortest-paths-from-origin data; only non-NULL when
     !flag_analyzer_feasibility.  Owned by this object.  */
  shortest_exploded_paths *m_sep;
};
129
130/* class epath_finder. */
131
132/* Get the "best" exploded_path for reaching ENODE from the origin,
133 returning ownership of it to the caller.
134
135 If TARGET_STMT is non-NULL, then check for reaching that stmt
136 within ENODE.
137
138 Ideally we want to report the shortest feasible path.
139 Return NULL if we could not find a feasible path
140 (when flag_analyzer_feasibility is true).
141
142 If flag_analyzer_feasibility is false, then simply return the
143 shortest path.
144
145 Use DESC and DIAG_IDX when logging.
146
147 Write any feasibility_problem to *OUT_PROBLEM. */
148
149std::unique_ptr<exploded_path>
150epath_finder::get_best_epath (const exploded_node *enode,
151 const gimple *target_stmt,
152 const pending_diagnostic &pd,
153 const char *desc, unsigned diag_idx,
154 std::unique_ptr<feasibility_problem> *out_problem)
155{
156 logger *logger = get_logger ();
157 LOG_SCOPE (logger);
158
159 unsigned snode_idx = enode->get_supernode ()->m_index;
160 if (logger)
161 logger->log (fmt: "considering %qs at EN: %i, SN: %i (sd: %i)",
162 desc, enode->m_index, snode_idx, diag_idx);
163
164 /* State-merging means that not every path in the egraph corresponds
165 to a feasible one w.r.t. states.
166
167 We want to find the shortest feasible path from the origin to ENODE
168 in the egraph. */
169
170 if (flag_analyzer_feasibility)
171 {
172 /* Attempt to find the shortest feasible path using feasible_graph. */
173 if (logger)
174 logger->log (fmt: "trying to find shortest feasible path");
175 if (std::unique_ptr<exploded_path> epath
176 = explore_feasible_paths (target_enode: enode, target_stmt, pd, desc, diag_idx))
177 {
178 if (logger)
179 logger->log (fmt: "accepting %qs at EN: %i, SN: %i (sd: %i)"
180 " with feasible path (length: %i)",
181 desc, enode->m_index, snode_idx, diag_idx,
182 epath->length ());
183 return epath;
184 }
185 else
186 {
187 if (logger)
188 logger->log (fmt: "rejecting %qs at EN: %i, SN: %i (sd: %i)"
189 " due to not finding feasible path",
190 desc, enode->m_index, snode_idx, diag_idx);
191 return NULL;
192 }
193 }
194 else
195 {
196 /* As a crude approximation to shortest feasible path, simply find
197 the shortest path, and note whether it is feasible.
198 There could be longer feasible paths within the egraph, so this
199 approach would lead to diagnostics being falsely rejected
200 (PR analyzer/96374). */
201 if (logger)
202 logger->log (fmt: "trying to find shortest path ignoring feasibility");
203 gcc_assert (m_sep);
204 std::unique_ptr<exploded_path> epath
205 = make_unique<exploded_path> (args: m_sep->get_shortest_path (other_node: enode));
206 if (epath->feasible_p (logger, out: out_problem, eng: m_eg.get_engine (), eg: &m_eg))
207 {
208 if (logger)
209 logger->log (fmt: "accepting %qs at EN: %i, SN: %i (sn: %i)"
210 " with feasible path (length: %i)",
211 desc, enode->m_index, snode_idx, diag_idx,
212 epath->length ());
213 }
214 else
215 {
216 if (logger)
217 logger->log (fmt: "accepting %qs at EN: %i, SN: %i (sn: %i) (length: %i)"
218 " despite infeasible path (due to %qs)",
219 desc, enode->m_index, snode_idx, diag_idx,
220 epath->length (),
221 "-fno-analyzer-feasibility");
222 }
223 return epath;
224 }
225}
226
227/* A class for managing the worklist of feasible_nodes in
228 epath_finder::explore_feasible_paths, prioritizing them
229 so that shorter paths appear earlier in the queue. */
230
231class feasible_worklist
232{
233public:
234 feasible_worklist (const shortest_paths<eg_traits, exploded_path> &sep)
235 : m_queue (key_t (*this, NULL)),
236 m_sep (sep)
237 {
238 }
239
240 feasible_node *take_next () { return m_queue.extract_min (); }
241
242 void add_node (feasible_node *fnode)
243 {
244 m_queue.insert (key: key_t (*this, fnode), data: fnode);
245 }
246
247private:
248 struct key_t
249 {
250 key_t (const feasible_worklist &w, feasible_node *fnode)
251 : m_worklist (w), m_fnode (fnode)
252 {}
253
254 bool operator< (const key_t &other) const
255 {
256 return cmp (ka: *this, kb: other) < 0;
257 }
258
259 bool operator== (const key_t &other) const
260 {
261 return cmp (ka: *this, kb: other) == 0;
262 }
263
264 bool operator> (const key_t &other) const
265 {
266 return !(*this == other || *this < other);
267 }
268
269 private:
270 static int cmp (const key_t &ka, const key_t &kb)
271 {
272 /* Choose the node for which if the remaining path were feasible,
273 it would be the shortest path (summing the length of the
274 known-feasible path so far with that of the remaining
275 possibly-feasible path). */
276 int ca = ka.m_worklist.get_estimated_cost (fnode: ka.m_fnode);
277 int cb = kb.m_worklist.get_estimated_cost (fnode: kb.m_fnode);
278 return ca - cb;
279 }
280
281 const feasible_worklist &m_worklist;
282 feasible_node *m_fnode;
283 };
284
285 /* Get the estimated length of a path involving FNODE from
286 the origin to the target enode.
287 Sum the length of the known-feasible path so far with
288 that of the remaining possibly-feasible path. */
289
290 int get_estimated_cost (const feasible_node *fnode) const
291 {
292 unsigned length_so_far = fnode->get_path_length ();
293 int shortest_remaining_path
294 = m_sep.get_shortest_distance (other_node: fnode->get_inner_node ());
295
296 gcc_assert (shortest_remaining_path >= 0);
297 /* This should be true since we're only exploring nodes within
298 the trimmed graph (and we anticipate it being much smaller
299 than this, and thus not overflowing the sum). */
300 gcc_assert (shortest_remaining_path < INT_MAX);
301
302 return length_so_far + shortest_remaining_path;
303 }
304
305 /* Priority queue, backed by a fibonacci_heap. */
306 typedef fibonacci_heap<key_t, feasible_node> queue_t;
307 queue_t m_queue;
308 const shortest_paths<eg_traits, exploded_path> &m_sep;
309};
310
311/* When we're building the exploded graph we want to simplify
312 overly-complicated symbolic values down to "UNKNOWN" to try to avoid
313 state explosions and unbounded chains of exploration.
314
315 However, when we're building the feasibility graph for a diagnostic
316 (actually a tree), we don't want UNKNOWN values, as conditions on them
317 are also unknown: we don't want to have a contradiction such as a path
318 where (VAL != 0) and then (VAL == 0) along the same path.
319
320 Hence this is an RAII class for temporarily disabling complexity-checking
321 in the region_model_manager, for use within
322 epath_finder::explore_feasible_paths.
323
324 We also disable the creation of unknown_svalue instances during feasibility
325 checking, instead creating unique svalues, to avoid paradoxes in paths. */
326
327class auto_checking_feasibility
328{
329public:
330 auto_checking_feasibility (region_model_manager *mgr) : m_mgr (mgr)
331 {
332 m_mgr->begin_checking_feasibility ();
333 }
334 ~auto_checking_feasibility ()
335 {
336 m_mgr->end_checking_feasibility ();
337 }
338private:
339 region_model_manager *m_mgr;
340};
341
342/* Attempt to find the shortest feasible path from the origin to
343 TARGET_ENODE by iteratively building a feasible_graph, in which
344 every path to a feasible_node is feasible by construction.
345
346 If TARGET_STMT is non-NULL, then check for reaching that stmt
347 within TARGET_ENODE.
348
349 We effectively explore the tree of feasible paths in order of shortest
350 path until we either find a feasible path to TARGET_ENODE, or hit
351 a limit and give up.
352
353 Preliminaries:
354 - Find the shortest path from each node to the TARGET_ENODE (without
355 checking feasibility), so that we can prioritize our worklist.
356 - Construct a trimmed_graph: the subset of nodes/edges that
357 are on a path that eventually reaches TARGET_ENODE. We will only need
358 to consider these when considering the shortest feasible path.
359
360 Build a feasible_graph, in which every path to a feasible_node
361 is feasible by construction.
362 We use a worklist to flatten the exploration into an iteration.
363 Starting at the origin, find feasible out-edges within the trimmed graph.
364 At each stage, choose the node for which if the remaining path were feasible,
365 it would be the shortest path (summing the length of the known-feasible path
366 so far with that of the remaining possibly-feasible path).
367 This way, the first feasible path we find to TARGET_ENODE is the shortest.
368 We start by trying the shortest possible path, but if that fails,
369 we explore progressively longer paths, eventually trying iterations through
370 loops. The exploration is captured in the feasible_graph, which can be
371 dumped as a .dot file to visualize the exploration. The indices of the
372 feasible_nodes show the order in which they were created.
373
374 This is something of a brute-force approach, but the trimmed_graph
375 hopefully keeps the complexity manageable.
376
377 Terminate with failure when the number of infeasible edges exceeds
378 a threshold (--param=analyzer-max-infeasible-edges=).
379 This is guaranteed to eventually lead to terminatation, as
380 we can't keep creating feasible nodes without eventually
381 either reaching an infeasible edge, or reaching the
382 TARGET_ENODE. Specifically, there can't be a cycle of
383 feasible edges that doesn't reach the target_enode without
384 an out-edge that either fails feasibility or gets closer
385 to the TARGET_ENODE: on each iteration we are either:
386 - effectively getting closer to the TARGET_ENODE (which can't
387 continue forever without reaching the target), or
388 - getting monotonically closer to the termination threshold. */
389
390std::unique_ptr<exploded_path>
391epath_finder::explore_feasible_paths (const exploded_node *target_enode,
392 const gimple *target_stmt,
393 const pending_diagnostic &pd,
394 const char *desc, unsigned diag_idx)
395{
396 logger *logger = get_logger ();
397 LOG_SCOPE (logger);
398
399 region_model_manager *mgr = m_eg.get_engine ()->get_model_manager ();
400
401 /* Determine the shortest path to TARGET_ENODE from each node in
402 the exploded graph. */
403 shortest_paths<eg_traits, exploded_path> sep
404 (m_eg, target_enode, SPS_TO_GIVEN_TARGET);
405
406 /* Construct a trimmed_graph: the subset of nodes/edges that
407 are on a path that eventually reaches TARGET_ENODE.
408 We only need to consider these when considering the shortest
409 feasible path. */
410 trimmed_graph tg (m_eg, target_enode);
411
412 if (flag_dump_analyzer_feasibility)
413 dump_trimmed_graph (target_enode, desc, diag_idx, tg, sep);
414
415 feasible_graph fg;
416 feasible_worklist worklist (sep);
417
418 /* Populate the worklist with the origin node. */
419 {
420 feasibility_state init_state (mgr, m_eg.get_supergraph ());
421 feasible_node *origin = fg.add_node (enode: m_eg.get_origin (), state: init_state, path_length: 0);
422 worklist.add_node (fnode: origin);
423 }
424
425 /* Iteratively explore the tree of feasible paths in order of shortest
426 path until we either find a feasible path to TARGET_ENODE, or hit
427 a limit. */
428
429 /* Set this if we find a feasible path to TARGET_ENODE. */
430 std::unique_ptr<exploded_path> best_path = NULL;
431
432 {
433 auto_checking_feasibility sentinel (mgr);
434
435 while (process_worklist_item (worklist: &worklist, tg, fg: &fg, target_enode, target_stmt,
436 pd, diag_idx, out_best_path: &best_path))
437 {
438 /* Empty; the work is done within process_worklist_item. */
439 }
440 }
441
442 if (logger)
443 {
444 logger->log (fmt: "tg for sd: %i:", diag_idx);
445 logger->inc_indent ();
446 tg.log_stats (logger);
447 logger->dec_indent ();
448
449 logger->log (fmt: "fg for sd: %i:", diag_idx);
450 logger->inc_indent ();
451 fg.log_stats (logger);
452 logger->dec_indent ();
453 }
454
455 /* Dump the feasible_graph. */
456 if (flag_dump_analyzer_feasibility)
457 dump_feasible_graph (target_enode, desc, diag_idx, fg);
458
459 return best_path;
460}
461
/* Process the next item in WORKLIST, potentially adding new items
   based on feasible out-edges, and extending FG accordingly.
   Use TG to ignore out-edges that don't lead to TARGET_ENODE.
   Return true if the worklist processing should continue.
   Return false if the processing of the worklist should stop
   (either due to reaching TARGET_ENODE, or hitting a limit).
   Write to *OUT_BEST_PATH if stopping due to finding a feasible path
   to TARGET_ENODE.
   Use PD to provide additional restrictions on feasibility of
   the final path in the feasible_graph before converting to
   an exploded_path.  */

bool
epath_finder::
process_worklist_item (feasible_worklist *worklist,
		       const trimmed_graph &tg,
		       feasible_graph *fg,
		       const exploded_node *target_enode,
		       const gimple *target_stmt,
		       const pending_diagnostic &pd,
		       unsigned diag_idx,
		       std::unique_ptr<exploded_path> *out_best_path) const
{
  logger *logger = get_logger ();

  feasible_node *fnode = worklist->take_next ();
  if (!fnode)
    {
      /* Worklist exhausted without reaching TARGET_ENODE: there is no
	 feasible path (within the limits explored).  */
      if (logger)
	logger->log ("drained worklist for sd: %i"
		     " without finding feasible path",
		     diag_idx);
      return false;
    }

  log_scope s (logger, "fg worklist item",
	       "considering FN: %i (EN: %i) for sd: %i",
	       fnode->get_index (), fnode->get_inner_node ()->m_index,
	       diag_idx);

  /* Iterate through all out-edges from this item.  */
  unsigned i;
  exploded_edge *succ_eedge;
  FOR_EACH_VEC_ELT (fnode->get_inner_node ()->m_succs, i, succ_eedge)
    {
      log_scope s (logger, "edge", "considering edge: EN:%i -> EN:%i",
		   succ_eedge->m_src->m_index,
		   succ_eedge->m_dest->m_index);
      /* Reject edges that aren't in the trimmed graph: they can't lead
	 to TARGET_ENODE.  */
      if (!tg.contains_p (succ_eedge))
	{
	  if (logger)
	    logger->log ("rejecting: not in trimmed graph");
	  continue;
	}

      /* Copy this node's state and try to update it along SUCC_EEDGE;
	 on failure, RC is written with the rejected constraint.  */
      feasibility_state succ_state (fnode->get_state ());
      std::unique_ptr<rejected_constraint> rc;
      if (succ_state.maybe_update_for_edge (logger, succ_eedge, &rc))
	{
	  gcc_assert (rc == NULL);
	  /* The edge is feasible: extend FG with the successor node
	     and the connecting edge.  */
	  feasible_node *succ_fnode
	    = fg->add_node (succ_eedge->m_dest,
			    succ_state,
			    fnode->get_path_length () + 1);
	  if (logger)
	    logger->log ("accepting as FN: %i", succ_fnode->get_index ());
	  fg->add_edge (new feasible_edge (fnode, succ_fnode, succ_eedge));

	  /* Have we reached TARGET_ENODE?  */
	  if (succ_fnode->get_inner_node () == target_enode)
	    {
	      if (logger)
		logger->log ("success: got feasible path to EN: %i (sd: %i)"
			     " (length: %i)",
			     target_enode->m_index, diag_idx,
			     succ_fnode->get_path_length ());
	      /* Give the pending_diagnostic a chance to veto the path
		 (e.g. additional restrictions w.r.t. TARGET_STMT).  */
	      if (!pd.check_valid_fpath_p (*succ_fnode, target_stmt))
		{
		  if (logger)
		    logger->log ("rejecting feasible path due to"
				 " pending_diagnostic");
		  return false;
		}
	      *out_best_path = fg->make_epath (succ_fnode);
	      if (flag_dump_analyzer_feasibility)
		dump_feasible_path (target_enode, diag_idx, *fg, *succ_fnode);

	      /* Success: stop the worklist iteration.  */
	      return false;
	    }
	  else
	    worklist->add_node (succ_fnode);
	}
      else
	{
	  if (logger)
	    logger->log ("infeasible");
	  gcc_assert (rc);
	  /* Record the rejection in FG (for dumps/debugging), passing
	     ownership of RC to it.  */
	  fg->add_feasibility_problem (fnode,
				       succ_eedge,
				       std::move (rc));

	  /* Give up if there have been too many infeasible edges.  */
	  if (fg->get_num_infeasible ()
	      > (unsigned)param_analyzer_max_infeasible_edges)
	    {
	      if (logger)
		logger->log ("too many infeasible edges (%i); giving up",
			     fg->get_num_infeasible ());
	      return false;
	    }
	}
    }

  /* Continue the worklist iteration.  */
  return true;
}
580
581/* Helper class for epath_finder::dump_trimmed_graph
582 to dump extra per-node information.
583 Use SEP to add the length of the shortest path from each
584 node to the target node to each node's dump. */
585
586class dump_eg_with_shortest_path : public eg_traits::dump_args_t
587{
588public:
589 dump_eg_with_shortest_path
590 (const exploded_graph &eg,
591 const shortest_paths<eg_traits, exploded_path> &sep)
592 : dump_args_t (eg),
593 m_sep (sep)
594 {
595 }
596
597 void dump_extra_info (const exploded_node *enode,
598 pretty_printer *pp) const final override
599 {
600 pp_printf (pp, "sp: %i", m_sep.get_shortest_path (other_node: enode).length ());
601 pp_newline (pp);
602 }
603
604private:
605 const shortest_paths<eg_traits, exploded_path> &m_sep;
606};
607
608/* Dump TG to "BASE_NAME.DESC.DIAG_IDX.to-enN.tg.dot",
609 annotating each node with the length of the shortest path
610 from that node to TARGET_ENODE (using SEP). */
611
612void
613epath_finder::
614dump_trimmed_graph (const exploded_node *target_enode,
615 const char *desc, unsigned diag_idx,
616 const trimmed_graph &tg,
617 const shortest_paths<eg_traits, exploded_path> &sep)
618{
619 auto_timevar tv (TV_ANALYZER_DUMP);
620 dump_eg_with_shortest_path inner_args (m_eg, sep);
621 trimmed_graph::dump_args_t args (inner_args);
622 pretty_printer pp;
623 pp_printf (&pp, "%s.%s.%i.to-en%i.tg.dot",
624 dump_base_name, desc, diag_idx, target_enode->m_index);
625 char *filename = xstrdup (pp_formatted_text (&pp));
626 tg.dump_dot (path: filename, NULL, args);
627 free (ptr: filename);
628}
629
630/* Dump FG to "BASE_NAME.DESC.DIAG_IDX.to-enN.fg.dot". */
631
632void
633epath_finder::dump_feasible_graph (const exploded_node *target_enode,
634 const char *desc, unsigned diag_idx,
635 const feasible_graph &fg)
636{
637 auto_timevar tv (TV_ANALYZER_DUMP);
638 exploded_graph::dump_args_t inner_args (m_eg);
639 feasible_graph::dump_args_t args (inner_args);
640 pretty_printer pp;
641 pp_printf (&pp, "%s.%s.%i.to-en%i.fg.dot",
642 dump_base_name, desc, diag_idx, target_enode->m_index);
643 char *filename = xstrdup (pp_formatted_text (&pp));
644 fg.dump_dot (path: filename, NULL, args);
645 free (ptr: filename);
646}
647
648/* Dump the path to FNODE to "BASE_NAME.DIAG_IDX.to-enN.fpath.txt". */
649
650void
651epath_finder::dump_feasible_path (const exploded_node *target_enode,
652 unsigned diag_idx,
653 const feasible_graph &fg,
654 const feasible_node &fnode) const
655{
656 auto_timevar tv (TV_ANALYZER_DUMP);
657 pretty_printer pp;
658 pp_printf (&pp, "%s.%i.to-en%i.fpath.txt",
659 dump_base_name, diag_idx, target_enode->m_index);
660 char *filename = xstrdup (pp_formatted_text (&pp));
661 fg.dump_feasible_path (dst_fnode: fnode, filename);
662 free (ptr: filename);
663}
664
/* class saved_diagnostic.  */

/* saved_diagnostic's ctor.
   Take ownership of D.  */

saved_diagnostic::saved_diagnostic (const state_machine *sm,
				    const pending_location &ploc,
				    tree var,
				    const svalue *sval,
				    state_machine::state_t state,
				    std::unique_ptr<pending_diagnostic> d,
				    unsigned idx)
: m_sm (sm), m_enode (ploc.m_enode), m_snode (ploc.m_snode),
  m_stmt (ploc.m_stmt),
  /* stmt_finder could be on-stack; we want our own copy that can
     outlive that.  */
  m_stmt_finder (ploc.m_finder ? ploc.m_finder->clone () : NULL),
  m_loc (ploc.m_loc),
  m_var (var), m_sval (sval), m_state (state),
  m_d (std::move (d)), m_trailing_eedge (NULL),
  m_idx (idx),
  m_best_epath (NULL), m_problem (NULL),
  m_notes ()
{
  /* We must have an enode in order to be able to look for paths
     through the exploded_graph to this diagnostic.  */
  gcc_assert (m_enode);
}
692
693bool
694saved_diagnostic::operator== (const saved_diagnostic &other) const
695{
696 if (m_notes.length () != other.m_notes.length ())
697 return false;
698 for (unsigned i = 0; i < m_notes.length (); i++)
699 if (!m_notes[i]->equal_p (other: *other.m_notes[i]))
700 return false;
701 return (m_sm == other.m_sm
702 /* We don't compare m_enode. */
703 && m_snode == other.m_snode
704 && m_stmt == other.m_stmt
705 /* We don't compare m_stmt_finder. */
706 && m_loc == other.m_loc
707 && pending_diagnostic::same_tree_p (t1: m_var, t2: other.m_var)
708 && m_state == other.m_state
709 && m_d->equal_p (other: *other.m_d)
710 && m_trailing_eedge == other.m_trailing_eedge);
711}
712
713/* Add PN to this diagnostic, taking ownership of it. */
714
715void
716saved_diagnostic::add_note (std::unique_ptr<pending_note> pn)
717{
718 gcc_assert (pn);
719 m_notes.safe_push (obj: pn.release ());
720}
721
722/* Add EVENT to this diagnostic. */
723
724void
725saved_diagnostic::add_event (std::unique_ptr<checker_event> event)
726{
727 gcc_assert (event);
728 m_saved_events.safe_push (obj: event.release ());
729}
730
/* Return a new json::object of the form
   {"sm": optional str,
    "enode": int,
    "snode": int,
    "sval": optional str,
    "state": optional str,
    "path_length": optional int,
    "pending_diagnostic": str,
    "idx": int}.
   The caller takes ownership of the result.  */

json::object *
saved_diagnostic::to_json () const
{
  json::object *sd_obj = new json::object ();

  /* Optional fields are only emitted when present.  */
  if (m_sm)
    sd_obj->set ("sm", new json::string (m_sm->get_name ()));
  sd_obj->set ("enode", new json::integer_number (m_enode->m_index));
  sd_obj->set ("snode", new json::integer_number (m_snode->m_index));
  if (m_sval)
    sd_obj->set ("sval", m_sval->to_json ());
  if (m_state)
    sd_obj->set ("state", m_state->to_json ());
  /* "path_length" is only available once the best epath has been
     computed.  */
  if (m_best_epath)
    sd_obj->set ("path_length", new json::integer_number (get_epath_length ()));
  sd_obj->set ("pending_diagnostic", new json::string (m_d->get_kind ()));
  sd_obj->set ("idx", new json::integer_number (m_idx));

  /* We're not yet JSONifying the following fields:
     const gimple *m_stmt;
     stmt_finder *m_stmt_finder;
     tree m_var;
     exploded_edge *m_trailing_eedge;
     enum status m_status;
     feasibility_problem *m_problem;
     auto_delete_vec <pending_note> m_notes;
  */

  return sd_obj;
}
771
/* Dump this to PP in a form suitable for use as an id in .dot output,
   e.g. "sd_42" for index 42.  */

void
saved_diagnostic::dump_dot_id (pretty_printer *pp) const
{
  pp_printf (pp, "sd_%i", m_idx);
}
779
/* Dump this to PP in a form suitable for use as a node in .dot output:
   a red-filled node summarizing the diagnostic, followed by dotted
   edges to any nodes recorded as duplicates of it.  */

void
saved_diagnostic::dump_as_dot_node (pretty_printer *pp) const
{
  dump_dot_id (pp);
  pp_printf (pp,
	     " [shape=none,margin=0,style=filled,fillcolor=\"red\",label=\"");
  pp_write_text_to_stream (pp);

  /* Node label.  */
  pp_printf (pp, "DIAGNOSTIC: %s (sd: %i)\n",
	     m_d->get_kind (), m_idx);
  if (m_sm)
    {
      pp_printf (pp, "sm: %s", m_sm->get_name ());
      if (m_state)
	{
	  pp_printf (pp, "; state: ");
	  m_state->dump_to_pp (pp);
	}
      pp_newline (pp);
    }
  if (m_stmt)
    {
      pp_string (pp, "stmt: ");
      pp_gimple_stmt_1 (pp, m_stmt, 0, (dump_flags_t)0);
      pp_newline (pp);
    }
  if (m_var)
    pp_printf (pp, "var: %qE\n", m_var);
  if (m_sval)
    {
      pp_string (pp, "sval: ");
      m_sval->dump_to_pp (pp, true);
      pp_newline (pp);
    }
  /* Path length is only known once the best epath has been computed.  */
  if (m_best_epath)
    pp_printf (pp, "path length: %i\n", get_epath_length ());

  /* Escape the accumulated text for use in a .dot label.  */
  pp_write_text_as_dot_label_to_stream (pp, /*for_record=*/true);
  pp_string (pp, "\"];\n\n");

  /* Show links to duplicates.  */
  for (auto iter : m_duplicates)
    {
      dump_dot_id (pp);
      pp_string (pp, " -> ");
      iter->dump_dot_id (pp);
      pp_string (pp, " [style=\"dotted\" arrowhead=\"none\"];");
      pp_newline (pp);
    }
}
833
834/* Use PF to find the best exploded_path for this saved_diagnostic,
835 and store it in m_best_epath.
836 If we don't have a specific location in m_loc and m_stmt is still NULL,
837 use m_stmt_finder on the epath to populate m_stmt.
838 Return true if a best path was found. */
839
840bool
841saved_diagnostic::calc_best_epath (epath_finder *pf)
842{
843 logger *logger = pf->get_logger ();
844 LOG_SCOPE (logger);
845 m_problem = NULL;
846
847 m_best_epath = pf->get_best_epath (enode: m_enode, target_stmt: m_stmt,
848 pd: *m_d, desc: m_d->get_kind (), diag_idx: m_idx,
849 out_problem: &m_problem);
850
851 /* Handle failure to find a feasible path. */
852 if (m_best_epath == NULL)
853 return false;
854
855 gcc_assert (m_best_epath);
856 if (m_loc == UNKNOWN_LOCATION)
857 {
858 if (m_stmt == NULL)
859 {
860 gcc_assert (m_stmt_finder);
861 m_stmt = m_stmt_finder->find_stmt (epath: *m_best_epath);
862 }
863 gcc_assert (m_stmt);
864 }
865
866 return true;
867}
868
/* Return the length of the best epath; must only be called once
   calc_best_epath has succeeded.  */

unsigned
saved_diagnostic::get_epath_length () const
{
  gcc_assert (m_best_epath);
  return m_best_epath->length ();
}
875
876/* Record that OTHER (and its duplicates) are duplicates
877 of this saved_diagnostic. */
878
879void
880saved_diagnostic::add_duplicate (saved_diagnostic *other)
881{
882 gcc_assert (other);
883 m_duplicates.reserve (nelems: m_duplicates.length ()
884 + other->m_duplicates.length ()
885 + 1);
886 m_duplicates.splice (src: other->m_duplicates);
887 other->m_duplicates.truncate (size: 0);
888 m_duplicates.safe_push (obj: other);
889}
890
891/* Walk up the sedges of each of the two paths.
892 If the two sequences of sedges do not perfectly correspond,
893 then paths are incompatible.
894 If there is at least one sedge that either cannot be paired up
895 or its counterpart is not equal, then the paths are incompatible
896 and this function returns FALSE.
897 Otherwise return TRUE.
898
899 Incompatible paths:
900
901 <cond Y>
902 / \
903 / \
904 true false
905 | |
906 ... ...
907 | |
908 ... stmt x
909 |
910 stmt x
911
912 Both LHS_PATH and RHS_PATH final enodes should be
913 over the same gimple statement. */
914
915static bool
916compatible_epath_p (const exploded_path *lhs_path,
917 const exploded_path *rhs_path)
918{
919 gcc_assert (lhs_path);
920 gcc_assert (rhs_path);
921 gcc_assert (rhs_path->length () > 0);
922 gcc_assert (rhs_path->length () > 0);
923 int lhs_eedge_idx = lhs_path->length () - 1;
924 int rhs_eedge_idx = rhs_path->length () - 1;
925 const exploded_edge *lhs_eedge;
926 const exploded_edge *rhs_eedge;
927
928 while (lhs_eedge_idx >= 0 && rhs_eedge_idx >= 0)
929 {
930 while (lhs_eedge_idx >= 0)
931 {
932 /* Find LHS_PATH's next superedge. */
933 lhs_eedge = lhs_path->m_edges[lhs_eedge_idx];
934 if (lhs_eedge->m_sedge)
935 break;
936 else
937 lhs_eedge_idx--;
938 }
939 while (rhs_eedge_idx >= 0)
940 {
941 /* Find RHS_PATH's next superedge. */
942 rhs_eedge = rhs_path->m_edges[rhs_eedge_idx];
943 if (rhs_eedge->m_sedge)
944 break;
945 else
946 rhs_eedge_idx--;
947 }
948
949 if (lhs_eedge->m_sedge && rhs_eedge->m_sedge)
950 {
951 if (lhs_eedge->m_sedge != rhs_eedge->m_sedge)
952 /* Both superedges do not match.
953 Superedges are not dependent on the exploded path, so even
954 different epaths will have similar sedges if they follow
955 the same outcome of a conditional node. */
956 return false;
957
958 lhs_eedge_idx--;
959 rhs_eedge_idx--;
960 continue;
961 }
962 else if (lhs_eedge->m_sedge == nullptr && rhs_eedge->m_sedge == nullptr)
963 /* Both paths were drained up entirely.
964 No discriminant was found. */
965 return true;
966
967 /* A superedge was found for only one of the two paths. */
968 return false;
969 }
970
971 /* A superedge was found for only one of the two paths. */
972 if (lhs_eedge_idx >= 0 || rhs_eedge_idx >= 0)
973 return false;
974
975 /* Both paths were drained up entirely.
976 No discriminant was found. */
977 return true;
978}
979
980
981/* Return true if this diagnostic supercedes OTHER, and that OTHER should
982 therefore not be emitted. */
983
984bool
985saved_diagnostic::supercedes_p (const saved_diagnostic &other) const
986{
987 /* They should be at the same stmt. */
988 if (m_stmt != other.m_stmt)
989 return false;
990 /* return early if OTHER won't be superseded anyway. */
991 if (!m_d->supercedes_p (other: *other.m_d))
992 return false;
993
994 /* If the two saved_diagnostics' path are not compatible
995 then they cannot supersede one another. */
996 return compatible_epath_p (lhs_path: m_best_epath.get (), rhs_path: other.m_best_epath.get ());
997}
998
999/* Move any saved checker_events from this saved_diagnostic to
1000 the end of DST_PATH. */
1001
1002void
1003saved_diagnostic::add_any_saved_events (checker_path &dst_path)
1004{
1005 for (auto &event : m_saved_events)
1006 {
1007 dst_path.add_event (event: std::unique_ptr<checker_event> (event));
1008 event = nullptr;
1009 }
1010}
1011
1012/* Emit any pending notes owned by this diagnostic. */
1013
1014void
1015saved_diagnostic::emit_any_notes () const
1016{
1017 for (auto pn : m_notes)
1018 pn->emit ();
1019}
1020
1021/* State for building a checker_path from a particular exploded_path.
1022 In particular, this precomputes reachability information: the set of
1023 source enodes for which a path be found to the diagnostic enode. */
1024
class path_builder
{
public:
  /* EPATH's final enode is where the diagnostic is to be emitted;
     reachability of that enode is precomputed up-front in
     m_reachability.  PROBLEM may be NULL.  */
  path_builder (const exploded_graph &eg,
		const exploded_path &epath,
		const feasibility_problem *problem,
		const saved_diagnostic &sd)
  : m_eg (eg),
    m_diag_enode (epath.get_final_enode ()),
    m_sd (sd),
    m_reachability (eg, m_diag_enode),
    m_feasibility_problem (problem)
  {}

  /* Get the enode at which the diagnostic is to be emitted.  */
  const exploded_node *get_diag_node () const { return m_diag_enode; }

  /* Get the underlying pending_diagnostic (borrowed from m_sd).  */
  pending_diagnostic *get_pending_diagnostic () const
  {
    return m_sd.m_d.get ();
  }

  /* Return true if the diagnostic enode can be reached from SRC_ENODE,
     using the precomputed reachability information.  */
  bool reachable_from_p (const exploded_node *src_enode) const
  {
    return m_reachability.reachable_from_p (src_node: src_enode);
  }

  const extrinsic_state &get_ext_state () const { return m_eg.get_ext_state (); }

  /* The feasibility_problem, if any (may be NULL).  */
  const feasibility_problem *get_feasibility_problem () const
  {
    return m_feasibility_problem;
  }

  /* The state machine associated with the diagnostic (may be NULL).  */
  const state_machine *get_sm () const { return m_sd.m_sm; }

private:
  typedef reachability<eg_traits> enode_reachability;

  const exploded_graph &m_eg;

  /* The enode where the diagnostic occurs.  */
  const exploded_node *m_diag_enode;

  const saved_diagnostic &m_sd;

  /* Precompute all enodes from which the diagnostic is reachable.  */
  enode_reachability m_reachability;

  const feasibility_problem *m_feasibility_problem;
};
1075
1076/* Determine the emission location for PD at STMT in FUN. */
1077
1078static location_t
1079get_emission_location (const gimple *stmt, function *fun,
1080 const pending_diagnostic &pd)
1081{
1082 location_t loc = get_stmt_location (stmt, fun);
1083
1084 /* Allow the pending_diagnostic to fix up the location. */
1085 loc = pd.fixup_location (loc, primary: true);
1086
1087 return loc;
1088}
1089
1090/* class diagnostic_manager. */
1091
1092/* diagnostic_manager's ctor. */
1093
1094diagnostic_manager::diagnostic_manager (logger *logger, engine *eng,
1095 int verbosity)
1096: log_user (logger), m_eng (eng), m_verbosity (verbosity),
1097 m_num_disabled_diagnostics (0)
1098{
1099}
1100
1101/* Queue pending_diagnostic D at ENODE for later emission.
1102 Return true/false signifying if the diagnostic was actually added. */
1103
bool
diagnostic_manager::add_diagnostic (const state_machine *sm,
				    const pending_location &ploc,
				    tree var,
				    const svalue *sval,
				    state_machine::state_t state,
				    std::unique_ptr<pending_diagnostic> d)
{
  LOG_FUNC (get_logger ());

  /* We must have an enode in order to be able to look for paths
     through the exploded_graph to the diagnostic.  */
  gcc_assert (ploc.m_enode);

  /* If this warning is ultimately going to be rejected by a -Wno-analyzer-*
     flag, reject it now.
     We can only do this for diagnostics where we already know the stmt,
     and thus can determine the emission location.  */
  if (ploc.m_stmt)
    {
      location_t loc
	= get_emission_location (stmt: ploc.m_stmt, fun: ploc.m_snode->m_fun, pd: *d);
      int option = d->get_controlling_option ();
      if (!warning_enabled_at (loc, opt: option))
	{
	  if (get_logger ())
	    get_logger ()->log (fmt: "rejecting disabled warning %qs",
				d->get_kind ());
	  /* Keep a count of rejections; it is logged later by
	     emit_saved_diagnostics.  */
	  m_num_disabled_diagnostics++;
	  return false;
	}
    }

  /* Wrap D in a saved_diagnostic (transferring ownership of D), record
     it in our vector, and also register it on the enode.  */
  saved_diagnostic *sd
    = new saved_diagnostic (sm, ploc, var, sval, state, std::move (d),
			    m_saved_diagnostics.length ());
  m_saved_diagnostics.safe_push (obj: sd);
  ploc.m_enode->add_diagnostic (sd);
  if (get_logger ())
    log (fmt: "adding saved diagnostic %i at SN %i to EN %i: %qs",
	 sd->get_index (),
	 ploc.m_snode->m_index, ploc.m_enode->m_index, sd->m_d->get_kind ());
  return true;
}
1148
1149/* Queue pending_diagnostic D at ENODE for later emission.
1150 Return true/false signifying if the diagnostic was actually added.
1151 Take ownership of D (or delete it). */
1152
1153bool
1154diagnostic_manager::add_diagnostic (const pending_location &ploc,
1155 std::unique_ptr<pending_diagnostic> d)
1156{
1157 gcc_assert (ploc.m_enode);
1158 return add_diagnostic (NULL, ploc, NULL_TREE, NULL, state: 0, d: std::move (d));
1159}
1160
1161/* Add PN to the most recent saved_diagnostic. */
1162
1163void
1164diagnostic_manager::add_note (std::unique_ptr<pending_note> pn)
1165{
1166 LOG_FUNC (get_logger ());
1167 gcc_assert (pn);
1168
1169 /* Get most recent saved_diagnostic. */
1170 gcc_assert (m_saved_diagnostics.length () > 0);
1171 saved_diagnostic *sd = m_saved_diagnostics[m_saved_diagnostics.length () - 1];
1172 sd->add_note (pn: std::move (pn));
1173}
1174
1175/* Add EVENT to the most recent saved_diagnostic. */
1176
1177void
1178diagnostic_manager::add_event (std::unique_ptr<checker_event> event)
1179{
1180 LOG_FUNC (get_logger ());
1181 gcc_assert (event);
1182
1183 /* Get most recent saved_diagnostic. */
1184 gcc_assert (m_saved_diagnostics.length () > 0);
1185 saved_diagnostic *sd = m_saved_diagnostics[m_saved_diagnostics.length () - 1];
1186 sd->add_event (event: std::move (event));
1187}
1188
1189/* Return a new json::object of the form
1190 {"diagnostics" : [obj for saved_diagnostic]}. */
1191
1192json::object *
1193diagnostic_manager::to_json () const
1194{
1195 json::object *dm_obj = new json::object ();
1196
1197 {
1198 json::array *sd_arr = new json::array ();
1199 int i;
1200 saved_diagnostic *sd;
1201 FOR_EACH_VEC_ELT (m_saved_diagnostics, i, sd)
1202 sd_arr->append (v: sd->to_json ());
1203 dm_obj->set (key: "diagnostics", v: sd_arr);
1204 }
1205
1206 return dm_obj;
1207}
1208
1209/* A class for identifying sets of duplicated pending_diagnostic.
1210
1211 We want to find the simplest saved_diagnostic amongst those that share a
1212 dedupe_key. */
1213
1214class dedupe_key
1215{
1216public:
1217 dedupe_key (const saved_diagnostic &sd)
1218 : m_sd (sd), m_stmt (sd.m_stmt), m_loc (sd.m_loc)
1219 {
1220 gcc_assert (m_stmt || m_loc != UNKNOWN_LOCATION);
1221 }
1222
1223 hashval_t hash () const
1224 {
1225 inchash::hash hstate;
1226 hstate.add_ptr (ptr: m_stmt);
1227 // TODO: m_sd
1228 return hstate.end ();
1229 }
1230 bool operator== (const dedupe_key &other) const
1231 {
1232 return (m_sd == other.m_sd
1233 && m_stmt == other.m_stmt
1234 && m_loc == other.m_loc);
1235 }
1236
1237 location_t get_location () const
1238 {
1239 if (m_loc != UNKNOWN_LOCATION)
1240 return m_loc;
1241 gcc_assert (m_stmt);
1242 return m_stmt->location;
1243 }
1244
1245 /* A qsort comparator for use by dedupe_winners::emit_best
1246 to sort them into location_t order. */
1247
1248 static int
1249 comparator (const void *p1, const void *p2)
1250 {
1251 const dedupe_key *pk1 = *(const dedupe_key * const *)p1;
1252 const dedupe_key *pk2 = *(const dedupe_key * const *)p2;
1253
1254 location_t loc1 = pk1->get_location ();
1255 location_t loc2 = pk2->get_location ();
1256
1257 if (int cmp = linemap_compare_locations (set: line_table, pre: loc2, post: loc1))
1258 return cmp;
1259 if (int cmp = ((int)pk1->m_sd.get_epath_length ()
1260 - (int)pk2->m_sd.get_epath_length ()))
1261 return cmp;
1262 if (int cmp = strcmp (s1: pk1->m_sd.m_d->get_kind (),
1263 s2: pk2->m_sd.m_d->get_kind ()))
1264 return cmp;
1265 return 0;
1266 }
1267
1268 const saved_diagnostic &m_sd;
1269 const gimple *m_stmt;
1270 location_t m_loc;
1271};
1272
1273/* Traits for use by dedupe_winners. */
1274
class dedupe_hash_map_traits
{
public:
  typedef const dedupe_key *key_type;
  typedef saved_diagnostic *value_type;
  typedef saved_diagnostic *compare_type;

  static inline hashval_t hash (const key_type &v)
  {
    return v->hash ();
  }
  /* Keys are compared by value (via dedupe_key::operator==), not by
     pointer identity.  */
  static inline bool equal_keys (const key_type &k1, const key_type &k2)
  {
    return *k1 == *k2;
  }
  template <typename T>
  static inline void remove (T &)
  {
    // TODO
  }
  /* Slot state is encoded in the key pointer: the non-NULL sentinel
     value 1 marks a deleted slot, NULL marks an empty one.  */
  template <typename T>
  static inline void mark_deleted (T &entry)
  {
    entry.m_key = reinterpret_cast<key_type> (1);
  }
  template <typename T>
  static inline void mark_empty (T &entry)
  {
    entry.m_key = NULL;
  }
  template <typename T>
  static inline bool is_deleted (const T &entry)
  {
    return entry.m_key == reinterpret_cast<key_type> (1);
  }
  template <typename T>
  static inline bool is_empty (const T &entry)
  {
    return entry.m_key == NULL;
  }
  static const bool empty_zero_p = true;
};
1317
1318/* A class for deduplicating diagnostics and finding (and emitting) the
1319 best saved_diagnostic within each partition. */
1320
class dedupe_winners
{
public:
  ~dedupe_winners ()
  {
    /* Delete all keys, but not the saved_diagnostics.  */
    for (map_t::iterator iter = m_map.begin ();
	 iter != m_map.end ();
	 ++iter)
      delete (*iter).first;
  }

  /* Determine an exploded_path for SD using PF and, if it's feasible,
     determine if SD is the best seen so far for its dedupe_key.
     Record the winning SD for each dedupe_key.  */

  void add (logger *logger,
	    epath_finder *pf,
	    saved_diagnostic *sd)
  {
    /* Determine best epath for SD.  */
    if (!sd->calc_best_epath (pf))
      /* No feasible path was found; drop SD without recording it.  */
      return;

    dedupe_key *key = new dedupe_key (*sd);
    if (saved_diagnostic **slot = m_map.get (k: key))
      {
	if (logger)
	  logger->log (fmt: "already have this dedupe_key");

	saved_diagnostic *cur_best_sd = *slot;

	/* "Best" means shortest exploded path.  */
	if (sd->get_epath_length () < cur_best_sd->get_epath_length ())
	  {
	    /* We've got a shorter path for the key; replace
	       the current candidate, marking it as a duplicate of SD.  */
	    if (logger)
	      logger->log (fmt: "length %i is better than existing length %i;"
			   " taking over this dedupe_key",
			   sd->get_epath_length (),
			   cur_best_sd->get_epath_length ());
	    sd->add_duplicate (other: cur_best_sd);
	    *slot = sd;
	  }
	else
	  /* We haven't beaten the current best candidate; add SD
	     as a duplicate of it.  */
	  {
	    if (logger)
	      logger->log (fmt: "length %i isn't better than existing length %i;"
			   " dropping this candidate",
			   sd->get_epath_length (),
			   cur_best_sd->get_epath_length ());
	    cur_best_sd->add_duplicate (other: sd);
	  }
	/* The map already holds an equal key; this one is redundant.  */
	delete key;
      }
    else
      {
	/* This is the first candidate for this key.  */
	m_map.put (k: key, v: sd);
	if (logger)
	  logger->log (fmt: "first candidate for this dedupe_key");
      }
  }

  /* Handle interactions between the dedupe winners, so that some
     diagnostics can supercede others (of different kinds).

     We want use-after-free to supercede use-of-unitialized-value,
     so that if we have these at the same stmt, we don't emit
     a use-of-uninitialized, just the use-after-free.  */

  void handle_interactions (diagnostic_manager *dm)
  {
    LOG_SCOPE (dm->get_logger ());
    /* Collect superceded keys first, then remove them, so that we
       never mutate m_map while iterating over it.  */
    auto_vec<const dedupe_key *> superceded;
    for (auto outer : m_map)
      {
	const saved_diagnostic *outer_sd = outer.second;
	for (auto inner : m_map)
	  {
	    const saved_diagnostic *inner_sd = inner.second;
	    if (inner_sd->supercedes_p (other: *outer_sd))
	      {
		superceded.safe_push (obj: outer.first);
		if (dm->get_logger ())
		  dm->log (fmt: "sd[%i] \"%s\" superceded by sd[%i] \"%s\"",
			   outer_sd->get_index (), outer_sd->m_d->get_kind (),
			   inner_sd->get_index (), inner_sd->m_d->get_kind ());
		break;
	      }
	  }
      }
    for (auto iter : superceded)
      m_map.remove (k: iter);
  }

  /* Emit the simplest diagnostic within each set.  */

  void emit_best (diagnostic_manager *dm,
		  const exploded_graph &eg)
  {
    LOG_SCOPE (dm->get_logger ());

    /* Get keys into a vec for sorting.  */
    auto_vec<const dedupe_key *> keys (m_map.elements ());
    for (map_t::iterator iter = m_map.begin ();
	 iter != m_map.end ();
	 ++iter)
      keys.quick_push (obj: (*iter).first);

    dm->log (fmt: "# keys after de-duplication: %i", keys.length ());

    /* Sort into a good emission order.  */
    keys.qsort (dedupe_key::comparator);

    /* Emit the best saved_diagnostics for each key.  */
    int i;
    const dedupe_key *key;
    FOR_EACH_VEC_ELT (keys, i, key)
      {
	saved_diagnostic **slot = m_map.get (k: key);
	gcc_assert (*slot);
	saved_diagnostic *sd = *slot;
	dm->emit_saved_diagnostic (eg, sd&: *sd);
      }
  }

private:
  /* This maps from each dedupe_key to a current best saved_diagnostic.  */

  typedef hash_map<const dedupe_key *, saved_diagnostic *,
		   dedupe_hash_map_traits> map_t;
  map_t m_map;
};
1457
1458/* Emit all saved diagnostics. */
1459
void
diagnostic_manager::emit_saved_diagnostics (const exploded_graph &eg)
{
  LOG_SCOPE (get_logger ());
  auto_timevar tv (TV_ANALYZER_DIAGNOSTICS);
  log (fmt: "# saved diagnostics: %i", m_saved_diagnostics.length ());
  log (fmt: "# disabled diagnostics: %i", m_num_disabled_diagnostics);
  if (get_logger ())
    {
      unsigned i;
      saved_diagnostic *sd;
      FOR_EACH_VEC_ELT (m_saved_diagnostics, i, sd)
	log (fmt: "[%i] sd: %qs at EN: %i, SN: %i",
	     i, sd->m_d->get_kind (), sd->m_enode->m_index,
	     sd->m_snode->m_index);
    }

  /* Nothing to do if no diagnostics were saved.  */
  if (m_saved_diagnostics.length () == 0)
    return;

  /* Compute the shortest_paths once, sharing it between all diagnostics.  */
  epath_finder pf (eg);

  /* Iterate through all saved diagnostics, adding them to a dedupe_winners
     instance.  This partitions the saved diagnostics by dedupe_key,
     generating exploded_paths for them, and retaining the best one in each
     partition.  */
  dedupe_winners best_candidates;

  int i;
  saved_diagnostic *sd;
  FOR_EACH_VEC_ELT (m_saved_diagnostics, i, sd)
    best_candidates.add (logger: get_logger (), pf: &pf, sd);

  /* Let e.g. use-after-free suppress use-of-uninitialized-value at
     the same stmt.  */
  best_candidates.handle_interactions (dm: this);

  /* For each dedupe-key, call emit_saved_diagnostic on the "best"
     saved_diagnostic.  */
  best_candidates.emit_best (dm: this, eg);
}
1500
1501/* Given a saved_diagnostic SD with m_best_epath through EG,
1502 create an checker_path of suitable events and use it to call
1503 SD's underlying pending_diagnostic "emit" vfunc to emit a diagnostic. */
1504
1505void
1506diagnostic_manager::emit_saved_diagnostic (const exploded_graph &eg,
1507 saved_diagnostic &sd)
1508{
1509 LOG_SCOPE (get_logger ());
1510 log (fmt: "sd[%i]: %qs at SN: %i",
1511 sd.get_index (), sd.m_d->get_kind (), sd.m_snode->m_index);
1512 log (fmt: "num dupes: %i", sd.get_num_dupes ());
1513
1514 pretty_printer *pp = global_dc->printer->clone ();
1515
1516 const exploded_path *epath = sd.get_best_epath ();
1517 gcc_assert (epath);
1518
1519 /* Precompute all enodes from which the diagnostic is reachable. */
1520 path_builder pb (eg, *epath, sd.get_feasibility_problem (), sd);
1521
1522 /* This is the diagnostic_path subclass that will be built for
1523 the diagnostic. */
1524 checker_path emission_path (get_logger ());
1525
1526 /* Populate emission_path with a full description of EPATH. */
1527 build_emission_path (pb, epath: *epath, emission_path: &emission_path);
1528
1529 /* Now prune it to just cover the most pertinent events. */
1530 prune_path (path: &emission_path, sm: sd.m_sm, sval: sd.m_sval, state: sd.m_state);
1531
1532 /* Add any saved events to the path, giving contextual information
1533 about what the analyzer was simulating as the diagnostic was
1534 generated. These don't get pruned, as they are probably pertinent. */
1535 sd.add_any_saved_events (dst_path&: emission_path);
1536
1537 /* Add a final event to the path, covering the diagnostic itself.
1538 We use the final enode from the epath, which might be different from
1539 the sd.m_enode, as the dedupe code doesn't care about enodes, just
1540 snodes. */
1541 sd.m_d->add_final_event (sm: sd.m_sm, enode: epath->get_final_enode (), stmt: sd.m_stmt,
1542 var: sd.m_var, state: sd.m_state, emission_path: &emission_path);
1543
1544 /* The "final" event might not be final; if the saved_diagnostic has a
1545 trailing eedge stashed, add any events for it. This is for use
1546 in handling longjmp, to show where a longjmp is rewinding to. */
1547 if (sd.m_trailing_eedge)
1548 add_events_for_eedge (pb, eedge: *sd.m_trailing_eedge, emission_path: &emission_path, NULL);
1549
1550 emission_path.inject_any_inlined_call_events (logger: get_logger ());
1551
1552 emission_path.prepare_for_emission (pd: sd.m_d.get ());
1553
1554 location_t loc = sd.m_loc;
1555 if (loc == UNKNOWN_LOCATION)
1556 loc = get_emission_location (stmt: sd.m_stmt, fun: sd.m_snode->m_fun, pd: *sd.m_d);
1557
1558 /* Allow the pending_diagnostic to fix up the locations of events. */
1559 emission_path.fixup_locations (pd: sd.m_d.get ());
1560
1561 gcc_rich_location rich_loc (loc);
1562 rich_loc.set_path (&emission_path);
1563
1564 auto_diagnostic_group d;
1565 auto_cfun sentinel (sd.m_snode->m_fun);
1566 if (sd.m_d->emit (&rich_loc, get_logger ()))
1567 {
1568 sd.emit_any_notes ();
1569
1570 unsigned num_dupes = sd.get_num_dupes ();
1571 if (flag_analyzer_show_duplicate_count && num_dupes > 0)
1572 inform_n (loc, num_dupes,
1573 "%i duplicate", "%i duplicates",
1574 num_dupes);
1575 if (flag_dump_analyzer_exploded_paths)
1576 {
1577 auto_timevar tv (TV_ANALYZER_DUMP);
1578 pretty_printer pp;
1579 pp_printf (&pp, "%s.%i.%s.epath.txt",
1580 dump_base_name, sd.get_index (), sd.m_d->get_kind ());
1581 char *filename = xstrdup (pp_formatted_text (&pp));
1582 epath->dump_to_file (filename, ext_state: eg.get_ext_state ());
1583 inform (loc, "exploded path written to %qs", filename);
1584 free (ptr: filename);
1585 }
1586 }
1587 delete pp;
1588}
1589
1590/* Emit a "path" of events to EMISSION_PATH describing the exploded path
1591 EPATH within EG. */
1592
void
diagnostic_manager::build_emission_path (const path_builder &pb,
					 const exploded_path &epath,
					 checker_path *emission_path) const
{
  LOG_SCOPE (get_logger ());

  /* Ask the pending_diagnostic which svalues/regions it cares about.  */
  interesting_t interest;
  pb.get_pending_diagnostic ()->mark_interesting_stuff (&interest);

  /* Add region creation events for any globals of interest, at the
     beginning of the path.  */
  {
    for (auto reg : interest.m_region_creation)
      switch (reg->get_memory_space ())
	{
	default:
	  /* Non-static memory spaces are handled elsewhere
	     (e.g. by add_event_on_final_node).  */
	  continue;
	case MEMSPACE_CODE:
	case MEMSPACE_GLOBALS:
	case MEMSPACE_READONLY_DATA:
	  {
	    /* Only emit an event if we can tie the region back to a
	       decl with a known source location.  */
	    const region *base_reg = reg->get_base_region ();
	    if (tree decl = base_reg->maybe_get_decl ())
	      if (DECL_P (decl)
		  && DECL_SOURCE_LOCATION (decl) != UNKNOWN_LOCATION)
		{
		  emission_path->add_region_creation_events
		    (pd: pb.get_pending_diagnostic (),
		     reg, NULL,
		     loc_info: event_loc_info (DECL_SOURCE_LOCATION (decl),
					NULL_TREE,
					0),
		     debug: m_verbosity > 3);
		}
	  }
	}
  }

  /* Walk EPATH, adding events as appropriate.  */
  for (unsigned i = 0; i < epath.m_edges.length (); i++)
    {
      const exploded_edge *eedge = epath.m_edges[i];
      add_events_for_eedge (pb, eedge: *eedge, emission_path, interest: &interest);
    }
  /* Handle any region created at the very last node of the path.  */
  add_event_on_final_node (pb, final_enode: epath.get_final_enode (),
			   emission_path, interest: &interest);
}
1641
1642/* Emit a region_creation_event when requested on the last statement in
1643 the path.
1644
1645 If a region_creation_event should be emitted on the last statement of the
1646 path, we need to peek to the successors to get whether the final enode
1647 created a region.
1648*/
1649
void
diagnostic_manager::add_event_on_final_node (const path_builder &pb,
					     const exploded_node *final_enode,
					     checker_path *emission_path,
					     interesting_t *interest) const
{
  const program_point &src_point = final_enode->get_point ();
  const int src_stack_depth = src_point.get_stack_depth ();
  const program_state &src_state = final_enode->get_state ();
  const region_model *src_model = src_state.m_region_model;

  /* Peek at FINAL_ENODE's successors: a region created at the final
     stmt only shows up in the state of the nodes that follow it.  */
  unsigned j;
  exploded_edge *e;
  FOR_EACH_VEC_ELT (final_enode->m_succs, j, e)
    {
      exploded_node *dst = e->m_dest;
      const program_state &dst_state = dst->get_state ();
      const region_model *dst_model = dst_state.m_region_model;
      /* Only bother scanning regions if some dynamic extent changed
	 along this eedge.  */
      if (src_model->get_dynamic_extents ()
	  != dst_model->get_dynamic_extents ())
	{
	  unsigned i;
	  const region *reg;
	  bool emitted = false;
	  FOR_EACH_VEC_ELT (interest->m_region_creation, i, reg)
	    {
	      const region *base_reg = reg->get_base_region ();
	      const svalue *old_extents
		= src_model->get_dynamic_extents (reg: base_reg);
	      const svalue *new_extents
		= dst_model->get_dynamic_extents (reg: base_reg);
	      /* Extents appearing (NULL -> non-NULL) means the region
		 was created on this eedge.  */
	      if (old_extents == NULL && new_extents != NULL)
		switch (base_reg->get_kind ())
		  {
		  default:
		    break;
		  case RK_HEAP_ALLOCATED:
		  case RK_ALLOCA:
		    emission_path->add_region_creation_events
		      (pd: pb.get_pending_diagnostic (),
		       reg,
		       model: dst_model,
		       loc_info: event_loc_info (src_point.get_location (),
					  src_point.get_fndecl (),
					  src_stack_depth),
		       debug: false);
		    emitted = true;
		    break;
		  }
	    }
	  /* Once events have been emitted for one successor, stop;
	     other successors would describe the same creation.  */
	  if (emitted)
	    break;
	}
    }
}
1705
1706/* Subclass of state_change_visitor that creates state_change_event
1707 instances. */
1708
class state_change_event_creator : public state_change_visitor
{
public:
  state_change_event_creator (const path_builder &pb,
			      const exploded_edge &eedge,
			      checker_path *emission_path)
  : m_pb (pb),
    m_eedge (eedge),
    m_emission_path (emission_path)
  {}

  /* Add a state_change_event to the emission path for a change of
     global sm-state, but only for the state machine associated with
     the diagnostic being emitted.  Always returns false, so that
     for_each_state_change keeps iterating.  */
  bool on_global_state_change (const state_machine &sm,
			       state_machine::state_t src_sm_val,
			       state_machine::state_t dst_sm_val)
    final override
  {
    /* Ignore state machines other than the diagnostic's own.  */
    if (&sm != m_pb.get_sm ())
      return false;
    const exploded_node *src_node = m_eedge.m_src;
    const program_point &src_point = src_node->get_point ();
    const int src_stack_depth = src_point.get_stack_depth ();
    const exploded_node *dst_node = m_eedge.m_dest;
    const gimple *stmt = src_point.get_stmt ();
    const supernode *supernode = src_point.get_supernode ();
    const program_state &dst_state = dst_node->get_state ();

    int stack_depth = src_stack_depth;

    m_emission_path->add_event
      (event: make_unique<state_change_event> (args&: supernode,
					   args&: stmt,
					   args&: stack_depth,
					   args: sm,
					   NULL,
					   args&: src_sm_val,
					   args&: dst_sm_val,
					   NULL,
					   args: dst_state,
					   args&: src_node));
    return false;
  }

  /* As above, but for a per-svalue sm-state change.  */
  bool on_state_change (const state_machine &sm,
			state_machine::state_t src_sm_val,
			state_machine::state_t dst_sm_val,
			const svalue *sval,
			const svalue *dst_origin_sval) final override
  {
    /* Ignore state machines other than the diagnostic's own.  */
    if (&sm != m_pb.get_sm ())
      return false;
    const exploded_node *src_node = m_eedge.m_src;
    const program_point &src_point = src_node->get_point ();
    const int src_stack_depth = src_point.get_stack_depth ();
    const exploded_node *dst_node = m_eedge.m_dest;
    const gimple *stmt = src_point.get_stmt ();
    const supernode *supernode = src_point.get_supernode ();
    const program_state &dst_state = dst_node->get_state ();

    int stack_depth = src_stack_depth;

    /* For CFG edges, attribute the change to the last stmt of the
       source supernode.  */
    if (m_eedge.m_sedge
	&& m_eedge.m_sedge->m_kind == SUPEREDGE_CFG_EDGE)
      {
	supernode = src_point.get_supernode ();
	stmt = supernode->get_last_stmt ();
	stack_depth = src_stack_depth;
      }

    /* Bulletproofing for state changes at calls/returns;
       TODO: is there a better way? */
    if (!stmt)
      return false;

    m_emission_path->add_event
      (event: make_unique<state_change_event> (args&: supernode,
					   args&: stmt,
					   args&: stack_depth,
					   args: sm,
					   args&: sval,
					   args&: src_sm_val,
					   args&: dst_sm_val,
					   args&: dst_origin_sval,
					   args: dst_state,
					   args&: src_node));
    return false;
  }

  const path_builder &m_pb;
  const exploded_edge &m_eedge;
  checker_path *m_emission_path;
};
1800
1801/* Compare SRC_STATE and DST_STATE (which use EXT_STATE), and call
1802 VISITOR's on_state_change for every sm-state change that occurs
1803 to a tree, and on_global_state_change for every global state change
1804 that occurs.
1805
1806 This determines the state changes that ought to be reported to
1807 the user: a combination of the effects of changes to sm_state_map
1808 (which maps svalues to sm-states), and of region_model changes
1809 (which map trees to svalues).
1810
1811 Bail out early and return true if any call to on_global_state_change
1812 or on_state_change returns true, otherwise return false.
1813
1814 This is split out to make it easier to experiment with changes to
1815 exploded_node granularity (so that we can observe what state changes
1816 lead to state_change_events being emitted). */
1817
bool
for_each_state_change (const program_state &src_state,
		       const program_state &dst_state,
		       const extrinsic_state &ext_state,
		       state_change_visitor *visitor)
{
  /* Both states must have one sm_state_map per checker.  */
  gcc_assert (src_state.m_checker_states.length ()
	      == ext_state.get_num_checkers ());
  gcc_assert (dst_state.m_checker_states.length ()
	      == ext_state.get_num_checkers ());
  for (unsigned i = 0; i < ext_state.get_num_checkers (); i++)
    {
      const state_machine &sm = ext_state.get_sm (idx: i);
      const sm_state_map &src_smap = *src_state.m_checker_states[i];
      const sm_state_map &dst_smap = *dst_state.m_checker_states[i];

      /* Add events for any global state changes.  */
      if (src_smap.get_global_state () != dst_smap.get_global_state ())
	if (visitor->on_global_state_change (sm,
					     src_sm_val: src_smap.get_global_state (),
					     dst_sm_val: dst_smap.get_global_state ()))
	  /* Visitor requested an early exit.  */
	  return true;

      /* Add events for per-svalue state changes.  */
      for (sm_state_map::iterator_t iter = dst_smap.begin ();
	   iter != dst_smap.end ();
	   ++iter)
	{
	  const svalue *sval = (*iter).first;
	  state_machine::state_t dst_sm_val = (*iter).second.m_state;
	  /* Look up the svalue's state on the source side for
	     comparison; only report actual changes.  */
	  state_machine::state_t src_sm_val
	    = src_smap.get_state (sval, ext_state);
	  if (dst_sm_val != src_sm_val)
	    {
	      const svalue *origin_sval = (*iter).second.m_origin;
	      if (visitor->on_state_change (sm, src_sm_val, dst_sm_val,
					    dst_sval: sval, dst_origin_sval: origin_sval))
		/* Visitor requested an early exit.  */
		return true;
	    }
	}
    }
  /* No visitor callback requested an early exit.  */
  return false;
}
1861
1862/* An sm_context for adding state_change_event on assignments to NULL,
1863 where the default state isn't m_start. Storing such state in the
1864 sm_state_map would lead to bloat of the exploded_graph, so we want
1865 to leave it as a default state, and inject state change events here
1866 when we have a diagnostic.
1867 Find transitions of constants, for handling on_zero_assignment. */
1868
1869struct null_assignment_sm_context : public sm_context
1870{
1871 null_assignment_sm_context (int sm_idx,
1872 const state_machine &sm,
1873 const program_state *old_state,
1874 const program_state *new_state,
1875 const gimple *stmt,
1876 const program_point *point,
1877 checker_path *emission_path,
1878 const extrinsic_state &ext_state)
1879 : sm_context (sm_idx, sm), m_old_state (old_state), m_new_state (new_state),
1880 m_stmt (stmt), m_point (point), m_emission_path (emission_path),
1881 m_ext_state (ext_state)
1882 {
1883 }
1884
1885 tree get_fndecl_for_call (const gcall */*call*/) final override
1886 {
1887 return NULL_TREE;
1888 }
1889
1890 state_machine::state_t get_state (const gimple *stmt ATTRIBUTE_UNUSED,
1891 tree var) final override
1892 {
1893 const svalue *var_old_sval
1894 = m_old_state->m_region_model->get_rvalue (expr: var, NULL);
1895 const sm_state_map *old_smap = m_old_state->m_checker_states[m_sm_idx];
1896
1897 state_machine::state_t current
1898 = old_smap->get_state (sval: var_old_sval, ext_state: m_ext_state);
1899
1900 return current;
1901 }
1902
1903 state_machine::state_t get_state (const gimple *stmt ATTRIBUTE_UNUSED,
1904 const svalue *sval) final override
1905 {
1906 const sm_state_map *old_smap = m_old_state->m_checker_states[m_sm_idx];
1907 state_machine::state_t current = old_smap->get_state (sval, ext_state: m_ext_state);
1908 return current;
1909 }
1910
1911 void set_next_state (const gimple *stmt,
1912 tree var,
1913 state_machine::state_t to,
1914 tree origin ATTRIBUTE_UNUSED) final override
1915 {
1916 state_machine::state_t from = get_state (stmt, var);
1917 if (from != m_sm.get_start_state ())
1918 return;
1919 if (!is_transition_to_null (s: to))
1920 return;
1921
1922 const svalue *var_new_sval
1923 = m_new_state->m_region_model->get_rvalue (expr: var, NULL);
1924
1925 const supernode *supernode = m_point->get_supernode ();
1926 int stack_depth = m_point->get_stack_depth ();
1927
1928 m_emission_path->add_event
1929 (event: make_unique<state_change_event> (args&: supernode,
1930 args&: m_stmt,
1931 args&: stack_depth,
1932 args: m_sm,
1933 args&: var_new_sval,
1934 args&: from, args&: to,
1935 NULL,
1936 args: *m_new_state,
1937 NULL));
1938 }
1939
1940 void set_next_state (const gimple *stmt,
1941 const svalue *sval,
1942 state_machine::state_t to,
1943 tree origin ATTRIBUTE_UNUSED) final override
1944 {
1945 state_machine::state_t from = get_state (stmt, sval);
1946 if (from != m_sm.get_start_state ())
1947 return;
1948 if (!is_transition_to_null (s: to))
1949 return;
1950
1951 const supernode *supernode = m_point->get_supernode ();
1952 int stack_depth = m_point->get_stack_depth ();
1953
1954 m_emission_path->add_event
1955 (event: make_unique<state_change_event> (args&: supernode,
1956 args&: m_stmt,
1957 args&: stack_depth,
1958 args: m_sm,
1959 args&: sval,
1960 args&: from, args&: to,
1961 NULL,
1962 args: *m_new_state,
1963 NULL));
1964 }
1965
1966 void warn (const supernode *, const gimple *,
1967 tree, std::unique_ptr<pending_diagnostic>) final override
1968 {
1969 }
1970 void warn (const supernode *, const gimple *,
1971 const svalue *, std::unique_ptr<pending_diagnostic>) final override
1972 {
1973 }
1974
1975 tree get_diagnostic_tree (tree expr) final override
1976 {
1977 return expr;
1978 }
1979
1980 tree get_diagnostic_tree (const svalue *sval) final override
1981 {
1982 return m_new_state->m_region_model->get_representative_tree (sval);
1983 }
1984
1985 state_machine::state_t get_global_state () const final override
1986 {
1987 return 0;
1988 }
1989
1990 void set_global_state (state_machine::state_t) final override
1991 {
1992 /* No-op. */
1993 }
1994
1995 void on_custom_transition (custom_transition *) final override
1996 {
1997 }
1998
  /* Implementation of sm_context::is_zero_assignment.
     If STMT is a GIMPLE assignment whose RHS folds to zero in the
     post-stmt state's region model (e.g. "ptr = NULL;"), return the
     LHS tree; otherwise return NULL_TREE.  */
  tree is_zero_assignment (const gimple *stmt) final override
  {
    const gassign *assign_stmt = dyn_cast <const gassign *> (p: stmt);
    if (!assign_stmt)
      return NULL_TREE;
    if (const svalue *sval
	= m_new_state->m_region_model->get_gassign_result (assign: assign_stmt, NULL))
      if (tree cst = sval->maybe_get_constant ())
	if (::zerop(cst))
	  return gimple_assign_lhs (gs: assign_stmt);
    return NULL_TREE;
  }
2011
  /* Accessor for the state before the assignment being considered.  */
  const program_state *get_old_program_state () const final override
  {
    return m_old_state;
  }
  /* Accessor for the state after the assignment being considered.  */
  const program_state *get_new_program_state () const final override
  {
    return m_new_state;
  }
2020
  /* We only care about transitions to the "null" state
     within sm-malloc.  Special-case this, identifying the state by
     name, since the state object itself isn't visible here.  */
  static bool is_transition_to_null (state_machine::state_t s)
  {
    return !strcmp (s1: s->get_name (), s2: "null");
  }
2027
  const program_state *m_old_state;	/* State before the stmt.  */
  const program_state *m_new_state;	/* State after the stmt.  */
  const gimple *m_stmt;			/* The stmt being considered.  */
  const program_point *m_point;		/* Location of the stmt.  */
  checker_path *m_emission_path;	/* Path to append events to.  */
  const extrinsic_state &m_ext_state;	/* The state machines in use.  */
2034};
2035
2036/* Subroutine of diagnostic_manager::build_emission_path.
2037 Add any events for EEDGE to EMISSION_PATH. */
2038
void
diagnostic_manager::add_events_for_eedge (const path_builder &pb,
					  const exploded_edge &eedge,
					  checker_path *emission_path,
					  interesting_t *interest) const
{
  const exploded_node *src_node = eedge.m_src;
  const program_point &src_point = src_node->get_point ();
  const int src_stack_depth = src_point.get_stack_depth ();
  const exploded_node *dst_node = eedge.m_dest;
  const program_point &dst_point = dst_node->get_point ();
  const int dst_stack_depth = dst_point.get_stack_depth ();
  /* Log which exploded edge is being processed.  */
  if (get_logger ())
    {
      get_logger ()->start_log_line ();
      pretty_printer *pp = get_logger ()->get_printer ();
      pp_printf (pp, "EN %i -> EN %i: ",
		 eedge.m_src->m_index,
		 eedge.m_dest->m_index);
      src_point.print (pp, f: format (false));
      pp_string (pp, "-> ");
      dst_point.print (pp, f: format (false));
      get_logger ()->end_log_line ();
    }
  const program_state &src_state = src_node->get_state ();
  const program_state &dst_state = dst_node->get_state ();

  /* Add state change events for the states that have changed.
     We add these before events for superedges, so that if we have a
     state_change_event due to following an edge, we'll get this sequence
     of events:

      | if (!ptr)
      |    ~
      |    |
      |    (1) assuming 'ptr' is non-NULL  (state_change_event)
      |    (2) following 'false' branch... (start_cfg_edge_event)
     ...
      | do_something (ptr);
      | ~~~~~~~~~~~~~^~~~~
      |              |
      |              (3) ...to here        (end_cfg_edge_event).  */
  state_change_event_creator visitor (pb, eedge, emission_path);
  for_each_state_change (src_state, dst_state, ext_state: pb.get_ext_state (),
			 visitor: &visitor);

  /* Allow non-standard edges to add events, e.g. when rewinding from
     longjmp to a setjmp.  */
  if (eedge.m_custom_info)
    eedge.m_custom_info->add_events_to_path (emission_path, eedge);

  /* Add events for superedges, function entries, and for statements.  */
  switch (dst_point.get_kind ())
    {
    default:
      break;
    case PK_BEFORE_SUPERNODE:
      if (src_point.get_kind () == PK_AFTER_SUPERNODE)
	{
	  if (eedge.m_sedge)
	    add_events_for_superedge (pb, eedge, emission_path);
	}
      /* Add function entry events.  */
      if (dst_point.get_supernode ()->entry_p ())
	{
	  pb.get_pending_diagnostic ()->add_function_entry_event
	    (eedge, emission_path);
	  /* Create region_creation_events for on-stack regions within
	     this frame.  Only decls with a known source location are
	     given events.  */
	  if (interest)
	    {
	      unsigned i;
	      const region *reg;
	      FOR_EACH_VEC_ELT (interest->m_region_creation, i, reg)
		if (const frame_region *frame = reg->maybe_get_frame_region ())
		  if (frame->get_fndecl () == dst_point.get_fndecl ())
		    {
		      const region *base_reg = reg->get_base_region ();
		      if (tree decl = base_reg->maybe_get_decl ())
			if (DECL_P (decl)
			    && DECL_SOURCE_LOCATION (decl) != UNKNOWN_LOCATION)
			  {
			    emission_path->add_region_creation_events
			      (pd: pb.get_pending_diagnostic (),
			       reg, model: dst_state.m_region_model,
			       loc_info: event_loc_info (DECL_SOURCE_LOCATION (decl),
					      dst_point.get_fndecl (),
					      dst_stack_depth),
			       debug: m_verbosity > 3);
			  }
		    }
	    }
	}
      break;
    case PK_BEFORE_STMT:
      {
	const gimple *stmt = dst_point.get_stmt ();
	const gcall *call = dyn_cast <const gcall *> (p: stmt);
	/* setjmp calls get a dedicated event kind so that later rewind
	   events can refer back to them; all other stmts get a plain
	   statement_event.  */
	if (call && is_setjmp_call_p (call))
	  emission_path->add_event
	    (event: make_unique<setjmp_event> (args: event_loc_info (stmt->location,
						 dst_point.get_fndecl (),
						 dst_stack_depth),
				    args&: dst_node,
				    args&: call));
	else
	  emission_path->add_event
	    (event: make_unique<statement_event> (args&: stmt,
				       args: dst_point.get_fndecl (),
				       args: dst_stack_depth, args: dst_state));

	/* Create state change events for assignment to NULL.
	   Iterate through the stmts in dst_enode, adding state change
	   events for them.  */
	if (dst_state.m_region_model)
	  {
	    log_scope s (get_logger (), "processing run of stmts");
	    program_state iter_state (dst_state);
	    program_point iter_point (dst_point);
	    while (1)
	      {
		/* NOTE: this shadows the outer "stmt".  */
		const gimple *stmt = iter_point.get_stmt ();
		if (const gassign *assign = dyn_cast<const gassign *> (p: stmt))
		  {
		    const extrinsic_state &ext_state = pb.get_ext_state ();
		    program_state old_state (iter_state);
		    iter_state.m_region_model->on_assignment (stmt: assign, NULL);
		    /* Give each state machine a chance to synthesize a
		       state change event for this assignment, via
		       null_assignment_sm_context.  */
		    for (unsigned i = 0; i < ext_state.get_num_checkers (); i++)
		      {
			const state_machine &sm = ext_state.get_sm (idx: i);
			null_assignment_sm_context sm_ctxt (i, sm,
							    &old_state,
							    &iter_state,
							    stmt,
							    &iter_point,
							    emission_path,
							    pb.get_ext_state ());
			sm.on_stmt (sm_ctxt: &sm_ctxt, node: dst_point.get_supernode (), stmt);
			// TODO: what about phi nodes?
		      }
		  }
		iter_point.next_stmt ();
		/* Stop at the end of the supernode, or (when the enode
		   has multiple successors) when we reach the first
		   successor's point.  */
		if (iter_point.get_kind () == PK_AFTER_SUPERNODE
		    || (dst_node->m_succs.length () > 1
			&& (iter_point
			    == dst_node->m_succs[0]->m_dest->get_point ())))
		  break;
	      }

	  }
      }
      break;
    }

  /* Look for changes in dynamic extents, which will identify
     the creation of heap-based regions and alloca regions.  */
  if (interest)
    {
      const region_model *src_model = src_state.m_region_model;
      const region_model *dst_model = dst_state.m_region_model;
      if (src_model->get_dynamic_extents ()
	  != dst_model->get_dynamic_extents ())
	{
	  unsigned i;
	  const region *reg;
	  FOR_EACH_VEC_ELT (interest->m_region_creation, i, reg)
	    {
	      const region *base_reg = reg->get_base_region ();
	      const svalue *old_extents
		= src_model->get_dynamic_extents (reg: base_reg);
	      const svalue *new_extents
		= dst_model->get_dynamic_extents (reg: base_reg);
	      /* A region that gained extents across this edge was
		 created here.  */
	      if (old_extents == NULL && new_extents != NULL)
		switch (base_reg->get_kind ())
		  {
		  default:
		    break;
		  case RK_HEAP_ALLOCATED:
		  case RK_ALLOCA:
		    emission_path->add_region_creation_events
		      (pd: pb.get_pending_diagnostic (),
		       reg, model: dst_model,
		       loc_info: event_loc_info (src_point.get_location (),
				      src_point.get_fndecl (),
				      src_stack_depth),
		       debug: m_verbosity > 3);
		    break;
		  }
	    }
	}
    }

  /* If this is the edge at which the feasibility checker would have
     rejected the path, emit a debugging event saying so.  */
  if (pb.get_feasibility_problem ()
      && &pb.get_feasibility_problem ()->m_eedge == &eedge)
    {
      pretty_printer pp;
      pp_format_decoder (&pp) = default_tree_printer;
      pp_string (&pp,
		 "this path would have been rejected as infeasible"
		 " at this edge: ");
      pb.get_feasibility_problem ()->dump_to_pp (pp: &pp);
      emission_path->add_event
	(event: make_unique<precanned_custom_event>
	 (args: event_loc_info (dst_point.get_location (),
			  dst_point.get_fndecl (),
			  dst_stack_depth),
	  args: pp_formatted_text (&pp)));
    }
}
2248
2249/* Return true if EEDGE is a significant edge in the path to the diagnostic
2250 for PB.
2251
2252 Consider all of the sibling out-eedges from the same source enode
2253 as EEDGE.
2254 If there's no path from the destinations of those eedges to the
2255 diagnostic enode, then we have to take this eedge and thus it's
2256 significant.
2257
2258 Conversely if there is a path from the destination of any other sibling
2259 eedge to the diagnostic enode, then this edge is insignificant.
2260
2261 Example 1: redundant if-else:
2262
2263 (A) if (...) A
2264 (B) ... / \
2265 else B C
2266 (C) ... \ /
2267 (D) [DIAGNOSTIC] D
2268
2269 D is reachable by either B or C, so neither of these edges
2270 are significant.
2271
2272 Example 2: pertinent if-else:
2273
2274 (A) if (...) A
2275 (B) ... / \
2276 else B C
2277 (C) [NECESSARY CONDITION] | |
2278 (D) [POSSIBLE DIAGNOSTIC] D1 D2
2279
2280 D becomes D1 and D2 in the exploded graph, where the diagnostic occurs
2281 at D2. D2 is only reachable via C, so the A -> C edge is significant.
2282
2283 Example 3: redundant loop:
2284
2285 (A) while (...) +-->A
2286 (B) ... | / \
2287 (C) ... +-B C
2288 (D) [DIAGNOSTIC] |
2289 D
2290
2291 D is reachable from both B and C, so the A->C edge is not significant. */
2292
2293bool
2294diagnostic_manager::significant_edge_p (const path_builder &pb,
2295 const exploded_edge &eedge) const
2296{
2297 int i;
2298 exploded_edge *sibling;
2299 FOR_EACH_VEC_ELT (eedge.m_src->m_succs, i, sibling)
2300 {
2301 if (sibling == &eedge)
2302 continue;
2303 if (pb.reachable_from_p (src_enode: sibling->m_dest))
2304 {
2305 if (get_logger ())
2306 get_logger ()->log (fmt: " edge EN: %i -> EN: %i is insignificant as"
2307 " EN: %i is also reachable via"
2308 " EN: %i -> EN: %i",
2309 eedge.m_src->m_index, eedge.m_dest->m_index,
2310 pb.get_diag_node ()->m_index,
2311 sibling->m_src->m_index,
2312 sibling->m_dest->m_index);
2313 return false;
2314 }
2315 }
2316
2317 return true;
2318}
2319
2320/* Subroutine of diagnostic_manager::add_events_for_eedge
2321 where EEDGE has an underlying superedge i.e. a CFG edge,
2322 or an interprocedural call/return.
2323 Add any events for the superedge to EMISSION_PATH. */
2324
void
diagnostic_manager::add_events_for_superedge (const path_builder &pb,
					      const exploded_edge &eedge,
					      checker_path *emission_path)
  const
{
  gcc_assert (eedge.m_sedge);

  /* Give diagnostics an opportunity to override this function.  */
  pending_diagnostic *pd = pb.get_pending_diagnostic ();
  if (pd->maybe_add_custom_events_for_superedge (eedge, emission_path))
    return;

  /* Don't add events for insignificant edges at verbosity levels below 3.  */
  if (m_verbosity < 3)
    if (!significant_edge_p (pb, eedge))
      return;

  const exploded_node *src_node = eedge.m_src;
  const program_point &src_point = src_node->get_point ();
  const exploded_node *dst_node = eedge.m_dest;
  const program_point &dst_point = dst_node->get_point ();
  const int src_stack_depth = src_point.get_stack_depth ();
  const int dst_stack_depth = dst_point.get_stack_depth ();
  const gimple *last_stmt = src_point.get_supernode ()->get_last_stmt ();

  switch (eedge.m_sedge->m_kind)
    {
    case SUPEREDGE_CFG_EDGE:
      {
	/* CFG edges get a start/end event pair: "following 'true'
	   branch..." / "...to here".  */
	emission_path->add_event
	  (event: make_unique<start_cfg_edge_event>
	     (args: eedge,
	      args: event_loc_info (last_stmt ? last_stmt->location : UNKNOWN_LOCATION,
			      src_point.get_fndecl (),
			      src_stack_depth)));
	emission_path->add_event
	  (event: make_unique<end_cfg_edge_event>
	     (args: eedge,
	      args: event_loc_info (dst_point.get_supernode ()->get_start_location (),
			      dst_point.get_fndecl (),
			      dst_stack_depth)));
      }
      break;

    case SUPEREDGE_CALL:
      /* Delegate so that diagnostics can customize call descriptions.  */
      pd->add_call_event (eedge, emission_path);
      break;

    case SUPEREDGE_INTRAPROCEDURAL_CALL:
      {
	/* TODO: add a subclass for this, or generate events for the
	   summary.  */
	emission_path->add_event
	  (event: make_unique<debug_event> (args: event_loc_info (last_stmt
						? last_stmt->location
						: UNKNOWN_LOCATION,
						src_point.get_fndecl (),
						src_stack_depth),
				  args: "call summary"));
      }
      break;

    case SUPEREDGE_RETURN:
      {
	const return_superedge *return_edge
	  = as_a <const return_superedge *> (p: eedge.m_sedge);

	/* Locate the event at the call stmt in the caller, if known.  */
	const gcall *call_stmt = return_edge->get_call_stmt ();
	emission_path->add_event
	  (event: make_unique<return_event> (args: eedge,
				   args: event_loc_info (call_stmt
						   ? call_stmt->location
						   : UNKNOWN_LOCATION,
						   dst_point.get_fndecl (),
						   dst_stack_depth)));
      }
      break;
    }
}
2405
2406/* Prune PATH, based on the verbosity level, to the most pertinent
2407 events for a diagnostic that involves VAR ending in state STATE
2408 (for state machine SM).
2409
2410 PATH is updated in place, and the redundant checker_events are deleted.
2411
2412 As well as deleting events, call record_critical_state on events in
2413 which state critical to the pending_diagnostic is being handled; see
2414 the comment for diagnostic_manager::prune_for_sm_diagnostic. */
2415
void
diagnostic_manager::prune_path (checker_path *path,
				const state_machine *sm,
				const svalue *sval,
				state_machine::state_t state) const
{
  LOG_FUNC (get_logger ());
  path->maybe_log (logger: get_logger (), desc: "path");
  /* The passes are ordered: sm-based pruning first, then removal of
     redundant interprocedural triples, then (optionally) system-header
     events, then consolidation of compound conditionals, and finally
     function-entry cleanup for intraprocedural paths.  */
  prune_for_sm_diagnostic (path, sm, sval, state);
  prune_interproc_events (path);
  if (! flag_analyzer_show_events_in_system_headers)
    prune_system_headers (path);
  consolidate_conditions (path);
  finish_pruning (path);
  path->maybe_log (logger: get_logger (), desc: "pruned");
}
2432
/* A cheap test to determine if EXPR can be the expression of interest in
   an sm-diagnostic, so that we can reject cases where we have a non-lvalue.
   We don't always have a model when calling this, so we can't use
   tentative_region_model_context, so there can be false positives. */
2437
2438static bool
2439can_be_expr_of_interest_p (tree expr)
2440{
2441 if (!expr)
2442 return false;
2443
2444 /* Reject constants. */
2445 if (CONSTANT_CLASS_P (expr))
2446 return false;
2447
2448 /* Otherwise assume that it can be an lvalue. */
2449 return true;
2450}
2451
2452/* First pass of diagnostic_manager::prune_path: apply verbosity level,
2453 pruning unrelated state change events.
2454
2455 Iterate backwards through PATH, skipping state change events that aren't
2456 VAR but update the pertinent VAR when state-copying occurs.
2457
2458 As well as deleting events, call record_critical_state on events in
2459 which state critical to the pending_diagnostic is being handled, so
2460 that the event's get_desc vfunc can potentially supply a more precise
2461 description of the event to the user.
2462 e.g. improving
2463 "calling 'foo' from 'bar'"
2464 to
2465 "passing possibly-NULL pointer 'ptr' to 'foo' from 'bar' as param 1"
2466 when the diagnostic relates to later dereferencing 'ptr'. */
2467
2468void
2469diagnostic_manager::prune_for_sm_diagnostic (checker_path *path,
2470 const state_machine *sm,
2471 const svalue *sval,
2472 state_machine::state_t state) const
2473{
2474 int idx = path->num_events () - 1;
2475 while (idx >= 0 && idx < (signed)path->num_events ())
2476 {
2477 checker_event *base_event = path->get_checker_event (idx);
2478 if (get_logger ())
2479 {
2480 if (sm)
2481 {
2482 if (sval)
2483 {
2484 label_text sval_desc = sval->get_desc ();
2485 log (fmt: "considering event %i (%s), with sval: %qs, state: %qs",
2486 idx, event_kind_to_string (ek: base_event->m_kind),
2487 sval_desc.get (), state->get_name ());
2488 }
2489 else
2490 log (fmt: "considering event %i (%s), with global state: %qs",
2491 idx, event_kind_to_string (ek: base_event->m_kind),
2492 state->get_name ());
2493 }
2494 else
2495 log (fmt: "considering event %i", idx);
2496 }
2497
2498 switch (base_event->m_kind)
2499 {
2500 default:
2501 gcc_unreachable ();
2502
2503 case EK_DEBUG:
2504 if (m_verbosity < 4)
2505 {
2506 log (fmt: "filtering event %i: debug event", idx);
2507 path->delete_event (idx);
2508 }
2509 break;
2510
2511 case EK_CUSTOM:
2512 /* Don't filter custom events. */
2513 break;
2514
2515 case EK_STMT:
2516 {
2517 if (m_verbosity < 4)
2518 {
2519 log (fmt: "filtering event %i: statement event", idx);
2520 path->delete_event (idx);
2521 }
2522 }
2523 break;
2524
2525 case EK_REGION_CREATION:
2526 /* Don't filter these. */
2527 break;
2528
2529 case EK_FUNCTION_ENTRY:
2530 if (m_verbosity < 1)
2531 {
2532 log (fmt: "filtering event %i: function entry", idx);
2533 path->delete_event (idx);
2534 }
2535 break;
2536
2537 case EK_STATE_CHANGE:
2538 {
2539 state_change_event *state_change = (state_change_event *)base_event;
2540 gcc_assert (state_change->m_dst_state.m_region_model);
2541
2542 if (state_change->m_sval == sval)
2543 {
2544 if (state_change->m_origin)
2545 {
2546 if (get_logger ())
2547 {
2548 label_text sval_desc = sval->get_desc ();
2549 label_text origin_sval_desc
2550 = state_change->m_origin->get_desc ();
2551 log (fmt: "event %i:"
2552 " switching var of interest from %qs to %qs",
2553 idx, sval_desc.get (),
2554 origin_sval_desc.get ());
2555 }
2556 sval = state_change->m_origin;
2557 }
2558 log (fmt: "event %i: switching state of interest from %qs to %qs",
2559 idx, state_change->m_to->get_name (),
2560 state_change->m_from->get_name ());
2561 state = state_change->m_from;
2562 }
2563 else if (m_verbosity < 4)
2564 {
2565 if (get_logger ())
2566 {
2567 if (state_change->m_sval)
2568 {
2569 label_text change_sval_desc
2570 = state_change->m_sval->get_desc ();
2571 if (sval)
2572 {
2573 label_text sval_desc = sval->get_desc ();
2574 log (fmt: "filtering event %i:"
2575 " state change to %qs unrelated to %qs",
2576 idx, change_sval_desc.get (),
2577 sval_desc.get ());
2578 }
2579 else
2580 log (fmt: "filtering event %i: state change to %qs",
2581 idx, change_sval_desc.get ());
2582 }
2583 else
2584 log (fmt: "filtering event %i: global state change", idx);
2585 }
2586 path->delete_event (idx);
2587 }
2588 }
2589 break;
2590
2591 case EK_START_CFG_EDGE:
2592 {
2593 cfg_edge_event *event = (cfg_edge_event *)base_event;
2594
2595 /* TODO: is this edge significant to var?
2596 See if var can be in other states in the dest, but not
2597 in other states in the src?
2598 Must have multiple sibling edges. */
2599
2600 if (event->should_filter_p (verbosity: m_verbosity))
2601 {
2602 log (fmt: "filtering events %i and %i: CFG edge", idx, idx + 1);
2603 path->delete_event (idx);
2604 /* Also delete the corresponding EK_END_CFG_EDGE. */
2605 gcc_assert (path->get_checker_event (idx)->m_kind
2606 == EK_END_CFG_EDGE);
2607 path->delete_event (idx);
2608 }
2609 }
2610 break;
2611
2612 case EK_END_CFG_EDGE:
2613 /* These come in pairs with EK_START_CFG_EDGE events and are
2614 filtered when their start event is filtered. */
2615 break;
2616
2617 case EK_CALL_EDGE:
2618 {
2619 call_event *event = (call_event *)base_event;
2620 const region_model *callee_model
2621 = event->m_eedge.m_dest->get_state ().m_region_model;
2622 const region_model *caller_model
2623 = event->m_eedge.m_src->get_state ().m_region_model;
2624 tree callee_var = callee_model->get_representative_tree (sval);
2625 callsite_expr expr;
2626
2627 tree caller_var;
2628 if(event->m_sedge)
2629 {
2630 const callgraph_superedge& cg_superedge
2631 = event->get_callgraph_superedge ();
2632 if (cg_superedge.m_cedge)
2633 caller_var
2634 = cg_superedge.map_expr_from_callee_to_caller (callee_expr: callee_var,
2635 out: &expr);
2636 else
2637 caller_var = caller_model->get_representative_tree (sval);
2638 }
2639 else
2640 caller_var = caller_model->get_representative_tree (sval);
2641
2642 if (caller_var)
2643 {
2644 if (get_logger ())
2645 {
2646 label_text sval_desc = sval->get_desc ();
2647 log (fmt: "event %i:"
2648 " recording critical state for %qs at call"
2649 " from %qE in callee to %qE in caller",
2650 idx, sval_desc.get (), callee_var, caller_var);
2651 }
2652 if (expr.param_p ())
2653 event->record_critical_state (var: caller_var, state);
2654 }
2655 }
2656 break;
2657
2658 case EK_RETURN_EDGE:
2659 {
2660 if (sval)
2661 {
2662 return_event *event = (return_event *)base_event;
2663 const region_model *caller_model
2664 = event->m_eedge.m_dest->get_state ().m_region_model;
2665 tree caller_var = caller_model->get_representative_tree (sval);
2666 const region_model *callee_model
2667 = event->m_eedge.m_src->get_state ().m_region_model;
2668 callsite_expr expr;
2669
2670 tree callee_var;
2671 if (event->m_sedge)
2672 {
2673 const callgraph_superedge& cg_superedge
2674 = event->get_callgraph_superedge ();
2675 if (cg_superedge.m_cedge)
2676 callee_var
2677 = cg_superedge.map_expr_from_caller_to_callee (caller_expr: caller_var,
2678 out: &expr);
2679 else
2680 callee_var = callee_model->get_representative_tree (sval);
2681 }
2682 else
2683 callee_var = callee_model->get_representative_tree (sval);
2684
2685 if (callee_var)
2686 {
2687 if (get_logger ())
2688 {
2689 label_text sval_desc = sval->get_desc ();
2690 log (fmt: "event %i:"
2691 " recording critical state for %qs at return"
2692 " from %qE in caller to %qE in callee",
2693 idx, sval_desc.get (), callee_var, callee_var);
2694 }
2695 if (expr.return_value_p ())
2696 event->record_critical_state (var: callee_var, state);
2697 }
2698 }
2699 }
2700 break;
2701
2702 case EK_INLINED_CALL:
2703 /* We don't expect to see these yet, as they're added later.
2704 We'd want to keep them around. */
2705 break;
2706
2707 case EK_SETJMP:
2708 /* TODO: only show setjmp_events that matter i.e. those for which
2709 there is a later rewind event using them. */
2710 case EK_REWIND_FROM_LONGJMP:
2711 case EK_REWIND_TO_SETJMP:
2712 break;
2713
2714 case EK_WARNING:
2715 /* Always show the final "warning" event in the path. */
2716 break;
2717 }
2718 idx--;
2719 }
2720}
2721
2722/* Subroutine of diagnostic_manager::prune_for_sm_diagnostic.
2723 If *EXPR is not suitable to be the expression of interest in
2724 an sm-diagnostic, set *EXPR to NULL and log. */
2725
void
diagnostic_manager::update_for_unsuitable_sm_exprs (tree *expr) const
{
  gcc_assert (expr);
  /* Discard expressions that can't be lvalues (e.g. constants), since
     they can't be the expression of interest; log the rejection.  */
  if (*expr && !can_be_expr_of_interest_p (expr: *expr))
    {
      log (fmt: "new var %qE is unsuitable; setting var to NULL", *expr);
      *expr = NULL_TREE;
    }
}
2736
2737/* Second pass of diagnostic_manager::prune_path: remove redundant
2738 interprocedural information.
2739
2740 For example, given:
2741 (1)- calling "f2" from "f1"
2742 (2)--- entry to "f2"
2743 (3)--- calling "f3" from "f2"
2744 (4)----- entry to "f3"
   (5)--- returning to "f2" from "f3"
   (6)- returning to "f1" from "f2"
2747 with no other intervening events, then none of these events are
2748 likely to be interesting to the user.
2749
2750 Prune [..., call, function-entry, return, ...] triples repeatedly
2751 until nothing has changed. For the example above, this would
2752 remove events (3, 4, 5), and then remove events (1, 2, 6). */
2753
void
diagnostic_manager::prune_interproc_events (checker_path *path) const
{
  /* Iterate to a fixed point, since deleting one triple can expose
     another (e.g. nested calls collapse outwards).  */
  bool changed = false;
  do
    {
      changed = false;
      int idx = (signed)path->num_events () - 1;
      while (idx >= 0)
	{
	  /* Prune [..., call, function-entry, return, ...] triples.  */
	  if (idx + 2 < (signed)path->num_events ()
	      && path->get_checker_event (idx)->is_call_p ()
	      && path->get_checker_event (idx: idx + 1)->is_function_entry_p ()
	      && path->get_checker_event (idx: idx + 2)->is_return_p ())
	    {
	      if (get_logger ())
		{
		  label_text desc
		    (path->get_checker_event (idx)->get_desc (can_colorize: false));
		  log (fmt: "filtering events %i-%i:"
		       " irrelevant call/entry/return: %s",
		       idx, idx + 2, desc.get ());
		}
	      /* Delete from the highest index down so the lower
		 indices stay valid.  */
	      path->delete_event (idx: idx + 2);
	      path->delete_event (idx: idx + 1);
	      path->delete_event (idx);
	      changed = true;
	      idx--;
	      continue;
	    }

	  /* Prune [..., call, return, ...] pairs
	     (for -fanalyzer-verbosity=0).  */
	  if (idx + 1 < (signed)path->num_events ()
	      && path->get_checker_event (idx)->is_call_p ()
	      && path->get_checker_event (idx: idx + 1)->is_return_p ())
	    {
	      if (get_logger ())
		{
		  label_text desc
		    (path->get_checker_event (idx)->get_desc (can_colorize: false));
		  log (fmt: "filtering events %i-%i:"
		       " irrelevant call/return: %s",
		       idx, idx + 1, desc.get ());
		}
	      path->delete_event (idx: idx + 1);
	      path->delete_event (idx);
	      changed = true;
	      idx--;
	      continue;
	    }

	  idx--;
	}

    }
  while (changed);
}
2813
2814/* Remove everything within [call point, IDX]. For consistency,
2815 IDX should represent the return event of the frame to delete,
2816 or if there is none it should be the last event of the frame.
2817 After this function, IDX designates the event prior to calling
2818 this frame. */
2819
2820static void
2821prune_frame (checker_path *path, int &idx)
2822{
2823 gcc_assert (idx >= 0);
2824 int nesting = 1;
2825 if (path->get_checker_event (idx)->is_return_p ())
2826 nesting = 0;
2827 do
2828 {
2829 if (path->get_checker_event (idx)->is_call_p ())
2830 nesting--;
2831 else if (path->get_checker_event (idx)->is_return_p ())
2832 nesting++;
2833
2834 path->delete_event (idx: idx--);
2835 } while (idx >= 0 && nesting != 0);
2836}
2837
/* This function is called when -fanalyzer-show-events-in-system-headers
   is disabled and will prune the diagnostic of all events within a
   system header, only keeping the entry and exit events to the header.
   This should be called after diagnostic_manager::prune_interproc_events
   so that successive events [system header call, system header return]
   are preserved thereafter.
2844
2845 Given a diagnostics path diving into a system header in the form
2846 [
2847 prefix events...,
2848 system header call,
2849 system header entry,
2850 events within system headers...,
2851 system header return,
2852 suffix events...
2853 ]
2854
2855 then transforms it into
2856 [
2857 prefix events...,
2858 system header call,
2859 system header return,
2860 suffix events...
2861 ]. */
2862
void
diagnostic_manager::prune_system_headers (checker_path *path) const
{
  int idx = (signed)path->num_events () - 1;
  while (idx >= 0)
    {
      const checker_event *event = path->get_checker_event (idx);
      /* Prune everything between
	 [..., system entry, (...), system return, ...].  */
      if (event->is_return_p ()
	  && in_system_header_at (loc: event->get_location ()))
	{
	  int ret_idx = idx;
	  /* Delete the frame's events; afterwards IDX designates the
	     event prior to the call into this frame.  */
	  prune_frame (path, idx);

	  if (get_logger ())
	    {
	      /* NOTE(review): IDX has already been decremented by
		 prune_frame here, so the logged range starts one
		 before the first deleted event — confirm intent.  */
	      log (fmt: "filtering system headers events %i-%i:",
		   idx, ret_idx);
	    }
	  // Delete function entry within system headers.
	  if (idx >= 0)
	    {
	      event = path->get_checker_event (idx);
	      if (event->is_function_entry_p ()
		  && in_system_header_at (loc: event->get_location ()))
		{
		  if (get_logger ())
		    {
		      label_text desc (event->get_desc (can_colorize: false));
		      log (fmt: "filtering event %i:"
			   "system header entry event: %s",
			   idx, desc.get ());
		    }

		  path->delete_event (idx);
		}
	    }
	}

      idx--;
    }
}
2906
2907/* Return true iff event IDX within PATH is on the same line as REF_EXP_LOC. */
2908
2909static bool
2910same_line_as_p (const expanded_location &ref_exp_loc,
2911 checker_path *path, unsigned idx)
2912{
2913 const checker_event *ev = path->get_checker_event (idx);
2914 expanded_location idx_exp_loc = expand_location (ev->get_location ());
2915 gcc_assert (ref_exp_loc.file);
2916 if (idx_exp_loc.file == NULL)
2917 return false;
2918 if (strcmp (s1: ref_exp_loc.file, s2: idx_exp_loc.file))
2919 return false;
2920 return ref_exp_loc.line == idx_exp_loc.line;
2921}
2922
2923/* This path-readability optimization reduces the verbosity of compound
2924 conditional statements (without needing to reconstruct the AST, which
2925 has already been lost).
2926
2927 For example, it converts:
2928
2929 | 61 | if (cp[0] != '\0' && cp[0] != '#')
2930 | | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
2931 | | | | |
2932 | | | | (6) ...to here
2933 | | | (7) following ‘true’ branch...
2934 | | (5) following ‘true’ branch...
2935 | 62 | {
2936 | 63 | alias = cp++;
2937 | | ~~~~
2938 | | |
2939 | | (8) ...to here
2940
2941 into:
2942
2943 | 61 | if (cp[0] != '\0' && cp[0] != '#')
2944 | | ~
2945 | | |
2946 | | (5) following ‘true’ branch...
2947 | 62 | {
2948 | 63 | alias = cp++;
2949 | | ~~~~
2950 | | |
2951 | | (6) ...to here
2952
2953 by combining events 5-8 into new events 5-6.
2954
2955 Find runs of consecutive (start_cfg_edge_event, end_cfg_edge_event) pairs
2956 in which all events apart from the final end_cfg_edge_event are on the same
2957 line, and for which either all the CFG edges are TRUE edges, or all are
2958 FALSE edges.
2959
2960 Consolidate each such run into a
2961 (start_consolidated_cfg_edges_event, end_consolidated_cfg_edges_event)
2962 pair. */
2963
void
diagnostic_manager::consolidate_conditions (checker_path *path) const
{
  /* Don't simplify edges if we're debugging them.  */
  if (flag_analyzer_verbose_edges)
    return;

  for (int start_idx = 0;
       start_idx < (signed)path->num_events () - 1;
       start_idx++)
    {
      if (path->cfg_edge_pair_at_p (idx: start_idx))
	{
	  const checker_event *old_start_ev
	    = path->get_checker_event (idx: start_idx);
	  expanded_location start_exp_loc
	    = expand_location (old_start_ev->get_location ());
	  if (start_exp_loc.file == NULL)
	    continue;
	  if (!same_line_as_p (ref_exp_loc: start_exp_loc, path, idx: start_idx + 1))
	    continue;

	  /* Are we looking for a run of all TRUE edges, or all FALSE edges?  */
	  gcc_assert (old_start_ev->m_kind == EK_START_CFG_EDGE);
	  const start_cfg_edge_event *old_start_cfg_ev
	    = (const start_cfg_edge_event *)old_start_ev;
	  const cfg_superedge& first_cfg_sedge
	    = old_start_cfg_ev->get_cfg_superedge ();
	  bool edge_sense;
	  if (first_cfg_sedge.true_value_p ())
	    edge_sense = true;
	  else if (first_cfg_sedge.false_value_p ())
	    edge_sense = false;
	  else
	    continue;

	  /* Find a run of CFG start/end event pairs from
	       [start_idx, next_idx)
	     where all apart from the final event are on the same line,
	     and all are either TRUE or FALSE edges, matching the initial.  */
	  int next_idx = start_idx + 2;
	  while (path->cfg_edge_pair_at_p (idx: next_idx)
		 && same_line_as_p (ref_exp_loc: start_exp_loc, path, idx: next_idx))
	    {
	      const checker_event *iter_ev
		= path->get_checker_event (idx: next_idx);
	      gcc_assert (iter_ev->m_kind == EK_START_CFG_EDGE);
	      const start_cfg_edge_event *iter_cfg_ev
		= (const start_cfg_edge_event *)iter_ev;
	      const cfg_superedge& iter_cfg_sedge
		= iter_cfg_ev->get_cfg_superedge ();
	      /* Stop the run as soon as the edge sense no longer
		 matches the initial edge.  */
	      if (edge_sense)
		{
		  if (!iter_cfg_sedge.true_value_p ())
		    break;
		}
	      else
		{
		  if (!iter_cfg_sedge.false_value_p ())
		    break;
		}
	      next_idx += 2;
	    }

	  /* If we have more than one pair in the run, consolidate.
	     Replace the first pair in place, then delete the rest.  */
	  if (next_idx > start_idx + 2)
	    {
	      const checker_event *old_end_ev
		= path->get_checker_event (idx: next_idx - 1);
	      log (fmt: "consolidating CFG edge events %i-%i into %i-%i",
		   start_idx, next_idx - 1, start_idx, start_idx +1);
	      start_consolidated_cfg_edges_event *new_start_ev
		= new start_consolidated_cfg_edges_event
		(event_loc_info (old_start_ev->get_location (),
				 old_start_ev->get_fndecl (),
				 old_start_ev->get_stack_depth ()),
		 edge_sense);
	      checker_event *new_end_ev
		= new end_consolidated_cfg_edges_event
		(event_loc_info (old_end_ev->get_location (),
				 old_end_ev->get_fndecl (),
				 old_end_ev->get_stack_depth ()));
	      path->replace_event (idx: start_idx, new_event: new_start_ev);
	      path->replace_event (idx: start_idx + 1, new_event: new_end_ev);
	      path->delete_events (start_idx: start_idx + 2, len: next_idx - (start_idx + 2));
	    }
	}
    }
}
3053
3054/* Final pass of diagnostic_manager::prune_path.
3055
3056 If all we're left with is in one function, then filter function entry
3057 events. */
3058
3059void
3060diagnostic_manager::finish_pruning (checker_path *path) const
3061{
3062 if (!path->interprocedural_p ())
3063 {
3064 int idx = path->num_events () - 1;
3065 while (idx >= 0 && idx < (signed)path->num_events ())
3066 {
3067 checker_event *base_event = path->get_checker_event (idx);
3068 if (base_event->m_kind == EK_FUNCTION_ENTRY)
3069 {
3070 log (fmt: "filtering event %i:"
3071 " function entry for purely intraprocedural path", idx);
3072 path->delete_event (idx);
3073 }
3074 idx--;
3075 }
3076 }
3077}
3078
3079} // namespace ana
3080
3081#endif /* #if ENABLE_ANALYZER */
3082

source code of gcc/analyzer/diagnostic-manager.cc