/* Calculate branch probabilities, and basic block execution counts.
   Copyright (C) 1990-2025 Free Software Foundation, Inc.
   Contributed by James E. Wilson, UC Berkeley/Cygnus Support;
   based on some ideas from Dain Samples of UC Berkeley.
   Further mangling by Bob Manson, Cygnus Support.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
22
/* Generate basic block profile instrumentation and auxiliary files.
   Profile generation is optimized so that not all arcs in the basic
   block graph need instrumenting.  First, the BB graph is closed with
   one entry (function start) and one exit (function exit).  Any
   abnormal edge cannot be instrumented (because there is no control
   path on which to place the code).  We close the graph by inserting
   EDGE_FAKE edges to the EXIT_BLOCK from the sources of abnormal edges
   that do not go to the exit block, and we ignore those abnormal edges
   themselves.  Naturally these fake edges are never directly
   traversed, and so *cannot* be directly instrumented.  Some other
   graph massaging is done as well.  To optimize the instrumentation we
   generate the minimal spanning tree of the BB graph; only edges that
   are not on the spanning tree (plus the entry point) need
   instrumenting.  From that information all other edge counts can be
   deduced.  By construction all fake edges must be on the spanning
   tree.  We also attempt to place EDGE_CRITICAL edges on the spanning
   tree.

   The auxiliary files generated are <dumpbase>.gcno (at compile time)
   and <dumpbase>.gcda (at run time).  The format is described in full
   in gcov-io.h.  */
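
/* As an editorial illustration of the scheme above (a sketch, not part
   of the original sources): consider a diamond-shaped function body

     ENTRY -> B0, B0 -> B1, B0 -> B2, B1 -> B3, B2 -> B3, B3 -> EXIT

   A spanning tree may consist of ENTRY->B0, B0->B1, B0->B2, B1->B3 and
   B3->EXIT, leaving only B2->B3 (plus the entry count) to be
   instrumented.  If the function was entered N times and the counter
   on B2->B3 reads c, flow conservation yields count(B0->B2) = c,
   count(B0->B1) = N - c, count(B1->B3) = N - c and
   count(B3->EXIT) = N, so all six edge counts are recovered from just
   two counters.  */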

/* ??? Register allocation should use basic block execution counts to
   give preference to the most commonly executed blocks.  */

/* ??? Should calculate branch probabilities before instrumenting code, since
   then we can use arc counts to help decide which arcs to instrument.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "cgraph.h"
#include "coverage.h"
#include "diagnostic-core.h"
#include "cfganal.h"
#include "value-prof.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "dumpfile.h"
#include "cfgloop.h"
#include "sreal.h"
#include "file-prefix-map.h"

#include "profile.h"

struct condcov;
struct condcov *find_conditions (struct function*);
size_t cov_length (const struct condcov*);
array_slice<basic_block> cov_blocks (struct condcov*, size_t);
array_slice<uint64_t> cov_masks (struct condcov*, size_t);
array_slice<sbitmap> cov_maps (struct condcov* cov, size_t n);
void cov_free (struct condcov*);
size_t instrument_decisions (array_slice<basic_block>, size_t,
                             array_slice<sbitmap>,
                             array_slice<gcov_type_unsigned>);

/* Map from BBs/edges to gcov counters.  */
vec<gcov_type> bb_gcov_counts;
hash_map<edge,gcov_type> *edge_gcov_counts;

struct bb_profile_info {
  unsigned int count_valid : 1;

  /* Number of successor and predecessor edges.  */
  gcov_type succ_count;
  gcov_type pred_count;
};

#define BB_INFO(b) ((struct bb_profile_info *) (b)->aux)


/* Counter summary from the last set of coverage counts read.  */

gcov_summary *profile_info;

/* Collect statistics on the performance of this pass for the entire source
   file.  */

static int total_num_blocks;
static int total_num_edges;
static int total_num_edges_ignored;
static int total_num_edges_instrumented;
static int total_num_blocks_created;
static int total_num_passes;
static int total_num_times_called;
static int total_hist_br_prob[20];
static int total_num_branches;
static int total_num_conds;
/* Forward declarations.  */
static void find_spanning_tree (struct edge_list *);

/* Add edge instrumentation code to the entire insn chain.

   EL is the list of edges in the current function's CFG; a counter is
   added for each edge that is neither ignored nor on the spanning
   tree.  */

static unsigned
instrument_edges (struct edge_list *el)
{
  unsigned num_instr_edges = 0;
  int num_edges = NUM_EDGES (el);
  basic_block bb;

  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
    {
      edge e;
      edge_iterator ei;

      FOR_EACH_EDGE (e, ei, bb->succs)
        {
          struct edge_profile_info *inf = EDGE_INFO (e);

          if (!inf->ignore && !inf->on_tree)
            {
              gcc_assert (!(e->flags & EDGE_ABNORMAL));
              if (dump_file)
                fprintf (dump_file, "Edge %d to %d instrumented%s\n",
                         e->src->index, e->dest->index,
                         EDGE_CRITICAL_P (e) ? " (and split)" : "");
              gimple_gen_edge_profiler (num_instr_edges++, e);
            }
        }
    }

  total_num_blocks_created += num_edges;
  if (dump_file)
    fprintf (dump_file, "%d edges instrumented\n", num_instr_edges);
  return num_instr_edges;
}

/* Add code to measure histograms for values in list VALUES.  */
static void
instrument_values (histogram_values values)
{
  unsigned i;

  /* Emit code to generate the histograms before the insns.  */

  for (i = 0; i < values.length (); i++)
    {
      histogram_value hist = values[i];
      unsigned t = COUNTER_FOR_HIST_TYPE (hist->type);

      if (!coverage_counter_alloc (t, hist->n_counters))
        continue;

      switch (hist->type)
        {
        case HIST_TYPE_INTERVAL:
          gimple_gen_interval_profiler (hist, t);
          break;

        case HIST_TYPE_POW2:
          gimple_gen_pow2_profiler (hist, t);
          break;

        case HIST_TYPE_TOPN_VALUES:
          gimple_gen_topn_values_profiler (hist, t);
          break;

        case HIST_TYPE_INDIR_CALL:
          gimple_gen_ic_profiler (hist, t);
          break;

        case HIST_TYPE_AVERAGE:
          gimple_gen_average_profiler (hist, t);
          break;

        case HIST_TYPE_IOR:
          gimple_gen_ior_profiler (hist, t);
          break;

        case HIST_TYPE_TIME_PROFILE:
          gimple_gen_time_profiler (t);
          break;

        default:
          gcc_unreachable ();
        }
    }
}


/* Computes hybrid profile for all matching entries in da_file.

   CFG_CHECKSUM is the precomputed checksum for the CFG.  */

static gcov_type *
get_exec_counts (unsigned cfg_checksum, unsigned lineno_checksum)
{
  unsigned num_edges = 0;
  basic_block bb;
  gcov_type *counts;

  /* Count the edges to be (possibly) instrumented.  */
  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
    {
      edge e;
      edge_iterator ei;

      FOR_EACH_EDGE (e, ei, bb->succs)
        if (!EDGE_INFO (e)->ignore && !EDGE_INFO (e)->on_tree)
          num_edges++;
    }

  counts = get_coverage_counts (GCOV_COUNTER_ARCS, cfg_checksum,
                                lineno_checksum, num_edges);
  if (!counts)
    return NULL;

  return counts;
}

static bool
is_edge_inconsistent (vec<edge, va_gc> *edges)
{
  edge e;
  edge_iterator ei;
  FOR_EACH_EDGE (e, ei, edges)
    {
      if (!EDGE_INFO (e)->ignore)
        {
          if (edge_gcov_count (e) < 0
              && (!(e->flags & EDGE_FAKE)
                  || !block_ends_with_call_p (e->src)))
            {
              if (dump_file)
                {
                  fprintf (dump_file,
                           "Edge %i->%i is inconsistent, count %" PRId64,
                           e->src->index, e->dest->index, edge_gcov_count (e));
                  dump_bb (dump_file, e->src, 0, TDF_DETAILS);
                  dump_bb (dump_file, e->dest, 0, TDF_DETAILS);
                }
              return true;
            }
        }
    }
  return false;
}

static void
correct_negative_edge_counts (void)
{
  basic_block bb;
  edge e;
  edge_iterator ei;

  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
    {
      FOR_EACH_EDGE (e, ei, bb->succs)
        {
          if (edge_gcov_count (e) < 0)
            edge_gcov_count (e) = 0;
        }
    }
}

/* Check consistency.
   Return true if inconsistency is found.  */
static bool
is_inconsistent (void)
{
  basic_block bb;
  bool inconsistent = false;
  FOR_EACH_BB_FN (bb, cfun)
    {
      inconsistent |= is_edge_inconsistent (bb->preds);
      if (!dump_file && inconsistent)
        return true;
      inconsistent |= is_edge_inconsistent (bb->succs);
      if (!dump_file && inconsistent)
        return true;
      if (bb_gcov_count (bb) < 0)
        {
          if (dump_file)
            {
              fprintf (dump_file, "BB %i count is negative "
                       "%" PRId64,
                       bb->index,
                       bb_gcov_count (bb));
              dump_bb (dump_file, bb, 0, TDF_DETAILS);
            }
          inconsistent = true;
        }
      if (bb_gcov_count (bb) != sum_edge_counts (bb->preds))
        {
          if (dump_file)
            {
              fprintf (dump_file, "BB %i count does not match sum of incoming edges "
                       "%" PRId64" should be %" PRId64,
                       bb->index,
                       bb_gcov_count (bb),
                       sum_edge_counts (bb->preds));
              dump_bb (dump_file, bb, 0, TDF_DETAILS);
            }
          inconsistent = true;
        }
      if (bb_gcov_count (bb) != sum_edge_counts (bb->succs)
          && ! (find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun)) != NULL
                && block_ends_with_call_p (bb)))
        {
          if (dump_file)
            {
              fprintf (dump_file, "BB %i count does not match sum of outgoing edges "
                       "%" PRId64" should be %" PRId64,
                       bb->index,
                       bb_gcov_count (bb),
                       sum_edge_counts (bb->succs));
              dump_bb (dump_file, bb, 0, TDF_DETAILS);
            }
          inconsistent = true;
        }
      if (!dump_file && inconsistent)
        return true;
    }

  return inconsistent;
}

/* Set each basic block count to the sum of its outgoing edge counts.  */
static void
set_bb_counts (void)
{
  basic_block bb;
  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
    {
      bb_gcov_count (bb) = sum_edge_counts (bb->succs);
      gcc_assert (bb_gcov_count (bb) >= 0);
    }
}

/* Read profile data and return the total number of edge counts read.  */
static int
read_profile_edge_counts (gcov_type *exec_counts)
{
  basic_block bb;
  int num_edges = 0;
  int exec_counts_pos = 0;
  /* For each edge not on the spanning tree, set its execution count from
     the .da file.  */
  /* The first count in the .da file is the number of times that the function
     was entered.  This is the exec_count for block zero.  */

  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
    {
      edge e;
      edge_iterator ei;

      FOR_EACH_EDGE (e, ei, bb->succs)
        if (!EDGE_INFO (e)->ignore && !EDGE_INFO (e)->on_tree)
          {
            num_edges++;
            if (exec_counts)
              edge_gcov_count (e) = exec_counts[exec_counts_pos++];
            else
              edge_gcov_count (e) = 0;

            EDGE_INFO (e)->count_valid = 1;
            BB_INFO (bb)->succ_count--;
            BB_INFO (e->dest)->pred_count--;
            if (dump_file)
              {
                fprintf (dump_file, "\nRead edge from %i to %i, count:",
                         bb->index, e->dest->index);
                fprintf (dump_file, "%" PRId64,
                         (int64_t) edge_gcov_count (e));
              }
          }
    }

  return num_edges;
}

/* BB statistics comparing guessed frequency of BB with feedback.  */

struct bb_stats
{
  basic_block bb;
  double guessed, feedback;
  int64_t count;
};

/* Compare bb_stats entries by feedback frequency in descending order.  */

static int
cmp_stats (const void *ptr1, const void *ptr2)
{
  const bb_stats *p1 = (const bb_stats *)ptr1;
  const bb_stats *p2 = (const bb_stats *)ptr2;

  if (p1->feedback < p2->feedback)
    return 1;
  else if (p1->feedback > p2->feedback)
    return -1;
  return 0;
}


/* Compute the branch probabilities for the various branches.
   Annotate them accordingly.

   CFG_CHECKSUM is the precomputed checksum for the CFG.  */

static void
compute_branch_probabilities (unsigned cfg_checksum, unsigned lineno_checksum)
{
  basic_block bb;
  int i;
  int num_edges = 0;
  int changes;
  int passes;
  int hist_br_prob[20];
  int num_branches;
  gcov_type *exec_counts = get_exec_counts (cfg_checksum, lineno_checksum);
  int inconsistent = 0;

  /* Very simple sanity checks so we catch bugs in our profiling code.  */
  if (!profile_info)
    {
      if (dump_file)
        fprintf (dump_file, "Profile info is missing; giving up\n");
      return;
    }

  bb_gcov_counts.safe_grow_cleared (last_basic_block_for_fn (cfun), true);
  edge_gcov_counts = new hash_map<edge,gcov_type>;

  /* Attach extra info block to each bb.  */
  alloc_aux_for_blocks (sizeof (struct bb_profile_info));
  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
    {
      edge e;
      edge_iterator ei;

      FOR_EACH_EDGE (e, ei, bb->succs)
        if (!EDGE_INFO (e)->ignore)
          BB_INFO (bb)->succ_count++;
      FOR_EACH_EDGE (e, ei, bb->preds)
        if (!EDGE_INFO (e)->ignore)
          BB_INFO (bb)->pred_count++;
    }

  /* Avoid predicting entry on exit nodes.  */
  BB_INFO (EXIT_BLOCK_PTR_FOR_FN (cfun))->succ_count = 2;
  BB_INFO (ENTRY_BLOCK_PTR_FOR_FN (cfun))->pred_count = 2;

  num_edges = read_profile_edge_counts (exec_counts);

  if (dump_file)
    fprintf (dump_file, "\n%d edge counts read\n", num_edges);

  /* For every block in the file,
     - if every exit/entrance edge has a known count, then set the block count
     - if the block count is known, and every exit/entrance edge but one has
       a known execution count, then set the count of the remaining edge

     As edge counts are set, decrement the succ/pred count, but don't delete
     the edge, that way we can easily tell when all edges are known, or only
     one edge is unknown.  */

  /* The order that the basic blocks are iterated through is important.
     Since the code that finds spanning trees starts with block 0, low numbered
     edges are put on the spanning tree in preference to high numbered edges.
     Hence, most instrumented edges are at the end.  Graph solving works much
     faster if we propagate numbers from the end to the start.

     This takes an average of slightly more than 3 passes.  */
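
  /* An illustrative example of one propagation step (editorial sketch):
     if a block is known to execute 10 times and one of its two outgoing
     edges carries a known count of 4, the remaining edge must carry
     10 - 4 = 6; setting it may in turn make the destination block, or
     one of its edges, deducible on a later pass.  */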

  changes = 1;
  passes = 0;
  while (changes)
    {
      passes++;
      changes = 0;
      FOR_BB_BETWEEN (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), NULL, prev_bb)
        {
          struct bb_profile_info *bi = BB_INFO (bb);
          if (! bi->count_valid)
            {
              if (bi->succ_count == 0)
                {
                  edge e;
                  edge_iterator ei;
                  gcov_type total = 0;

                  FOR_EACH_EDGE (e, ei, bb->succs)
                    total += edge_gcov_count (e);
                  bb_gcov_count (bb) = total;
                  bi->count_valid = 1;
                  changes = 1;
                }
              else if (bi->pred_count == 0)
                {
                  edge e;
                  edge_iterator ei;
                  gcov_type total = 0;

                  FOR_EACH_EDGE (e, ei, bb->preds)
                    total += edge_gcov_count (e);
                  bb_gcov_count (bb) = total;
                  bi->count_valid = 1;
                  changes = 1;
                }
            }
          if (bi->count_valid)
            {
              if (bi->succ_count == 1)
                {
                  edge e;
                  edge_iterator ei;
                  gcov_type total = 0;

                  /* One of the counts will be invalid, but it is zero,
                     so adding it in also doesn't hurt.  */
                  FOR_EACH_EDGE (e, ei, bb->succs)
                    total += edge_gcov_count (e);

                  /* Search for the invalid edge, and set its count.  */
                  FOR_EACH_EDGE (e, ei, bb->succs)
                    if (!EDGE_INFO (e)->count_valid && !EDGE_INFO (e)->ignore)
                      break;

                  /* Calculate count for remaining edge by conservation.  */
                  total = bb_gcov_count (bb) - total;

                  gcc_assert (e);
                  EDGE_INFO (e)->count_valid = 1;
                  edge_gcov_count (e) = total;
                  bi->succ_count--;

                  BB_INFO (e->dest)->pred_count--;
                  changes = 1;
                }
              if (bi->pred_count == 1)
                {
                  edge e;
                  edge_iterator ei;
                  gcov_type total = 0;

                  /* One of the counts will be invalid, but it is zero,
                     so adding it in also doesn't hurt.  */
                  FOR_EACH_EDGE (e, ei, bb->preds)
                    total += edge_gcov_count (e);

                  /* Search for the invalid edge, and set its count.  */
                  FOR_EACH_EDGE (e, ei, bb->preds)
                    if (!EDGE_INFO (e)->count_valid && !EDGE_INFO (e)->ignore)
                      break;

                  /* Calculate count for remaining edge by conservation.  */
                  total = bb_gcov_count (bb) - total + edge_gcov_count (e);

                  gcc_assert (e);
                  EDGE_INFO (e)->count_valid = 1;
                  edge_gcov_count (e) = total;
                  bi->pred_count--;

                  BB_INFO (e->src)->succ_count--;
                  changes = 1;
                }
            }
        }
    }

  total_num_passes += passes;
  if (dump_file)
    fprintf (dump_file, "Graph solving took %d passes.\n\n", passes);

  /* If the graph has been correctly solved, every block will have a
     succ and pred count of zero.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      gcc_assert (!BB_INFO (bb)->succ_count && !BB_INFO (bb)->pred_count);
    }

  /* Check for inconsistent basic block counts.  */
  inconsistent = is_inconsistent ();

  if (inconsistent)
    {
      if (flag_profile_correction)
        {
          /* Inconsistency detected.  Make it flow-consistent.  */
          static int informed = 0;
          if (dump_enabled_p () && informed == 0)
            {
              informed = 1;
              dump_printf_loc (MSG_NOTE,
                               dump_user_location_t::from_location_t (input_location),
                               "correcting inconsistent profile data\n");
            }
          correct_negative_edge_counts ();
          /* Set bb counts to the sum of the outgoing edge counts.  */
          set_bb_counts ();
          if (dump_file)
            fprintf (dump_file, "\nCalling mcf_smooth_cfg\n");
          mcf_smooth_cfg ();
        }
      else
        error ("corrupted profile info: profile data is not flow-consistent");
    }

  /* For every edge, calculate its branch probability and add a reg_note
     to the branch insn to indicate this.  */

  for (i = 0; i < 20; i++)
    hist_br_prob[i] = 0;
  num_branches = 0;

  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
    {
      edge e;
      edge_iterator ei;

      if (bb_gcov_count (bb) < 0)
        {
          error ("corrupted profile info: number of iterations for basic block %d thought to be %i",
                 bb->index, (int)bb_gcov_count (bb));
          bb_gcov_count (bb) = 0;
        }
      FOR_EACH_EDGE (e, ei, bb->succs)
        {
          /* The function may return twice in the case the called function
             is setjmp or calls fork, but we can't represent this by an
             extra edge from the entry, since an extra edge from the exit
             is already present.  We get a negative frequency from the
             entry point.  */
          if ((edge_gcov_count (e) < 0
               && e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
              || (edge_gcov_count (e) > bb_gcov_count (bb)
                  && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)))
            {
              if (block_ends_with_call_p (bb))
                edge_gcov_count (e) = edge_gcov_count (e) < 0
                                      ? 0 : bb_gcov_count (bb);
            }
          if (edge_gcov_count (e) < 0
              || edge_gcov_count (e) > bb_gcov_count (bb))
            {
              error ("corrupted profile info: number of executions for edge %d-%d thought to be %i",
                     e->src->index, e->dest->index,
                     (int)edge_gcov_count (e));
              edge_gcov_count (e) = bb_gcov_count (bb) / 2;
            }
        }
      if (bb_gcov_count (bb))
        {
          bool set_to_guessed = false;
          FOR_EACH_EDGE (e, ei, bb->succs)
            {
              bool prev_never = e->probability == profile_probability::never ();
              e->probability = profile_probability::probability_in_gcov_type
                (edge_gcov_count (e), bb_gcov_count (bb));
              if (e->probability == profile_probability::never ()
                  && !prev_never
                  && flag_profile_partial_training)
                set_to_guessed = true;
            }
          if (set_to_guessed)
            FOR_EACH_EDGE (e, ei, bb->succs)
              e->probability = e->probability.guessed ();
          if (bb->index >= NUM_FIXED_BLOCKS
              && block_ends_with_condjump_p (bb)
              && EDGE_COUNT (bb->succs) >= 2)
            {
              int prob;
              edge e;
              int index;

              /* Find the branch edge.  It is possible that we do have fake
                 edges here.  */
              FOR_EACH_EDGE (e, ei, bb->succs)
                if (!(e->flags & (EDGE_FAKE | EDGE_FALLTHRU)))
                  break;

              prob = e->probability.to_reg_br_prob_base ();
              index = prob * 20 / REG_BR_PROB_BASE;

              if (index == 20)
                index = 19;
              hist_br_prob[index]++;

              num_branches++;
            }
        }
      /* As a last resort, distribute the probabilities evenly.
         Use the simple heuristic that, if there are normal edges,
         all abnormal edges get a frequency of 0; otherwise distribute
         the frequency over the abnormal edges (this is the case of
         noreturn calls).  */
      else if (profile_status_for_fn (cfun) == PROFILE_ABSENT)
        {
          int total = 0;

          FOR_EACH_EDGE (e, ei, bb->succs)
            if (!(e->flags & (EDGE_COMPLEX | EDGE_FAKE)))
              total++;
          if (total)
            {
              FOR_EACH_EDGE (e, ei, bb->succs)
                if (!(e->flags & (EDGE_COMPLEX | EDGE_FAKE)))
                  e->probability
                    = profile_probability::guessed_always () / total;
                else
                  e->probability = profile_probability::never ();
            }
          else
            {
              total += EDGE_COUNT (bb->succs);
              FOR_EACH_EDGE (e, ei, bb->succs)
                e->probability = profile_probability::guessed_always () / total;
            }
          if (bb->index >= NUM_FIXED_BLOCKS
              && block_ends_with_condjump_p (bb)
              && EDGE_COUNT (bb->succs) >= 2)
            num_branches++;
        }
    }

  if (exec_counts
      && (bb_gcov_count (ENTRY_BLOCK_PTR_FOR_FN (cfun))
          || !flag_profile_partial_training))
    profile_status_for_fn (cfun) = PROFILE_READ;

  /* If we have real data, use them!  */
  if (bb_gcov_count (ENTRY_BLOCK_PTR_FOR_FN (cfun))
      || !flag_guess_branch_prob)
    {
      profile_count old_entry_cnt = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
      auto_vec <bb_stats> stats;
      double sum1 = 0, sum2 = 0;

      FOR_ALL_BB_FN (bb, cfun)
        {
          profile_count cnt = bb->count;
          if (bb_gcov_count (bb) || !flag_profile_partial_training)
            bb->count = profile_count::from_gcov_type (bb_gcov_count (bb));
          else
            bb->count = profile_count::guessed_zero ();

          if (dump_file && (dump_flags & TDF_DETAILS) && bb->index >= 0)
            {
              double freq1 = cnt.to_sreal_scale (old_entry_cnt).to_double ();
              double freq2 = bb->count.to_sreal_scale
                               (ENTRY_BLOCK_PTR_FOR_FN (cfun)->count).to_double ();
              bb_stats stat = {bb, freq1, freq2,
                               (int64_t) bb_gcov_count (bb)};
              stats.safe_push (stat);
              sum1 += freq1;
              sum2 += freq2;
            }
        }
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          double nsum1 = 0, nsum2 = 0;
          stats.qsort (cmp_stats);
          for (auto stat : stats)
            {
              nsum1 += stat.guessed;
              nsum2 += stat.feedback;
              fprintf (dump_file,
                       " Basic block %4i guessed freq: %12.3f"
                       " cumulative:%6.2f%% "
                       " feedback freq: %12.3f cumulative:%7.2f%%"
                       " cnt: %10" PRId64 "\n", stat.bb->index,
                       stat.guessed,
                       nsum1 * 100 / sum1,
                       stat.feedback,
                       nsum2 * 100 / sum2,
                       stat.count);
            }
        }
    }
  /* If the function was not trained, preserve local estimates including
     statically determined zero counts.  */
  else if (profile_status_for_fn (cfun) == PROFILE_READ
           && !flag_profile_partial_training)
    FOR_ALL_BB_FN (bb, cfun)
      if (!(bb->count == profile_count::zero ()))
        bb->count = bb->count.global0 ();

  bb_gcov_counts.release ();
  delete edge_gcov_counts;
  edge_gcov_counts = NULL;

  update_max_bb_count ();

  if (dump_file)
    {
      fprintf (dump_file, " Profile feedback for function");
      fprintf (dump_file, ((profile_status_for_fn (cfun) == PROFILE_READ)
                           ? " is available\n"
                           : " is not available\n"));

      fprintf (dump_file, "%d branches\n", num_branches);
      if (num_branches)
        for (i = 0; i < 10; i++)
          fprintf (dump_file, "%d%% branches in range %d-%d%%\n",
                   (hist_br_prob[i] + hist_br_prob[19-i]) * 100 / num_branches,
                   5 * i, 5 * i + 5);

      total_num_branches += num_branches;
      for (i = 0; i < 20; i++)
        total_hist_br_prob[i] += hist_br_prob[i];

      fputc ('\n', dump_file);
      fputc ('\n', dump_file);

      gimple_dump_cfg (dump_file, TDF_BLOCKS);
    }

  free_aux_for_blocks ();
}

/* Sort the histogram values and counts for the TOPN and INDIR_CALL
   types.  */

static void
sort_hist_values (histogram_value hist)
{
  gcc_assert (hist->type == HIST_TYPE_TOPN_VALUES
              || hist->type == HIST_TYPE_INDIR_CALL);

  int counters = hist->hvalue.counters[1];
  for (int i = 0; i < counters - 1; i++)
    /* The histogram value is organized as:
       [total_executions, N, value1, counter1, ..., valueN, counterN]
       Use a descending bubble sort to rearrange it.  The sort starts
       from <value1, counter1> and compares the counters first.  If the
       counters are equal, compare the values and swap when the first is
       smaller, so equal-counter entries end up in a deterministic
       order.  */
    {
      bool swapped = false;
      for (int j = 0; j < counters - 1 - i; j++)
        {
          gcov_type *p = &hist->hvalue.counters[2 * j + 2];
          if (p[1] < p[3] || (p[1] == p[3] && p[0] < p[2]))
            {
              std::swap (p[0], p[2]);
              std::swap (p[1], p[3]);
              swapped = true;
            }
        }
      if (!swapped)
        break;
    }
}
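
/* For example (illustrative values only): a TOPN counter block
   [100, 2, 7, 30, 5, 60] records 100 total executions and N = 2
   tracked <value, counter> pairs; sort_hist_values reorders the pairs
   by descending counter, giving [100, 2, 5, 60, 7, 30].  */
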
/* Load the value histograms described in the VALUES array from the
   .gcda file.

   CFG_CHECKSUM is the precomputed checksum for the CFG.  */

static void
compute_value_histograms (histogram_values values, unsigned cfg_checksum,
                          unsigned lineno_checksum)
{
  unsigned i, j, t, any;
  unsigned n_histogram_counters[GCOV_N_VALUE_COUNTERS];
  gcov_type *histogram_counts[GCOV_N_VALUE_COUNTERS];
  gcov_type *act_count[GCOV_N_VALUE_COUNTERS];
  gcov_type *aact_count;
  struct cgraph_node *node;

  for (t = 0; t < GCOV_N_VALUE_COUNTERS; t++)
    n_histogram_counters[t] = 0;

  for (i = 0; i < values.length (); i++)
    {
      histogram_value hist = values[i];
      n_histogram_counters[(int) hist->type] += hist->n_counters;
    }

  any = 0;
  for (t = 0; t < GCOV_N_VALUE_COUNTERS; t++)
    {
      if (!n_histogram_counters[t])
        {
          histogram_counts[t] = NULL;
          continue;
        }

      histogram_counts[t] = get_coverage_counts (COUNTER_FOR_HIST_TYPE (t),
                                                 cfg_checksum,
                                                 lineno_checksum,
                                                 n_histogram_counters[t]);
      if (histogram_counts[t])
        any = 1;
      act_count[t] = histogram_counts[t];
    }
  if (!any)
    return;

  for (i = 0; i < values.length (); i++)
    {
      histogram_value hist = values[i];
      gimple *stmt = hist->hvalue.stmt;

      t = (int) hist->type;
      bool topn_p = (hist->type == HIST_TYPE_TOPN_VALUES
                     || hist->type == HIST_TYPE_INDIR_CALL);

      /* The TOP N counter uses a variable number of counters.  */
      if (topn_p)
        {
          unsigned total_size;
          if (act_count[t])
            total_size = 2 + 2 * act_count[t][1];
          else
            total_size = 2;
          gimple_add_histogram_value (cfun, stmt, hist);
          hist->n_counters = total_size;
          hist->hvalue.counters = XNEWVEC (gcov_type, hist->n_counters);
          for (j = 0; j < hist->n_counters; j++)
            if (act_count[t])
              hist->hvalue.counters[j] = act_count[t][j];
            else
              hist->hvalue.counters[j] = 0;
          act_count[t] += hist->n_counters;
          sort_hist_values (hist);
        }
      else
        {
          aact_count = act_count[t];

          if (act_count[t])
            act_count[t] += hist->n_counters;

          gimple_add_histogram_value (cfun, stmt, hist);
          hist->hvalue.counters = XNEWVEC (gcov_type, hist->n_counters);
          for (j = 0; j < hist->n_counters; j++)
            if (aact_count)
              hist->hvalue.counters[j] = aact_count[j];
            else
              hist->hvalue.counters[j] = 0;
        }

      /* The time profiler counter is not related to any statement, so we
         have to read the counter and set the value on the corresponding
         call graph node.  */
      if (hist->type == HIST_TYPE_TIME_PROFILE)
        {
          node = cgraph_node::get (hist->fun->decl);
          if (hist->hvalue.counters[0] >= 0
              && hist->hvalue.counters[0] < INT_MAX / 2)
            node->tp_first_run = hist->hvalue.counters[0];
          else
            {
              if (flag_profile_correction)
                error ("corrupted profile info: invalid time profile");
              node->tp_first_run = 0;
            }

          /* Drop the profile for -fprofile-reproducible=multithreaded.  */
          bool drop
            = (flag_profile_reproducible == PROFILE_REPRODUCIBILITY_MULTITHREADED);
          if (drop)
            node->tp_first_run = 0;

          if (dump_file)
            fprintf (dump_file, "Read tp_first_run: %d%s\n", node->tp_first_run,
                     drop ? "; ignored because profile reproducibility is "
                     "multi-threaded" : "");
        }
    }

  for (t = 0; t < GCOV_N_VALUE_COUNTERS; t++)
    free (histogram_counts[t]);
}

/* Location triplet which records a location.  */
struct location_triplet
{
  const char *filename;
  int lineno;
  int bb_index;
};

/* Traits class for streamed_locations hash set below.  */

struct location_triplet_hash : typed_noop_remove <location_triplet>
{
  typedef location_triplet value_type;
  typedef location_triplet compare_type;

  static hashval_t
  hash (const location_triplet &ref)
  {
    inchash::hash hstate (0);
    if (ref.filename)
      hstate.add_int (strlen (ref.filename));
    hstate.add_int (ref.lineno);
    hstate.add_int (ref.bb_index);
    return hstate.end ();
  }

  static bool
  equal (const location_triplet &ref1, const location_triplet &ref2)
  {
    return ref1.lineno == ref2.lineno
           && ref1.bb_index == ref2.bb_index
           && ref1.filename != NULL
           && ref2.filename != NULL
           && strcmp (ref1.filename, ref2.filename) == 0;
  }

  static void
  mark_deleted (location_triplet &ref)
  {
    ref.lineno = -1;
  }

  static const bool empty_zero_p = false;

  static void
  mark_empty (location_triplet &ref)
  {
    ref.lineno = -2;
  }

  static bool
  is_deleted (const location_triplet &ref)
  {
    return ref.lineno == -1;
  }

  static bool
  is_empty (const location_triplet &ref)
  {
    return ref.lineno == -2;
  }
};



/* When passed NULL as FILE_NAME, initialize.
   When passed something else, output the necessary commands to change
   the current line to LINE and the current file to FILE_NAME.  */
static void
output_location (hash_set<location_triplet_hash> *streamed_locations,
                 char const *file_name, int line,
                 gcov_position_t *offset, basic_block bb)
{
  static char const *prev_file_name;
  static int prev_line;
  bool name_differs, line_differs;

  if (file_name != NULL)
    file_name = remap_profile_filename (file_name);

  location_triplet triplet;
  triplet.filename = file_name;
  triplet.lineno = line;
  triplet.bb_index = bb ? bb->index : 0;

  if (streamed_locations->add (triplet))
    return;

  if (!file_name)
    {
      prev_file_name = NULL;
      prev_line = -1;
      return;
    }

  name_differs = !prev_file_name || filename_cmp (file_name, prev_file_name);
  line_differs = prev_line != line;

  if (!*offset)
    {
      *offset = gcov_write_tag (GCOV_TAG_LINES);
      gcov_write_unsigned (bb->index);
      name_differs = line_differs = true;
    }

  /* If this is a new source file, then output the
     file's name to the gcno file.  */
  if (name_differs)
    {
      prev_file_name = file_name;
      gcov_write_unsigned (0);
      gcov_write_filename (prev_file_name);
    }
  if (line_differs)
    {
      gcov_write_unsigned (line);
      prev_line = line;
    }
}
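
/* As an illustration (editorial sketch following the description in
   gcov-io.h): for a block with index 3 whose statements sit on lines 10
   and 12 of "a.c", the caller and the function above together emit a
   GCOV_TAG_LINES record of roughly the form

     GCOV_TAG_LINES, <length>, 3, 0, "a.c", 10, 12, 0, NULL

   where a 0 followed by a string introduces a file name, a plain
   unsigned is a line number, and the trailing 0 plus NULL string
   terminates the record; the length word is backpatched by
   gcov_write_length.  */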

/* Helper for qsort so edges get sorted from highest frequency to smallest.
   This controls the weights for the minimal spanning tree algorithm.  */
static int
compare_freqs (const void *p1, const void *p2)
{
  const_edge e1 = *(const const_edge *)p1;
  const_edge e2 = *(const const_edge *)p2;

  /* Critical edges need to be split, which introduces extra control flow.
     Make them heavier.  */
  int m1 = EDGE_CRITICAL_P (e1) ? 2 : 1;
  int m2 = EDGE_CRITICAL_P (e2) ? 2 : 1;

  if (EDGE_FREQUENCY (e1) * m1 + m1 != EDGE_FREQUENCY (e2) * m2 + m2)
    return EDGE_FREQUENCY (e2) * m2 + m2 - EDGE_FREQUENCY (e1) * m1 - m1;
  /* Stabilize sort.  */
  if (e1->src->index != e2->src->index)
    return e2->src->index - e1->src->index;
  return e2->dest->index - e1->dest->index;
}
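
/* For example: a non-critical edge with EDGE_FREQUENCY 100 gets the key
   1 * 100 + 1 = 101, while a critical edge with EDGE_FREQUENCY 60 gets
   2 * 60 + 2 = 122, so the critical edge sorts first and is offered to
   the spanning tree earlier, making it less likely to need splitting
   for instrumentation.  */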

/* Only read execution count for thunks.  */

void
read_thunk_profile (struct cgraph_node *node)
{
  tree old = current_function_decl;
  current_function_decl = node->decl;
  gcov_type *counts = get_coverage_counts (GCOV_COUNTER_ARCS, 0, 0, 1);
  if (counts)
    {
      node->callees->count = node->count
        = profile_count::from_gcov_type (counts[0]);
      free (counts);
    }
  current_function_decl = old;
}


/* Instrument and/or analyze program behavior based on the program's CFG.

   This function creates a representation of the control flow graph (of
   the function being compiled) that is suitable for the instrumentation
   of edges and/or converting measured edge counts to counts on the
   complete CFG.

   When FLAG_PROFILE_ARCS is nonzero, this function instruments the edges in
   the flow graph that are needed to reconstruct the dynamic behavior of the
   flow graph.  This data is written to the gcno file for gcov.

   When FLAG_PROFILE_CONDITIONS is nonzero, this function instruments the
   edges in the control flow graph to track what the conditions evaluate to,
   in order to determine which conditions are covered and have an independent
   effect on the outcome (modified condition/decision coverage).  This data
   is written to the gcno file for gcov.

   When FLAG_BRANCH_PROBABILITIES is nonzero, this function reads auxiliary
   information from the gcda file containing edge count information from
   previous executions of the function being compiled.  In this case, the
   control flow graph is annotated with actual execution counts by
   compute_branch_probabilities().

   Main entry point of this file.  */

void
branch_prob (bool thunk)
{
  basic_block bb;
  unsigned i;
  unsigned num_edges, ignored_edges;
  unsigned num_instrumented;
  struct edge_list *el;
  histogram_values values = histogram_values ();
  unsigned cfg_checksum, lineno_checksum;
  bool output_to_file;

  total_num_times_called++;

  flow_call_edges_add (NULL);
  add_noreturn_fake_exit_edges ();

  hash_set <location_triplet_hash> streamed_locations;

  if (!thunk)
    {
      /* We can't handle cyclic regions constructed using abnormal edges.
         To avoid these we replace every source of an abnormal edge by a
         fake edge from the entry node and every destination by a fake
         edge to the exit.  This keeps the graph acyclic and our
         calculation exact for all normal edges except for exit and
         entrance ones.

         We also add fake exit edges for each call and asm statement in
         the basic block, since it may not return.  */

      FOR_EACH_BB_FN (bb, cfun)
        {
          int need_exit_edge = 0, need_entry_edge = 0;
          int have_exit_edge = 0, have_entry_edge = 0;
          edge e;
          edge_iterator ei;

          /* Functions returning multiple times are not handled by extra edges.
             Instead we simply allow negative counts on edges from exit to the
             block past call and corresponding probabilities.  We can't go
             with the extra edges because that would result in a flowgraph
             that needs to have fake edges outside the spanning tree.  */

          FOR_EACH_EDGE (e, ei, bb->succs)
            {
              gimple_stmt_iterator gsi;
              gimple *last = NULL;

              /* It may happen that there are compiler generated statements
                 without a locus at all.  Go through the basic block from the
                 last to the first statement looking for a locus.  */
              for (gsi = gsi_last_nondebug_bb (bb);
                   !gsi_end_p (gsi);
                   gsi_prev_nondebug (&gsi))
                {
                  last = gsi_stmt (gsi);
                  if (!RESERVED_LOCATION_P (gimple_location (last)))
                    break;
                }

              /* An edge with a goto locus might get wrong coverage info
                 unless it is the only edge out of BB.
                 Don't do that when the locuses match, so
                   if (blah) goto something;
                 is not computed twice.  */
              if (last
                  && gimple_has_location (last)
                  && !RESERVED_LOCATION_P (e->goto_locus)
                  && !single_succ_p (bb)
                  && (LOCATION_FILE (e->goto_locus)
                      != LOCATION_FILE (gimple_location (last))
                      || (LOCATION_LINE (e->goto_locus)
                          != LOCATION_LINE (gimple_location (last)))))
                {
                  basic_block new_bb = split_edge (e);
                  edge ne = single_succ_edge (new_bb);
                  ne->goto_locus = e->goto_locus;
                }
              if ((e->flags & (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL))
                  && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
                need_exit_edge = 1;
              if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
                have_exit_edge = 1;
            }
          FOR_EACH_EDGE (e, ei, bb->preds)
            {
              if ((e->flags & (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL))
                  && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
                need_entry_edge = 1;
              if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
                have_entry_edge = 1;
            }

          if (need_exit_edge && !have_exit_edge)
            {
              if (dump_file)
                fprintf (dump_file, "Adding fake exit edge to bb %i\n",
                         bb->index);
              make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FAKE);
            }
          if (need_entry_edge && !have_entry_edge)
            {
              if (dump_file)
                fprintf (dump_file, "Adding fake entry edge to bb %i\n",
                         bb->index);
              make_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), bb, EDGE_FAKE);
              /* Avoid bbs that have both fake entry edge and also some
                 exit edge.  One of those edges wouldn't be added to the
                 spanning tree, but we can't instrument any of them.  */
              if (have_exit_edge || need_exit_edge)
                {
                  gimple_stmt_iterator gsi;
                  gimple *first;

                  gsi = gsi_start_nondebug_after_labels_bb (bb);
                  gcc_checking_assert (!gsi_end_p (gsi));
                  first = gsi_stmt (gsi);
                  /* Don't split the bbs containing __builtin_setjmp_receiver
                     or ABNORMAL_DISPATCHER calls.  These are very
                     special and don't expect anything to be inserted before
                     them.  */
                  if (is_gimple_call (first)
                      && (gimple_call_builtin_p (first, BUILT_IN_SETJMP_RECEIVER)
                          || (gimple_call_flags (first) & ECF_RETURNS_TWICE)
                          || (gimple_call_internal_p (first)
                              && (gimple_call_internal_fn (first)
                                  == IFN_ABNORMAL_DISPATCHER))))
                    continue;

                  if (dump_file)
                    fprintf (dump_file, "Splitting bb %i after labels\n",
                             bb->index);
                  split_block_after_labels (bb);
                }
            }
        }
    }

  el = create_edge_list ();
  num_edges = NUM_EDGES (el);
  qsort (el->index_to_edge, num_edges, sizeof (edge), compare_freqs);
  alloc_aux_for_edges (sizeof (struct edge_profile_info));

  /* The basic blocks are expected to be numbered sequentially.  */
  compact_blocks ();

  ignored_edges = 0;
  for (i = 0; i < num_edges; i++)
    {
      edge e = INDEX_EDGE (el, i);

      /* Mark edges we've replaced by fake edges above as ignored.  */
      if ((e->flags & (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL))
          && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
          && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
        {
          EDGE_INFO (e)->ignore = 1;
          ignored_edges++;
        }
      /* Ignore edges after musttail calls.  */
      if (cfun->has_musttail
          && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
        {
          gimple_stmt_iterator gsi = gsi_last_nondebug_bb (e->src);
          gimple *stmt = gsi_stmt (gsi);
          if (stmt
              && is_gimple_call (stmt)
              && gimple_call_must_tail_p (as_a <const gcall *> (stmt)))
            {
              EDGE_INFO (e)->ignore = 1;
              ignored_edges++;
            }
        }
    }

  /* Create a spanning tree from the basic block graph, marking each edge
     that is on the spanning tree.  We insert as many abnormal and
     critical edges as possible to minimize the number of edge splits
     necessary.  */

  if (!thunk)
    find_spanning_tree (el);
  else
    {
      edge e;
      edge_iterator ei;
      /* Keep only the edge from the entry block to be instrumented.  */
      FOR_EACH_BB_FN (bb, cfun)
        FOR_EACH_EDGE (e, ei, bb->succs)
          EDGE_INFO (e)->ignore = true;
    }


  /* Fake edges that are not on the tree will not be instrumented, so
     mark them ignored.  */
  for (num_instrumented = i = 0; i < num_edges; i++)
    {
      edge e = INDEX_EDGE (el, i);
      struct edge_profile_info *inf = EDGE_INFO (e);

      if (inf->ignore || inf->on_tree)
        /*NOP*/;
      else if (e->flags & EDGE_FAKE)
        {
          inf->ignore = 1;
          ignored_edges++;
        }
      else
        num_instrumented++;
    }

  total_num_blocks += n_basic_blocks_for_fn (cfun);
  if (dump_file)
    fprintf (dump_file, "%d basic blocks\n", n_basic_blocks_for_fn (cfun));

  total_num_edges += num_edges;
  if (dump_file)
    fprintf (dump_file, "%d edges\n", num_edges);

  total_num_edges_ignored += ignored_edges;
  if (dump_file)
    fprintf (dump_file, "%d ignored edges\n", ignored_edges);

  total_num_edges_instrumented += num_instrumented;
  if (dump_file)
    fprintf (dump_file, "%d instrumentation edges\n", num_instrumented);

  /* Dump the function body before it's instrumented.
     It helps to debug the gcov tool.  */
  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_function_to_file (cfun->decl, dump_file, dump_flags);

  /* Compute two different checksums.  Note that we want to compute
     the checksum in only one place, since it depends on the shape
     of the control flow which can change during
     various transformations.  */
  if (thunk)
    {
      /* At stream-in time we do not have a CFG, so we cannot do checksums.  */
      cfg_checksum = 0;
      lineno_checksum = 0;
    }
  else
    {
      cfg_checksum = coverage_compute_cfg_checksum (cfun);
      lineno_checksum = coverage_compute_lineno_checksum ();
    }

  /* Write the data from which gcov can reconstruct the basic block
     graph and function line numbers (the gcno file).  */
  output_to_file = false;
  if (coverage_begin_function (lineno_checksum, cfg_checksum))
    {
      gcov_position_t offset;

      /* Condition coverage needs a deeper analysis to identify the
         expressions of conditions, which means it is not yet ready to
         write to the gcno file.  It will write its entries later, but
         needs to know whether it should do so at all, which is
         controlled by the return value of coverage_begin_function.  */
      output_to_file = true;

      /* Basic block flags.  */
      offset = gcov_write_tag (GCOV_TAG_BLOCKS);
      gcov_write_unsigned (n_basic_blocks_for_fn (cfun));
      gcov_write_length (offset);

      /* Arcs.  */
      FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
                      EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
        {
          edge e;
          edge_iterator ei;

          offset = gcov_write_tag (GCOV_TAG_ARCS);
          gcov_write_unsigned (bb->index);

          FOR_EACH_EDGE (e, ei, bb->succs)
            {
              struct edge_profile_info *i = EDGE_INFO (e);
              if (!i->ignore)
                {
                  unsigned flag_bits = 0;

                  if (i->on_tree)
                    flag_bits |= GCOV_ARC_ON_TREE;
                  if (e->flags & EDGE_FAKE)
                    flag_bits |= GCOV_ARC_FAKE;
                  if (e->flags & EDGE_FALLTHRU)
                    flag_bits |= GCOV_ARC_FALLTHROUGH;
                  if (e->flags & EDGE_TRUE_VALUE)
                    flag_bits |= GCOV_ARC_TRUE;
                  if (e->flags & EDGE_FALSE_VALUE)
                    flag_bits |= GCOV_ARC_FALSE;
                  /* On trees we don't have fallthru flags, but we can
                     recompute them from CFG shape.  */
                  if (e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)
                      && e->src->next_bb == e->dest)
                    flag_bits |= GCOV_ARC_FALLTHROUGH;

                  gcov_write_unsigned (e->dest->index);
                  gcov_write_unsigned (flag_bits);
                }
            }

          gcov_write_length (offset);
        }

      /* Line numbers.  */
      /* Initialize the output.  */
      output_location (&streamed_locations, NULL, 0, NULL, NULL);

      hash_set<location_hash> seen_locations;

      FOR_EACH_BB_FN (bb, cfun)
        {
          gimple_stmt_iterator gsi;
          gcov_position_t offset = 0;

          if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb)
            {
              location_t loc = DECL_SOURCE_LOCATION (current_function_decl);
              if (!RESERVED_LOCATION_P (loc))
                {
                  seen_locations.add (loc);
                  expanded_location curr_location = expand_location (loc);
                  output_location (&streamed_locations, curr_location.file,
                                   MAX (1, curr_location.line), &offset, bb);
                }
            }

          for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
            {
              gimple *stmt = gsi_stmt (gsi);
              location_t loc = gimple_location (stmt);
              if (!RESERVED_LOCATION_P (loc))
                {
                  seen_locations.add (loc);
                  output_location (&streamed_locations, gimple_filename (stmt),
                                   MAX (1, gimple_lineno (stmt)), &offset, bb);
                }
            }

          /* Notice GOTO expressions eliminated while constructing the CFG.
             It's hard to distinguish such expressions, but the goto_locus
             should not be any of the already seen locations.  */
          location_t loc;
          if (single_succ_p (bb)
              && (loc = single_succ_edge (bb)->goto_locus)
              && !RESERVED_LOCATION_P (loc)
              && !seen_locations.contains (loc))
            {
              expanded_location curr_location = expand_location (loc);
              output_location (&streamed_locations, curr_location.file,
                               MAX (1, curr_location.line), &offset, bb);
            }

          if (offset)
            {
              /* A file name of NULL indicates the end of the run.  */
              gcov_write_unsigned (0);
              gcov_write_string (NULL);
              gcov_write_length (offset);
            }
        }
    }

  if (flag_profile_values)
    gimple_find_values_to_profile (&values);

  if (flag_branch_probabilities)
    {
      compute_branch_probabilities (cfg_checksum, lineno_checksum);
      if (flag_profile_values)
        compute_value_histograms (values, cfg_checksum, lineno_checksum);
    }

  remove_fake_edges ();

  if (condition_coverage_flag || path_coverage_flag || profile_arc_flag)
    gimple_init_gcov_profiler ();

  if (condition_coverage_flag)
    {
      struct condcov *cov = find_conditions (cfun);
      gcc_assert (cov);
      const size_t nconds = cov_length (cov);
      total_num_conds += nconds;

      if (coverage_counter_alloc (GCOV_COUNTER_CONDS, 2 * nconds))
        {
          gcov_position_t offset {};
          if (output_to_file)
            offset = gcov_write_tag (GCOV_TAG_CONDS);

          for (size_t i = 0; i != nconds; ++i)
            {
              array_slice<basic_block> expr = cov_blocks (cov, i);
              array_slice<uint64_t> masks = cov_masks (cov, i);
              array_slice<sbitmap> maps = cov_maps (cov, i);
              gcc_assert (expr.is_valid ());
              gcc_assert (masks.is_valid ());
              gcc_assert (maps.is_valid ());

              size_t terms = instrument_decisions (expr, i, maps, masks);
              if (output_to_file)
                {
                  gcov_write_unsigned (expr.front ()->index);
                  gcov_write_unsigned (terms);
                }
            }
          if (output_to_file)
            gcov_write_length (offset);
        }
      cov_free (cov);
    }

  /* For each edge not on the spanning tree, add counting code.  */
  if (profile_arc_flag
      && coverage_counter_alloc (GCOV_COUNTER_ARCS, num_instrumented))
    {
      unsigned n_instrumented;

      n_instrumented = instrument_edges (el);

      gcc_assert (n_instrumented == num_instrumented);

      if (flag_profile_values)
        instrument_values (values);
    }

  unsigned instrument_prime_paths (struct function*);
  if (path_coverage_flag)
    {
      const unsigned npaths = instrument_prime_paths (cfun);
      if (output_to_file)
        {
          gcov_position_t offset = gcov_write_tag (GCOV_TAG_PATHS);
          gcov_write_unsigned (npaths);
          gcov_write_length (offset);
        }
    }

  free_aux_for_edges ();

  values.release ();
  free_edge_list (el);
  /* Commit changes done by instrumentation.  */
  gsi_commit_edge_inserts ();

  coverage_end_function (lineno_checksum, cfg_checksum);
  if (flag_branch_probabilities
      && (profile_status_for_fn (cfun) == PROFILE_READ))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        report_predictor_hitrates ();
      sreal nit;
      bool reliable;

      /* At this moment we have precise loop iteration count estimates.
         Record them to the loop structure before the profile gets out
         of date.  */
      for (auto loop : loops_list (cfun, 0))
        if (loop->header->count.ipa ().nonzero_p ()
            && expected_loop_iterations_by_profile (loop, &nit, &reliable)
            && reliable)
          {
            widest_int bound = nit.to_nearest_int ();
            loop->any_estimate = false;
            record_niter_bound (loop, bound, true, false);
          }
      compute_function_frequency ();
    }
}

/* Union find algorithm implementation for the basic blocks using
   aux fields.  */

static basic_block
find_group (basic_block bb)
{
  basic_block group = bb, bb1;

  while ((basic_block) group->aux != group)
    group = (basic_block) group->aux;

  /* Compress path.  */
  while ((basic_block) bb->aux != group)
    {
      bb1 = (basic_block) bb->aux;
      bb->aux = (void *) group;
      bb = bb1;
    }
  return group;
}
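
/* For instance (illustrative only): with aux links B1 -> B2 -> B3 -> B4
   and B4 -> B4, find_group (B1) returns B4 and rewrites the aux links
   of B1 and B2 to point directly at B4, so later lookups take a single
   step.  */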

static void
union_groups (basic_block bb1, basic_block bb2)
{
  basic_block bb1g = find_group (bb1);
  basic_block bb2g = find_group (bb2);

  /* ??? I don't have a place for the rank field.  OK.  Let's go without it,
     this code is unlikely to be a performance problem anyway.  */
  gcc_assert (bb1g != bb2g);

  bb1g->aux = bb2g;
}

/* This function searches all of the edges in the program flow graph, and puts
   as many bad edges as possible onto the spanning tree.  Bad edges include
   abnormal edges, which can't be instrumented at the moment.  Since it is
   possible for fake edges to form a cycle, we will have to develop some
   better way in the future.  Also put critical edges on the tree, since they
   are more expensive to instrument.  */

static void
find_spanning_tree (struct edge_list *el)
{
  int i;
  int num_edges = NUM_EDGES (el);
  basic_block bb;

  /* We use the aux field for the standard union-find algorithm.  */
  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
    bb->aux = bb;

  /* Add the fake edge from exit to entry that we can't instrument.  */
  union_groups (EXIT_BLOCK_PTR_FOR_FN (cfun), ENTRY_BLOCK_PTR_FOR_FN (cfun));

  /* First add all abnormal edges to the tree unless they form a cycle.  Also
     add all edges to the exit block to avoid inserting profiling code behind
     setting the return value from the function.  */
  for (i = 0; i < num_edges; i++)
    {
      edge e = INDEX_EDGE (el, i);
      if (((e->flags & (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL | EDGE_FAKE))
           || e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
          && !EDGE_INFO (e)->ignore
          && (find_group (e->src) != find_group (e->dest)))
        {
          if (dump_file)
            fprintf (dump_file, "Abnormal edge %d to %d put to tree\n",
                     e->src->index, e->dest->index);
          EDGE_INFO (e)->on_tree = 1;
          union_groups (e->src, e->dest);
        }
    }

  /* And now the rest.  The edge list is sorted according to frequencies and
     thus we will produce a minimal spanning tree.  */
  for (i = 0; i < num_edges; i++)
    {
      edge e = INDEX_EDGE (el, i);
      if (!EDGE_INFO (e)->ignore
          && find_group (e->src) != find_group (e->dest))
        {
          if (dump_file)
            fprintf (dump_file, "Normal edge %d to %d put to tree\n",
                     e->src->index, e->dest->index);
          EDGE_INFO (e)->on_tree = 1;
          union_groups (e->src, e->dest);
        }
    }

  clear_aux_for_blocks ();
}

/* Perform file-level initialization for branch-prob processing.  */

void
init_branch_prob (void)
{
  int i;

  total_num_blocks = 0;
  total_num_edges = 0;
  total_num_edges_ignored = 0;
  total_num_edges_instrumented = 0;
  total_num_blocks_created = 0;
  total_num_passes = 0;
  total_num_times_called = 0;
  total_num_branches = 0;
  total_num_conds = 0;
  for (i = 0; i < 20; i++)
    total_hist_br_prob[i] = 0;
}

/* Performs file-level cleanup after branch-prob processing
   is completed.  */

void
end_branch_prob (void)
{
  if (dump_file)
    {
      fprintf (dump_file, "\n");
      fprintf (dump_file, "Total number of blocks: %d\n",
               total_num_blocks);
      fprintf (dump_file, "Total number of edges: %d\n", total_num_edges);
      fprintf (dump_file, "Total number of ignored edges: %d\n",
               total_num_edges_ignored);
      fprintf (dump_file, "Total number of instrumented edges: %d\n",
               total_num_edges_instrumented);
      fprintf (dump_file, "Total number of blocks created: %d\n",
               total_num_blocks_created);
      fprintf (dump_file, "Total number of graph solution passes: %d\n",
               total_num_passes);
      if (total_num_times_called != 0)
        fprintf (dump_file, "Average number of graph solution passes: %d\n",
                 (total_num_passes + (total_num_times_called >> 1))
                 / total_num_times_called);
      fprintf (dump_file, "Total number of branches: %d\n",
               total_num_branches);
      if (total_num_branches)
        {
          int i;

          for (i = 0; i < 10; i++)
            fprintf (dump_file, "%d%% branches in range %d-%d%%\n",
                     (total_hist_br_prob[i] + total_hist_br_prob[19-i]) * 100
                     / total_num_branches, 5*i, 5*i+5);
        }
      fprintf (dump_file, "Total number of conditions: %d\n",
               total_num_conds);
    }
}

/* Return true if any cfg coverage/profiling is enabled: -fprofile-arcs,
   -fcondition-coverage, or -fpath-coverage.  */

bool
coverage_instrumentation_p ()
{
  return profile_arc_flag || condition_coverage_flag || path_coverage_flag;
}