/* C-compiler utilities for types and variables storage layout
   Copyright (C) 1987-2023 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not, see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "target.h"
#include "function.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "tm_p.h"
#include "stringpool.h"
#include "regs.h"
#include "emit-rtl.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "varasm.h"
#include "print-tree.h"
#include "langhooks.h"
#include "tree-inline.h"
#include "dumpfile.h"
#include "gimplify.h"
#include "attribs.h"
#include "debug.h"
#include "calls.h"

/* Data type for the expressions representing sizes of data types.
   It is the first integer type laid out.  */
tree sizetype_tab[(int) stk_type_kind_last];

/* If nonzero, this is an upper limit on alignment of structure fields.
   The value is measured in bits.  */
unsigned int maximum_field_alignment = TARGET_DEFAULT_PACK_STRUCT * BITS_PER_UNIT;

static tree self_referential_size (tree);
static void finalize_record_size (record_layout_info);
static void finalize_type_size (tree);
static void place_union_field (record_layout_info, tree);
static int excess_unit_span (HOST_WIDE_INT, HOST_WIDE_INT, HOST_WIDE_INT,
                             HOST_WIDE_INT, tree);
extern void debug_rli (record_layout_info);

/* Given a size SIZE that may not be a constant, return a SAVE_EXPR
   to serve as the actual size-expression for a type or decl.  */

tree
variable_size (tree size)
{
  /* Obviously.  */
  if (TREE_CONSTANT (size))
    return size;

  /* If the size is self-referential, we can't make a SAVE_EXPR (see
     save_expr for the rationale).  But we can do something else.  */
  if (CONTAINS_PLACEHOLDER_P (size))
    return self_referential_size (size);

  /* If we are in the global binding level, we can't make a SAVE_EXPR
     since it may end up being shared across functions, so it is up
     to the front-end to deal with this case.  */
  if (lang_hooks.decls.global_bindings_p ())
    return size;

  return save_expr (size);
}
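
/* A minimal sketch (not part of GCC, hence guarded out) of how a front
   end might use variable_size when laying out a C99 VLA.  The helper
   name and its arguments are hypothetical.  */
#if 0
static tree
vla_size_in_bytes_sketch (tree elem_type, tree nelts)
{
  /* Compute NELTS * sizeof (ELEM_TYPE) in bytes...  */
  tree size = size_binop (MULT_EXPR, fold_convert (sizetype, nelts),
                          TYPE_SIZE_UNIT (elem_type));
  /* ... and wrap it so it is evaluated only once; past this point SIZE
     is a SAVE_EXPR unless it was constant, self-referential, or at
     global scope.  */
  return variable_size (size);
}
#endif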

/* An array of functions used for self-referential size computation.  */
static GTY(()) vec<tree, va_gc> *size_functions;

/* Return true if T is a self-referential component reference.  */

static bool
self_referential_component_ref_p (tree t)
{
  if (TREE_CODE (t) != COMPONENT_REF)
    return false;

  while (REFERENCE_CLASS_P (t))
    t = TREE_OPERAND (t, 0);

  return (TREE_CODE (t) == PLACEHOLDER_EXPR);
}

/* Similar to copy_tree_r but do not copy component references involving
   PLACEHOLDER_EXPRs.  These nodes are spotted in find_placeholder_in_expr
   and substituted in substitute_in_expr.  */

static tree
copy_self_referential_tree_r (tree *tp, int *walk_subtrees, void *data)
{
  enum tree_code code = TREE_CODE (*tp);

  /* Stop at types, decls, constants like copy_tree_r.  */
  if (TREE_CODE_CLASS (code) == tcc_type
      || TREE_CODE_CLASS (code) == tcc_declaration
      || TREE_CODE_CLASS (code) == tcc_constant)
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  /* This is the pattern built in ada/make_aligning_type.  */
  else if (code == ADDR_EXPR
           && TREE_CODE (TREE_OPERAND (*tp, 0)) == PLACEHOLDER_EXPR)
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  /* Default case: the component reference.  */
  else if (self_referential_component_ref_p (*tp))
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  /* We're not supposed to have them in self-referential size trees
     because we wouldn't properly control when they are evaluated.
     However, not creating superfluous SAVE_EXPRs requires accurate
     tracking of readonly-ness all the way down to here, which we
     cannot always guarantee in practice.  So punt in this case.  */
  else if (code == SAVE_EXPR)
    return error_mark_node;

  else if (code == STATEMENT_LIST)
    gcc_unreachable ();

  return copy_tree_r (tp, walk_subtrees, data);
}

/* Given a SIZE expression that is self-referential, return an equivalent
   expression to serve as the actual size expression for a type.  */

static tree
self_referential_size (tree size)
{
  static unsigned HOST_WIDE_INT fnno = 0;
  vec<tree> self_refs = vNULL;
  tree param_type_list = NULL, param_decl_list = NULL;
  tree t, ref, return_type, fntype, fnname, fndecl;
  unsigned int i;
  char buf[128];
  vec<tree, va_gc> *args = NULL;

  /* Do not factor out simple operations.  */
  t = skip_simple_constant_arithmetic (size);
  if (TREE_CODE (t) == CALL_EXPR || self_referential_component_ref_p (t))
    return size;

  /* Collect the list of self-references in the expression.  */
  find_placeholder_in_expr (size, &self_refs);
  gcc_assert (self_refs.length () > 0);

  /* Obtain a private copy of the expression.  */
  t = size;
  if (walk_tree (&t, copy_self_referential_tree_r, NULL, NULL) != NULL_TREE)
    return size;
  size = t;

  /* Build the parameter and argument lists in parallel; also
     substitute the former for the latter in the expression.  */
  vec_alloc (args, self_refs.length ());
  FOR_EACH_VEC_ELT (self_refs, i, ref)
    {
      tree subst, param_name, param_type, param_decl;

      if (DECL_P (ref))
        {
          /* We shouldn't have true variables here.  */
          gcc_assert (TREE_READONLY (ref));
          subst = ref;
        }
      /* This is the pattern built in ada/make_aligning_type.  */
      else if (TREE_CODE (ref) == ADDR_EXPR)
        subst = ref;
      /* Default case: the component reference.  */
      else
        subst = TREE_OPERAND (ref, 1);

      sprintf (buf, "p%d", i);
      param_name = get_identifier (buf);
      param_type = TREE_TYPE (ref);
      param_decl
        = build_decl (input_location, PARM_DECL, param_name, param_type);
      DECL_ARG_TYPE (param_decl) = param_type;
      DECL_ARTIFICIAL (param_decl) = 1;
      TREE_READONLY (param_decl) = 1;

      size = substitute_in_expr (size, subst, param_decl);

      param_type_list = tree_cons (NULL_TREE, param_type, param_type_list);
      param_decl_list = chainon (param_decl, param_decl_list);
      args->quick_push (ref);
    }

  self_refs.release ();

  /* Append 'void' to indicate that the number of parameters is fixed.  */
  param_type_list = tree_cons (NULL_TREE, void_type_node, param_type_list);

  /* The parameter lists have been created in reverse order.  */
  param_type_list = nreverse (param_type_list);
  param_decl_list = nreverse (param_decl_list);

  /* Build the function type.  */
  return_type = TREE_TYPE (size);
  fntype = build_function_type (return_type, param_type_list);

  /* Build the function declaration.  */
  sprintf (buf, "SZ" HOST_WIDE_INT_PRINT_UNSIGNED, fnno++);
  fnname = get_file_function_name (buf);
  fndecl = build_decl (input_location, FUNCTION_DECL, fnname, fntype);
  for (t = param_decl_list; t; t = DECL_CHAIN (t))
    DECL_CONTEXT (t) = fndecl;
  DECL_ARGUMENTS (fndecl) = param_decl_list;
  DECL_RESULT (fndecl)
    = build_decl (input_location, RESULT_DECL, 0, return_type);
  DECL_CONTEXT (DECL_RESULT (fndecl)) = fndecl;

  /* The function has been created by the compiler and we don't
     want to emit debug info for it.  */
  DECL_ARTIFICIAL (fndecl) = 1;
  DECL_IGNORED_P (fndecl) = 1;

  /* It is supposed to be "const" and never throw.  */
  TREE_READONLY (fndecl) = 1;
  TREE_NOTHROW (fndecl) = 1;

  /* We want it to be inlined when this is deemed profitable, as
     well as discarded if every call has been integrated.  */
  DECL_DECLARED_INLINE_P (fndecl) = 1;

  /* It is made up of a unique return statement.  */
  DECL_INITIAL (fndecl) = make_node (BLOCK);
  BLOCK_SUPERCONTEXT (DECL_INITIAL (fndecl)) = fndecl;
  t = build2 (MODIFY_EXPR, return_type, DECL_RESULT (fndecl), size);
  DECL_SAVED_TREE (fndecl) = build1 (RETURN_EXPR, void_type_node, t);
  TREE_STATIC (fndecl) = 1;

  /* Put it onto the list of size functions.  */
  vec_safe_push (size_functions, fndecl);

  /* Replace the original expression with a call to the size function.  */
  return build_call_expr_loc_vec (UNKNOWN_LOCATION, fndecl, args);
}
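
/* An illustrative walk-through (not from the sources): given a record S
   with a self-referential size expression such as

     (PLACEHOLDER_EXPR <struct S>).n * 4

   the code above emits a parameterized size function, morally

     static int SZ0 (int p0) { return p0 * 4; }

   and returns the call SZ0 (ref.n) in its place, so the GENERIC inliner
   can substitute the body back at every call site.  The names SZ0 and
   p0 follow the sprintf patterns above; the element size 4 is a
   made-up value.  */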

/* Take, queue and compile all the size functions.  It is essential that
   the size functions be gimplified at the very end of the compilation
   in order to guarantee transparent handling of self-referential sizes.
   Otherwise the GENERIC inliner would not be able to inline them back
   at each of their call sites, thus creating artificial non-constant
   size expressions which would trigger nasty problems later on.  */

void
finalize_size_functions (void)
{
  unsigned int i;
  tree fndecl;

  for (i = 0; size_functions && size_functions->iterate (i, &fndecl); i++)
    {
      allocate_struct_function (fndecl, false);
      set_cfun (NULL);
      dump_function (TDI_original, fndecl);

      /* As these functions are used to describe the layout of variable-length
         structures, debug info generation needs their implementation.  */
      debug_hooks->size_function (fndecl);
      gimplify_function_tree (fndecl);
      cgraph_node::finalize_function (fndecl, false);
    }

  vec_free (size_functions);
}

/* Return a machine mode of class MCLASS with SIZE bits of precision,
   if one exists.  The mode may have padding bits as well as the SIZE
   value bits.  If LIMIT is nonzero, disregard modes wider than
   MAX_FIXED_MODE_SIZE.  */

opt_machine_mode
mode_for_size (poly_uint64 size, enum mode_class mclass, int limit)
{
  machine_mode mode;
  int i;

  if (limit && maybe_gt (size, (unsigned int) MAX_FIXED_MODE_SIZE))
    return opt_machine_mode ();

  /* Get the first mode which has this size, in the specified class.  */
  FOR_EACH_MODE_IN_CLASS (mode, mclass)
    if (known_eq (GET_MODE_PRECISION (mode), size))
      return mode;

  if (mclass == MODE_INT || mclass == MODE_PARTIAL_INT)
    for (i = 0; i < NUM_INT_N_ENTS; i ++)
      if (known_eq (int_n_data[i].bitsize, size)
          && int_n_enabled_p[i])
        return int_n_data[i].m;

  return opt_machine_mode ();
}
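
/* A sketch of typical results (guarded out; assumes a target with the
   usual 8/16/32-bit QI/HI/SImode ladder).  */
#if 0
static void
mode_for_size_sketch (void)
{
  machine_mode m;
  /* A 32-bit integer mode exists, so this yields SImode.  */
  if (mode_for_size (32, MODE_INT, 0).exists (&m))
    gcc_checking_assert (m == SImode);
  /* No integer mode has exactly 24 bits of precision here, so the
     returned opt_machine_mode is empty.  */
  gcc_checking_assert (!mode_for_size (24, MODE_INT, 0).exists (&m));
}
#endif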

/* Similar, except passed a tree node.  */

opt_machine_mode
mode_for_size_tree (const_tree size, enum mode_class mclass, int limit)
{
  unsigned HOST_WIDE_INT uhwi;
  unsigned int ui;

  if (!tree_fits_uhwi_p (size))
    return opt_machine_mode ();
  uhwi = tree_to_uhwi (size);
  ui = uhwi;
  if (uhwi != ui)
    return opt_machine_mode ();
  return mode_for_size (ui, mclass, limit);
}

/* Return the narrowest mode of class MCLASS that contains at least
   SIZE bits.  Abort if no such mode exists.  */

machine_mode
smallest_mode_for_size (poly_uint64 size, enum mode_class mclass)
{
  machine_mode mode = VOIDmode;
  int i;

  /* Get the first mode which has at least this size, in the
     specified class.  */
  FOR_EACH_MODE_IN_CLASS (mode, mclass)
    if (known_ge (GET_MODE_PRECISION (mode), size))
      break;

  gcc_assert (mode != VOIDmode);

  if (mclass == MODE_INT || mclass == MODE_PARTIAL_INT)
    for (i = 0; i < NUM_INT_N_ENTS; i ++)
      if (known_ge (int_n_data[i].bitsize, size)
          && known_lt (int_n_data[i].bitsize, GET_MODE_PRECISION (mode))
          && int_n_enabled_p[i])
        mode = int_n_data[i].m;

  return mode;
}
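
/* A sketch (guarded out): with the same QI/HI/SI ladder, the narrowest
   integer mode that can hold 17 bits is SImode.  */
#if 0
static void
smallest_mode_sketch (void)
{
  gcc_checking_assert (smallest_mode_for_size (17, MODE_INT) == SImode);
}
#endif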

/* Return an integer mode of exactly the same size as MODE, if one exists.  */

opt_scalar_int_mode
int_mode_for_mode (machine_mode mode)
{
  switch (GET_MODE_CLASS (mode))
    {
    case MODE_INT:
    case MODE_PARTIAL_INT:
      return as_a <scalar_int_mode> (mode);

    case MODE_COMPLEX_INT:
    case MODE_COMPLEX_FLOAT:
    case MODE_FLOAT:
    case MODE_DECIMAL_FLOAT:
    case MODE_FRACT:
    case MODE_ACCUM:
    case MODE_UFRACT:
    case MODE_UACCUM:
    case MODE_VECTOR_BOOL:
    case MODE_VECTOR_INT:
    case MODE_VECTOR_FLOAT:
    case MODE_VECTOR_FRACT:
    case MODE_VECTOR_ACCUM:
    case MODE_VECTOR_UFRACT:
    case MODE_VECTOR_UACCUM:
      return int_mode_for_size (GET_MODE_BITSIZE (mode), 0);

    case MODE_OPAQUE:
      return opt_scalar_int_mode ();

    case MODE_RANDOM:
      if (mode == BLKmode)
        return opt_scalar_int_mode ();

      /* fall through */

    case MODE_CC:
    default:
      gcc_unreachable ();
    }
}
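
/* A sketch (guarded out; assumes SFmode is 32 bits wide, as on most
   targets): float and vector modes map to the same-sized integer mode,
   while BLKmode has none.  */
#if 0
static void
int_mode_for_mode_sketch (void)
{
  scalar_int_mode m;
  if (int_mode_for_mode (SFmode).exists (&m))
    gcc_checking_assert (m == SImode);
  gcc_checking_assert (!int_mode_for_mode (BLKmode).exists (&m));
}
#endif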

/* Find a mode that can be used for efficient bitwise operations on MODE,
   if one exists.  */

opt_machine_mode
bitwise_mode_for_mode (machine_mode mode)
{
  /* Quick exit if we already have a suitable mode.  */
  scalar_int_mode int_mode;
  if (is_a <scalar_int_mode> (mode, &int_mode)
      && GET_MODE_BITSIZE (int_mode) <= MAX_FIXED_MODE_SIZE)
    return int_mode;

  /* Reuse the sanity checks from int_mode_for_mode.  */
  gcc_checking_assert ((int_mode_for_mode (mode), true));

  poly_int64 bitsize = GET_MODE_BITSIZE (mode);

  /* Try to replace complex modes with complex modes.  In general we
     expect both components to be processed independently, so we only
     care whether there is a register for the inner mode.  */
  if (COMPLEX_MODE_P (mode))
    {
      machine_mode trial = mode;
      if ((GET_MODE_CLASS (trial) == MODE_COMPLEX_INT
           || mode_for_size (bitsize, MODE_COMPLEX_INT, false).exists (&trial))
          && have_regs_of_mode[GET_MODE_INNER (trial)])
        return trial;
    }

  /* Try to replace vector modes with vector modes.  Also try using vector
     modes if an integer mode would be too big.  */
  if (VECTOR_MODE_P (mode)
      || maybe_gt (bitsize, MAX_FIXED_MODE_SIZE))
    {
      machine_mode trial = mode;
      if ((GET_MODE_CLASS (trial) == MODE_VECTOR_INT
           || mode_for_size (bitsize, MODE_VECTOR_INT, 0).exists (&trial))
          && have_regs_of_mode[trial]
          && targetm.vector_mode_supported_p (trial))
        return trial;
    }

  /* Otherwise fall back on integers while honoring MAX_FIXED_MODE_SIZE.  */
  return mode_for_size (bitsize, MODE_INT, true);
}

/* Find a type that can be used for efficient bitwise operations on MODE.
   Return null if no such mode exists.  */

tree
bitwise_type_for_mode (machine_mode mode)
{
  if (!bitwise_mode_for_mode (mode).exists (&mode))
    return NULL_TREE;

  unsigned int inner_size = GET_MODE_UNIT_BITSIZE (mode);
  tree inner_type = build_nonstandard_integer_type (inner_size, true);

  if (VECTOR_MODE_P (mode))
    return build_vector_type_for_mode (inner_type, mode);

  if (COMPLEX_MODE_P (mode))
    return build_complex_type (inner_type);

  gcc_checking_assert (GET_MODE_INNER (mode) == mode);
  return inner_type;
}
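
/* A sketch (guarded out; V4SFmode is an assumption about the target):
   on a machine with 128-bit vectors, V4SFmode maps to a vector of four
   32-bit unsigned integers, which is how callers lower bitwise
   AND/OR/XOR on floating-point data.  */
#if 0
static void
bitwise_type_sketch (void)
{
  tree t = bitwise_type_for_mode (V4SFmode);
  if (t != NULL_TREE && VECTOR_TYPE_P (t))
    gcc_checking_assert (TYPE_UNSIGNED (TREE_TYPE (t)));
}
#endif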

/* Find a mode that is suitable for representing a vector with NUNITS
   elements of mode INNERMODE, if one exists.  The returned mode can be
   either an integer mode or a vector mode.  */

opt_machine_mode
mode_for_vector (scalar_mode innermode, poly_uint64 nunits)
{
  machine_mode mode;

  /* First, look for a supported vector type.  */
  if (SCALAR_FLOAT_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_FLOAT;
  else if (SCALAR_FRACT_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_FRACT;
  else if (SCALAR_UFRACT_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_UFRACT;
  else if (SCALAR_ACCUM_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_ACCUM;
  else if (SCALAR_UACCUM_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_UACCUM;
  else
    mode = MIN_MODE_VECTOR_INT;

  /* Only check the broader vector_mode_supported_any_target_p here.
     We'll filter through target-specific availability and
     vector_mode_supported_p later in vector_type_mode.  */
  FOR_EACH_MODE_FROM (mode, mode)
    if (known_eq (GET_MODE_NUNITS (mode), nunits)
        && GET_MODE_INNER (mode) == innermode
        && targetm.vector_mode_supported_any_target_p (mode))
      return mode;

  /* For integers, try mapping it to a same-sized scalar mode.  */
  if (GET_MODE_CLASS (innermode) == MODE_INT)
    {
      poly_uint64 nbits = nunits * GET_MODE_BITSIZE (innermode);
      if (int_mode_for_size (nbits, 0).exists (&mode)
          && have_regs_of_mode[mode])
        return mode;
    }

  return opt_machine_mode ();
}
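
/* A sketch (guarded out): asking for four SImode elements yields
   V4SImode where the target supports it, and may fall back to a
   same-sized scalar integer mode otherwise, per the code above.  */
#if 0
static void
mode_for_vector_sketch (void)
{
  machine_mode m;
  if (mode_for_vector (SImode, 4).exists (&m))
    gcc_checking_assert (VECTOR_MODE_P (m)
                         || GET_MODE_CLASS (m) == MODE_INT);
}
#endif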

/* If a piece of code is using vector mode VECTOR_MODE and also wants
   to operate on elements of mode ELEMENT_MODE, return the vector mode
   it should use for those elements.  If NUNITS is nonzero, ensure that
   the mode has exactly NUNITS elements, otherwise pick whichever vector
   size pairs the most naturally with VECTOR_MODE; this may mean choosing
   a mode with a different size and/or number of elements, depending on
   what the target prefers.  Return an empty opt_machine_mode if there
   is no supported vector mode with the required properties.

   Unlike mode_for_vector, any returned mode is guaranteed to satisfy
   both VECTOR_MODE_P and targetm.vector_mode_supported_p.  */

opt_machine_mode
related_vector_mode (machine_mode vector_mode, scalar_mode element_mode,
                     poly_uint64 nunits)
{
  gcc_assert (VECTOR_MODE_P (vector_mode));
  return targetm.vectorize.related_mode (vector_mode, element_mode, nunits);
}

/* If a piece of code is using vector mode VECTOR_MODE and also wants
   to operate on integer vectors with the same element size and number
   of elements, return the vector mode it should use.  Return an empty
   opt_machine_mode if there is no supported vector mode with the
   required properties.

   Unlike mode_for_vector, any returned mode is guaranteed to satisfy
   both VECTOR_MODE_P and targetm.vector_mode_supported_p.  */

opt_machine_mode
related_int_vector_mode (machine_mode vector_mode)
{
  gcc_assert (VECTOR_MODE_P (vector_mode));
  scalar_int_mode int_mode;
  if (int_mode_for_mode (GET_MODE_INNER (vector_mode)).exists (&int_mode))
    return related_vector_mode (vector_mode, int_mode,
                                GET_MODE_NUNITS (vector_mode));
  return opt_machine_mode ();
}
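
/* A sketch (guarded out; V4SFmode is an assumption about the target):
   pairing V4SFmode with same-shape integer elements yields V4SImode on
   typical SIMD targets.  */
#if 0
static void
related_int_vector_sketch (void)
{
  machine_mode m;
  if (related_int_vector_mode (V4SFmode).exists (&m))
    gcc_checking_assert (known_eq (GET_MODE_NUNITS (m), 4));
}
#endif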

/* Return the alignment of MODE.  This will be bounded by 1 and
   BIGGEST_ALIGNMENT.  */

unsigned int
get_mode_alignment (machine_mode mode)
{
  return MIN (BIGGEST_ALIGNMENT, MAX (1, mode_base_align[mode]*BITS_PER_UNIT));
}

/* Return the natural mode of an array, given that it is SIZE bytes in
   total and has elements of type ELEM_TYPE.  */

static machine_mode
mode_for_array (tree elem_type, tree size)
{
  tree elem_size;
  poly_uint64 int_size, int_elem_size;
  unsigned HOST_WIDE_INT num_elems;
  bool limit_p;

  /* One-element arrays get the component type's mode.  */
  elem_size = TYPE_SIZE (elem_type);
  if (simple_cst_equal (size, elem_size))
    return TYPE_MODE (elem_type);

  limit_p = true;
  if (poly_int_tree_p (size, &int_size)
      && poly_int_tree_p (elem_size, &int_elem_size)
      && maybe_ne (int_elem_size, 0U)
      && constant_multiple_p (int_size, int_elem_size, &num_elems))
    {
      machine_mode elem_mode = TYPE_MODE (elem_type);
      machine_mode mode;
      if (targetm.array_mode (elem_mode, num_elems).exists (&mode))
        return mode;
      if (targetm.array_mode_supported_p (elem_mode, num_elems))
        limit_p = false;
    }
  return mode_for_size_tree (size, MODE_INT, limit_p).else_blk ();
}
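
/* A sketch (guarded out): a two-element array of 32-bit ints is 64
   bits, so on typical LP64 targets it gets DImode, while arrays too
   large for any scalar mode fall back to BLKmode via else_blk above.  */
#if 0
static void
mode_for_array_sketch (void)
{
  tree arr = build_array_type_nelts (integer_type_node, 2);
  layout_type (arr);
  gcc_checking_assert (TYPE_MODE (arr) == DImode
                       || TYPE_MODE (arr) == BLKmode);
}
#endif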

/* Subroutine of layout_decl: Force alignment required for the data type.
   But if the decl itself wants greater alignment, don't override that.  */

static inline void
do_type_align (tree type, tree decl)
{
  if (TYPE_ALIGN (type) > DECL_ALIGN (decl))
    {
      SET_DECL_ALIGN (decl, TYPE_ALIGN (type));
      if (TREE_CODE (decl) == FIELD_DECL)
        DECL_USER_ALIGN (decl) = TYPE_USER_ALIGN (type);
    }
  if (TYPE_WARN_IF_NOT_ALIGN (type) > DECL_WARN_IF_NOT_ALIGN (decl))
    SET_DECL_WARN_IF_NOT_ALIGN (decl, TYPE_WARN_IF_NOT_ALIGN (type));
}

/* Set the size, mode and alignment of a ..._DECL node.
   TYPE_DECL does need this for C++.
   Note that LABEL_DECL and CONST_DECL nodes do not need this,
   and FUNCTION_DECL nodes have them set up in a special (and simple) way.
   Don't call layout_decl for them.

   KNOWN_ALIGN is the amount of alignment we can assume this
   decl has with no special effort.  It is relevant only for FIELD_DECLs
   and depends on the previous fields.
   All that matters about KNOWN_ALIGN is which powers of 2 divide it.
   If KNOWN_ALIGN is 0, it means, "as much alignment as you like":
   the record will be aligned to suit.  */

void
layout_decl (tree decl, unsigned int known_align)
{
  tree type = TREE_TYPE (decl);
  enum tree_code code = TREE_CODE (decl);
  rtx rtl = NULL_RTX;
  location_t loc = DECL_SOURCE_LOCATION (decl);

  if (code == CONST_DECL)
    return;

  gcc_assert (code == VAR_DECL || code == PARM_DECL || code == RESULT_DECL
              || code == TYPE_DECL || code == FIELD_DECL);

  rtl = DECL_RTL_IF_SET (decl);

  if (type == error_mark_node)
    type = void_type_node;

  /* Usually the size and mode come from the data type without change,
     however, the front-end may set the explicit width of the field, so its
     size may not be the same as the size of its type.  This happens with
     bitfields, of course (an `int' bitfield may be only 2 bits, say), but it
     also happens with other fields.  For example, the C++ front-end creates
     zero-sized fields corresponding to empty base classes, and depends on
     layout_type setting DECL_FIELD_BITPOS correctly for the field.  Set the
     size in bytes from the size in bits.  If we have already set the mode,
     don't set it again since we can be called twice for FIELD_DECLs.  */

  DECL_UNSIGNED (decl) = TYPE_UNSIGNED (type);
  if (DECL_MODE (decl) == VOIDmode)
    SET_DECL_MODE (decl, TYPE_MODE (type));

  if (DECL_SIZE (decl) == 0)
    {
      DECL_SIZE (decl) = TYPE_SIZE (type);
      DECL_SIZE_UNIT (decl) = TYPE_SIZE_UNIT (type);
    }
  else if (DECL_SIZE_UNIT (decl) == 0)
    DECL_SIZE_UNIT (decl)
      = fold_convert_loc (loc, sizetype,
                          size_binop_loc (loc, CEIL_DIV_EXPR, DECL_SIZE (decl),
                                          bitsize_unit_node));

  if (code != FIELD_DECL)
    /* For non-fields, update the alignment from the type.  */
    do_type_align (type, decl);
  else
    /* For fields, it's a bit more complicated...  */
    {
      bool old_user_align = DECL_USER_ALIGN (decl);
      bool zero_bitfield = false;
      bool packed_p = DECL_PACKED (decl);
      unsigned int mfa;

      if (DECL_BIT_FIELD (decl))
        {
          DECL_BIT_FIELD_TYPE (decl) = type;

          /* A zero-length bit-field affects the alignment of the next
             field.  In essence such bit-fields are not influenced by
             any packing due to #pragma pack or attribute packed.  */
          if (integer_zerop (DECL_SIZE (decl))
              && ! targetm.ms_bitfield_layout_p (DECL_FIELD_CONTEXT (decl)))
            {
              zero_bitfield = true;
              packed_p = false;
              if (PCC_BITFIELD_TYPE_MATTERS)
                do_type_align (type, decl);
              else
                {
#ifdef EMPTY_FIELD_BOUNDARY
                  if (EMPTY_FIELD_BOUNDARY > DECL_ALIGN (decl))
                    {
                      SET_DECL_ALIGN (decl, EMPTY_FIELD_BOUNDARY);
                      DECL_USER_ALIGN (decl) = 0;
                    }
#endif
                }
            }

          /* See if we can use an ordinary integer mode for a bit-field.
             Conditions are: a fixed size that is correct for another mode,
             occupying a complete byte or bytes on proper boundary.  */
          if (TYPE_SIZE (type) != 0
              && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
              && GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT)
            {
              machine_mode xmode;
              if (mode_for_size_tree (DECL_SIZE (decl),
                                      MODE_INT, 1).exists (&xmode))
                {
                  unsigned int xalign = GET_MODE_ALIGNMENT (xmode);
                  if (!(xalign > BITS_PER_UNIT && DECL_PACKED (decl))
                      && (known_align == 0 || known_align >= xalign))
                    {
                      SET_DECL_ALIGN (decl, MAX (xalign, DECL_ALIGN (decl)));
                      SET_DECL_MODE (decl, xmode);
                      DECL_BIT_FIELD (decl) = 0;
                    }
                }
            }

          /* Turn off DECL_BIT_FIELD if we won't need it set.  */
          if (TYPE_MODE (type) == BLKmode && DECL_MODE (decl) == BLKmode
              && known_align >= TYPE_ALIGN (type)
              && DECL_ALIGN (decl) >= TYPE_ALIGN (type))
            DECL_BIT_FIELD (decl) = 0;
        }
      else if (packed_p && DECL_USER_ALIGN (decl))
        /* Don't touch DECL_ALIGN.  For other packed fields, go ahead and
           round up; we'll reduce it again below.  We want packing to
           supersede USER_ALIGN inherited from the type, but defer to
           alignment explicitly specified on the field decl.  */;
      else
        do_type_align (type, decl);

      /* If the field is packed and not explicitly aligned, give it the
         minimum alignment.  Note that do_type_align may set
         DECL_USER_ALIGN, so we need to check old_user_align instead.  */
      if (packed_p
          && !old_user_align)
        SET_DECL_ALIGN (decl, MIN (DECL_ALIGN (decl), BITS_PER_UNIT));

      if (! packed_p && ! DECL_USER_ALIGN (decl))
        {
          /* Some targets (e.g. i386, VMS) limit struct field alignment
             to a lower boundary than alignment of variables unless
             it was overridden by attribute aligned.  */
#ifdef BIGGEST_FIELD_ALIGNMENT
          SET_DECL_ALIGN (decl, MIN (DECL_ALIGN (decl),
                                     (unsigned) BIGGEST_FIELD_ALIGNMENT));
#endif
#ifdef ADJUST_FIELD_ALIGN
          SET_DECL_ALIGN (decl, ADJUST_FIELD_ALIGN (decl, TREE_TYPE (decl),
                                                    DECL_ALIGN (decl)));
#endif
        }

      if (zero_bitfield)
        mfa = initial_max_fld_align * BITS_PER_UNIT;
      else
        mfa = maximum_field_alignment;
      /* Should this be controlled by DECL_USER_ALIGN, too?  */
      if (mfa != 0)
        SET_DECL_ALIGN (decl, MIN (DECL_ALIGN (decl), mfa));
    }

  /* Evaluate nonconstant size only once, either now or as soon as safe.  */
  if (DECL_SIZE (decl) != 0 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
    DECL_SIZE (decl) = variable_size (DECL_SIZE (decl));
  if (DECL_SIZE_UNIT (decl) != 0
      && TREE_CODE (DECL_SIZE_UNIT (decl)) != INTEGER_CST)
    DECL_SIZE_UNIT (decl) = variable_size (DECL_SIZE_UNIT (decl));

  /* If requested, warn about definitions of large data objects.  */
  if ((code == PARM_DECL || (code == VAR_DECL && !DECL_NONLOCAL_FRAME (decl)))
      && !DECL_EXTERNAL (decl))
    {
      tree size = DECL_SIZE_UNIT (decl);

      if (size != 0 && TREE_CODE (size) == INTEGER_CST)
        {
          /* -Wlarger-than= argument of HOST_WIDE_INT_MAX is treated
             as if PTRDIFF_MAX had been specified, with the value
             being that on the target rather than the host.  */
          unsigned HOST_WIDE_INT max_size = warn_larger_than_size;
          if (max_size == HOST_WIDE_INT_MAX)
            max_size = tree_to_shwi (TYPE_MAX_VALUE (ptrdiff_type_node));

          if (compare_tree_int (size, max_size) > 0)
            warning (OPT_Wlarger_than_, "size of %q+D %E bytes exceeds "
                     "maximum object size %wu",
                     decl, size, max_size);
        }
    }

  /* If the RTL was already set, update its mode and mem attributes.  */
  if (rtl)
    {
      PUT_MODE (rtl, DECL_MODE (decl));
      SET_DECL_RTL (decl, 0);
      if (MEM_P (rtl))
        set_mem_attributes (rtl, decl, 1);
      SET_DECL_RTL (decl, rtl);
    }
}
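
/* An illustrative note (not from the sources): for

     struct s { unsigned a : 8; unsigned b : 3; };

   field A has a fixed 8-bit size on a byte boundary, so the bit-field
   code above clears DECL_BIT_FIELD and gives it QImode, whereas B has
   no matching integer mode, keeps DECL_BIT_FIELD set, and is later
   accessed with bit-field extract/insert operations.  */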

/* Given a VAR_DECL, PARM_DECL, RESULT_DECL, or FIELD_DECL, clears the
   results of a previous call to layout_decl and calls it again.  */

void
relayout_decl (tree decl)
{
  DECL_SIZE (decl) = DECL_SIZE_UNIT (decl) = 0;
  SET_DECL_MODE (decl, VOIDmode);
  if (!DECL_USER_ALIGN (decl))
    SET_DECL_ALIGN (decl, 0);
  if (DECL_RTL_SET_P (decl))
    SET_DECL_RTL (decl, 0);

  layout_decl (decl, 0);
}

/* Begin laying out type T, which may be a RECORD_TYPE, UNION_TYPE, or
   QUAL_UNION_TYPE.  Return a pointer to a struct record_layout_info which
   is to be passed to all other layout functions for this record.  It is the
   responsibility of the caller to call `free' for the storage returned.
   Note that garbage collection is not permitted until we finish laying
   out the record.  */

record_layout_info
start_record_layout (tree t)
{
  record_layout_info rli = XNEW (struct record_layout_info_s);

  rli->t = t;

  /* If the type has a minimum specified alignment (via an attribute
     declaration, for example) use it -- otherwise, start with a
     one-byte alignment.  */
  rli->record_align = MAX (BITS_PER_UNIT, TYPE_ALIGN (t));
  rli->unpacked_align = rli->record_align;
  rli->offset_align = MAX (rli->record_align, BIGGEST_ALIGNMENT);

#ifdef STRUCTURE_SIZE_BOUNDARY
  /* Packed structures don't need to have minimum size.  */
  if (! TYPE_PACKED (t))
    {
      unsigned tmp;

      /* #pragma pack overrides STRUCTURE_SIZE_BOUNDARY.  */
      tmp = (unsigned) STRUCTURE_SIZE_BOUNDARY;
      if (maximum_field_alignment != 0)
        tmp = MIN (tmp, maximum_field_alignment);
      rli->record_align = MAX (rli->record_align, tmp);
    }
#endif

  rli->offset = size_zero_node;
  rli->bitpos = bitsize_zero_node;
  rli->prev_field = 0;
  rli->pending_statics = 0;
  rli->packed_maybe_necessary = 0;
  rli->remaining_in_alignment = 0;

  return rli;
}

/* Fold sizetype value X to bitsizetype, given that X represents a type
   size or offset.  */

static tree
bits_from_bytes (tree x)
{
  if (POLY_INT_CST_P (x))
    /* The runtime calculation isn't allowed to overflow sizetype;
       increasing the runtime values must always increase the size
       or offset of the object.  This means that the object imposes
       a maximum value on the runtime parameters, but we don't record
       what that is.  */
    return build_poly_int_cst
      (bitsizetype,
       poly_wide_int::from (poly_int_cst_value (x),
                            TYPE_PRECISION (bitsizetype),
                            TYPE_SIGN (TREE_TYPE (x))));
  x = fold_convert (bitsizetype, x);
  gcc_checking_assert (x);
  return x;
}

/* Return the combined bit position for the byte offset OFFSET and the
   bit position BITPOS.

   These functions operate on byte and bit positions present in FIELD_DECLs
   and assume that these expressions result in no (intermediate) overflow.
   This assumption is necessary to fold the expressions as much as possible,
   so as to avoid creating artificially variable-sized types in languages
   supporting variable-sized types like Ada.  */

tree
bit_from_pos (tree offset, tree bitpos)
{
  return size_binop (PLUS_EXPR, bitpos,
                     size_binop (MULT_EXPR, bits_from_bytes (offset),
                                 bitsize_unit_node));
}

/* Return the combined truncated byte position for the byte offset OFFSET and
   the bit position BITPOS.  */

tree
byte_from_pos (tree offset, tree bitpos)
{
  tree bytepos;
  if (TREE_CODE (bitpos) == MULT_EXPR
      && tree_int_cst_equal (TREE_OPERAND (bitpos, 1), bitsize_unit_node))
    bytepos = TREE_OPERAND (bitpos, 0);
  else
    bytepos = size_binop (TRUNC_DIV_EXPR, bitpos, bitsize_unit_node);
  return size_binop (PLUS_EXPR, offset, fold_convert (sizetype, bytepos));
}
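
/* A worked example (illustrative): with OFFSET = 5 bytes and
   BITPOS = 3 bits on an 8-bit-byte target, bit_from_pos returns
   5 * 8 + 3 = 43 bits, and byte_from_pos truncates back to
   5 + 3 / 8 = 5 bytes.  */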

/* Split the bit position POS into a byte offset *POFFSET and a bit
   position *PBITPOS with the byte offset aligned to OFF_ALIGN bits.  */

void
pos_from_bit (tree *poffset, tree *pbitpos, unsigned int off_align,
              tree pos)
{
  tree toff_align = bitsize_int (off_align);
  if (TREE_CODE (pos) == MULT_EXPR
      && tree_int_cst_equal (TREE_OPERAND (pos, 1), toff_align))
    {
      *poffset = size_binop (MULT_EXPR,
                             fold_convert (sizetype, TREE_OPERAND (pos, 0)),
                             size_int (off_align / BITS_PER_UNIT));
      *pbitpos = bitsize_zero_node;
    }
  else
    {
      *poffset = size_binop (MULT_EXPR,
                             fold_convert (sizetype,
                                           size_binop (FLOOR_DIV_EXPR, pos,
                                                       toff_align)),
                             size_int (off_align / BITS_PER_UNIT));
      *pbitpos = size_binop (FLOOR_MOD_EXPR, pos, toff_align);
    }
}

/* Given a pointer to bit and byte offsets and an offset alignment,
   normalize the offsets so they are within the alignment.  */

void
normalize_offset (tree *poffset, tree *pbitpos, unsigned int off_align)
{
  /* If the bit position is now larger than it should be, adjust it
     downwards.  */
  if (compare_tree_int (*pbitpos, off_align) >= 0)
    {
      tree offset, bitpos;
      pos_from_bit (&offset, &bitpos, off_align, *pbitpos);
      *poffset = size_binop (PLUS_EXPR, *poffset, offset);
      *pbitpos = bitpos;
    }
}
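
/* A worked example (illustrative): with OFF_ALIGN = 32 and a bit
   position of 70, pos_from_bit produces a byte offset of
   (70 / 32) * (32 / 8) = 8 bytes and a residual bit position of
   70 % 32 = 6, which is what normalize_offset folds back into
   *POFFSET and *PBITPOS.  */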

/* Print debugging information about the information in RLI.  */

DEBUG_FUNCTION void
debug_rli (record_layout_info rli)
{
  print_node_brief (stderr, "type", rli->t, 0);
  print_node_brief (stderr, "\noffset", rli->offset, 0);
  print_node_brief (stderr, " bitpos", rli->bitpos, 0);

  fprintf (stderr, "\naligns: rec = %u, unpack = %u, off = %u\n",
           rli->record_align, rli->unpacked_align,
           rli->offset_align);

  /* The ms_struct code is the only one that uses this.  */
  if (targetm.ms_bitfield_layout_p (rli->t))
    fprintf (stderr, "remaining in alignment = %u\n", rli->remaining_in_alignment);

  if (rli->packed_maybe_necessary)
    fprintf (stderr, "packed may be necessary\n");

  if (!vec_safe_is_empty (rli->pending_statics))
    {
      fprintf (stderr, "pending statics:\n");
      debug (rli->pending_statics);
    }
}

/* Given an RLI with a possibly-incremented BITPOS, adjust OFFSET and
   BITPOS if necessary to keep BITPOS below OFFSET_ALIGN.  */

void
normalize_rli (record_layout_info rli)
{
  normalize_offset (&rli->offset, &rli->bitpos, rli->offset_align);
}

/* Returns the size in bytes allocated so far.  */

tree
rli_size_unit_so_far (record_layout_info rli)
{
  return byte_from_pos (rli->offset, rli->bitpos);
}

/* Returns the size in bits allocated so far.  */

tree
rli_size_so_far (record_layout_info rli)
{
  return bit_from_pos (rli->offset, rli->bitpos);
}

/* FIELD is about to be added to RLI->T.  The alignment (in bits) of
   the next available location within the record is given by KNOWN_ALIGN.
   Update the variable alignment fields in RLI, and return the alignment
   to give the FIELD.  */

unsigned int
update_alignment_for_field (record_layout_info rli, tree field,
                            unsigned int known_align)
{
  /* The alignment required for FIELD.  */
  unsigned int desired_align;
  /* The type of this field.  */
  tree type = TREE_TYPE (field);
  /* True if the field was explicitly aligned by the user.  */
  bool user_align;
  bool is_bitfield;

  /* Do not attempt to align an ERROR_MARK node.  */
  if (TREE_CODE (type) == ERROR_MARK)
    return 0;

  /* Lay out the field so we know what alignment it needs.  */
  layout_decl (field, known_align);
  desired_align = DECL_ALIGN (field);
  user_align = DECL_USER_ALIGN (field);

  is_bitfield = (type != error_mark_node
                 && DECL_BIT_FIELD_TYPE (field)
                 && ! integer_zerop (TYPE_SIZE (type)));

  /* Record must have at least as much alignment as any field.
     Otherwise, the alignment of the field within the record is
     meaningless.  */
  if (targetm.ms_bitfield_layout_p (rli->t))
    {
      /* Here, the alignment of the underlying type of a bitfield can
         affect the alignment of a record; even a zero-sized field
         can do this.  The alignment should be to the alignment of
         the type, except that for zero-size bitfields this only
         applies if there was an immediately prior, nonzero-size
         bitfield.  (That's the way it is, experimentally.) */
      if (!is_bitfield
          || ((DECL_SIZE (field) == NULL_TREE
               || !integer_zerop (DECL_SIZE (field)))
              ? !DECL_PACKED (field)
              : (rli->prev_field
                 && DECL_BIT_FIELD_TYPE (rli->prev_field)
                 && ! integer_zerop (DECL_SIZE (rli->prev_field)))))
        {
          unsigned int type_align = TYPE_ALIGN (type);
          if (!is_bitfield && DECL_PACKED (field))
            type_align = desired_align;
          else
            type_align = MAX (type_align, desired_align);
          if (maximum_field_alignment != 0)
            type_align = MIN (type_align, maximum_field_alignment);
          rli->record_align = MAX (rli->record_align, type_align);
          rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
        }
    }
  else if (is_bitfield && PCC_BITFIELD_TYPE_MATTERS)
    {
      /* Named bit-fields cause the entire structure to have the
         alignment implied by their type.  Some targets also apply the same
         rules to unnamed bitfields.  */
      if (DECL_NAME (field) != 0
          || targetm.align_anon_bitfield ())
        {
          unsigned int type_align = TYPE_ALIGN (type);

#ifdef ADJUST_FIELD_ALIGN
          if (! TYPE_USER_ALIGN (type))
            type_align = ADJUST_FIELD_ALIGN (field, type, type_align);
#endif

          /* Targets might choose to handle unnamed and hence possibly
             zero-width bitfields.  Those are not influenced by #pragmas
             or packed attributes.  */
          if (integer_zerop (DECL_SIZE (field)))
            {
              if (initial_max_fld_align)
                type_align = MIN (type_align,
                                  initial_max_fld_align * BITS_PER_UNIT);
            }
          else if (maximum_field_alignment != 0)
            type_align = MIN (type_align, maximum_field_alignment);
          else if (DECL_PACKED (field))
            type_align = MIN (type_align, BITS_PER_UNIT);

          /* The alignment of the record is increased to the maximum
             of the current alignment, the alignment indicated on the
             field (i.e., the alignment specified by an __aligned__
             attribute), and the alignment indicated by the type of
             the field.  */
          rli->record_align = MAX (rli->record_align, desired_align);
          rli->record_align = MAX (rli->record_align, type_align);

          if (warn_packed)
            rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
          user_align |= TYPE_USER_ALIGN (type);
        }
    }
  else
    {
      rli->record_align = MAX (rli->record_align, desired_align);
      rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
    }

  TYPE_USER_ALIGN (rli->t) |= user_align;

  return desired_align;
}
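
/* An illustrative example (not from the sources): with
   PCC_BITFIELD_TYPE_MATTERS, a record such as

     struct s { char c; int f : 1; };

   receives at least the alignment of int, because the named bit-field
   F propagates the alignment implied by its declared type to the whole
   record via the rli->record_align updates above.  */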

/* Issue a warning if the record alignment, RECORD_ALIGN, is less than
   the field alignment of FIELD or if FIELD isn't aligned.  */

static void
handle_warn_if_not_align (tree field, unsigned int record_align)
{
  tree type = TREE_TYPE (field);

  if (type == error_mark_node)
    return;

  unsigned int warn_if_not_align = 0;

  int opt_w = 0;

  if (warn_if_not_aligned)
    {
      warn_if_not_align = DECL_WARN_IF_NOT_ALIGN (field);
      if (!warn_if_not_align)
        warn_if_not_align = TYPE_WARN_IF_NOT_ALIGN (type);
      if (warn_if_not_align)
        opt_w = OPT_Wif_not_aligned;
    }

  if (!warn_if_not_align
      && warn_packed_not_aligned
      && lookup_attribute ("aligned", TYPE_ATTRIBUTES (type)))
    {
      warn_if_not_align = TYPE_ALIGN (type);
      opt_w = OPT_Wpacked_not_aligned;
    }

  if (!warn_if_not_align)
    return;

  tree context = DECL_CONTEXT (field);

  warn_if_not_align /= BITS_PER_UNIT;
  record_align /= BITS_PER_UNIT;
  if ((record_align % warn_if_not_align) != 0)
    warning (opt_w, "alignment %u of %qT is less than %u",
             record_align, context, warn_if_not_align);

  tree off = byte_position (field);
  if (!multiple_of_p (TREE_TYPE (off), off, size_int (warn_if_not_align)))
    {
      if (TREE_CODE (off) == INTEGER_CST)
        warning (opt_w, "%q+D offset %E in %qT isn%'t aligned to %u",
                 field, off, context, warn_if_not_align);
      else
        warning (opt_w, "%q+D offset %E in %qT may not be aligned to %u",
                 field, off, context, warn_if_not_align);
    }
}

/* Called from place_field to handle unions.  */

static void
place_union_field (record_layout_info rli, tree field)
{
  update_alignment_for_field (rli, field, /*known_align=*/0);

  DECL_FIELD_OFFSET (field) = size_zero_node;
  DECL_FIELD_BIT_OFFSET (field) = bitsize_zero_node;
  SET_DECL_OFFSET_ALIGN (field, BIGGEST_ALIGNMENT);
  handle_warn_if_not_align (field, rli->record_align);

  /* If this is an ERROR_MARK return *after* having set the
     field at the start of the union.  This helps when parsing
     invalid fields.  */
  if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK)
    return;

  if (AGGREGATE_TYPE_P (TREE_TYPE (field))
      && TYPE_TYPELESS_STORAGE (TREE_TYPE (field)))
    TYPE_TYPELESS_STORAGE (rli->t) = 1;

  /* We assume the union's size will be a multiple of a byte so we don't
     bother with BITPOS.  */
  if (TREE_CODE (rli->t) == UNION_TYPE)
    rli->offset = size_binop (MAX_EXPR, rli->offset, DECL_SIZE_UNIT (field));
  else if (TREE_CODE (rli->t) == QUAL_UNION_TYPE)
    rli->offset = fold_build3 (COND_EXPR, sizetype, DECL_QUALIFIER (field),
                               DECL_SIZE_UNIT (field), rli->offset);
}
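
/* An illustrative example (not from the sources): for

     union u { char c[5]; int i; };

   every member is placed at offset 0, and on a 32-bit-int target
   rli->offset ends up as MAX (5, 4) = 5 bytes; finalization later
   rounds the union's size up to its alignment.  */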

/* A bitfield of SIZE with a required access alignment of ALIGN is allocated
   at BYTE_OFFSET / BIT_OFFSET.  Return nonzero if the field would span more
   units of alignment than the underlying TYPE.  */
static int
excess_unit_span (HOST_WIDE_INT byte_offset, HOST_WIDE_INT bit_offset,
                  HOST_WIDE_INT size, HOST_WIDE_INT align, tree type)
{
  /* Note that the calculation of OFFSET might overflow; we calculate it so
     that we still get the right result as long as ALIGN is a power of two.  */
  unsigned HOST_WIDE_INT offset = byte_offset * BITS_PER_UNIT + bit_offset;

  offset = offset % align;
  return ((offset + size + align - 1) / align
          > tree_to_uhwi (TYPE_SIZE (type)) / align);
}
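
/* A worked example (illustrative): a 16-bit field at BYTE_OFFSET 3,
   BIT_OFFSET 0 whose type is 32 bits wide with ALIGN 32 gives
   OFFSET = 24 % 32 = 24, and (24 + 16 + 31) / 32 = 2 alignment units,
   whereas the type itself spans 32 / 32 = 1 unit, so the function
   returns nonzero and the caller advances to the next boundary.  */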
1246
1247/* RLI contains information about the layout of a RECORD_TYPE. FIELD
1248 is a FIELD_DECL to be added after those fields already present in
1249 T. (FIELD is not actually added to the TYPE_FIELDS list here;
1250 callers that desire that behavior must manually perform that step.) */
1251
1252void
1253place_field (record_layout_info rli, tree field)
1254{
1255 /* The alignment required for FIELD. */
1256 unsigned int desired_align;
1257 /* The alignment FIELD would have if we just dropped it into the
1258 record as it presently stands. */
1259 unsigned int known_align;
1260 unsigned int actual_align;
1261 /* The type of this field. */
1262 tree type = TREE_TYPE (field);
1263
1264 gcc_assert (TREE_CODE (field) != ERROR_MARK);
1265
1266 /* If FIELD is static, then treat it like a separate variable, not
1267 really like a structure field. If it is a FUNCTION_DECL, it's a
1268 method. In both cases, all we do is lay out the decl, and we do
1269 it *after* the record is laid out. */
1270 if (VAR_P (field))
1271 {
1272 vec_safe_push (v&: rli->pending_statics, obj: field);
1273 return;
1274 }
1275
1276 /* Enumerators and enum types which are local to this class need not
1277 be laid out. Likewise for initialized constant fields. */
1278 else if (TREE_CODE (field) != FIELD_DECL)
1279 return;
1280
1281 /* Unions are laid out very differently than records, so split
1282 that code off to another function. */
1283 else if (TREE_CODE (rli->t) != RECORD_TYPE)
1284 {
1285 place_union_field (rli, field);
1286 return;
1287 }
1288
1289 else if (TREE_CODE (type) == ERROR_MARK)
1290 {
1291 /* Place this field at the current allocation position, so we
1292 maintain monotonicity. */
1293 DECL_FIELD_OFFSET (field) = rli->offset;
1294 DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
1295 SET_DECL_OFFSET_ALIGN (field, rli->offset_align);
1296 handle_warn_if_not_align (field, record_align: rli->record_align);
1297 return;
1298 }
1299
1300 if (AGGREGATE_TYPE_P (type)
1301 && TYPE_TYPELESS_STORAGE (type))
1302 TYPE_TYPELESS_STORAGE (rli->t) = 1;
1303
1304 /* Work out the known alignment so far. Note that A & (-A) is the
1305 value of the least-significant bit in A that is one. */
1306 if (! integer_zerop (rli->bitpos))
1307 known_align = least_bit_hwi (x: tree_to_uhwi (rli->bitpos));
1308 else if (integer_zerop (rli->offset))
1309 known_align = 0;
1310 else if (tree_fits_uhwi_p (rli->offset))
1311 known_align = (BITS_PER_UNIT
1312 * least_bit_hwi (x: tree_to_uhwi (rli->offset)));
1313 else
1314 known_align = rli->offset_align;
1315
1316 desired_align = update_alignment_for_field (rli, field, known_align);
1317 if (known_align == 0)
1318 known_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);
1319
1320 if (warn_packed && DECL_PACKED (field))
1321 {
1322 if (known_align >= TYPE_ALIGN (type))
1323 {
1324 if (TYPE_ALIGN (type) > desired_align)
1325 {
1326 if (STRICT_ALIGNMENT)
1327 warning (OPT_Wattributes, "packed attribute causes "
1328 "inefficient alignment for %q+D", field);
1329 /* Don't warn if DECL_PACKED was set by the type. */
1330 else if (!TYPE_PACKED (rli->t))
1331 warning (OPT_Wattributes, "packed attribute is "
1332 "unnecessary for %q+D", field);
1333 }
1334 }
1335 else
1336 rli->packed_maybe_necessary = 1;
1337 }
1338
1339 /* Does this field automatically have alignment it needs by virtue
1340 of the fields that precede it and the record's own alignment? */
1341 if (known_align < desired_align
1342 && (! targetm.ms_bitfield_layout_p (rli->t)
1343 || rli->prev_field == NULL))
1344 {
1345 /* No, we need to skip space before this field.
1346 Bump the cumulative size to multiple of field alignment. */
1347
1348 if (!targetm.ms_bitfield_layout_p (rli->t)
1349 && DECL_SOURCE_LOCATION (field) != BUILTINS_LOCATION
1350 && !TYPE_ARTIFICIAL (rli->t))
1351 warning (OPT_Wpadded, "padding struct to align %q+D", field);
1352
1353 /* If the alignment is still within offset_align, just align
1354 the bit position. */
1355 if (desired_align < rli->offset_align)
1356 rli->bitpos = round_up (rli->bitpos, desired_align);
1357 else
1358 {
1359 /* First adjust OFFSET by the partial bits, then align. */
1360 rli->offset
1361 = size_binop (PLUS_EXPR, rli->offset,
1362 fold_convert (sizetype,
1363 size_binop (CEIL_DIV_EXPR, rli->bitpos,
1364 bitsize_unit_node)));
1365 rli->bitpos = bitsize_zero_node;
1366
1367 rli->offset = round_up (rli->offset, desired_align / BITS_PER_UNIT);
1368 }
1369
1370 if (! TREE_CONSTANT (rli->offset))
1371 rli->offset_align = desired_align;
1372 }
1373
1374 /* Handle compatibility with PCC. Note that if the record has any
1375 variable-sized fields, we need not worry about compatibility. */
1376 if (PCC_BITFIELD_TYPE_MATTERS
1377 && ! targetm.ms_bitfield_layout_p (rli->t)
1378 && TREE_CODE (field) == FIELD_DECL
1379 && type != error_mark_node
1380 && DECL_BIT_FIELD (field)
1381 && (! DECL_PACKED (field)
1382 /* Enter for these packed fields only to issue a warning. */
1383 || TYPE_ALIGN (type) <= BITS_PER_UNIT)
1384 && maximum_field_alignment == 0
1385 && ! integer_zerop (DECL_SIZE (field))
1386 && tree_fits_uhwi_p (DECL_SIZE (field))
1387 && tree_fits_uhwi_p (rli->offset)
1388 && tree_fits_uhwi_p (TYPE_SIZE (type)))
1389 {
1390 unsigned int type_align = TYPE_ALIGN (type);
1391 tree dsize = DECL_SIZE (field);
1392 HOST_WIDE_INT field_size = tree_to_uhwi (dsize);
1393 HOST_WIDE_INT offset = tree_to_uhwi (rli->offset);
1394 HOST_WIDE_INT bit_offset = tree_to_shwi (rli->bitpos);
1395
1396#ifdef ADJUST_FIELD_ALIGN
1397 if (! TYPE_USER_ALIGN (type))
1398 type_align = ADJUST_FIELD_ALIGN (field, type, type_align);
1399#endif
1400
1401 /* A bit field may not span more units of alignment of its type
1402 than its type itself. Advance to next boundary if necessary. */
1403 if (excess_unit_span (byte_offset: offset, bit_offset, size: field_size, align: type_align, type))
1404 {
1405 if (DECL_PACKED (field))
1406 {
1407 if (warn_packed_bitfield_compat == 1)
1408 inform
1409 (input_location,
1410 "offset of packed bit-field %qD has changed in GCC 4.4",
1411 field);
1412 }
1413 else
1414 rli->bitpos = round_up (rli->bitpos, type_align);
1415 }
1416
1417 if (! DECL_PACKED (field))
1418 TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
1419
1420 SET_TYPE_WARN_IF_NOT_ALIGN (rli->t,
1421 TYPE_WARN_IF_NOT_ALIGN (type));
1422 }
1423
1424#ifdef BITFIELD_NBYTES_LIMITED
1425 if (BITFIELD_NBYTES_LIMITED
1426 && ! targetm.ms_bitfield_layout_p (rli->t)
1427 && TREE_CODE (field) == FIELD_DECL
1428 && type != error_mark_node
1429 && DECL_BIT_FIELD_TYPE (field)
1430 && ! DECL_PACKED (field)
1431 && ! integer_zerop (DECL_SIZE (field))
1432 && tree_fits_uhwi_p (DECL_SIZE (field))
1433 && tree_fits_uhwi_p (rli->offset)
1434 && tree_fits_uhwi_p (TYPE_SIZE (type)))
1435 {
1436 unsigned int type_align = TYPE_ALIGN (type);
1437 tree dsize = DECL_SIZE (field);
1438 HOST_WIDE_INT field_size = tree_to_uhwi (dsize);
1439 HOST_WIDE_INT offset = tree_to_uhwi (rli->offset);
1440 HOST_WIDE_INT bit_offset = tree_to_shwi (rli->bitpos);
1441
1442#ifdef ADJUST_FIELD_ALIGN
1443 if (! TYPE_USER_ALIGN (type))
1444 type_align = ADJUST_FIELD_ALIGN (field, type, type_align);
1445#endif
1446
1447 if (maximum_field_alignment != 0)
1448 type_align = MIN (type_align, maximum_field_alignment);
1449 /* ??? This test is opposite the test in the containing if
1450 statement, so this code is unreachable currently. */
1451 else if (DECL_PACKED (field))
1452 type_align = MIN (type_align, BITS_PER_UNIT);
1453
1454 /* A bit field may not span the unit of alignment of its type.
1455 Advance to next boundary if necessary. */
1456 if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
1457 rli->bitpos = round_up (rli->bitpos, type_align);
1458
1459 TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
1460 SET_TYPE_WARN_IF_NOT_ALIGN (rli->t,
1461 TYPE_WARN_IF_NOT_ALIGN (type));
1462 }
1463#endif
1464
1465 /* See the docs for TARGET_MS_BITFIELD_LAYOUT_P for details.
1466 A subtlety:
1467 When a bit field is inserted into a packed record, the whole
1468 size of the underlying type is used by one or more same-size
1469 adjacent bitfields. (That is, if its long:3, 32 bits is
1470 used in the record, and any additional adjacent long bitfields are
1471 packed into the same chunk of 32 bits. However, if the size
1472 changes, a new field of that size is allocated.) In an unpacked
1473 record, this is the same as using alignment, but not equivalent
1474 when packing.
1475
1476 Note: for compatibility, we use the type size, not the type alignment
1477 to determine alignment, since that matches the documentation */
1478
1479 if (targetm.ms_bitfield_layout_p (rli->t))
1480 {
1481 tree prev_saved = rli->prev_field;
1482 tree prev_type = prev_saved ? DECL_BIT_FIELD_TYPE (prev_saved) : NULL;
1483
1484 /* This is a bitfield if it exists. */
1485 if (rli->prev_field)
1486 {
1487 bool realign_p = known_align < desired_align;
1488
1489 /* If both are bitfields, nonzero, and the same size, this is
1490 the middle of a run. Zero declared size fields are special
1491 and handled as "end of run". (Note: it's nonzero declared
1492 size, but equal type sizes!) (Since we know that both
1493 the current and previous fields are bitfields by the
1494 time we check it, DECL_SIZE must be present for both.) */
1495 if (DECL_BIT_FIELD_TYPE (field)
1496 && !integer_zerop (DECL_SIZE (field))
1497 && !integer_zerop (DECL_SIZE (rli->prev_field))
1498 && tree_fits_shwi_p (DECL_SIZE (rli->prev_field))
1499 && tree_fits_uhwi_p (TYPE_SIZE (type))
1500 && simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type)))
1501 {
1502 /* We're in the middle of a run of equal type size fields; make
1503 sure we realign if we run out of bits. (Not decl size,
1504 type size!) */
1505 HOST_WIDE_INT bitsize = tree_to_uhwi (DECL_SIZE (field));
1506
1507 if (rli->remaining_in_alignment < bitsize)
1508 {
1509 HOST_WIDE_INT typesize = tree_to_uhwi (TYPE_SIZE (type));
1510
1511 /* out of bits; bump up to next 'word'. */
1512 rli->bitpos
1513 = size_binop (PLUS_EXPR, rli->bitpos,
1514 bitsize_int (rli->remaining_in_alignment));
1515 rli->prev_field = field;
1516 if (typesize < bitsize)
1517 rli->remaining_in_alignment = 0;
1518 else
1519 rli->remaining_in_alignment = typesize - bitsize;
1520 }
1521 else
1522 {
1523 rli->remaining_in_alignment -= bitsize;
1524 realign_p = false;
1525 }
1526 }
1527 else
1528 {
1529 /* End of a run: if leaving a run of bitfields of the same type
1530 size, we have to "use up" the rest of the bits of the type
1531 size.
1532
1533 Compute the new position as the sum of the size for the prior
1534 type and where we first started working on that type.
1535 Note: since the beginning of the field was aligned then
1536 of course the end will be too. No round needed. */
1537
1538 if (!integer_zerop (DECL_SIZE (rli->prev_field)))
1539 {
1540 rli->bitpos
1541 = size_binop (PLUS_EXPR, rli->bitpos,
1542 bitsize_int (rli->remaining_in_alignment));
1543 }
1544 else
1545 /* We "use up" size zero fields; the code below should behave
1546 as if the prior field was not a bitfield. */
1547 prev_saved = NULL;
1548
1549 /* Cause a new bitfield to be captured, either this time (if
1550 currently a bitfield) or next time we see one. */
1551 if (!DECL_BIT_FIELD_TYPE (field)
1552 || integer_zerop (DECL_SIZE (field)))
1553 rli->prev_field = NULL;
1554 }
1555
1556 /* Does this field automatically have alignment it needs by virtue
1557 of the fields that precede it and the record's own alignment? */
1558 if (realign_p)
1559 {
1560 /* If the alignment is still within offset_align, just align
1561 the bit position. */
1562 if (desired_align < rli->offset_align)
1563 rli->bitpos = round_up (rli->bitpos, desired_align);
1564 else
1565 {
1566 /* First adjust OFFSET by the partial bits, then align. */
1567 tree d = size_binop (CEIL_DIV_EXPR, rli->bitpos,
1568 bitsize_unit_node);
1569 rli->offset = size_binop (PLUS_EXPR, rli->offset,
1570 fold_convert (sizetype, d));
1571 rli->bitpos = bitsize_zero_node;
1572
1573 rli->offset = round_up (rli->offset,
1574 desired_align / BITS_PER_UNIT);
1575 }
1576
1577 if (! TREE_CONSTANT (rli->offset))
1578 rli->offset_align = desired_align;
1579 }
1580
1581 normalize_rli (rli);
1582 }
1583
1584 /* If we're starting a new run of same type size bitfields
1585 (or a run of non-bitfields), set up the "first of the run"
1586 fields.
1587
1588 That is, if the current field is not a bitfield, or if there
1589 was a prior bitfield the type sizes differ, or if there wasn't
1590 a prior bitfield the size of the current field is nonzero.
1591
1592 Note: we must be sure to test ONLY the type size if there was
1593 a prior bitfield and ONLY for the current field being zero if
1594 there wasn't. */
1595
1596 if (!DECL_BIT_FIELD_TYPE (field)
1597 || (prev_saved != NULL
1598 ? !simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type))
1599 : !integer_zerop (DECL_SIZE (field))))
1600 {
1601 /* Never smaller than a byte for compatibility. */
1602 unsigned int type_align = BITS_PER_UNIT;
1603
1604 /* (When not a bitfield), we could be seeing a flex array (with
1605 no DECL_SIZE). Since we won't be using remaining_in_alignment
1606 until we see a bitfield (and come by here again) we just skip
1607 calculating it. */
1608 if (DECL_SIZE (field) != NULL
1609 && tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (field)))
1610 && tree_fits_uhwi_p (DECL_SIZE (field)))
1611 {
1612 unsigned HOST_WIDE_INT bitsize
1613 = tree_to_uhwi (DECL_SIZE (field));
1614 unsigned HOST_WIDE_INT typesize
1615 = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (field)));
1616
1617 if (typesize < bitsize)
1618 rli->remaining_in_alignment = 0;
1619 else
1620 rli->remaining_in_alignment = typesize - bitsize;
1621 }
1622
1623 /* Now align (conventionally) for the new type. */
1624 if (! DECL_PACKED (field))
1625 type_align = TYPE_ALIGN (TREE_TYPE (field));
1626
1627 if (maximum_field_alignment != 0)
1628 type_align = MIN (type_align, maximum_field_alignment);
1629
1630 rli->bitpos = round_up (rli->bitpos, type_align);
1631
1632 /* If we really aligned, don't allow subsequent bitfields
1633 to undo that. */
1634 rli->prev_field = NULL;
1635 }
1636 }
1637
1638 /* Offset so far becomes the position of this field after normalizing. */
1639 normalize_rli (rli);
1640 DECL_FIELD_OFFSET (field) = rli->offset;
1641 DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
1642 SET_DECL_OFFSET_ALIGN (field, rli->offset_align);
1643 handle_warn_if_not_align (field, rli->record_align);
1644
1645 /* Evaluate nonconstant offsets only once, either now or as soon as safe. */
1646 if (TREE_CODE (DECL_FIELD_OFFSET (field)) != INTEGER_CST)
1647 DECL_FIELD_OFFSET (field) = variable_size (DECL_FIELD_OFFSET (field));
1648
1649 /* If this field ended up more aligned than we thought it would be (we
1650 approximate this by seeing if its position changed), lay out the field
1651 again; perhaps we can use an integral mode for it now. */
1652 if (! integer_zerop (DECL_FIELD_BIT_OFFSET (field)))
1653 actual_align = least_bit_hwi (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field)));
1654 else if (integer_zerop (DECL_FIELD_OFFSET (field)))
1655 actual_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);
1656 else if (tree_fits_uhwi_p (DECL_FIELD_OFFSET (field)))
1657 actual_align = (BITS_PER_UNIT
1658 * least_bit_hwi (tree_to_uhwi (DECL_FIELD_OFFSET (field))));
1659 else
1660 actual_align = DECL_OFFSET_ALIGN (field);
1661 /* ACTUAL_ALIGN is still the actual alignment *within the record*.
1662 Store/extract bit field operations will check the alignment of the
1663 record against the mode of bit fields. */
1664
1665 if (known_align != actual_align)
1666 layout_decl (field, actual_align);
1667
1668 if (rli->prev_field == NULL && DECL_BIT_FIELD_TYPE (field))
1669 rli->prev_field = field;
1670
1671 /* Now add size of this field to the size of the record. If the size is
1672 not constant, treat the field as being a multiple of bytes and just
1673 adjust the offset, resetting the bit position. Otherwise, apportion the
1674 size amongst the bit position and offset. First handle the case of an
1675 unspecified size, which can happen when we have an invalid nested struct
1676 definition, such as struct j { struct j { int i; } }. The error message
1677 is printed in finish_struct. */
1678 if (DECL_SIZE (field) == 0)
1679 /* Do nothing. */;
1680 else if (TREE_CODE (DECL_SIZE (field)) != INTEGER_CST
1681 || TREE_OVERFLOW (DECL_SIZE (field)))
1682 {
1683 rli->offset
1684 = size_binop (PLUS_EXPR, rli->offset,
1685 fold_convert (sizetype,
1686 size_binop (CEIL_DIV_EXPR, rli->bitpos,
1687 bitsize_unit_node)));
1688 rli->offset
1689 = size_binop (PLUS_EXPR, rli->offset, DECL_SIZE_UNIT (field));
1690 rli->bitpos = bitsize_zero_node;
1691 rli->offset_align = MIN (rli->offset_align, desired_align);
1692
1693 if (!multiple_of_p (bitsizetype, DECL_SIZE (field),
1694 bitsize_int (rli->offset_align)))
1695 {
1696 tree type = strip_array_types (TREE_TYPE (field));
1697 /* The above adjusts offset_align just based on the start of the
1698 field. The field might not have a size that is a multiple of
1699 that offset_align though. If the field is an array of fixed
1700 sized elements, assume there can be any multiple of those
1701 sizes. If it is a variable length aggregate or array of
1702 variable length aggregates, assume worst that the end is
1703 just BITS_PER_UNIT aligned. */
1704 if (TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
1705 {
1706 if (TREE_INT_CST_LOW (TYPE_SIZE (type)))
1707 {
1708 unsigned HOST_WIDE_INT sz
1709 = least_bit_hwi (TREE_INT_CST_LOW (TYPE_SIZE (type)));
1710 rli->offset_align = MIN (rli->offset_align, sz);
1711 }
1712 }
1713 else
1714 rli->offset_align = MIN (rli->offset_align, BITS_PER_UNIT);
1715 }
1716 }
1717 else if (targetm.ms_bitfield_layout_p (rli->t))
1718 {
1719 rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field));
1720
1721 /* If FIELD is the last field and doesn't end at the full length
1722 of the type, then pad the struct out to the full length of the
1723 last type. */
1724 if (DECL_BIT_FIELD_TYPE (field)
1725 && !integer_zerop (DECL_SIZE (field)))
1726 {
1727 /* We have to scan, because non-field DECLS are also here. */
1728 tree probe = field;
1729 while ((probe = DECL_CHAIN (probe)))
1730 if (TREE_CODE (probe) == FIELD_DECL)
1731 break;
1732 if (!probe)
1733 rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos,
1734 bitsize_int (rli->remaining_in_alignment));
1735 }
1736
1737 normalize_rli (rli);
1738 }
1739 else
1740 {
1741 rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field));
1742 normalize_rli (rli);
1743 }
1744}
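/* Illustrative example (a sketch, not part of GCC; assumes a typical
   target with BITS_PER_UNIT == 8 and a 32-bit int aligned to 32 bits):
   when laying out

     struct s { char c; int i; };

   place_field puts C at DECL_FIELD_OFFSET 0 / DECL_FIELD_BIT_OFFSET 0,
   then rounds the bit position up to I's 32-bit alignment, so after
   normalize_rli I ends up at DECL_FIELD_OFFSET 4 /
   DECL_FIELD_BIT_OFFSET 0, leaving three bytes of interior padding.  */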
1745
1746/* Assuming that all the fields have been laid out, this function uses
1747 RLI to compute the final TYPE_SIZE, TYPE_ALIGN, etc. for the type
1748 indicated by RLI. */
1749
1750static void
1751finalize_record_size (record_layout_info rli)
1752{
1753 tree unpadded_size, unpadded_size_unit;
1754
1755 /* Now we want just byte and bit offsets, so set the offset alignment
1756 to be a byte and then normalize. */
1757 rli->offset_align = BITS_PER_UNIT;
1758 normalize_rli (rli);
1759
1760 /* Determine the desired alignment. */
1761#ifdef ROUND_TYPE_ALIGN
1762 SET_TYPE_ALIGN (rli->t, ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t),
1763 rli->record_align));
1764#else
1765 SET_TYPE_ALIGN (rli->t, MAX (TYPE_ALIGN (rli->t), rli->record_align));
1766#endif
1767
1768 /* Compute the size so far. Be sure to allow for extra bits in the
1769 size in bytes. We have guaranteed above that it will be no more
1770 than a single byte. */
1771 unpadded_size = rli_size_so_far (rli);
1772 unpadded_size_unit = rli_size_unit_so_far (rli);
1773 if (! integer_zerop (rli->bitpos))
1774 unpadded_size_unit
1775 = size_binop (PLUS_EXPR, unpadded_size_unit, size_one_node);
1776
1777 /* Round the size up to be a multiple of the required alignment. */
1778 TYPE_SIZE (rli->t) = round_up (unpadded_size, TYPE_ALIGN (rli->t));
1779 TYPE_SIZE_UNIT (rli->t)
1780 = round_up (unpadded_size_unit, TYPE_ALIGN_UNIT (rli->t));
1781
1782 if (TREE_CONSTANT (unpadded_size)
1783 && simple_cst_equal (unpadded_size, TYPE_SIZE (rli->t)) == 0
1784 && input_location != BUILTINS_LOCATION
1785 && !TYPE_ARTIFICIAL (rli->t))
1786 {
1787 tree pad_size
1788 = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (rli->t), unpadded_size_unit);
1789 warning (OPT_Wpadded,
1790 "padding struct size to alignment boundary with %E bytes", pad_size);
1791 }
1792
1793 if (warn_packed && TREE_CODE (rli->t) == RECORD_TYPE
1794 && TYPE_PACKED (rli->t) && ! rli->packed_maybe_necessary
1795 && TREE_CONSTANT (unpadded_size))
1796 {
1797 tree unpacked_size;
1798
1799#ifdef ROUND_TYPE_ALIGN
1800 rli->unpacked_align
1801 = ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t), rli->unpacked_align);
1802#else
1803 rli->unpacked_align = MAX (TYPE_ALIGN (rli->t), rli->unpacked_align);
1804#endif
1805
1806 unpacked_size = round_up (TYPE_SIZE (rli->t), rli->unpacked_align);
1807 if (simple_cst_equal (unpacked_size, TYPE_SIZE (rli->t)))
1808 {
1809 if (TYPE_NAME (rli->t))
1810 {
1811 tree name;
1812
1813 if (TREE_CODE (TYPE_NAME (rli->t)) == IDENTIFIER_NODE)
1814 name = TYPE_NAME (rli->t);
1815 else
1816 name = DECL_NAME (TYPE_NAME (rli->t));
1817
1818 if (STRICT_ALIGNMENT)
1819 warning (OPT_Wpacked, "packed attribute causes inefficient "
1820 "alignment for %qE", name);
1821 else
1822 warning (OPT_Wpacked,
1823 "packed attribute is unnecessary for %qE", name);
1824 }
1825 else
1826 {
1827 if (STRICT_ALIGNMENT)
1828 warning (OPT_Wpacked,
1829 "packed attribute causes inefficient alignment");
1830 else
1831 warning (OPT_Wpacked, "packed attribute is unnecessary");
1832 }
1833 }
1834 }
1835}
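/* Illustrative example (a sketch, assuming a 32-bit int aligned to 32
   bits): for

     struct s { int i; char c; };

   the unpadded size is 5 bytes but TYPE_ALIGN is 32 bits, so TYPE_SIZE
   is rounded up to 64 bits and TYPE_SIZE_UNIT to 8; with -Wpadded this
   triggers "padding struct size to alignment boundary with 3 bytes".  */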
1836
1837/* Compute the TYPE_MODE for the TYPE (which is a RECORD_TYPE). */
1838
1839void
1840compute_record_mode (tree type)
1841{
1842 tree field;
1843 machine_mode mode = VOIDmode;
1844
1845 /* Most RECORD_TYPEs have BLKmode, so we start off assuming that.
1846 However, if possible, we use a mode that fits in a register
1847 instead, in order to allow for better optimization down the
1848 line. */
1849 SET_TYPE_MODE (type, BLKmode);
1850
1851 poly_uint64 type_size;
1852 if (!poly_int_tree_p (TYPE_SIZE (type), &type_size))
1853 return;
1854
1855 /* A record which has any BLKmode members must itself be
1856 BLKmode; it can't go in a register. Unless the member is
1857 BLKmode only because it isn't aligned. */
1858 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
1859 {
1860 if (TREE_CODE (field) != FIELD_DECL)
1861 continue;
1862
1863 poly_uint64 field_size;
1864 if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK
1865 || (TYPE_MODE (TREE_TYPE (field)) == BLKmode
1866 && ! TYPE_NO_FORCE_BLK (TREE_TYPE (field))
1867 && !(TYPE_SIZE (TREE_TYPE (field)) != 0
1868 && integer_zerop (TYPE_SIZE (TREE_TYPE (field)))))
1869 || !tree_fits_poly_uint64_p (bit_position (field))
1870 || DECL_SIZE (field) == 0
1871 || !poly_int_tree_p (DECL_SIZE (field), &field_size))
1872 return;
1873
1874 /* If this field is the whole struct, remember its mode so
1875 that, say, we can put a double in a class into a DF
1876 register instead of forcing it to live in the stack. */
1877 if (known_eq (field_size, type_size)
1878 /* Partial int types (e.g. __int20) may have TYPE_SIZE equal to
1879 wider types (e.g. int32), despite precision being less. Ensure
1880 that the TYPE_MODE of the struct does not get set to the partial
1881 int mode if there is a wider type also in the struct. */
1882 && known_gt (GET_MODE_PRECISION (DECL_MODE (field)),
1883 GET_MODE_PRECISION (mode)))
1884 mode = DECL_MODE (field);
1885
1886 /* With some targets, it is sub-optimal to access an aligned
1887 BLKmode structure as a scalar. */
1888 if (targetm.member_type_forces_blk (field, mode))
1889 return;
1890 }
1891
1892 /* If we only have one real field, use its mode if that mode's size
1893 matches the type's size. This generally only applies to RECORD_TYPE.
1894 For UNION_TYPE, if the widest field is MODE_INT then use that mode.
1895 If the widest field is MODE_PARTIAL_INT, and the union will be passed
1896 by reference, then use that mode. */
1897 if ((TREE_CODE (type) == RECORD_TYPE
1898 || (TREE_CODE (type) == UNION_TYPE
1899 && (GET_MODE_CLASS (mode) == MODE_INT
1900 || (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT
1901 && (targetm.calls.pass_by_reference
1902 (pack_cumulative_args (0),
1903 function_arg_info (type, mode, /*named=*/false)))))))
1904 && mode != VOIDmode
1905 && known_eq (GET_MODE_BITSIZE (mode), type_size))
1906 ;
1907 else
1908 mode = mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1).else_blk ();
1909
1910 /* If structure's known alignment is less than what the scalar
1911 mode would need, and it matters, then stick with BLKmode. */
1912 if (mode != BLKmode
1913 && STRICT_ALIGNMENT
1914 && ! (TYPE_ALIGN (type) >= BIGGEST_ALIGNMENT
1915 || TYPE_ALIGN (type) >= GET_MODE_ALIGNMENT (mode)))
1916 {
1917 /* If this is the only reason this type is BLKmode, then
1918 don't force containing types to be BLKmode. */
1919 TYPE_NO_FORCE_BLK (type) = 1;
1920 mode = BLKmode;
1921 }
1922
1923 SET_TYPE_MODE (type, mode);
1924}
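/* Illustrative example (a sketch, assuming double has DFmode): for

     struct s { double d; };

   the single field spans the whole struct, so the loop above records
   DFmode and the struct itself gets DFmode, allowing it to live in a
   floating-point register rather than on the stack.  Adding a second
   field, or a field with BLKmode, would leave the struct in BLKmode
   unless an integer mode of the full size exists and is allowed.  */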
1925
1926/* Compute TYPE_SIZE and TYPE_ALIGN for TYPE, once it has been laid
1927 out. */
1928
1929static void
1930finalize_type_size (tree type)
1931{
1932 /* Normally, use the alignment corresponding to the mode chosen.
1933 However, where strict alignment is not required, avoid
1934 over-aligning structures, since most compilers do not do this
1935 alignment. */
1936 bool tua_cleared_p = false;
1937 if (TYPE_MODE (type) != BLKmode
1938 && TYPE_MODE (type) != VOIDmode
1939 && (STRICT_ALIGNMENT || !AGGREGATE_TYPE_P (type)))
1940 {
1941 unsigned mode_align = GET_MODE_ALIGNMENT (TYPE_MODE (type));
1942
1943 /* Don't override a larger alignment requirement coming from a user
1944 alignment of one of the fields. */
1945 if (mode_align >= TYPE_ALIGN (type))
1946 {
1947 SET_TYPE_ALIGN (type, mode_align);
1948 /* Remember that we're about to reset this flag. */
1949 tua_cleared_p = TYPE_USER_ALIGN (type);
1950 TYPE_USER_ALIGN (type) = false;
1951 }
1952 }
1953
1954 /* Do machine-dependent extra alignment. */
1955#ifdef ROUND_TYPE_ALIGN
1956 SET_TYPE_ALIGN (type,
1957 ROUND_TYPE_ALIGN (type, TYPE_ALIGN (type), BITS_PER_UNIT));
1958#endif
1959
1960 /* If we failed to find a simple way to calculate the unit size
1961 of the type, find it by division. */
1962 if (TYPE_SIZE_UNIT (type) == 0 && TYPE_SIZE (type) != 0)
1963 /* TYPE_SIZE (type) is computed in bitsizetype. After the division, the
1964 result will fit in sizetype. We will get more efficient code using
1965 sizetype, so we force a conversion. */
1966 TYPE_SIZE_UNIT (type)
1967 = fold_convert (sizetype,
1968 size_binop (FLOOR_DIV_EXPR, TYPE_SIZE (type),
1969 bitsize_unit_node));
1970
1971 if (TYPE_SIZE (type) != 0)
1972 {
1973 TYPE_SIZE (type) = round_up (TYPE_SIZE (type), TYPE_ALIGN (type));
1974 TYPE_SIZE_UNIT (type)
1975 = round_up (TYPE_SIZE_UNIT (type), TYPE_ALIGN_UNIT (type));
1976 }
1977
1978 /* Evaluate nonconstant sizes only once, either now or as soon as safe. */
1979 if (TYPE_SIZE (type) != 0 && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
1980 TYPE_SIZE (type) = variable_size (TYPE_SIZE (type));
1981 if (TYPE_SIZE_UNIT (type) != 0
1982 && TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST)
1983 TYPE_SIZE_UNIT (type) = variable_size (TYPE_SIZE_UNIT (type));
1984
1985 /* Handle empty records as per the x86-64 psABI. */
1986 TYPE_EMPTY_P (type) = targetm.calls.empty_record_p (type);
1987
1988 /* Also layout any other variants of the type. */
1989 if (TYPE_NEXT_VARIANT (type)
1990 || type != TYPE_MAIN_VARIANT (type))
1991 {
1992 tree variant;
1993 /* Record layout info of this variant. */
1994 tree size = TYPE_SIZE (type);
1995 tree size_unit = TYPE_SIZE_UNIT (type);
1996 unsigned int align = TYPE_ALIGN (type);
1997 unsigned int precision = TYPE_PRECISION (type);
1998 unsigned int user_align = TYPE_USER_ALIGN (type);
1999 machine_mode mode = TYPE_MODE (type);
2000 bool empty_p = TYPE_EMPTY_P (type);
2001 bool typeless = AGGREGATE_TYPE_P (type) && TYPE_TYPELESS_STORAGE (type);
2002
2003 /* Copy it into all variants. */
2004 for (variant = TYPE_MAIN_VARIANT (type);
2005 variant != NULL_TREE;
2006 variant = TYPE_NEXT_VARIANT (variant))
2007 {
2008 TYPE_SIZE (variant) = size;
2009 TYPE_SIZE_UNIT (variant) = size_unit;
2010 unsigned valign = align;
2011 if (TYPE_USER_ALIGN (variant))
2012 {
2013 valign = MAX (valign, TYPE_ALIGN (variant));
2014 /* If we reset TYPE_USER_ALIGN on the main variant, we might
2015 need to reset it on the variants too. TYPE_MODE will be set
2016 to MODE in this variant, so we can use that. */
2017 if (tua_cleared_p && GET_MODE_ALIGNMENT (mode) >= valign)
2018 TYPE_USER_ALIGN (variant) = false;
2019 }
2020 else
2021 TYPE_USER_ALIGN (variant) = user_align;
2022 SET_TYPE_ALIGN (variant, valign);
2023 TYPE_PRECISION (variant) = precision;
2024 SET_TYPE_MODE (variant, mode);
2025 TYPE_EMPTY_P (variant) = empty_p;
2026 if (AGGREGATE_TYPE_P (variant))
2027 TYPE_TYPELESS_STORAGE (variant) = typeless;
2028 }
2029 }
2030}
2031
2032/* Return a new underlying object for a bitfield started with FIELD. */
2033
2034static tree
2035start_bitfield_representative (tree field)
2036{
2037 tree repr = make_node (FIELD_DECL);
2038 DECL_FIELD_OFFSET (repr) = DECL_FIELD_OFFSET (field);
2039 /* Force the representative to begin at a BITS_PER_UNIT aligned
2040 boundary - C++ may use tail-padding of a base object to
2041 continue packing bits so the bitfield region does not start
2042 at bit zero (see g++.dg/abi/bitfield5.C for example).
2043 Unallocated bits may happen for other reasons as well,
2044 for example in Ada, which allows explicit bit-granular structure layout. */
2045 DECL_FIELD_BIT_OFFSET (repr)
2046 = size_binop (BIT_AND_EXPR,
2047 DECL_FIELD_BIT_OFFSET (field),
2048 bitsize_int (~(BITS_PER_UNIT - 1)));
2049 SET_DECL_OFFSET_ALIGN (repr, DECL_OFFSET_ALIGN (field));
2050 DECL_SIZE (repr) = DECL_SIZE (field);
2051 DECL_SIZE_UNIT (repr) = DECL_SIZE_UNIT (field);
2052 DECL_PACKED (repr) = DECL_PACKED (field);
2053 DECL_CONTEXT (repr) = DECL_CONTEXT (field);
2054 /* There are no indirect accesses to this field. If we introduce
2055 some then they have to use the record alias set. This makes
2056 sure to properly conflict with [indirect] accesses to addressable
2057 fields of the bitfield group. */
2058 DECL_NONADDRESSABLE_P (repr) = 1;
2059 return repr;
2060}
2061
2062/* Finish up a bitfield group that was started by creating the underlying
2063 object REPR with the last field in the bitfield group FIELD. */
2064
2065static void
2066finish_bitfield_representative (tree repr, tree field)
2067{
2068 unsigned HOST_WIDE_INT bitsize, maxbitsize;
2069 tree nextf, size;
2070
2071 size = size_diffop (DECL_FIELD_OFFSET (field),
2072 DECL_FIELD_OFFSET (repr));
2073 while (TREE_CODE (size) == COMPOUND_EXPR)
2074 size = TREE_OPERAND (size, 1);
2075 gcc_assert (tree_fits_uhwi_p (size));
2076 bitsize = (tree_to_uhwi (size) * BITS_PER_UNIT
2077 + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))
2078 - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr))
2079 + tree_to_uhwi (DECL_SIZE (field)));
2080
2081 /* Round up bitsize to multiples of BITS_PER_UNIT. */
2082 bitsize = (bitsize + BITS_PER_UNIT - 1) & ~(BITS_PER_UNIT - 1);
2083
2084 /* Now nothing tells us how to pad out bitsize ... */
2085 if (TREE_CODE (DECL_CONTEXT (field)) == RECORD_TYPE)
2086 {
2087 nextf = DECL_CHAIN (field);
2088 while (nextf && TREE_CODE (nextf) != FIELD_DECL)
2089 nextf = DECL_CHAIN (nextf);
2090 }
2091 else
2092 nextf = NULL_TREE;
2093 if (nextf)
2094 {
2095 tree maxsize;
2096 /* If there was an error, the field may be not laid out
2097 correctly. Don't bother to do anything. */
2098 if (TREE_TYPE (nextf) == error_mark_node)
2099 {
2100 TREE_TYPE (repr) = error_mark_node;
2101 return;
2102 }
2103 maxsize = size_diffop (DECL_FIELD_OFFSET (nextf),
2104 DECL_FIELD_OFFSET (repr));
2105 if (tree_fits_uhwi_p (maxsize))
2106 {
2107 maxbitsize = (tree_to_uhwi (maxsize) * BITS_PER_UNIT
2108 + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (nextf))
2109 - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)));
2110 /* If the group ends within a bitfield, nextf does not need to be
2111 aligned to BITS_PER_UNIT. Thus round up. */
2112 maxbitsize = (maxbitsize + BITS_PER_UNIT - 1) & ~(BITS_PER_UNIT - 1);
2113 }
2114 else
2115 maxbitsize = bitsize;
2116 }
2117 else
2118 {
2119 /* Note that if the C++ FE sets up tail-padding to be re-used it
2120 creates an as-base variant of the type with TYPE_SIZE adjusted
2121 accordingly. So it is safe to include tail-padding here. */
2122 tree aggsize = lang_hooks.types.unit_size_without_reusable_padding
2123 (DECL_CONTEXT (field));
2124 tree maxsize = size_diffop (aggsize, DECL_FIELD_OFFSET (repr));
2125 /* We cannot generally rely on maxsize to fold to an integer constant,
2126 so use bitsize as fallback for this case. */
2127 if (tree_fits_uhwi_p (maxsize))
2128 maxbitsize = (tree_to_uhwi (maxsize) * BITS_PER_UNIT
2129 - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)));
2130 else
2131 maxbitsize = bitsize;
2132 }
2133
2134 /* Only if we don't artificially break up the representative in
2135 the middle of a large bitfield with different possibly
2136 overlapping representatives. And all representatives start
2137 at byte offset. */
2138 gcc_assert (maxbitsize % BITS_PER_UNIT == 0);
2139
2140 /* Find the smallest nice mode to use. */
2141 opt_scalar_int_mode mode_iter;
2142 FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
2143 if (GET_MODE_BITSIZE (mode_iter.require ()) >= bitsize)
2144 break;
2145
2146 scalar_int_mode mode;
2147 if (!mode_iter.exists (&mode)
2148 || GET_MODE_BITSIZE (mode) > maxbitsize
2149 || GET_MODE_BITSIZE (mode) > MAX_FIXED_MODE_SIZE)
2150 {
2151 if (TREE_CODE (TREE_TYPE (field)) == BITINT_TYPE)
2152 {
2153 struct bitint_info info;
2154 unsigned prec = TYPE_PRECISION (TREE_TYPE (field));
2155 bool ok = targetm.c.bitint_type_info (prec, &info);
2156 gcc_assert (ok);
2157 scalar_int_mode limb_mode = as_a <scalar_int_mode> (info.limb_mode);
2158 unsigned lprec = GET_MODE_PRECISION (limb_mode);
2159 if (prec > lprec)
2160 {
2161 /* For middle/large/huge _BitInt prefer bitsize being a multiple
2162 of limb precision. */
2163 unsigned HOST_WIDE_INT bsz = CEIL (bitsize, lprec) * lprec;
2164 if (bsz <= maxbitsize)
2165 bitsize = bsz;
2166 }
2167 }
2168 /* We really want a BLKmode representative only as a last resort,
2169 considering the member b in
2170 struct { int a : 7; int b : 17; int c; } __attribute__((packed));
2171 Otherwise we simply want to split the representative up
2172 allowing for overlaps within the bitfield region as required for
2173 struct { int a : 7; int b : 7;
2174 int c : 10; int d; } __attribute__((packed));
2175 [0, 15] HImode for a and b, [8, 23] HImode for c. */
2176 DECL_SIZE (repr) = bitsize_int (bitsize);
2177 DECL_SIZE_UNIT (repr) = size_int (bitsize / BITS_PER_UNIT);
2178 SET_DECL_MODE (repr, BLKmode);
2179 TREE_TYPE (repr) = build_array_type_nelts (unsigned_char_type_node,
2180 bitsize / BITS_PER_UNIT);
2181 }
2182 else
2183 {
2184 unsigned HOST_WIDE_INT modesize = GET_MODE_BITSIZE (mode);
2185 DECL_SIZE (repr) = bitsize_int (modesize);
2186 DECL_SIZE_UNIT (repr) = size_int (modesize / BITS_PER_UNIT);
2187 SET_DECL_MODE (repr, mode);
2188 TREE_TYPE (repr) = lang_hooks.types.type_for_mode (mode, 1);
2189 }
2190
2191 /* Remember whether the bitfield group is at the end of the
2192 structure or not. */
2193 DECL_CHAIN (repr) = nextf;
2194}
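/* Worked example (a sketch restating the case from the comment above):
   for struct { int a : 7; int b : 17; int c; } __attribute__((packed)),
   the representative for A and B starts at bit 0 and needs 24 bits
   after rounding to a byte multiple.  The smallest MODE_INT holding 24
   bits is SImode (32 bits), but C starts at byte 3, so maxbitsize is
   24 and SImode does not fit; the representative therefore falls back
   to BLKmode as a 3-byte unsigned char array.  */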
2195
2196/* Compute and set FIELD_DECLs for the underlying objects we should
2197 use for bitfield access for the structure T. */
2198
2199void
2200finish_bitfield_layout (tree t)
2201{
2202 tree field, prev;
2203 tree repr = NULL_TREE;
2204
2205 if (TREE_CODE (t) == QUAL_UNION_TYPE)
2206 return;
2207
2208 for (prev = NULL_TREE, field = TYPE_FIELDS (t);
2209 field; field = DECL_CHAIN (field))
2210 {
2211 if (TREE_CODE (field) != FIELD_DECL)
2212 continue;
2213
2214 /* In the C++ memory model, consecutive bit fields in a structure are
2215 considered one memory location and updating a memory location
2216 may not store into adjacent memory locations. */
2217 if (!repr
2218 && DECL_BIT_FIELD_TYPE (field))
2219 {
2220 /* Start new representative. */
2221 repr = start_bitfield_representative (field);
2222 }
2223 else if (repr
2224 && ! DECL_BIT_FIELD_TYPE (field))
2225 {
2226 /* Finish off new representative. */
2227 finish_bitfield_representative (repr, prev);
2228 repr = NULL_TREE;
2229 }
2230 else if (DECL_BIT_FIELD_TYPE (field))
2231 {
2232 gcc_assert (repr != NULL_TREE);
2233
2234 /* Zero-size bitfields finish off a representative and
2235 do not have a representative themselves. This is
2236 required by the C++ memory model. */
2237 if (integer_zerop (DECL_SIZE (field)))
2238 {
2239 finish_bitfield_representative (repr, prev);
2240 repr = NULL_TREE;
2241 }
2242
2243 /* We assume that either DECL_FIELD_OFFSET of the representative
2244 and each bitfield member is a constant or they are equal.
2245 This is because we need to be able to compute the bit-offset
2246 of each field relative to the representative in get_bit_range
2247 during RTL expansion.
2248 If these constraints are not met, simply force a new
2249 representative to be generated. That will at most
2250 generate worse code but still maintain correctness with
2251 respect to the C++ memory model. */
2252 else if (!((tree_fits_uhwi_p (DECL_FIELD_OFFSET (repr))
2253 && tree_fits_uhwi_p (DECL_FIELD_OFFSET (field)))
2254 || operand_equal_p (DECL_FIELD_OFFSET (repr),
2255 DECL_FIELD_OFFSET (field), 0)))
2256 {
2257 finish_bitfield_representative (repr, prev);
2258 repr = start_bitfield_representative (field);
2259 }
2260 }
2261 else
2262 continue;
2263
2264 if (repr)
2265 DECL_BIT_FIELD_REPRESENTATIVE (field) = repr;
2266
2267 if (TREE_CODE (t) == RECORD_TYPE)
2268 prev = field;
2269 else if (repr)
2270 {
2271 finish_bitfield_representative (repr, field);
2272 repr = NULL_TREE;
2273 }
2274 }
2275
2276 if (repr)
2277 finish_bitfield_representative (repr, prev);
2278}
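/* Illustrative example (a sketch, assuming BITS_PER_UNIT == 8): in

     struct s { int a : 3; int b : 5; int : 0; int c : 4; };

   A and B share one 8-bit (QImode) representative; the zero-width
   bitfield finishes that representative and gets none itself, as the
   C++ memory model requires; C then starts a fresh representative, so
   stores to C need not touch the byte holding A and B.  */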
2279
2280/* Do all of the work required to layout the type indicated by RLI,
2281 once the fields have been laid out. This function will call `free'
2282 for RLI, unless FREE_P is false. Passing a value other than false
2283 for FREE_P is bad practice; this option only exists to support the
2284 G++ 3.2 ABI. */
2285
2286void
2287finish_record_layout (record_layout_info rli, int free_p)
2288{
2289 tree variant;
2290
2291 /* Compute the final size. */
2292 finalize_record_size (rli);
2293
2294 /* Compute the TYPE_MODE for the record. */
2295 compute_record_mode (rli->t);
2296
2297 /* Perform any last tweaks to the TYPE_SIZE, etc. */
2298 finalize_type_size (rli->t);
2299
2300 /* Compute bitfield representatives. */
2301 finish_bitfield_layout (rli->t);
2302
2303 /* Propagate TYPE_PACKED and TYPE_REVERSE_STORAGE_ORDER to variants.
2304 With C++ templates, it is too early to do this when the attribute
2305 is being parsed. */
2306 for (variant = TYPE_NEXT_VARIANT (rli->t); variant;
2307 variant = TYPE_NEXT_VARIANT (variant))
2308 {
2309 TYPE_PACKED (variant) = TYPE_PACKED (rli->t);
2310 TYPE_REVERSE_STORAGE_ORDER (variant)
2311 = TYPE_REVERSE_STORAGE_ORDER (rli->t);
2312 }
2313
2314 /* Lay out any static members. This is done now because their type
2315 may use the record's type. */
2316 while (!vec_safe_is_empty (rli->pending_statics))
2317 layout_decl (rli->pending_statics->pop (), 0);
2318
2319 /* Clean up. */
2320 if (free_p)
2321 {
2322 vec_free (rli->pending_statics);
2323 free (rli);
2324 }
2325}
2326
2327
2328/* Finish processing a builtin RECORD_TYPE type TYPE. Its name is
2329 NAME, and its fields are chained in reverse on FIELDS.
2330
2331 If ALIGN_TYPE is non-null, it is given the same alignment as
2332 ALIGN_TYPE. */
2333
2334void
2335finish_builtin_struct (tree type, const char *name, tree fields,
2336 tree align_type)
2337{
2338 tree tail, next;
2339
2340 for (tail = NULL_TREE; fields; tail = fields, fields = next)
2341 {
2342 DECL_FIELD_CONTEXT (fields) = type;
2343 next = DECL_CHAIN (fields);
2344 DECL_CHAIN (fields) = tail;
2345 }
2346 TYPE_FIELDS (type) = tail;
2347
2348 if (align_type)
2349 {
2350 SET_TYPE_ALIGN (type, TYPE_ALIGN (align_type));
2351 TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (align_type);
2352 SET_TYPE_WARN_IF_NOT_ALIGN (type,
2353 TYPE_WARN_IF_NOT_ALIGN (align_type));
2354 }
2355
2356 layout_type (type);
2357#if 0 /* not yet, should get fixed properly later */
2358 TYPE_NAME (type) = make_type_decl (get_identifier (name), type);
2359#else
2360 TYPE_NAME (type) = build_decl (BUILTINS_LOCATION,
2361 TYPE_DECL, get_identifier (name), type);
2362#endif
2363 TYPE_STUB_DECL (type) = TYPE_NAME (type);
2364 layout_decl (TYPE_NAME (type), 0);
2365}
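/* Illustrative usage sketch (hypothetical field and type names, not
   code from GCC):

     tree f_hi = build_decl (UNKNOWN_LOCATION, FIELD_DECL,
			     get_identifier ("hi"), integer_type_node);
     tree f_lo = build_decl (UNKNOWN_LOCATION, FIELD_DECL,
			     get_identifier ("lo"), integer_type_node);
     DECL_CHAIN (f_hi) = f_lo;	   (fields are chained in reverse)
     tree rec = make_node (RECORD_TYPE);
     finish_builtin_struct (rec, "__example_pair", f_hi, NULL_TREE);

   Afterwards TYPE_FIELDS (rec) lists "lo" before "hi", the record has
   been laid out, and TYPE_NAME and TYPE_STUB_DECL are set.  */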
2366
2367/* Calculate the mode, size, and alignment for TYPE.
2368 For an array type, calculate the element separation as well.
2369 Record TYPE on the chain of permanent or temporary types
2370 so that dbxout will find out about it.
2371
2372 TYPE_SIZE of a type is nonzero if the type has been laid out already.
2373 layout_type does nothing on such a type.
2374
2375 If the type is incomplete, its TYPE_SIZE remains zero. */
2376
2377void
2378layout_type (tree type)
2379{
2380 gcc_assert (type);
2381
2382 if (type == error_mark_node)
2383 return;
2384
2385 /* We don't want finalize_type_size to copy an alignment attribute to
2386 variants that don't have it. */
2387 type = TYPE_MAIN_VARIANT (type);
2388
2389 /* Do nothing if type has been laid out before. */
2390 if (TYPE_SIZE (type))
2391 return;
2392
2393 switch (TREE_CODE (type))
2394 {
2395 case LANG_TYPE:
2396 /* This kind of type is the responsibility
2397 of the language-specific code. */
2398 gcc_unreachable ();
2399
2400 case BOOLEAN_TYPE:
2401 case INTEGER_TYPE:
2402 case ENUMERAL_TYPE:
2403 {
2404 scalar_int_mode mode
2405 = smallest_int_mode_for_size (TYPE_PRECISION (type));
2406 SET_TYPE_MODE (type, mode);
2407 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
2408 /* Don't set TYPE_PRECISION here, as it may be set by a bitfield. */
2409 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
2410 break;
2411 }
2412
2413 case BITINT_TYPE:
2414 {
2415 struct bitint_info info;
2416 int cnt;
2417 bool ok = targetm.c.bitint_type_info (TYPE_PRECISION (type), &info);
2418 gcc_assert (ok);
2419 scalar_int_mode limb_mode = as_a <scalar_int_mode> (info.limb_mode);
2420 if (TYPE_PRECISION (type) <= GET_MODE_PRECISION (limb_mode))
2421 {
2422 SET_TYPE_MODE (type, limb_mode);
2423 cnt = 1;
2424 }
2425 else
2426 {
2427 SET_TYPE_MODE (type, BLKmode);
2428 cnt = CEIL (TYPE_PRECISION (type), GET_MODE_PRECISION (limb_mode));
2429 }
2430 TYPE_SIZE (type) = bitsize_int (cnt * GET_MODE_BITSIZE (limb_mode));
2431 TYPE_SIZE_UNIT (type) = size_int (cnt * GET_MODE_SIZE (limb_mode));
2432 SET_TYPE_ALIGN (type, GET_MODE_ALIGNMENT (limb_mode));
2433 if (cnt > 1)
2434 {
2435 /* Use same mode as compute_record_mode would use for a structure
2436 containing cnt limb_mode elements. */
2437 machine_mode mode = mode_for_size_tree (TYPE_SIZE (type),
2438 MODE_INT, 1).else_blk ();
2439 if (mode == BLKmode)
2440 break;
2441 finalize_type_size (type);
2442 SET_TYPE_MODE (type, mode);
2443 if (STRICT_ALIGNMENT
2444 && !(TYPE_ALIGN (type) >= BIGGEST_ALIGNMENT
2445 || TYPE_ALIGN (type) >= GET_MODE_ALIGNMENT (mode)))
2446 {
2447 /* If this is the only reason this type is BLKmode, then
2448 don't force containing types to be BLKmode. */
2449 TYPE_NO_FORCE_BLK (type) = 1;
2450 SET_TYPE_MODE (type, BLKmode);
2451 }
2452 if (TYPE_NEXT_VARIANT (type) || type != TYPE_MAIN_VARIANT (type))
2453 for (tree variant = TYPE_MAIN_VARIANT (type);
2454 variant != NULL_TREE;
2455 variant = TYPE_NEXT_VARIANT (variant))
2456 {
2457 SET_TYPE_MODE (variant, mode);
2458 if (STRICT_ALIGNMENT
2459 && !(TYPE_ALIGN (variant) >= BIGGEST_ALIGNMENT
2460 || (TYPE_ALIGN (variant)
2461 >= GET_MODE_ALIGNMENT (mode))))
2462 {
2463 TYPE_NO_FORCE_BLK (variant) = 1;
2464 SET_TYPE_MODE (variant, BLKmode);
2465 }
2466 }
2467 return;
2468 }
2469 break;
2470 }
2471
2472 case REAL_TYPE:
2473 {
2474 /* Allow the caller to choose the type mode, which is how decimal
2475 floats are distinguished from binary ones. */
2476 if (TYPE_MODE (type) == VOIDmode)
2477 SET_TYPE_MODE
2478 (type, float_mode_for_size (TYPE_PRECISION (type)).require ());
2479 scalar_float_mode mode = as_a <scalar_float_mode> (TYPE_MODE (type));
2480 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
2481 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
2482 break;
2483 }
2484
2485 case FIXED_POINT_TYPE:
2486 {
2487 /* TYPE_MODE (type) has been set already. */
2488 scalar_mode mode = SCALAR_TYPE_MODE (type);
2489 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
2490 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
2491 break;
2492 }
2493
2494 case COMPLEX_TYPE:
2495 TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
2496 if (TYPE_MODE (TREE_TYPE (type)) == BLKmode)
2497 {
2498 gcc_checking_assert (TREE_CODE (TREE_TYPE (type)) == BITINT_TYPE);
2499 SET_TYPE_MODE (type, BLKmode);
2500 TYPE_SIZE (type)
2501 = int_const_binop (MULT_EXPR, TYPE_SIZE (TREE_TYPE (type)),
2502 bitsize_int (2));
2503 TYPE_SIZE_UNIT (type)
2504 = int_const_binop (MULT_EXPR, TYPE_SIZE_UNIT (TREE_TYPE (type)),
2505 bitsize_int (2));
2506 break;
2507 }
2508 SET_TYPE_MODE (type,
2509 GET_MODE_COMPLEX_MODE (TYPE_MODE (TREE_TYPE (type))));
2510
2511 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
2512 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
2513 break;
2514
2515 case VECTOR_TYPE:
2516 {
2517 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (type);
2518 tree innertype = TREE_TYPE (type);
2519
2520 /* Find an appropriate mode for the vector type. */
2521 if (TYPE_MODE (type) == VOIDmode)
2522 SET_TYPE_MODE (type,
2523 mode_for_vector (SCALAR_TYPE_MODE (innertype),
2524 nunits).else_blk ());
2525
2526 TYPE_SATURATING (type) = TYPE_SATURATING (TREE_TYPE (type));
2527 TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
2528 /* Several boolean vector elements may fit in a single unit. */
2529 if (VECTOR_BOOLEAN_TYPE_P (type)
2530 && type->type_common.mode != BLKmode)
2531 TYPE_SIZE_UNIT (type)
2532 = size_int (GET_MODE_SIZE (type->type_common.mode));
2533 else
2534 TYPE_SIZE_UNIT (type) = int_const_binop (MULT_EXPR,
2535 TYPE_SIZE_UNIT (innertype),
2536 size_int (nunits));
2537 TYPE_SIZE (type) = int_const_binop
2538 (MULT_EXPR,
2539 bits_from_bytes (TYPE_SIZE_UNIT (type)),
2540 bitsize_int (BITS_PER_UNIT));
2541
2542 /* For vector types, we do not default to the mode's alignment.
2543 Instead, query a target hook, defaulting to natural alignment.
2544 This prevents ABI changes depending on whether or not native
2545 vector modes are supported. */
2546 SET_TYPE_ALIGN (type, targetm.vector_alignment (type));
2547
2548 /* However, if the underlying mode requires a bigger alignment than
2549 what the target hook provides, we cannot use the mode. For now,
2550 simply reject that case. */
2551 gcc_assert (TYPE_ALIGN (type)
2552 >= GET_MODE_ALIGNMENT (TYPE_MODE (type)));
2553 break;
2554 }
2555
2556 case VOID_TYPE:
2557 /* This is an incomplete type and so doesn't have a size. */
2558 SET_TYPE_ALIGN (type, 1);
2559 TYPE_USER_ALIGN (type) = 0;
2560 SET_TYPE_MODE (type, VOIDmode);
2561 break;
2562
2563 case OFFSET_TYPE:
2564 TYPE_SIZE (type) = bitsize_int (POINTER_SIZE);
2565 TYPE_SIZE_UNIT (type) = size_int (POINTER_SIZE_UNITS);
2566 /* A pointer might be MODE_PARTIAL_INT, but ptrdiff_t must be
2567 integral, which may be an __intN. */
2568 SET_TYPE_MODE (type, int_mode_for_size (POINTER_SIZE, 0).require ());
2569 TYPE_PRECISION (type) = POINTER_SIZE;
2570 break;
2571
2572 case FUNCTION_TYPE:
2573 case METHOD_TYPE:
2574 /* It's hard to see what the mode and size of a function ought to
2575 be, but we do know the alignment is FUNCTION_BOUNDARY, so
2576 make it consistent with that. */
2577 SET_TYPE_MODE (type,
2578 int_mode_for_size (FUNCTION_BOUNDARY, 0).else_blk ());
2579 TYPE_SIZE (type) = bitsize_int (FUNCTION_BOUNDARY);
2580 TYPE_SIZE_UNIT (type) = size_int (FUNCTION_BOUNDARY / BITS_PER_UNIT);
2581 break;
2582
2583 case POINTER_TYPE:
2584 case REFERENCE_TYPE:
2585 {
2586 scalar_int_mode mode = SCALAR_INT_TYPE_MODE (type);
2587 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
2588 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
2589 TYPE_UNSIGNED (type) = 1;
2590 TYPE_PRECISION (type) = GET_MODE_PRECISION (mode);
2591 }
2592 break;
2593
2594 case ARRAY_TYPE:
2595 {
2596 tree index = TYPE_DOMAIN (type);
2597 tree element = TREE_TYPE (type);
2598
2599 /* We need to know both bounds in order to compute the size. */
2600 if (index && TYPE_MAX_VALUE (index) && TYPE_MIN_VALUE (index)
2601 && TYPE_SIZE (element))
2602 {
2603 tree ub = TYPE_MAX_VALUE (index);
2604 tree lb = TYPE_MIN_VALUE (index);
2605 tree element_size = TYPE_SIZE (element);
2606 tree length;
2607
2608 /* Make sure that an array of zero-sized element is zero-sized
2609 regardless of its extent. */
2610 if (integer_zerop (element_size))
2611 length = size_zero_node;
2612
2613 /* The computation should happen in the original signedness so
2614 that (possible) negative values are handled appropriately
2615 when determining overflow. */
2616 else
2617 {
2618 /* ??? When it is obvious that the range is signed,
2619 represent it using ssizetype. */
2620 if (TREE_CODE (lb) == INTEGER_CST
2621 && TREE_CODE (ub) == INTEGER_CST
2622 && TYPE_UNSIGNED (TREE_TYPE (lb))
2623 && tree_int_cst_lt (ub, lb))
2624 {
2625 lb = wide_int_to_tree (ssizetype,
2626 offset_int::from (wi::to_wide (lb),
2627 SIGNED));
2628 ub = wide_int_to_tree (ssizetype,
2629 offset_int::from (wi::to_wide (ub),
2630 SIGNED));
2631 }
2632 length
2633 = fold_convert (sizetype,
2634 size_binop (PLUS_EXPR,
2635 build_int_cst (TREE_TYPE (lb), 1),
2636 size_binop (MINUS_EXPR, ub, lb)));
2637 }
2638
2639 /* ??? We have no way to distinguish a null-sized array from an
2640 array spanning the whole sizetype range, so we arbitrarily
2641 decide that [0, -1] is the only valid representation. */
2642 if (integer_zerop (length)
2643 && TREE_OVERFLOW (length)
2644 && integer_zerop (lb))
2645 length = size_zero_node;
2646
2647 TYPE_SIZE (type) = size_binop (MULT_EXPR, element_size,
2648 bits_from_bytes (length));
2649
2650 /* If we know the size of the element, calculate the total size
2651 directly, rather than do some division thing below. This
2652 optimization helps Fortran assumed-size arrays (where the
2653 size of the array is determined at runtime) substantially. */
2654 if (TYPE_SIZE_UNIT (element))
2655 TYPE_SIZE_UNIT (type)
2656 = size_binop (MULT_EXPR, TYPE_SIZE_UNIT (element), length);
2657 }
2658
2659 /* Now round the alignment and size,
2660 using machine-dependent criteria if any. */
2661
2662 unsigned align = TYPE_ALIGN (element);
2663 if (TYPE_USER_ALIGN (type))
2664 align = MAX (align, TYPE_ALIGN (type));
2665 else
2666 TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (element);
2667 if (!TYPE_WARN_IF_NOT_ALIGN (type))
2668 SET_TYPE_WARN_IF_NOT_ALIGN (type,
2669 TYPE_WARN_IF_NOT_ALIGN (element));
2670#ifdef ROUND_TYPE_ALIGN
2671 align = ROUND_TYPE_ALIGN (type, align, BITS_PER_UNIT);
2672#else
2673 align = MAX (align, BITS_PER_UNIT);
2674#endif
2675 SET_TYPE_ALIGN (type, align);
2676 SET_TYPE_MODE (type, BLKmode);
2677 if (TYPE_SIZE (type) != 0
2678 && ! targetm.member_type_forces_blk (type, VOIDmode)
2679 /* BLKmode elements force BLKmode aggregate;
2680 else extract/store fields may lose. */
2681 && (TYPE_MODE (TREE_TYPE (type)) != BLKmode
2682 || TYPE_NO_FORCE_BLK (TREE_TYPE (type))))
2683 {
2684 SET_TYPE_MODE (type, mode_for_array (TREE_TYPE (type),
2685 TYPE_SIZE (type)));
2686 if (TYPE_MODE (type) != BLKmode
2687 && STRICT_ALIGNMENT && TYPE_ALIGN (type) < BIGGEST_ALIGNMENT
2688 && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (TYPE_MODE (type)))
2689 {
2690 TYPE_NO_FORCE_BLK (type) = 1;
2691 SET_TYPE_MODE (type, BLKmode);
2692 }
2693 }
2694 if (AGGREGATE_TYPE_P (element))
2695 TYPE_TYPELESS_STORAGE (type) = TYPE_TYPELESS_STORAGE (element);
2696 /* When the element size is constant, check that it is at least as
2697 large as the element alignment. */
2698 if (TYPE_SIZE_UNIT (element)
2699 && TREE_CODE (TYPE_SIZE_UNIT (element)) == INTEGER_CST
2700 /* If TYPE_SIZE_UNIT overflowed, then it is certainly larger than
2701 TYPE_ALIGN_UNIT. */
2702 && !TREE_OVERFLOW (TYPE_SIZE_UNIT (element))
2703 && !integer_zerop (TYPE_SIZE_UNIT (element)))
2704 {
2705 if (compare_tree_int (TYPE_SIZE_UNIT (element),
2706 TYPE_ALIGN_UNIT (element)) < 0)
2707 error ("alignment of array elements is greater than "
2708 "element size");
2709 else if (TYPE_ALIGN_UNIT (element) > 1
2710 && (wi::zext (wi::to_wide (TYPE_SIZE_UNIT (element)),
2711 ffs_hwi (TYPE_ALIGN_UNIT (element)) - 1)
2712 != 0))
2713 error ("size of array element is not a multiple of its "
2714 "alignment");
2715 }
2716 break;
2717 }
2718
2719 case RECORD_TYPE:
2720 case UNION_TYPE:
2721 case QUAL_UNION_TYPE:
2722 {
2723 tree field;
2724 record_layout_info rli;
2725
2726 /* Initialize the layout information. */
2727 rli = start_record_layout (type);
2728
2729 /* If this is a QUAL_UNION_TYPE, we want to process the fields
2730 in the reverse order in building the COND_EXPR that denotes
2731 its size. We reverse them again later. */
2732 if (TREE_CODE (type) == QUAL_UNION_TYPE)
2733 TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));
2734
2735 /* Place all the fields. */
2736 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
2737 place_field (rli, field);
2738
2739 if (TREE_CODE (type) == QUAL_UNION_TYPE)
2740 TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));
2741
2742 /* Finish laying out the record. */
2743 finish_record_layout (rli, /*free_p=*/true);
2744 }
2745 break;
2746
2747 default:
2748 gcc_unreachable ();
2749 }
2750
2751 /* Compute the final TYPE_SIZE, TYPE_ALIGN, etc. for TYPE. For
2752 records and unions, finish_record_layout already called this
2753 function. */
2754 if (!RECORD_OR_UNION_TYPE_P (type))
2755 finalize_type_size (type);
2756
2757 /* We should never see alias sets on incomplete aggregates. And we
2758 should not call layout_type on aggregates that are already complete. */
2759 if (AGGREGATE_TYPE_P (type))
2760 gcc_assert (!TYPE_ALIAS_SET_KNOWN_P (type));
2761}
2762
2763/* Return the least alignment required for type TYPE. */
2764
2765unsigned int
2766min_align_of_type (tree type)
2767{
2768 unsigned int align = TYPE_ALIGN (type);
2769 if (!TYPE_USER_ALIGN (type))
2770 {
2771 align = MIN (align, BIGGEST_ALIGNMENT);
2772#ifdef BIGGEST_FIELD_ALIGNMENT
2773 align = MIN (align, BIGGEST_FIELD_ALIGNMENT);
2774#endif
2775 unsigned int field_align = align;
2776#ifdef ADJUST_FIELD_ALIGN
2777 field_align = ADJUST_FIELD_ALIGN (NULL_TREE, type, field_align);
2778#endif
2779 align = MIN (align, field_align);
2780 }
2781 return align / BITS_PER_UNIT;
2782}
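/* Illustrative example (a sketch): on 32-bit x86, TYPE_ALIGN (double)
   is 64 bits, but the psABI caps the alignment of double fields at 32
   bits via ADJUST_FIELD_ALIGN, so min_align_of_type returns 4 for
   double rather than 8.  */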
2783
2784/* Create and return a type for signed integers of PRECISION bits. */
2785
2786tree
2787make_signed_type (int precision)
2788{
2789 tree type = make_node (INTEGER_TYPE);
2790
2791 TYPE_PRECISION (type) = precision;
2792
2793 fixup_signed_type (type);
2794 return type;
2795}
2796
2797/* Create and return a type for unsigned integers of PRECISION bits. */
2798
2799tree
2800make_unsigned_type (int precision)
2801{
2802 tree type = make_node (INTEGER_TYPE);
2803
2804 TYPE_PRECISION (type) = precision;
2805
2806 fixup_unsigned_type (type);
2807 return type;
2808}
2809
2810/* Create and return a type for fract of PRECISION bits, UNSIGNEDP,
2811 and SATP. */
2812
2813tree
2814make_fract_type (int precision, int unsignedp, int satp)
2815{
2816 tree type = make_node (FIXED_POINT_TYPE);
2817
2818 TYPE_PRECISION (type) = precision;
2819
2820 if (satp)
2821 TYPE_SATURATING (type) = 1;
2822
2823 /* Lay out the type: set its alignment, size, etc. */
2824 TYPE_UNSIGNED (type) = unsignedp;
2825 enum mode_class mclass = unsignedp ? MODE_UFRACT : MODE_FRACT;
2826 SET_TYPE_MODE (type, mode_for_size (precision, mclass, 0).require ());
2827 layout_type (type);
2828
2829 return type;
2830}
2831
2832/* Create and return a type for accum of PRECISION bits, UNSIGNEDP,
2833 and SATP. */
2834
2835tree
2836make_accum_type (int precision, int unsignedp, int satp)
2837{
2838 tree type = make_node (FIXED_POINT_TYPE);
2839
2840 TYPE_PRECISION (type) = precision;
2841
2842 if (satp)
2843 TYPE_SATURATING (type) = 1;
2844
2845 /* Lay out the type: set its alignment, size, etc. */
2846 TYPE_UNSIGNED (type) = unsignedp;
2847 enum mode_class mclass = unsignedp ? MODE_UACCUM : MODE_ACCUM;
2848 SET_TYPE_MODE (type, mode_for_size (precision, mclass, 0).require ());
2849 layout_type (type);
2850
2851 return type;
2852}
2853
2854/* Initialize sizetypes so layout_type can use them. */
2855
2856void
2857initialize_sizetypes (void)
2858{
2859 int precision, bprecision;
2860
2861 /* Get sizetypes precision from the SIZE_TYPE target macro. */
2862 if (strcmp (SIZETYPE, "unsigned int") == 0)
2863 precision = INT_TYPE_SIZE;
2864 else if (strcmp (SIZETYPE, "long unsigned int") == 0)
2865 precision = LONG_TYPE_SIZE;
2866 else if (strcmp (SIZETYPE, "long long unsigned int") == 0)
2867 precision = LONG_LONG_TYPE_SIZE;
2868 else if (strcmp (SIZETYPE, "short unsigned int") == 0)
2869 precision = SHORT_TYPE_SIZE;
2870 else
2871 {
2872 int i;
2873
2874 precision = -1;
2875 for (i = 0; i < NUM_INT_N_ENTS; i++)
2876 if (int_n_enabled_p[i])
2877 {
2878 char name[50], altname[50];
2879 sprintf (name, "__int%d unsigned", int_n_data[i].bitsize);
2880 sprintf (altname, "__int%d__ unsigned", int_n_data[i].bitsize);
2881
2882 if (strcmp (name, SIZETYPE) == 0
2883 || strcmp (altname, SIZETYPE) == 0)
2884 {
2885 precision = int_n_data[i].bitsize;
2886 }
2887 }
2888 if (precision == -1)
2889 gcc_unreachable ();
2890 }
2891
2892 bprecision
2893 = MIN (precision + LOG2_BITS_PER_UNIT + 1, MAX_FIXED_MODE_SIZE);
2894 bprecision = GET_MODE_PRECISION (smallest_int_mode_for_size (bprecision));
2895 if (bprecision > HOST_BITS_PER_DOUBLE_INT)
2896 bprecision = HOST_BITS_PER_DOUBLE_INT;
2897
2898 /* Create stubs for sizetype and bitsizetype so we can create constants. */
2899 sizetype = make_node (INTEGER_TYPE);
2900 TYPE_NAME (sizetype) = get_identifier ("sizetype");
2901 TYPE_PRECISION (sizetype) = precision;
2902 TYPE_UNSIGNED (sizetype) = 1;
2903 bitsizetype = make_node (INTEGER_TYPE);
2904 TYPE_NAME (bitsizetype) = get_identifier ("bitsizetype");
2905 TYPE_PRECISION (bitsizetype) = bprecision;
2906 TYPE_UNSIGNED (bitsizetype) = 1;
2907
2908 /* Now layout both types manually. */
2909 scalar_int_mode mode = smallest_int_mode_for_size (precision);
2910 SET_TYPE_MODE (sizetype, mode);
2911 SET_TYPE_ALIGN (sizetype, GET_MODE_ALIGNMENT (TYPE_MODE (sizetype)));
2912 TYPE_SIZE (sizetype) = bitsize_int (precision);
2913 TYPE_SIZE_UNIT (sizetype) = size_int (GET_MODE_SIZE (mode));
2914 set_min_and_max_values_for_integral_type (sizetype, precision, UNSIGNED);
2915
2916 mode = smallest_int_mode_for_size (bprecision);
2917 SET_TYPE_MODE (bitsizetype, mode);
2918 SET_TYPE_ALIGN (bitsizetype, GET_MODE_ALIGNMENT (TYPE_MODE (bitsizetype)));
2919 TYPE_SIZE (bitsizetype) = bitsize_int (bprecision);
2920 TYPE_SIZE_UNIT (bitsizetype) = size_int (GET_MODE_SIZE (mode));
2921 set_min_and_max_values_for_integral_type (bitsizetype, bprecision, UNSIGNED);
2922
2923 /* Create the signed variants of *sizetype. */
2924 ssizetype = make_signed_type (TYPE_PRECISION (sizetype));
2925 TYPE_NAME (ssizetype) = get_identifier ("ssizetype");
2926 sbitsizetype = make_signed_type (TYPE_PRECISION (bitsizetype));
2927 TYPE_NAME (sbitsizetype) = get_identifier ("sbitsizetype");
2928}
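/* Illustrative example (a sketch, assuming a typical LP64 target such
   as x86-64, where SIZETYPE is "long unsigned int"): precision becomes
   LONG_TYPE_SIZE (64); bprecision starts as MIN (64 + 3 + 1, 128) = 68
   and is then widened to the precision of the smallest integer mode
   holding 68 bits (TImode), so bitsizetype ends up with 128 bits of
   precision alongside the 64-bit sizetype.  */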
2929
2930/* TYPE is an integral type, i.e., an INTEGER_TYPE, ENUMERAL_TYPE
2931 or BOOLEAN_TYPE. Set TYPE_MIN_VALUE and TYPE_MAX_VALUE
2932 for TYPE, based on the PRECISION and whether or not the TYPE
2933 IS_UNSIGNED. PRECISION need not correspond to a width supported
2934 natively by the hardware; for example, on a machine with 8-bit,
2935 16-bit, and 32-bit register modes, PRECISION might be 7, 23, or
2936 61. */
2937
2938void
2939set_min_and_max_values_for_integral_type (tree type,
2940 int precision,
2941 signop sgn)
2942{
2943 /* For bitfields with zero width we end up creating integer types
2944 with zero precision. Don't assign any minimum/maximum values
2945 to those types, they don't have any valid value. */
2946 if (precision < 1)
2947 return;
2948
2949 gcc_assert (precision <= WIDE_INT_MAX_PRECISION);
2950
2951 TYPE_MIN_VALUE (type)
2952 = wide_int_to_tree (type, wi::min_value (precision, sgn));
2953 TYPE_MAX_VALUE (type)
2954 = wide_int_to_tree (type, wi::max_value (precision, sgn));
2955}
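/* Illustrative example: for PRECISION 7, the bounds are [-64, 63] when
   SGN is SIGNED and [0, 127] when SGN is UNSIGNED.  */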
2956
2957/* Set the extreme values of TYPE based on its precision in bits,
2958 then lay it out. Used when make_signed_type won't do
2959 because the tree code is not INTEGER_TYPE. */
2960
2961void
2962fixup_signed_type (tree type)
2963{
2964 int precision = TYPE_PRECISION (type);
2965
2966 set_min_and_max_values_for_integral_type (type, precision, SIGNED);
2967
2968 /* Lay out the type: set its alignment, size, etc. */
2969 layout_type (type);
2970}
2971
2972/* Set the extreme values of TYPE based on its precision in bits,
2973 then lay it out. This is used both in `make_unsigned_type'
2974 and for enumeral types. */
2975
2976void
2977fixup_unsigned_type (tree type)
2978{
2979 int precision = TYPE_PRECISION (type);
2980
2981 TYPE_UNSIGNED (type) = 1;
2982
2983 set_min_and_max_values_for_integral_type (type, precision, UNSIGNED);
2984
2985 /* Lay out the type: set its alignment, size, etc. */
2986 layout_type (type);
2987}
2988
2989/* Construct an iterator for a bitfield that spans BITSIZE bits,
2990 starting at BITPOS.
2991
2992 BITREGION_START is the bit position of the first bit in this
2993 sequence of bit fields. BITREGION_END is the last bit in this
2994 sequence. If these two fields are non-zero, we should restrict the
2995 memory access to that range. Otherwise, we are allowed to touch
2996 any adjacent non bit-fields.
2997
2998 ALIGN is the alignment of the underlying object in bits.
2999 VOLATILEP says whether the bitfield is volatile. */
3000
3001bit_field_mode_iterator
3002::bit_field_mode_iterator (HOST_WIDE_INT bitsize, HOST_WIDE_INT bitpos,
3003 poly_int64 bitregion_start,
3004 poly_int64 bitregion_end,
3005 unsigned int align, bool volatilep)
3006: m_mode (NARROWEST_INT_MODE), m_bitsize (bitsize),
3007 m_bitpos (bitpos), m_bitregion_start (bitregion_start),
3008 m_bitregion_end (bitregion_end), m_align (align),
3009 m_volatilep (volatilep), m_count (0)
3010{
3011 if (known_eq (m_bitregion_end, 0))
3012 {
3013 /* We can assume that any aligned chunk of ALIGN bits that overlaps
3014 the bitfield is mapped and won't trap, provided that ALIGN isn't
3015 too large. The cap is the biggest required alignment for data,
3016 or at least the word size. And force one such chunk at least. */
3017 unsigned HOST_WIDE_INT units
3018 = MIN (align, MAX (BIGGEST_ALIGNMENT, BITS_PER_WORD));
3019 if (bitsize <= 0)
3020 bitsize = 1;
3021 HOST_WIDE_INT end = bitpos + bitsize + units - 1;
3022 m_bitregion_end = end - end % units - 1;
3023 }
3024}
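/* Worked example (a sketch, assuming BITS_PER_WORD == 64 and
   BIGGEST_ALIGNMENT >= 64): for BITSIZE 3, BITPOS 17, no explicit
   bitregion and ALIGN 32, UNITS is MIN (32, MAX (BIGGEST_ALIGNMENT,
   64)) = 32, END is 17 + 3 + 32 - 1 = 51, and m_bitregion_end becomes
   51 - 51 % 32 - 1 = 31: the region covers bits [0, 31], i.e. the
   aligned 32-bit chunk overlapping the bitfield.  */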
3025
3026/* Calls to this function return successively larger modes that can be used
3027 to represent the bitfield. Return true if another bitfield mode is
3028 available, storing it in *OUT_MODE if so. */
3029
3030bool
3031bit_field_mode_iterator::next_mode (scalar_int_mode *out_mode)
3032{
3033 scalar_int_mode mode;
3034 for (; m_mode.exists (&mode); m_mode = GET_MODE_WIDER_MODE (mode))
3035 {
3036 unsigned int unit = GET_MODE_BITSIZE (mode);
3037
3038 /* Skip modes that don't have full precision. */
3039 if (unit != GET_MODE_PRECISION (mode))
3040 continue;
3041
3042 /* Stop if the mode is too wide to handle efficiently. */
3043 if (unit > MAX_FIXED_MODE_SIZE)
3044 break;
3045
3046 /* Don't deliver more than one multiword mode; the smallest one
3047 should be used. */
3048 if (m_count > 0 && unit > BITS_PER_WORD)
3049 break;
3050
3051 /* Skip modes that are too small. */
3052 unsigned HOST_WIDE_INT substart = (unsigned HOST_WIDE_INT) m_bitpos % unit;
3053 unsigned HOST_WIDE_INT subend = substart + m_bitsize;
3054 if (subend > unit)
3055 continue;
3056
3057 /* Stop if the mode goes outside the bitregion. */
3058 HOST_WIDE_INT start = m_bitpos - substart;
3059 if (maybe_ne (m_bitregion_start, 0)
3060 && maybe_lt (start, m_bitregion_start))
3061 break;
3062 HOST_WIDE_INT end = start + unit;
3063 if (maybe_gt (end, m_bitregion_end + 1))
3064 break;
3065
3066 /* Stop if the mode requires too much alignment. */
3067 if (GET_MODE_ALIGNMENT (mode) > m_align
3068 && targetm.slow_unaligned_access (mode, m_align))
3069 break;
3070
3071 *out_mode = mode;
3072 m_mode = GET_MODE_WIDER_MODE (mode);
3073 m_count++;
3074 return true;
3075 }
3076 return false;
3077}
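/* Continuing the example above (BITSIZE 3, BITPOS 17, ALIGN 32,
   bitregion [0, 31]): QImode covers bits [16, 23] and is returned
   first; HImode covers [16, 31] and SImode [0, 31], both still inside
   the region; DImode would span [0, 63], past the region end, so the
   iteration stops there.  */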
3078
3079/* Return true if smaller modes are generally preferred for this kind
3080 of bitfield. */
3081
3082bool
3083bit_field_mode_iterator::prefer_smaller_modes ()
3084{
3085 return (m_volatilep
3086 ? targetm.narrow_volatile_bitfield ()
3087 : !SLOW_BYTE_ACCESS);
3088}
3089
3090/* Find the best machine mode to use when referencing a bit field of length
3091 BITSIZE bits starting at BITPOS.
3092
3093 BITREGION_START is the bit position of the first bit in this
3094 sequence of bit fields. BITREGION_END is the last bit in this
3095 sequence. If these two fields are non-zero, we should restrict the
3096 memory access to that range. Otherwise, we are allowed to touch
3097 any adjacent non bit-fields.
3098
3099 The chosen mode must have no more than LARGEST_MODE_BITSIZE bits.
3100 INT_MAX is a suitable value for LARGEST_MODE_BITSIZE if the caller
3101 doesn't want to apply a specific limit.
3102
3103 If no mode meets all these conditions, we return VOIDmode.
3104
3105 The underlying object is known to be aligned to a boundary of ALIGN bits.
3106
3107 If VOLATILEP is false and SLOW_BYTE_ACCESS is false, we return the
3108 smallest mode meeting these conditions.
3109
3110 If VOLATILEP is false and SLOW_BYTE_ACCESS is true, we return the
3111 largest mode (but a mode no wider than UNITS_PER_WORD) that meets
3112 all the conditions.
3113
3114 If VOLATILEP is true, the narrow_volatile_bitfield target hook is
3115 used to decide which of the above modes should be used. */
3116
3117bool
3118get_best_mode (int bitsize, int bitpos,
3119 poly_uint64 bitregion_start, poly_uint64 bitregion_end,
3120 unsigned int align,
3121 unsigned HOST_WIDE_INT largest_mode_bitsize, bool volatilep,
3122 scalar_int_mode *best_mode)
3123{
3124 bit_field_mode_iterator iter (bitsize, bitpos, bitregion_start,
3125 bitregion_end, align, volatilep);
3126 scalar_int_mode mode;
3127 bool found = false;
3128 while (iter.next_mode (&mode)
3129 /* ??? For historical reasons, reject modes that would normally
3130 receive greater alignment, even if unaligned accesses are
3131 acceptable. This has both advantages and disadvantages.
3132 Removing this check means that something like:
3133
3134 struct s { unsigned int x; unsigned int y; };
3135 int f (struct s *s) { return s->x == 0 && s->y == 0; }
3136
3137 can be implemented using a single load and compare on
3138 64-bit machines that have no alignment restrictions.
3139 For example, on powerpc64-linux-gnu, we would generate:
3140
3141 ld 3,0(3)
3142 cntlzd 3,3
3143 srdi 3,3,6
3144 blr
3145
3146 rather than:
3147
3148 lwz 9,0(3)
3149 cmpwi 7,9,0
3150 bne 7,.L3
3151 lwz 3,4(3)
3152 cntlzw 3,3
3153 srwi 3,3,5
3154 extsw 3,3
3155 blr
3156 .p2align 4,,15
3157 .L3:
3158 li 3,0
3159 blr
3160
3161 However, accessing more than one field can make life harder
3162 for the gimple optimizers. For example, gcc.dg/vect/bb-slp-5.c
3163 has a series of unsigned short copies followed by a series of
3164 unsigned short comparisons. With this check, both the copies
3165 and comparisons remain 16-bit accesses and FRE is able
3166 to eliminate the latter. Without the check, the comparisons
3167 can be done using 2 64-bit operations, which FRE isn't able
3168 to handle in the same way.
3169
3170 Either way, it would probably be worth disabling this check
3171 during expand. One particular example where removing the
3172 check would help is the get_best_mode call in store_bit_field.
3173 If we are given a memory bitregion of 128 bits that is aligned
3174 to a 64-bit boundary, and the bitfield we want to modify is
3175 in the second half of the bitregion, this check causes
3176 store_bitfield to turn the memory into a 64-bit reference
3177 to the _first_ half of the region. We later use
3178 adjust_bitfield_address to get a reference to the correct half,
3179 but doing so looks to adjust_bitfield_address as though we are
3180 moving past the end of the original object, so it drops the
3181 associated MEM_EXPR and MEM_OFFSET. Removing the check
3182 causes store_bit_field to keep a 128-bit memory reference,
3183 so that the final bitfield reference still has a MEM_EXPR
3184 and MEM_OFFSET. */
3185 && GET_MODE_ALIGNMENT (mode) <= align
3186 && GET_MODE_BITSIZE (mode) <= largest_mode_bitsize)
3187 {
3188 *best_mode = mode;
3189 found = true;
3190 if (iter.prefer_smaller_modes ())
3191 break;
3192 }
3193
3194 return found;
3195}
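/* Illustrative usage sketch (hypothetical values): to access a 3-bit
   field at bit 17 of an object aligned to 32 bits, with no extra size
   limit:

     scalar_int_mode best;
     if (get_best_mode (3, 17, 0, 0, 32, INT_MAX, false, &best))
       ... access the field using BEST ...

   With !SLOW_BYTE_ACCESS the first mode found, QImode, is chosen;
   otherwise the loop keeps going and settles on the widest acceptable
   mode, SImode in this example.  */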
3196
3197/* Gets minimal and maximal values for MODE (signed or unsigned depending on
3198 SIGN). The returned constants are made to be usable in TARGET_MODE. */
3199
3200void
3201get_mode_bounds (scalar_int_mode mode, int sign,
3202 scalar_int_mode target_mode,
3203 rtx *mmin, rtx *mmax)
3204{
3205 unsigned size = GET_MODE_PRECISION (mode);
3206 unsigned HOST_WIDE_INT min_val, max_val;
3207
3208 gcc_assert (size <= HOST_BITS_PER_WIDE_INT);
3209
3210 /* Special case BImode, which has values 0 and STORE_FLAG_VALUE. */
3211 if (mode == BImode)
3212 {
3213 if (STORE_FLAG_VALUE < 0)
3214 {
3215 min_val = STORE_FLAG_VALUE;
3216 max_val = 0;
3217 }
3218 else
3219 {
3220 min_val = 0;
3221 max_val = STORE_FLAG_VALUE;
3222 }
3223 }
3224 else if (sign)
3225 {
3226 min_val = -(HOST_WIDE_INT_1U << (size - 1));
3227 max_val = (HOST_WIDE_INT_1U << (size - 1)) - 1;
3228 }
3229 else
3230 {
3231 min_val = 0;
3232 max_val = (HOST_WIDE_INT_1U << (size - 1) << 1) - 1;
3233 }
3234
3235 *mmin = gen_int_mode (min_val, target_mode);
3236 *mmax = gen_int_mode (max_val, target_mode);
3237}
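/* Illustrative example: get_mode_bounds (QImode, 1, SImode, &mmin, &mmax)
   sets *MMIN and *MMAX to CONST_INTs for -128 and 127; with SIGN == 0
   they become 0 and 255 instead.  */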
3238
3239#include "gt-stor-layout.h"
3240
