/* GIMPLE store merging and byte swapping passes.
   Copyright (C) 2009-2023 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

/* The purpose of the store merging pass is to combine multiple memory stores
   of constant values, values loaded from memory, bitwise operations on those,
   or bit-field values, to consecutive locations, into fewer wider stores.

   For example, if we have a sequence performing four byte stores to
   consecutive memory locations:
   [p     ] := imm1;
   [p + 1B] := imm2;
   [p + 2B] := imm3;
   [p + 3B] := imm4;
   we can transform this into a single 4-byte store if the target supports it:
   [p] := imm1:imm2:imm3:imm4 concatenated according to endianness.

   Or:
   [p     ] := [q     ];
   [p + 1B] := [q + 1B];
   [p + 2B] := [q + 2B];
   [p + 3B] := [q + 3B];
   if there is no overlap can be transformed into a single 4-byte
   load followed by a single 4-byte store.

   Or:
   [p     ] := [q     ] ^ imm1;
   [p + 1B] := [q + 1B] ^ imm2;
   [p + 2B] := [q + 2B] ^ imm3;
   [p + 3B] := [q + 3B] ^ imm4;
   if there is no overlap can be transformed into a single 4-byte
   load, xored with imm1:imm2:imm3:imm4 and stored using a single 4-byte store.

   Or:
   [p:1 ] := imm;
   [p:31] := val & 0x7FFFFFFF;
   we can transform this into a single 4-byte store if the target supports it:
   [p] := imm:(val & 0x7FFFFFFF) concatenated according to endianness.

   The algorithm is applied to each basic block in three phases:

   1) Scan through the basic block and record assignments to destinations
   that can be expressed as a store to memory of a certain size at a certain
   bit offset from base expressions we can handle.  For bit-fields we also
   record the surrounding bit region, i.e. bits that could be stored in
   a read-modify-write operation when storing the bit-field.  Record store
   chains to different bases in a hash_map (m_stores) and make sure to
   terminate such chains when appropriate (for example when the stored
   values get used subsequently).
   These stores can be a result of structure element initializers, array stores
   etc.  A store_immediate_info object is recorded for every such store.
   Record as many such assignments to a single base as possible until a
   statement that interferes with the store sequence is encountered.
   Each store has up to 2 operands, which can be either a constant, a memory
   load or an SSA name, from which the value to be stored can be computed.
   At most one of the operands can be a constant.  The operands are recorded
   in the store_operand_info struct.

   2) Analyze the chains of stores recorded in phase 1) (i.e. the vector of
   store_immediate_info objects) and coalesce contiguous stores into
   merged_store_group objects.  For bit-field stores, we don't need to
   require the stores to be contiguous, just their surrounding bit regions
   have to be contiguous.  If the expression being stored is different
   between adjacent stores, such as one store storing a constant and
   the following one storing a value loaded from memory, or if the loaded
   memory objects are not adjacent, a new merged_store_group is created
   as well.

   For example, given the stores:
   [p     ] := 0;
   [p + 1B] := 1;
   [p + 3B] := 0;
   [p + 4B] := 1;
   [p + 5B] := 0;
   [p + 6B] := 0;
   this phase would produce two merged_store_group objects, one recording the
   two bytes stored in the memory region [p : p + 1] and another
   recording the four bytes stored in the memory region [p + 3 : p + 6].

   3) The merged_store_group objects produced in phase 2) are processed
   to generate the sequence of wider stores that set the contiguous memory
   regions to the sequence of bytes that correspond to it.  This may emit
   multiple stores per store group to handle contiguous stores that are not
   of a size that is a power of 2.  For example it can try to emit a 40-bit
   store as a 32-bit store followed by an 8-bit store.
   We try to emit as wide stores as we can while respecting STRICT_ALIGNMENT
   or TARGET_SLOW_UNALIGNED_ACCESS settings.

   Note on endianness and example:
   Consider 2 contiguous 16-bit stores followed by 2 contiguous 8-bit stores:
   [p     ] := 0x1234;
   [p + 2B] := 0x5678;
   [p + 4B] := 0xab;
   [p + 5B] := 0xcd;

   The memory layout for little-endian (LE) and big-endian (BE) must be:
   p |LE|BE|
   ---------
   0 |34|12|
   1 |12|34|
   2 |78|56|
   3 |56|78|
   4 |ab|ab|
   5 |cd|cd|

   To merge these into a single 48-bit merged value 'val' in phase 2)
   on little-endian we insert stores to higher (consecutive) bitpositions
   into the most significant bits of the merged value.
   The final merged value would be: 0xcdab56781234

   For big-endian we insert stores to higher bitpositions into the least
   significant bits of the merged value.
   The final merged value would be: 0x12345678abcd

   Then, in phase 3), we want to emit this 48-bit value as a 32-bit store
   followed by a 16-bit store.  Again, we must consider endianness when
   breaking down the 48-bit value 'val' computed above.
   For little endian we emit:
   [p]      (32-bit) := 0x56781234; //  val & 0x0000ffffffff;
   [p + 4B] (16-bit) := 0xcdab;     // (val & 0xffff00000000) >> 32;

   Whereas for big-endian we emit:
   [p]      (32-bit) := 0x12345678; // (val & 0xffffffff0000) >> 16;
   [p + 4B] (16-bit) := 0xabcd;     //  val & 0x00000000ffff;  */
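
/* As a concrete illustration (hypothetical user code, not part of this
   file), the three phases above would rewrite the four byte stores in

     void
     store_imms (char *p)
     {
       p[0] = 0x01;
       p[1] = 0x02;
       p[2] = 0x03;
       p[3] = 0x04;
     }

   into a single 32-bit store of 0x04030201 on a little-endian target
   (0x01020304 on big-endian), provided the target can do such an access
   at the available alignment.  */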

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "builtins.h"
#include "fold-const.h"
#include "tree-pass.h"
#include "ssa.h"
#include "gimple-pretty-print.h"
#include "alias.h"
#include "print-tree.h"
#include "tree-hash-traits.h"
#include "gimple-iterator.h"
#include "gimplify.h"
#include "gimple-fold.h"
#include "stor-layout.h"
#include "timevar.h"
#include "cfganal.h"
#include "cfgcleanup.h"
#include "tree-cfg.h"
#include "except.h"
#include "tree-eh.h"
#include "target.h"
#include "gimplify-me.h"
#include "rtl.h"
#include "expr.h"	/* For get_bit_range.  */
#include "optabs-tree.h"
#include "dbgcnt.h"
#include "selftest.h"

/* The maximum size (in bits) of the stores this pass should generate.  */
#define MAX_STORE_BITSIZE (BITS_PER_WORD)
#define MAX_STORE_BYTES (MAX_STORE_BITSIZE / BITS_PER_UNIT)

/* Limit to bound the number of aliasing checks for loads with the same
   vuse as the corresponding store.  */
#define MAX_STORE_ALIAS_CHECKS 64

namespace {

struct bswap_stat
{
  /* Number of hand-written 16-bit nop / bswaps found.  */
  int found_16bit;

  /* Number of hand-written 32-bit nop / bswaps found.  */
  int found_32bit;

  /* Number of hand-written 64-bit nop / bswaps found.  */
  int found_64bit;
} nop_stats, bswap_stats;

/* A symbolic number structure is used to detect byte permutation and selection
   patterns of a source.  To achieve that, its field N contains an artificial
   number consisting of BITS_PER_MARKER sized markers tracking where each
   byte comes from in the source:

   0       - target byte has the value 0
   FF      - target byte has an unknown value (eg. due to sign extension)
   1..size - marker value is the byte index in the source counted from 1
	     (so the lsb of the source has marker value 1).

   To detect permutations on memory sources (arrays and structures), a symbolic
   number is also associated with:
   - a base address BASE_ADDR and an OFFSET giving the address of the source;
   - a range which gives the difference between the highest and lowest accessed
     memory location to make such a symbolic number;
   - the address SRC of the source element of lowest address as a convenience
     to easily get BASE_ADDR + offset + lowest bytepos;
   - the number of expressions N_OPS bitwise ored together to represent
     the approximate cost of the computation.

   Note 1: the range is different from size as size reflects the size of the
   type of the current expression.  For instance, for an array char a[],
   (short) a[0] | (short) a[3] would have a size of 2 but a range of 4 while
   (short) a[0] | ((short) a[0] << 1) would still have a size of 2 but this
   time a range of 1.

   Note 2: for non-memory sources, range holds the same value as size.

   Note 3: SRC points to the SSA_NAME in case of non-memory source.  */
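
/* For illustration (worked example, not itself part of the pass's data):
   after init_symbolic_number below, a plain 32-bit source has N ==
   0x04030201, i.e. target byte i comes from source byte i (markers count
   from 1 at the lsb).  A full byte swap of that source would instead be
   described by N == 0x01020304, which is exactly what CMPXCHG below
   encodes once masked down to 4 bytes.  */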

struct symbolic_number {
  uint64_t n;
  tree type;
  tree base_addr;
  tree offset;
  poly_int64 bytepos;
  tree src;
  tree alias_set;
  tree vuse;
  unsigned HOST_WIDE_INT range;
  int n_ops;
};

#define BITS_PER_MARKER 8
#define MARKER_MASK ((1 << BITS_PER_MARKER) - 1)
#define MARKER_BYTE_UNKNOWN MARKER_MASK
#define HEAD_MARKER(n, size) \
  ((n) & ((uint64_t) MARKER_MASK << (((size) - 1) * BITS_PER_MARKER)))

/* The number which the find_bswap_or_nop_1 result should match in
   order to have a nop.  The number is masked according to the size of
   the symbolic number before using it.  */
#define CMPNOP (sizeof (int64_t) < 8 ? 0 : \
  (uint64_t)0x08070605 << 32 | 0x04030201)

/* The number which the find_bswap_or_nop_1 result should match in
   order to have a byte swap.  The number is masked according to the
   size of the symbolic number before using it.  */
#define CMPXCHG (sizeof (int64_t) < 8 ? 0 : \
  (uint64_t)0x01020304 << 32 | 0x05060708)
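
/* A sketch of how the masking works (illustrative values only): for a
   16-bit symbolic number, find_bswap_or_nop_finalize below reduces the
   comparison values to CMPNOP & 0xffff == 0x0201 for the identity and
   CMPXCHG >> 48 == 0x0102 for the byte swap, so e.g. N == 0x0102 over a
   2-byte range is recognized as a 16-bit bswap.  */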

/* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
   number N.  Return false if the requested operation is not permitted
   on a symbolic number.  */

inline bool
do_shift_rotate (enum tree_code code,
		 struct symbolic_number *n,
		 int count)
{
  int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
  uint64_t head_marker;

  if (count < 0
      || count >= TYPE_PRECISION (n->type)
      || count % BITS_PER_UNIT != 0)
    return false;
  count = (count / BITS_PER_UNIT) * BITS_PER_MARKER;

  /* Zero out the extra bits of N in order to avoid them being shifted
     into the significant bits.  */
  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;

  switch (code)
    {
    case LSHIFT_EXPR:
      n->n <<= count;
      break;
    case RSHIFT_EXPR:
      head_marker = HEAD_MARKER (n->n, size);
      n->n >>= count;
      /* Arithmetic shift of signed type: result is dependent on the value.  */
      if (!TYPE_UNSIGNED (n->type) && head_marker)
	for (i = 0; i < count / BITS_PER_MARKER; i++)
	  n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
		  << ((size - 1 - i) * BITS_PER_MARKER);
      break;
    case LROTATE_EXPR:
      n->n = (n->n << count) | (n->n >> ((size * BITS_PER_MARKER) - count));
      break;
    case RROTATE_EXPR:
      n->n = (n->n >> count) | (n->n << ((size * BITS_PER_MARKER) - count));
      break;
    default:
      return false;
    }
  /* Zero unused bits for size.  */
  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
  return true;
}
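
/* Worked example (illustrative): for a 32-bit unsigned source with
   N == 0x04030201, an RSHIFT_EXPR by 8 bits shifts one whole marker out,
   giving N == 0x00040302, i.e. the low result byte now comes from source
   byte 2 and the top byte is known to be zero.  With a signed type the
   freed-up head bytes would instead be set to MARKER_BYTE_UNKNOWN (0xff),
   since an arithmetic shift replicates the sign bit.  */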

/* Perform sanity checking for the symbolic number N and the gimple
   statement STMT.  */

inline bool
verify_symbolic_number_p (struct symbolic_number *n, gimple *stmt)
{
  tree lhs_type;

  lhs_type = TREE_TYPE (gimple_get_lhs (stmt));

  if (TREE_CODE (lhs_type) != INTEGER_TYPE
      && TREE_CODE (lhs_type) != ENUMERAL_TYPE)
    return false;

  if (TYPE_PRECISION (lhs_type) != TYPE_PRECISION (n->type))
    return false;

  return true;
}

/* Initialize the symbolic number N for the bswap pass from the base element
   SRC manipulated by the bitwise OR expression.  */

bool
init_symbolic_number (struct symbolic_number *n, tree src)
{
  int size;

  if (!INTEGRAL_TYPE_P (TREE_TYPE (src)) && !POINTER_TYPE_P (TREE_TYPE (src)))
    return false;

  n->base_addr = n->offset = n->alias_set = n->vuse = NULL_TREE;
  n->src = src;

  /* Set up the symbolic number N by setting each byte to a value between 1 and
     the byte size of rhs1.  The highest order byte is set to n->size and the
     lowest order byte to 1.  */
  n->type = TREE_TYPE (src);
  size = TYPE_PRECISION (n->type);
  if (size % BITS_PER_UNIT != 0)
    return false;
  size /= BITS_PER_UNIT;
  if (size > 64 / BITS_PER_MARKER)
    return false;
  n->range = size;
  n->n = CMPNOP;
  n->n_ops = 1;

  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;

  return true;
}

/* Check if STMT might be a byte swap or a nop from a memory source and
   return the answer.  If so, REF is that memory source and the base of the
   memory area accessed and the offset of the access from that base are
   recorded in N.  */

bool
find_bswap_or_nop_load (gimple *stmt, tree ref, struct symbolic_number *n)
{
  /* Leaf node is an array or component ref.  Memorize its base and
     offset from base to compare to other such leaf node.  */
  poly_int64 bitsize, bitpos, bytepos;
  machine_mode mode;
  int unsignedp, reversep, volatilep;
  tree offset, base_addr;

  /* Not prepared to handle PDP endian.  */
  if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
    return false;

  if (!gimple_assign_load_p (stmt) || gimple_has_volatile_ops (stmt))
    return false;

  base_addr = get_inner_reference (ref, &bitsize, &bitpos, &offset, &mode,
				   &unsignedp, &reversep, &volatilep);

  if (TREE_CODE (base_addr) == TARGET_MEM_REF)
    /* Do not rewrite TARGET_MEM_REF.  */
    return false;
  else if (TREE_CODE (base_addr) == MEM_REF)
    {
      poly_offset_int bit_offset = 0;
      tree off = TREE_OPERAND (base_addr, 1);

      if (!integer_zerop (off))
	{
	  poly_offset_int boff = mem_ref_offset (base_addr);
	  boff <<= LOG2_BITS_PER_UNIT;
	  bit_offset += boff;
	}

      base_addr = TREE_OPERAND (base_addr, 0);

      /* Avoid returning a negative bitpos as this may wreak havoc later.  */
      if (maybe_lt (bit_offset, 0))
	{
	  tree byte_offset = wide_int_to_tree
	    (sizetype, bits_to_bytes_round_down (bit_offset));
	  bit_offset = num_trailing_bits (bit_offset);
	  if (offset)
	    offset = size_binop (PLUS_EXPR, offset, byte_offset);
	  else
	    offset = byte_offset;
	}

      bitpos += bit_offset.force_shwi ();
    }
  else
    base_addr = build_fold_addr_expr (base_addr);

  if (!multiple_p (bitpos, BITS_PER_UNIT, &bytepos))
    return false;
  if (!multiple_p (bitsize, BITS_PER_UNIT))
    return false;
  if (reversep)
    return false;

  if (!init_symbolic_number (n, ref))
    return false;
  n->base_addr = base_addr;
  n->offset = offset;
  n->bytepos = bytepos;
  n->alias_set = reference_alias_ptr_type (ref);
  n->vuse = gimple_vuse (stmt);
  return true;
}

/* Compute the symbolic number N representing the result of a bitwise OR,
   bitwise XOR or plus on 2 symbolic numbers N1 and N2 whose source statements
   are respectively SOURCE_STMT1 and SOURCE_STMT2.  CODE is the operation.  */

gimple *
perform_symbolic_merge (gimple *source_stmt1, struct symbolic_number *n1,
			gimple *source_stmt2, struct symbolic_number *n2,
			struct symbolic_number *n, enum tree_code code)
{
  int i, size;
  uint64_t mask;
  gimple *source_stmt;
  struct symbolic_number *n_start;

  tree rhs1 = gimple_assign_rhs1 (source_stmt1);
  if (TREE_CODE (rhs1) == BIT_FIELD_REF
      && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
    rhs1 = TREE_OPERAND (rhs1, 0);
  tree rhs2 = gimple_assign_rhs1 (source_stmt2);
  if (TREE_CODE (rhs2) == BIT_FIELD_REF
      && TREE_CODE (TREE_OPERAND (rhs2, 0)) == SSA_NAME)
    rhs2 = TREE_OPERAND (rhs2, 0);

  /* Sources are different, cancel bswap if they are not memory locations
     with the same base (array, structure, ...).  */
  if (rhs1 != rhs2)
    {
      uint64_t inc;
      HOST_WIDE_INT start1, start2, start_sub, end_sub, end1, end2, end;
      struct symbolic_number *toinc_n_ptr, *n_end;
      basic_block bb1, bb2;

      if (!n1->base_addr || !n2->base_addr
	  || !operand_equal_p (n1->base_addr, n2->base_addr, 0))
	return NULL;

      if (!n1->offset != !n2->offset
	  || (n1->offset && !operand_equal_p (n1->offset, n2->offset, 0)))
	return NULL;

      start1 = 0;
      if (!(n2->bytepos - n1->bytepos).is_constant (&start2))
	return NULL;

      if (start1 < start2)
	{
	  n_start = n1;
	  start_sub = start2 - start1;
	}
      else
	{
	  n_start = n2;
	  start_sub = start1 - start2;
	}

      bb1 = gimple_bb (source_stmt1);
      bb2 = gimple_bb (source_stmt2);
      if (dominated_by_p (CDI_DOMINATORS, bb1, bb2))
	source_stmt = source_stmt1;
      else
	source_stmt = source_stmt2;

      /* Find the highest address at which a load is performed and
	 compute related info.  */
      end1 = start1 + (n1->range - 1);
      end2 = start2 + (n2->range - 1);
      if (end1 < end2)
	{
	  end = end2;
	  end_sub = end2 - end1;
	}
      else
	{
	  end = end1;
	  end_sub = end1 - end2;
	}
      n_end = (end2 > end1) ? n2 : n1;

      /* Find symbolic number whose lsb is the most significant.  */
      if (BYTES_BIG_ENDIAN)
	toinc_n_ptr = (n_end == n1) ? n2 : n1;
      else
	toinc_n_ptr = (n_start == n1) ? n2 : n1;

      n->range = end - MIN (start1, start2) + 1;

      /* Check that the range of memory covered can be represented by
	 a symbolic number.  */
      if (n->range > 64 / BITS_PER_MARKER)
	return NULL;

      /* Reinterpret byte marks in symbolic number holding the value of
	 bigger weight according to target endianness.  */
      inc = BYTES_BIG_ENDIAN ? end_sub : start_sub;
      size = TYPE_PRECISION (n1->type) / BITS_PER_UNIT;
      for (i = 0; i < size; i++, inc <<= BITS_PER_MARKER)
	{
	  unsigned marker
	    = (toinc_n_ptr->n >> (i * BITS_PER_MARKER)) & MARKER_MASK;
	  if (marker && marker != MARKER_BYTE_UNKNOWN)
	    toinc_n_ptr->n += inc;
	}
    }
  else
    {
      n->range = n1->range;
      n_start = n1;
      source_stmt = source_stmt1;
    }

  if (!n1->alias_set
      || alias_ptr_types_compatible_p (n1->alias_set, n2->alias_set))
    n->alias_set = n1->alias_set;
  else
    n->alias_set = ptr_type_node;
  n->vuse = n_start->vuse;
  n->base_addr = n_start->base_addr;
  n->offset = n_start->offset;
  n->src = n_start->src;
  n->bytepos = n_start->bytepos;
  n->type = n_start->type;
  size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
  uint64_t res_n = n1->n | n2->n;

  for (i = 0, mask = MARKER_MASK; i < size; i++, mask <<= BITS_PER_MARKER)
    {
      uint64_t masked1, masked2;

      masked1 = n1->n & mask;
      masked2 = n2->n & mask;
      /* If at least one byte is 0, all of 0 | x == 0 ^ x == 0 + x == x.  */
      if (masked1 && masked2)
	{
	  /* + can carry into upper bits, just punt.  */
	  if (code == PLUS_EXPR)
	    return NULL;
	  /* x | x is still x.  */
	  if (code == BIT_IOR_EXPR && masked1 == masked2)
	    continue;
	  if (code == BIT_XOR_EXPR)
	    {
	      /* x ^ x is 0, but MARKER_BYTE_UNKNOWN stands for
		 unknown values and unknown ^ unknown is unknown.  */
	      if (masked1 == masked2
		  && masked1 != ((uint64_t) MARKER_BYTE_UNKNOWN
				 << i * BITS_PER_MARKER))
		{
		  res_n &= ~mask;
		  continue;
		}
	    }
	  /* Otherwise set the byte to unknown, it might still be
	     later masked off.  */
	  res_n |= mask;
	}
    }
  n->n = res_n;
  n->n_ops = n1->n_ops + n2->n_ops;

  return source_stmt;
}
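
/* Worked example of the merge (illustrative): for little-endian code like
   x = b[0] | (b[1] << 8) with unsigned char b[], the recursion in
   find_bswap_or_nop_1 hands us N1 == 0x01 at bytepos 0 and N2 == 0x0100 at
   bytepos 1 (the shift has already moved N2's marker up one slot).  N2 is
   the symbolic number to adjust; its marker is incremented by the bytepos
   difference to give 0x0200, and the OR of the two yields N == 0x0201 over
   a 2-byte range: result bytes 1 and 2 come from source bytes 1 and 2 in
   order, i.e. a plain little-endian load.  */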

/* find_bswap_or_nop_1 invokes itself recursively with N and tries to perform
   the operation given by the rhs of STMT on the result.  If the operation
   could successfully be executed the function returns a gimple stmt whose
   rhs's first tree is the expression of the source operand and NULL
   otherwise.  */

gimple *
find_bswap_or_nop_1 (gimple *stmt, struct symbolic_number *n, int limit)
{
  enum tree_code code;
  tree rhs1, rhs2 = NULL;
  gimple *rhs1_stmt, *rhs2_stmt, *source_stmt1;
  enum gimple_rhs_class rhs_class;

  if (!limit || !is_gimple_assign (stmt))
    return NULL;

  rhs1 = gimple_assign_rhs1 (stmt);

  if (find_bswap_or_nop_load (stmt, rhs1, n))
    return stmt;

  /* Handle BIT_FIELD_REF.  */
  if (TREE_CODE (rhs1) == BIT_FIELD_REF
      && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
    {
      if (!tree_fits_uhwi_p (TREE_OPERAND (rhs1, 1))
	  || !tree_fits_uhwi_p (TREE_OPERAND (rhs1, 2)))
	return NULL;

      unsigned HOST_WIDE_INT bitsize = tree_to_uhwi (TREE_OPERAND (rhs1, 1));
      unsigned HOST_WIDE_INT bitpos = tree_to_uhwi (TREE_OPERAND (rhs1, 2));
      if (bitpos % BITS_PER_UNIT == 0
	  && bitsize % BITS_PER_UNIT == 0
	  && init_symbolic_number (n, TREE_OPERAND (rhs1, 0)))
	{
	  /* Handle big-endian bit numbering in BIT_FIELD_REF.  */
	  if (BYTES_BIG_ENDIAN)
	    bitpos = TYPE_PRECISION (n->type) - bitpos - bitsize;

	  /* Shift.  */
	  if (!do_shift_rotate (RSHIFT_EXPR, n, bitpos))
	    return NULL;

	  /* Mask.  */
	  uint64_t mask = 0;
	  uint64_t tmp = (1 << BITS_PER_UNIT) - 1;
	  for (unsigned i = 0; i < bitsize / BITS_PER_UNIT;
	       i++, tmp <<= BITS_PER_UNIT)
	    mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);
	  n->n &= mask;

	  /* Convert.  */
	  n->type = TREE_TYPE (rhs1);
	  if (!verify_symbolic_number_p (n, stmt))
	    return NULL;

	  if (!n->base_addr)
	    n->range = TYPE_PRECISION (n->type) / BITS_PER_UNIT;

	  return stmt;
	}

      return NULL;
    }

  if (TREE_CODE (rhs1) != SSA_NAME)
    return NULL;

  code = gimple_assign_rhs_code (stmt);
  rhs_class = gimple_assign_rhs_class (stmt);
  rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);

  if (rhs_class == GIMPLE_BINARY_RHS)
    rhs2 = gimple_assign_rhs2 (stmt);

  /* Handle unary rhs and binary rhs with integer constants as second
     operand.  */

  if (rhs_class == GIMPLE_UNARY_RHS
      || (rhs_class == GIMPLE_BINARY_RHS
	  && TREE_CODE (rhs2) == INTEGER_CST))
    {
      if (code != BIT_AND_EXPR
	  && code != LSHIFT_EXPR
	  && code != RSHIFT_EXPR
	  && code != LROTATE_EXPR
	  && code != RROTATE_EXPR
	  && !CONVERT_EXPR_CODE_P (code))
	return NULL;

      source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, n, limit - 1);

      /* If find_bswap_or_nop_1 returned NULL, STMT is a leaf node and
	 we have to initialize the symbolic number.  */
      if (!source_stmt1)
	{
	  if (gimple_assign_load_p (stmt)
	      || !init_symbolic_number (n, rhs1))
	    return NULL;
	  source_stmt1 = stmt;
	}

      switch (code)
	{
	case BIT_AND_EXPR:
	  {
	    int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
	    uint64_t val = int_cst_value (rhs2), mask = 0;
	    uint64_t tmp = (1 << BITS_PER_UNIT) - 1;

	    /* Only constants masking full bytes are allowed.  */
	    for (i = 0; i < size; i++, tmp <<= BITS_PER_UNIT)
	      if ((val & tmp) != 0 && (val & tmp) != tmp)
		return NULL;
	      else if (val & tmp)
		mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);

	    n->n &= mask;
	  }
	  break;
	case LSHIFT_EXPR:
	case RSHIFT_EXPR:
	case LROTATE_EXPR:
	case RROTATE_EXPR:
	  if (!do_shift_rotate (code, n, (int) TREE_INT_CST_LOW (rhs2)))
	    return NULL;
	  break;
	CASE_CONVERT:
	  {
	    int i, type_size, old_type_size;
	    tree type;

	    type = TREE_TYPE (gimple_assign_lhs (stmt));
	    type_size = TYPE_PRECISION (type);
	    if (type_size % BITS_PER_UNIT != 0)
	      return NULL;
	    type_size /= BITS_PER_UNIT;
	    if (type_size > 64 / BITS_PER_MARKER)
	      return NULL;

	    /* Sign extension: result is dependent on the value.  */
	    old_type_size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
	    if (!TYPE_UNSIGNED (n->type) && type_size > old_type_size
		&& HEAD_MARKER (n->n, old_type_size))
	      for (i = 0; i < type_size - old_type_size; i++)
		n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
			<< ((type_size - 1 - i) * BITS_PER_MARKER);

	    if (type_size < 64 / BITS_PER_MARKER)
	      {
		/* If STMT casts to a smaller type mask out the bits not
		   belonging to the target type.  */
		n->n &= ((uint64_t) 1 << (type_size * BITS_PER_MARKER)) - 1;
	      }
	    n->type = type;
	    if (!n->base_addr)
	      n->range = type_size;
	  }
	  break;
	default:
	  return NULL;
	}
      return verify_symbolic_number_p (n, stmt) ? source_stmt1 : NULL;
    }

  /* Handle binary rhs.  */

  if (rhs_class == GIMPLE_BINARY_RHS)
    {
      struct symbolic_number n1, n2;
      gimple *source_stmt, *source_stmt2;

      if (!rhs2 || TREE_CODE (rhs2) != SSA_NAME)
	return NULL;

      rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);

      switch (code)
	{
	case BIT_IOR_EXPR:
	case BIT_XOR_EXPR:
	case PLUS_EXPR:
	  source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, &n1, limit - 1);

	  if (!source_stmt1)
	    return NULL;

	  source_stmt2 = find_bswap_or_nop_1 (rhs2_stmt, &n2, limit - 1);

	  if (!source_stmt2)
	    return NULL;

	  if (TYPE_PRECISION (n1.type) != TYPE_PRECISION (n2.type))
	    return NULL;

	  if (n1.vuse != n2.vuse)
	    return NULL;

	  source_stmt
	    = perform_symbolic_merge (source_stmt1, &n1, source_stmt2, &n2, n,
				      code);

	  if (!source_stmt)
	    return NULL;

	  if (!verify_symbolic_number_p (n, stmt))
	    return NULL;

	  break;
	default:
	  return NULL;
	}
      return source_stmt;
    }
  return NULL;
}

/* Helper for find_bswap_or_nop and try_coalesce_bswap to compute
   *CMPXCHG, *CMPNOP and adjust *N.  */

void
find_bswap_or_nop_finalize (struct symbolic_number *n, uint64_t *cmpxchg,
			    uint64_t *cmpnop, bool *cast64_to_32)
{
  unsigned rsize;
  uint64_t tmpn, mask;

  /* The number which the find_bswap_or_nop_1 result should match in order
     to have a full byte swap.  The number is shifted to the right
     according to the size of the symbolic number before using it.  */
  *cmpxchg = CMPXCHG;
  *cmpnop = CMPNOP;
  *cast64_to_32 = false;

  /* Find real size of result (highest non-zero byte).  */
  if (n->base_addr)
    for (tmpn = n->n, rsize = 0; tmpn; tmpn >>= BITS_PER_MARKER, rsize++);
  else
    rsize = n->range;

  /* Zero out the bits corresponding to untouched bytes in original gimple
     expression.  */
  if (n->range < (int) sizeof (int64_t))
    {
      mask = ((uint64_t) 1 << (n->range * BITS_PER_MARKER)) - 1;
      if (n->base_addr == NULL
	  && n->range == 4
	  && int_size_in_bytes (TREE_TYPE (n->src)) == 8)
	{
	  /* If all bytes in n->n are either 0 or in [5..8] range, this
	     might be a candidate for (unsigned) __builtin_bswap64 (src).
	     It is not worth it for (unsigned short) __builtin_bswap64 (src)
	     or (unsigned short) __builtin_bswap32 (src).  */
	  *cast64_to_32 = true;
	  for (tmpn = n->n; tmpn; tmpn >>= BITS_PER_MARKER)
	    if ((tmpn & MARKER_MASK)
		&& ((tmpn & MARKER_MASK) <= 4 || (tmpn & MARKER_MASK) > 8))
	      {
		*cast64_to_32 = false;
		break;
	      }
	}
      if (*cast64_to_32)
	*cmpxchg &= mask;
      else
	*cmpxchg >>= (64 / BITS_PER_MARKER - n->range) * BITS_PER_MARKER;
      *cmpnop &= mask;
    }

  /* Zero out the bits corresponding to unused bytes in the result of the
     gimple expression.  */
  if (rsize < n->range)
    {
      if (BYTES_BIG_ENDIAN)
	{
	  mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
	  *cmpxchg &= mask;
	  if (n->range - rsize == sizeof (int64_t))
	    *cmpnop = 0;
	  else
	    *cmpnop >>= (n->range - rsize) * BITS_PER_MARKER;
	}
      else
	{
	  mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
	  if (n->range - rsize == sizeof (int64_t))
	    *cmpxchg = 0;
	  else
	    *cmpxchg >>= (n->range - rsize) * BITS_PER_MARKER;
	  *cmpnop &= mask;
	}
      n->range = rsize;
    }

  if (*cast64_to_32)
    n->range = 8;
  n->range *= BITS_PER_UNIT;
}
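
/* Illustration of the cast64_to_32 case above (values are examples only):
   for uint32_t x = (uint32_t) __builtin_bswap64 (y) written out by hand,
   the symbolic number has range 4 but all non-zero markers in [5..8], e.g.
   N == 0x05060708, which equals CMPXCHG & 0xffffffff: the low four result
   bytes are source bytes 8..5, exactly the low half of a 64-bit byte
   swap.  */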

/* Helper function for find_bswap_or_nop.  Return true if N is a swap
   or nop with MASK.  */

static bool
is_bswap_or_nop_p (uint64_t n, uint64_t cmpxchg,
		   uint64_t cmpnop, uint64_t *mask,
		   bool *bswap)
{
  *mask = ~(uint64_t) 0;
  if (n == cmpnop)
    *bswap = false;
  else if (n == cmpxchg)
    *bswap = true;
  else
    {
      int set = 0;
      for (uint64_t msk = MARKER_MASK; msk; msk <<= BITS_PER_MARKER)
	if ((n & msk) == 0)
	  *mask &= ~msk;
	else if ((n & msk) == (cmpxchg & msk))
	  set++;
	else
	  return false;

      if (set < 2)
	return false;
      *bswap = true;
    }
  return true;
}
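
/* For example (illustrative): with 32-bit CMPXCHG == 0x01020304, a symbolic
   number N == 0x01020300 (lowest result byte known to be zero, the other
   three matching the byte-swapped order) is accepted with *BSWAP = true and
   *MASK cleared in the low byte, so the eventual replacement becomes
   __builtin_bswap32 (src) & 0xffffff00.  */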

/* Check if STMT completes a bswap implementation or a read in a given
   endianness consisting of ORs, SHIFTs and ANDs and sets *BSWAP
   accordingly.  It also sets N to represent the kind of operations
   performed: size of the resulting expression and whether it works on
   a memory source, and if so alias-set and vuse.  Finally, the
   function returns a stmt whose rhs's first tree is the source
   expression.  */

gimple *
find_bswap_or_nop (gimple *stmt, struct symbolic_number *n, bool *bswap,
		   bool *cast64_to_32, uint64_t *mask, uint64_t *l_rotate)
{
  tree type_size = TYPE_SIZE_UNIT (TREE_TYPE (gimple_get_lhs (stmt)));
  if (!tree_fits_uhwi_p (type_size))
    return NULL;

  /* The last parameter determines the depth search limit.  It usually
     correlates directly to the number n of bytes to be touched.  We
     increase that number by 2 * (log2(n) + 1) here in order to also
     cover signed -> unsigned conversions of the src operand as can be seen
     in libgcc, and for initial shift/and operation of the src operand.  */
  int limit = tree_to_uhwi (type_size);
  limit += 2 * (1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT) limit));
  gimple *ins_stmt = find_bswap_or_nop_1 (stmt, n, limit);

  if (!ins_stmt)
    {
      if (gimple_assign_rhs_code (stmt) != CONSTRUCTOR
	  || BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
	return NULL;
      unsigned HOST_WIDE_INT sz = tree_to_uhwi (type_size) * BITS_PER_UNIT;
      if (sz != 16 && sz != 32 && sz != 64)
	return NULL;
      tree rhs = gimple_assign_rhs1 (stmt);
      if (CONSTRUCTOR_NELTS (rhs) == 0)
	return NULL;
      tree eltype = TREE_TYPE (TREE_TYPE (rhs));
      unsigned HOST_WIDE_INT eltsz
	= int_size_in_bytes (eltype) * BITS_PER_UNIT;
      if (TYPE_PRECISION (eltype) != eltsz)
	return NULL;
      constructor_elt *elt;
      unsigned int i;
      tree type = build_nonstandard_integer_type (sz, 1);
      FOR_EACH_VEC_SAFE_ELT (CONSTRUCTOR_ELTS (rhs), i, elt)
	{
	  if (TREE_CODE (elt->value) != SSA_NAME
	      || !INTEGRAL_TYPE_P (TREE_TYPE (elt->value)))
	    return NULL;
	  struct symbolic_number n1;
	  gimple *source_stmt
	    = find_bswap_or_nop_1 (SSA_NAME_DEF_STMT (elt->value), &n1,
				   limit - 1);

	  if (!source_stmt)
	    return NULL;

	  n1.type = type;
	  if (!n1.base_addr)
	    n1.range = sz / BITS_PER_UNIT;

	  if (i == 0)
	    {
	      ins_stmt = source_stmt;
	      *n = n1;
	    }
	  else
	    {
	      if (n->vuse != n1.vuse)
		return NULL;

	      struct symbolic_number n0 = *n;

	      if (!BYTES_BIG_ENDIAN)
		{
		  if (!do_shift_rotate (LSHIFT_EXPR, &n1, i * eltsz))
		    return NULL;
		}
	      else if (!do_shift_rotate (LSHIFT_EXPR, &n0, eltsz))
		return NULL;
	      ins_stmt
		= perform_symbolic_merge (ins_stmt, &n0, source_stmt, &n1, n,
					  BIT_IOR_EXPR);

	      if (!ins_stmt)
		return NULL;
	    }
	}
    }

  uint64_t cmpxchg, cmpnop;
  uint64_t orig_range = n->range * BITS_PER_UNIT;
  find_bswap_or_nop_finalize (n, &cmpxchg, &cmpnop, cast64_to_32);

  /* A complete byte swap should make the symbolic number start with
     the largest digit in the highest order byte.  Unchanged symbolic
     number indicates a read with same endianness as target architecture.  */
  *l_rotate = 0;
  uint64_t tmp_n = n->n;
  if (!is_bswap_or_nop_p (tmp_n, cmpxchg, cmpnop, mask, bswap))
    {
      /* Try bswap + lrotate.  */
      /* TODO: handle cast64_to_32 and big/little endian memory
	 source when rsize < range.  */
      if (n->range == orig_range
	  /* There are cases like 0x300000200 for a uint32->uint64 cast;
	     don't handle those.  */
	  && n->range == TYPE_PRECISION (n->type)
	  && ((orig_range == 32
	       && optab_handler (rotl_optab, SImode) != CODE_FOR_nothing)
	      || (orig_range == 64
		  && optab_handler (rotl_optab, DImode) != CODE_FOR_nothing))
	  && (tmp_n & MARKER_MASK) < orig_range / BITS_PER_UNIT)
	{
	  uint64_t range = (orig_range / BITS_PER_UNIT) * BITS_PER_MARKER;
	  uint64_t count = (tmp_n & MARKER_MASK) * BITS_PER_MARKER;
	  /* I.e. handle 0x0203040506070800 when the lower byte is zero.  */
	  if (!count)
	    {
	      for (uint64_t i = 1; i != range / BITS_PER_MARKER; i++)
		{
		  count = (tmp_n >> i * BITS_PER_MARKER) & MARKER_MASK;
		  if (count)
		    {
		      /* Count should be meaningful, not 0xff.  */
		      if (count <= range / BITS_PER_MARKER)
			{
			  count = (count + i) * BITS_PER_MARKER % range;
			  break;
			}
		      else
			return NULL;
		    }
		}
	    }
	  tmp_n = tmp_n >> count | tmp_n << (range - count);
	  if (orig_range == 32)
	    tmp_n &= (1ULL << 32) - 1;
	  if (!is_bswap_or_nop_p (tmp_n, cmpxchg, cmpnop, mask, bswap))
	    return NULL;
	  *l_rotate = count / BITS_PER_MARKER * BITS_PER_UNIT;
	  gcc_assert (*bswap);
	}
      else
	return NULL;
    }

  /* Useless bit manipulation performed by code.  */
  if (!n->base_addr && n->n == cmpnop && n->n_ops == 1)
    return NULL;

  return ins_stmt;
}

const pass_data pass_data_optimize_bswap =
{
  GIMPLE_PASS, /* type */
  "bswap", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_optimize_bswap : public gimple_opt_pass
{
public:
  pass_optimize_bswap (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_optimize_bswap, ctxt)
  {}

  /* opt_pass methods: */
  bool gate (function *) final override
    {
      return flag_expensive_optimizations && optimize && BITS_PER_UNIT == 8;
    }

  unsigned int execute (function *) final override;

}; // class pass_optimize_bswap

/* Helper function for bswap_replace.  Build VIEW_CONVERT_EXPR from
   VAL to TYPE.  If VAL has a different type size, emit a NOP_EXPR cast
   first.  */

static tree
bswap_view_convert (gimple_stmt_iterator *gsi, tree type, tree val,
		    bool before)
{
  gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (val))
	      || POINTER_TYPE_P (TREE_TYPE (val)));
  if (TYPE_SIZE (type) != TYPE_SIZE (TREE_TYPE (val)))
    {
      HOST_WIDE_INT prec = TREE_INT_CST_LOW (TYPE_SIZE (type));
      if (POINTER_TYPE_P (TREE_TYPE (val)))
	{
	  gimple *g
	    = gimple_build_assign (make_ssa_name (pointer_sized_int_node),
				   NOP_EXPR, val);
	  if (before)
	    gsi_insert_before (gsi, g, GSI_SAME_STMT);
	  else
	    gsi_insert_after (gsi, g, GSI_NEW_STMT);
	  val = gimple_assign_lhs (g);
	}
      tree itype = build_nonstandard_integer_type (prec, 1);
      gimple *g = gimple_build_assign (make_ssa_name (itype), NOP_EXPR, val);
      if (before)
	gsi_insert_before (gsi, g, GSI_SAME_STMT);
      else
	gsi_insert_after (gsi, g, GSI_NEW_STMT);
      val = gimple_assign_lhs (g);
    }
  return build1 (VIEW_CONVERT_EXPR, type, val);
}

/* Perform the bswap optimization: replace the expression computed in the rhs
   of gsi_stmt (GSI) (or if NULL add instead of replace) by an equivalent
   bswap, load or load + bswap expression.
   Which of these alternatives replaces the rhs is given by N->base_addr (non
   null if a load is needed) and BSWAP.  The type, VUSE and alias set of the
   load to perform are also given in N while the bswap builtin to invoke is
   given in FNDECL.  Finally, if a load is involved, INS_STMT refers to one
   of the load statements involved to construct the rhs in gsi_stmt (GSI) and
   N->range gives the size of the rhs expression for maintaining some
   statistics.

   Note that if the replacement involves a load and if gsi_stmt (GSI) is
   non-NULL, that stmt is moved just after INS_STMT to do the load with the
   same VUSE which can lead to gsi_stmt (GSI) changing basic block.  */

tree
bswap_replace (gimple_stmt_iterator gsi, gimple *ins_stmt, tree fndecl,
	       tree bswap_type, tree load_type, struct symbolic_number *n,
	       bool bswap, uint64_t mask, uint64_t l_rotate)
{
  tree src, tmp, tgt = NULL_TREE;
  gimple *bswap_stmt, *mask_stmt = NULL, *rotl_stmt = NULL;
  tree_code conv_code = NOP_EXPR;

  gimple *cur_stmt = gsi_stmt (gsi);
  src = n->src;
  if (cur_stmt)
    {
      tgt = gimple_assign_lhs (cur_stmt);
      if (gimple_assign_rhs_code (cur_stmt) == CONSTRUCTOR
	  && tgt
	  && VECTOR_TYPE_P (TREE_TYPE (tgt)))
	conv_code = VIEW_CONVERT_EXPR;
    }

  /* Need to load the value from memory first.  */
  if (n->base_addr)
    {
      gimple_stmt_iterator gsi_ins = gsi;
      if (ins_stmt)
	gsi_ins = gsi_for_stmt (ins_stmt);
      tree addr_expr, addr_tmp, val_expr, val_tmp;
      tree load_offset_ptr, aligned_load_type;
      gimple *load_stmt;
      unsigned align = get_object_alignment (src);
      poly_int64 load_offset = 0;

      if (cur_stmt)
	{
	  basic_block ins_bb = gimple_bb (ins_stmt);
	  basic_block cur_bb = gimple_bb (cur_stmt);
	  if (!dominated_by_p (CDI_DOMINATORS, cur_bb, ins_bb))
	    return NULL_TREE;

	  /* Move cur_stmt just before one of the loads of the original
	     to ensure it has the same VUSE.  See PR61517 for what could
	     go wrong.  */
	  if (gimple_bb (cur_stmt) != gimple_bb (ins_stmt))
	    reset_flow_sensitive_info (gimple_assign_lhs (cur_stmt));
	  gsi_move_before (&gsi, &gsi_ins);
	  gsi = gsi_for_stmt (cur_stmt);
	}
      else
	gsi = gsi_ins;

      /* Compute address to load from and cast according to the size
	 of the load.  */
      addr_expr = build_fold_addr_expr (src);
      if (is_gimple_mem_ref_addr (addr_expr))
	addr_tmp = unshare_expr (addr_expr);
      else
	{
	  addr_tmp = unshare_expr (n->base_addr);
	  if (!is_gimple_mem_ref_addr (addr_tmp))
	    addr_tmp = force_gimple_operand_gsi_1 (&gsi, addr_tmp,
						   is_gimple_mem_ref_addr,
						   NULL_TREE, true,
						   GSI_SAME_STMT);
	  load_offset = n->bytepos;
	  if (n->offset)
	    {
	      tree off
		= force_gimple_operand_gsi (&gsi, unshare_expr (n->offset),
					    true, NULL_TREE, true,
					    GSI_SAME_STMT);
	      gimple *stmt
		= gimple_build_assign (make_ssa_name (TREE_TYPE (addr_tmp)),
				       POINTER_PLUS_EXPR, addr_tmp, off);
	      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
	      addr_tmp = gimple_assign_lhs (stmt);
	    }
	}

      /* Perform the load.  */
      aligned_load_type = load_type;
      if (align < TYPE_ALIGN (load_type))
	aligned_load_type = build_aligned_type (load_type, align);
      load_offset_ptr = build_int_cst (n->alias_set, load_offset);
      val_expr = fold_build2 (MEM_REF, aligned_load_type, addr_tmp,
			      load_offset_ptr);

      if (!bswap)
	{
	  if (n->range == 16)
	    nop_stats.found_16bit++;
	  else if (n->range == 32)
	    nop_stats.found_32bit++;
	  else
	    {
	      gcc_assert (n->range == 64);
	      nop_stats.found_64bit++;
	    }

	  /* Convert the result of load if necessary.  */
	  if (tgt && !useless_type_conversion_p (TREE_TYPE (tgt), load_type))
	    {
	      val_tmp = make_temp_ssa_name (aligned_load_type, NULL,
					    "load_dst");
	      load_stmt = gimple_build_assign (val_tmp, val_expr);
	      gimple_set_vuse (load_stmt, n->vuse);
	      gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
	      if (conv_code == VIEW_CONVERT_EXPR)
		val_tmp = bswap_view_convert (&gsi, TREE_TYPE (tgt), val_tmp,
					      true);
	      gimple_assign_set_rhs_with_ops (&gsi, conv_code, val_tmp);
	      update_stmt (cur_stmt);
	    }
	  else if (cur_stmt)
	    {
	      gimple_assign_set_rhs_with_ops (&gsi, MEM_REF, val_expr);
	      gimple_set_vuse (cur_stmt, n->vuse);
	      update_stmt (cur_stmt);
	    }
	  else
	    {
	      tgt = make_ssa_name (load_type);
	      cur_stmt = gimple_build_assign (tgt, MEM_REF, val_expr);
	      gimple_set_vuse (cur_stmt, n->vuse);
	      gsi_insert_before (&gsi, cur_stmt, GSI_SAME_STMT);
	    }

	  if (dump_file)
	    {
	      fprintf (dump_file,
		       "%d bit load in target endianness found at: ",
		       (int) n->range);
	      print_gimple_stmt (dump_file, cur_stmt, 0);
	    }
	  return tgt;
	}
      else
	{
	  val_tmp = make_temp_ssa_name (aligned_load_type, NULL, "load_dst");
	  load_stmt = gimple_build_assign (val_tmp, val_expr);
	  gimple_set_vuse (load_stmt, n->vuse);
	  gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
	}
      src = val_tmp;
    }
  else if (!bswap)
    {
      gimple *g = NULL;
      if (tgt && !useless_type_conversion_p (TREE_TYPE (tgt), TREE_TYPE (src)))
	{
	  if (!is_gimple_val (src))
	    return NULL_TREE;
	  if (conv_code == VIEW_CONVERT_EXPR)
	    src = bswap_view_convert (&gsi, TREE_TYPE (tgt), src, true);
	  g = gimple_build_assign (tgt, conv_code, src);
	}
      else if (cur_stmt)
	g = gimple_build_assign (tgt, src);
      else
	tgt = src;
      if (n->range == 16)
	nop_stats.found_16bit++;
      else if (n->range == 32)
	nop_stats.found_32bit++;
      else
	{
	  gcc_assert (n->range == 64);
	  nop_stats.found_64bit++;
	}
      if (dump_file)
	{
	  fprintf (dump_file,
		   "%d bit reshuffle in target endianness found at: ",
		   (int) n->range);
	  if (cur_stmt)
	    print_gimple_stmt (dump_file, cur_stmt, 0);
	  else
	    {
	      print_generic_expr (dump_file, tgt, TDF_NONE);
	      fprintf (dump_file, "\n");
	    }
	}
      if (cur_stmt)
	gsi_replace (&gsi, g, true);
      return tgt;
    }
  else if (TREE_CODE (src) == BIT_FIELD_REF)
    src = TREE_OPERAND (src, 0);

  if (n->range == 16)
    bswap_stats.found_16bit++;
  else if (n->range == 32)
    bswap_stats.found_32bit++;
  else
    {
      gcc_assert (n->range == 64);
      bswap_stats.found_64bit++;
    }

  tmp = src;

  /* Convert the src expression if necessary.  */
  if (!useless_type_conversion_p (TREE_TYPE (tmp), bswap_type))
    {
      gimple *convert_stmt;

      tmp = make_temp_ssa_name (bswap_type, NULL, "bswapsrc");
      convert_stmt = gimple_build_assign (tmp, NOP_EXPR, src);
      gsi_insert_before (&gsi, convert_stmt, GSI_SAME_STMT);
    }

  /* Canonical form for 16 bit bswap is a rotate expression.  Only 16bit values
     are considered as rotation of 2N bit values by N bits is generally not
     equivalent to a bswap.  Consider for instance 0x01020304 r>> 16 which
     gives 0x03040102 while a bswap for that value is 0x04030201.  */
  if (bswap && n->range == 16)
    {
      tree count = build_int_cst (NULL, BITS_PER_UNIT);
      src = fold_build2 (LROTATE_EXPR, bswap_type, tmp, count);
      bswap_stmt = gimple_build_assign (NULL, src);
    }
  else
    bswap_stmt = gimple_build_call (fndecl, 1, tmp);

  if (tgt == NULL_TREE)
    tgt = make_ssa_name (bswap_type);
  tmp = tgt;

  if (mask != ~(uint64_t) 0)
    {
      tree m = build_int_cst (bswap_type, mask);
      tmp = make_temp_ssa_name (bswap_type, NULL, "bswapdst");
      gimple_set_lhs (bswap_stmt, tmp);
      mask_stmt = gimple_build_assign (tgt, BIT_AND_EXPR, tmp, m);
      tmp = tgt;
    }

  if (l_rotate)
    {
      tree m = build_int_cst (bswap_type, l_rotate);
      tmp = make_temp_ssa_name (bswap_type, NULL,
				mask_stmt ? "bswapmaskdst" : "bswapdst");
      gimple_set_lhs (mask_stmt ? mask_stmt : bswap_stmt, tmp);
      rotl_stmt = gimple_build_assign (tgt, LROTATE_EXPR, tmp, m);
      tmp = tgt;
    }

  /* Convert the result if necessary.  */
  if (!useless_type_conversion_p (TREE_TYPE (tgt), bswap_type))
    {
      tmp = make_temp_ssa_name (bswap_type, NULL, "bswapdst");
      tree atmp = tmp;
      gimple_stmt_iterator gsi2 = gsi;
      if (conv_code == VIEW_CONVERT_EXPR)
	atmp = bswap_view_convert (&gsi2, TREE_TYPE (tgt), tmp, false);
      gimple *convert_stmt = gimple_build_assign (tgt, conv_code, atmp);
      gsi_insert_after (&gsi2, convert_stmt, GSI_SAME_STMT);
    }

  gimple_set_lhs (rotl_stmt ? rotl_stmt
		  : mask_stmt ? mask_stmt : bswap_stmt, tmp);

  if (dump_file)
    {
      fprintf (dump_file, "%d bit bswap implementation found at: ",
	       (int) n->range);
      if (cur_stmt)
	print_gimple_stmt (dump_file, cur_stmt, 0);
      else
	{
	  print_generic_expr (dump_file, tgt, TDF_NONE);
	  fprintf (dump_file, "\n");
	}
    }

  if (cur_stmt)
    {
      if (rotl_stmt)
	gsi_insert_after (&gsi, rotl_stmt, GSI_SAME_STMT);
      if (mask_stmt)
	gsi_insert_after (&gsi, mask_stmt, GSI_SAME_STMT);
      gsi_insert_after (&gsi, bswap_stmt, GSI_SAME_STMT);
      gsi_remove (&gsi, true);
    }
  else
    {
      gsi_insert_before (&gsi, bswap_stmt, GSI_SAME_STMT);
      if (mask_stmt)
	gsi_insert_before (&gsi, mask_stmt, GSI_SAME_STMT);
      if (rotl_stmt)
	gsi_insert_after (&gsi, rotl_stmt, GSI_SAME_STMT);
    }
  return tgt;
}
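
/* For instance (illustrative GIMPLE, SSA names hypothetical), a matched
   32-bit pattern such as

     _5 = _1 | _4;   // built up from shifts of x_2(D)

   is replaced here by

     _6 = __builtin_bswap32 (x_2(D));

   with an additional BIT_AND_EXPR and/or LROTATE_EXPR statement appended
   when MASK or L_ROTATE are non-trivial.  */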

/* Try to optimize an assignment CUR_STMT with CONSTRUCTOR on the rhs
   using bswap optimizations.  Dominance info (CDI_DOMINATORS) needs to be
   computed on entry.  Return true if it has been optimized and
   TODO_update_ssa is needed.  */

static bool
maybe_optimize_vector_constructor (gimple *cur_stmt)
{
  tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
  struct symbolic_number n;
  bool bswap;

  gcc_assert (is_gimple_assign (cur_stmt)
	      && gimple_assign_rhs_code (cur_stmt) == CONSTRUCTOR);

  tree rhs = gimple_assign_rhs1 (cur_stmt);
  if (!VECTOR_TYPE_P (TREE_TYPE (rhs))
      || !INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (rhs)))
      || gimple_assign_lhs (cur_stmt) == NULL_TREE)
    return false;

  HOST_WIDE_INT sz = int_size_in_bytes (TREE_TYPE (rhs)) * BITS_PER_UNIT;
  switch (sz)
    {
    case 16:
      load_type = bswap_type = uint16_type_node;
      break;
    case 32:
      if (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
	  && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing)
	{
	  load_type = uint32_type_node;
	  fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
	  bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
	}
      else
	return false;
      break;
    case 64:
      if (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
	  && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
	      || (word_mode == SImode
		  && builtin_decl_explicit_p (BUILT_IN_BSWAP32)
		  && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing)))
	{
	  load_type = uint64_type_node;
	  fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
	  bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
	}
      else
	return false;
      break;
    default:
      return false;
    }

  bool cast64_to_32;
  uint64_t mask, l_rotate;
  gimple *ins_stmt = find_bswap_or_nop (cur_stmt, &n, &bswap,
					&cast64_to_32, &mask, &l_rotate);
  if (!ins_stmt
      || n.range != (unsigned HOST_WIDE_INT) sz
      || cast64_to_32
      || mask != ~(uint64_t) 0)
    return false;

  if (bswap && !fndecl && n.range != 16)
    return false;

  memset (&nop_stats, 0, sizeof (nop_stats));
  memset (&bswap_stats, 0, sizeof (bswap_stats));
  return bswap_replace (gsi_for_stmt (cur_stmt), ins_stmt, fndecl,
			bswap_type, load_type, &n, bswap, mask,
			l_rotate) != NULL_TREE;
}
1533 | |
1534 | /* Find manual byte swap implementations as well as load in a given |
1535 | endianness. Byte swaps are turned into a bswap builtin invokation |
1536 | while endian loads are converted to bswap builtin invokation or |
1537 | simple load according to the target endianness. */ |
1538 | |
1539 | unsigned int |
1540 | pass_optimize_bswap::execute (function *fun) |
1541 | { |
1542 | basic_block bb; |
1543 | bool bswap32_p, bswap64_p; |
1544 | bool changed = false; |
1545 | tree bswap32_type = NULL_TREE, bswap64_type = NULL_TREE; |
1546 | |
1547 | bswap32_p = (builtin_decl_explicit_p (fncode: BUILT_IN_BSWAP32) |
1548 | && optab_handler (op: bswap_optab, SImode) != CODE_FOR_nothing); |
1549 | bswap64_p = (builtin_decl_explicit_p (fncode: BUILT_IN_BSWAP64) |
1550 | && (optab_handler (op: bswap_optab, DImode) != CODE_FOR_nothing |
1551 | || (bswap32_p && word_mode == SImode))); |
1552 | |
1553 | /* Determine the argument type of the builtins. The code later on |
1554 | assumes that the return and argument type are the same. */ |
1555 | if (bswap32_p) |
1556 | { |
1557 | tree fndecl = builtin_decl_explicit (fncode: BUILT_IN_BSWAP32); |
1558 | bswap32_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl))); |
1559 | } |
1560 | |
1561 | if (bswap64_p) |
1562 | { |
      tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
1564 | bswap64_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl))); |
1565 | } |
1566 | |
  memset (&nop_stats, 0, sizeof (nop_stats));
  memset (&bswap_stats, 0, sizeof (bswap_stats));
1569 | calculate_dominance_info (CDI_DOMINATORS); |
1570 | |
1571 | FOR_EACH_BB_FN (bb, fun) |
1572 | { |
1573 | gimple_stmt_iterator gsi; |
1574 | |
1575 | /* We do a reverse scan for bswap patterns to make sure we get the |
1576 | widest match. As bswap pattern matching doesn't handle previously |
1577 | inserted smaller bswap replacements as sub-patterns, the wider |
1578 | variant wouldn't be detected. */ |
      for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi);)
1580 | { |
	  gimple *ins_stmt, *cur_stmt = gsi_stmt (gsi);
1582 | tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type; |
1583 | enum tree_code code; |
1584 | struct symbolic_number n; |
1585 | bool bswap, cast64_to_32; |
1586 | uint64_t mask, l_rotate; |
1587 | |
	  /* This gsi_prev (&gsi) is not part of the for loop because cur_stmt
	     might be moved to a different basic block by bswap_replace and gsi
	     must not point to it if that is the case.  Doing the gsi_prev here
	     makes sure that gsi points to the statement previous to cur_stmt
	     while still making sure that all statements in this basic block
	     are considered.  */
	  gsi_prev (&gsi);
1595 | |
	  if (!is_gimple_assign (cur_stmt))
	    continue;

	  code = gimple_assign_rhs_code (cur_stmt);
1600 | switch (code) |
1601 | { |
1602 | case LROTATE_EXPR: |
1603 | case RROTATE_EXPR: |
	      if (!tree_fits_uhwi_p (gimple_assign_rhs2 (cur_stmt))
		  || tree_to_uhwi (gimple_assign_rhs2 (cur_stmt))
		     % BITS_PER_UNIT)
1607 | continue; |
1608 | /* Fall through. */ |
1609 | case BIT_IOR_EXPR: |
1610 | case BIT_XOR_EXPR: |
1611 | case PLUS_EXPR: |
1612 | break; |
1613 | case CONSTRUCTOR: |
1614 | { |
		tree rhs = gimple_assign_rhs1 (cur_stmt);
1616 | if (VECTOR_TYPE_P (TREE_TYPE (rhs)) |
1617 | && INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (rhs)))) |
1618 | break; |
1619 | } |
1620 | continue; |
1621 | default: |
1622 | continue; |
1623 | } |
1624 | |
	  ins_stmt = find_bswap_or_nop (cur_stmt, &n, &bswap,
					&cast64_to_32, &mask, &l_rotate);
1627 | |
1628 | if (!ins_stmt) |
1629 | continue; |
1630 | |
1631 | switch (n.range) |
1632 | { |
1633 | case 16: |
1634 | /* Already in canonical form, nothing to do. */ |
1635 | if (code == LROTATE_EXPR || code == RROTATE_EXPR) |
1636 | continue; |
1637 | load_type = bswap_type = uint16_type_node; |
1638 | break; |
1639 | case 32: |
1640 | load_type = uint32_type_node; |
1641 | if (bswap32_p) |
1642 | { |
		  fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
1644 | bswap_type = bswap32_type; |
1645 | } |
1646 | break; |
1647 | case 64: |
1648 | load_type = uint64_type_node; |
1649 | if (bswap64_p) |
1650 | { |
		  fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
1652 | bswap_type = bswap64_type; |
1653 | } |
1654 | break; |
1655 | default: |
1656 | continue; |
1657 | } |
1658 | |
1659 | if (bswap && !fndecl && n.range != 16) |
1660 | continue; |
1661 | |
	  if (bswap_replace (gsi_for_stmt (cur_stmt), ins_stmt, fndecl,
			     bswap_type, load_type, &n, bswap, mask,
			     l_rotate))
1665 | changed = true; |
1666 | } |
1667 | } |
1668 | |
1669 | statistics_counter_event (fun, "16-bit nop implementations found" , |
1670 | nop_stats.found_16bit); |
1671 | statistics_counter_event (fun, "32-bit nop implementations found" , |
1672 | nop_stats.found_32bit); |
1673 | statistics_counter_event (fun, "64-bit nop implementations found" , |
1674 | nop_stats.found_64bit); |
1675 | statistics_counter_event (fun, "16-bit bswap implementations found" , |
1676 | bswap_stats.found_16bit); |
1677 | statistics_counter_event (fun, "32-bit bswap implementations found" , |
1678 | bswap_stats.found_32bit); |
1679 | statistics_counter_event (fun, "64-bit bswap implementations found" , |
1680 | bswap_stats.found_64bit); |
1681 | |
1682 | return (changed ? TODO_update_ssa : 0); |
1683 | } |
1684 | |
1685 | } // anon namespace |
1686 | |
1687 | gimple_opt_pass * |
1688 | make_pass_optimize_bswap (gcc::context *ctxt) |
1689 | { |
1690 | return new pass_optimize_bswap (ctxt); |
1691 | } |
1692 | |
1693 | namespace { |
1694 | |
1695 | /* Struct recording one operand for the store, which is either a constant, |
1696 | then VAL represents the constant and all the other fields are zero, or |
1697 | a memory load, then VAL represents the reference, BASE_ADDR is non-NULL |
1698 | and the other fields also reflect the memory load, or an SSA name, then |
1699 | VAL represents the SSA name and all the other fields are zero. */ |
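
/* For instance (a sketch): for a recorded store t.a = s.a | 3; ops[0]
   would describe the memory load from s.a, with VAL the reference,
   BASE_ADDR non-NULL and the remaining fields describing the load,
   while ops[1] would hold the INTEGER_CST 3 with all other fields
   zero.  */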
1700 | |
1701 | class store_operand_info |
1702 | { |
1703 | public: |
1704 | tree val; |
1705 | tree base_addr; |
1706 | poly_uint64 bitsize; |
1707 | poly_uint64 bitpos; |
1708 | poly_uint64 bitregion_start; |
1709 | poly_uint64 bitregion_end; |
1710 | gimple *stmt; |
1711 | bool bit_not_p; |
1712 | store_operand_info (); |
1713 | }; |
1714 | |
1715 | store_operand_info::store_operand_info () |
1716 | : val (NULL_TREE), base_addr (NULL_TREE), bitsize (0), bitpos (0), |
1717 | bitregion_start (0), bitregion_end (0), stmt (NULL), bit_not_p (false) |
1718 | { |
1719 | } |
1720 | |
1721 | /* Struct recording the information about a single store of an immediate |
1722 | to memory. These are created in the first phase and coalesced into |
1723 | merged_store_group objects in the second phase. */ |
1724 | |
1725 | class store_immediate_info |
1726 | { |
1727 | public: |
1728 | unsigned HOST_WIDE_INT bitsize; |
1729 | unsigned HOST_WIDE_INT bitpos; |
1730 | unsigned HOST_WIDE_INT bitregion_start; |
1731 | /* This is one past the last bit of the bit region. */ |
1732 | unsigned HOST_WIDE_INT bitregion_end; |
1733 | gimple *stmt; |
1734 | unsigned int order; |
1735 | /* INTEGER_CST for constant store, STRING_CST for string store, |
1736 | MEM_REF for memory copy, BIT_*_EXPR for logical bitwise operation, |
1737 | BIT_INSERT_EXPR for bit insertion. |
1738 | LROTATE_EXPR if it can be only bswap optimized and |
1739 | ops are not really meaningful. |
1740 | NOP_EXPR if bswap optimization detected identity, ops |
1741 | are not meaningful. */ |
1742 | enum tree_code rhs_code; |
1743 | /* Two fields for bswap optimization purposes. */ |
1744 | struct symbolic_number n; |
1745 | gimple *ins_stmt; |
1746 | /* True if BIT_{AND,IOR,XOR}_EXPR result is inverted before storing. */ |
1747 | bool bit_not_p; |
1748 | /* True if ops have been swapped and thus ops[1] represents |
1749 | rhs1 of BIT_{AND,IOR,XOR}_EXPR and ops[0] represents rhs2. */ |
1750 | bool ops_swapped_p; |
1751 | /* The index number of the landing pad, or 0 if there is none. */ |
1752 | int lp_nr; |
1753 | /* Operands. For BIT_*_EXPR rhs_code both operands are used, otherwise |
1754 | just the first one. */ |
1755 | store_operand_info ops[2]; |
1756 | store_immediate_info (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT, |
1757 | unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT, |
1758 | gimple *, unsigned int, enum tree_code, |
1759 | struct symbolic_number &, gimple *, bool, int, |
1760 | const store_operand_info &, |
1761 | const store_operand_info &); |
1762 | }; |
1763 | |
1764 | store_immediate_info::store_immediate_info (unsigned HOST_WIDE_INT bs, |
1765 | unsigned HOST_WIDE_INT bp, |
1766 | unsigned HOST_WIDE_INT brs, |
1767 | unsigned HOST_WIDE_INT bre, |
1768 | gimple *st, |
1769 | unsigned int ord, |
1770 | enum tree_code rhscode, |
1771 | struct symbolic_number &nr, |
1772 | gimple *ins_stmtp, |
1773 | bool bitnotp, |
1774 | int nr2, |
1775 | const store_operand_info &op0r, |
1776 | const store_operand_info &op1r) |
1777 | : bitsize (bs), bitpos (bp), bitregion_start (brs), bitregion_end (bre), |
1778 | stmt (st), order (ord), rhs_code (rhscode), n (nr), |
1779 | ins_stmt (ins_stmtp), bit_not_p (bitnotp), ops_swapped_p (false), |
1780 | lp_nr (nr2), ops { op0r, op1r } |
1781 | { |
1782 | } |
1783 | |
1784 | /* Struct representing a group of stores to contiguous memory locations. |
1785 | These are produced by the second phase (coalescing) and consumed in the |
1786 | third phase that outputs the widened stores. */ |
1787 | |
1788 | class merged_store_group |
1789 | { |
1790 | public: |
1791 | unsigned HOST_WIDE_INT start; |
1792 | unsigned HOST_WIDE_INT width; |
1793 | unsigned HOST_WIDE_INT bitregion_start; |
1794 | unsigned HOST_WIDE_INT bitregion_end; |
1795 | /* The size of the allocated memory for val and mask. */ |
1796 | unsigned HOST_WIDE_INT buf_size; |
1797 | unsigned HOST_WIDE_INT align_base; |
1798 | poly_uint64 load_align_base[2]; |
1799 | |
1800 | unsigned int align; |
1801 | unsigned int load_align[2]; |
1802 | unsigned int first_order; |
1803 | unsigned int last_order; |
1804 | bool bit_insertion; |
1805 | bool string_concatenation; |
1806 | bool only_constants; |
1807 | bool consecutive; |
1808 | unsigned int first_nonmergeable_order; |
1809 | int lp_nr; |
1810 | |
1811 | auto_vec<store_immediate_info *> stores; |
1812 | /* We record the first and last original statements in the sequence because |
1813 | we'll need their vuse/vdef and replacement position. It's easier to keep |
1814 | track of them separately as 'stores' is reordered by apply_stores. */ |
1815 | gimple *last_stmt; |
1816 | gimple *first_stmt; |
1817 | unsigned char *val; |
1818 | unsigned char *mask; |
1819 | |
1820 | merged_store_group (store_immediate_info *); |
1821 | ~merged_store_group (); |
1822 | bool can_be_merged_into (store_immediate_info *); |
1823 | void merge_into (store_immediate_info *); |
1824 | void merge_overlapping (store_immediate_info *); |
1825 | bool apply_stores (); |
1826 | private: |
1827 | void do_merge (store_immediate_info *); |
1828 | }; |
1829 | |
1830 | /* Debug helper. Dump LEN elements of byte array PTR to FD in hex. */ |
1831 | |
1832 | static void |
1833 | dump_char_array (FILE *fd, unsigned char *ptr, unsigned int len) |
1834 | { |
1835 | if (!fd) |
1836 | return; |
1837 | |
1838 | for (unsigned int i = 0; i < len; i++) |
    fprintf (fd, "%02x ", ptr[i]);
  fprintf (fd, "\n");
1841 | } |
1842 | |
1843 | /* Clear out LEN bits starting from bit START in the byte array |
1844 | PTR. This clears the bits to the *right* from START. |
1845 | START must be within [0, BITS_PER_UNIT) and counts starting from |
1846 | the least significant bit. */ |
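
/* For example (a sketch, with BITS_PER_UNIT == 8):
   clear_bit_region_be (ptr, 6, 3) clears bits 6, 5 and 4 of ptr[0],
   turning a byte of 0xff into 0x8f, while a call with START == 7 and
   LEN == 12 zeroes all of ptr[0] and the four most significant bits
   of ptr[1].  */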
1847 | |
1848 | static void |
1849 | clear_bit_region_be (unsigned char *ptr, unsigned int start, |
1850 | unsigned int len) |
1851 | { |
1852 | if (len == 0) |
1853 | return; |
1854 | /* Clear len bits to the right of start. */ |
1855 | else if (len <= start + 1) |
1856 | { |
1857 | unsigned char mask = (~(~0U << len)); |
1858 | mask = mask << (start + 1U - len); |
1859 | ptr[0] &= ~mask; |
1860 | } |
1861 | else if (start != BITS_PER_UNIT - 1) |
1862 | { |
      clear_bit_region_be (ptr, start, (start % BITS_PER_UNIT) + 1);
      clear_bit_region_be (ptr + 1, BITS_PER_UNIT - 1,
			   len - (start % BITS_PER_UNIT) - 1);
1866 | } |
1867 | else if (start == BITS_PER_UNIT - 1 |
1868 | && len > BITS_PER_UNIT) |
1869 | { |
1870 | unsigned int nbytes = len / BITS_PER_UNIT; |
      memset (ptr, 0, nbytes);
      if (len % BITS_PER_UNIT != 0)
	clear_bit_region_be (ptr + nbytes, BITS_PER_UNIT - 1,
			     len % BITS_PER_UNIT);
1875 | } |
1876 | else |
1877 | gcc_unreachable (); |
1878 | } |
1879 | |
/* In the byte array PTR clear the bit region that starts at bit
   START and is LEN bits wide.
   For regions spanning multiple bytes do this recursively until we reach
   zero LEN or a region contained within a single byte.  */
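
/* For example (a sketch, with BITS_PER_UNIT == 8):
   clear_bit_region (ptr, 2, 3) clears bits 2..4 of ptr[0], i.e.
   ptr[0] &= ~0x1c, while clear_bit_region (ptr, 6, 4) clears the two
   most significant bits of ptr[0] and the two least significant bits
   of ptr[1].  */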
1884 | |
1885 | static void |
1886 | clear_bit_region (unsigned char *ptr, unsigned int start, |
1887 | unsigned int len) |
1888 | { |
1889 | /* Degenerate base case. */ |
1890 | if (len == 0) |
1891 | return; |
1892 | else if (start >= BITS_PER_UNIT) |
    clear_bit_region (ptr + 1, start - BITS_PER_UNIT, len);
1894 | /* Second base case. */ |
1895 | else if ((start + len) <= BITS_PER_UNIT) |
1896 | { |
1897 | unsigned char mask = (~0U) << (unsigned char) (BITS_PER_UNIT - len); |
1898 | mask >>= BITS_PER_UNIT - (start + len); |
1899 | |
1900 | ptr[0] &= ~mask; |
1901 | |
1902 | return; |
1903 | } |
1904 | /* Clear most significant bits in a byte and proceed with the next byte. */ |
1905 | else if (start != 0) |
1906 | { |
1907 | clear_bit_region (ptr, start, BITS_PER_UNIT - start); |
      clear_bit_region (ptr + 1, 0, len - (BITS_PER_UNIT - start));
1909 | } |
1910 | /* Whole bytes need to be cleared. */ |
1911 | else if (start == 0 && len > BITS_PER_UNIT) |
1912 | { |
1913 | unsigned int nbytes = len / BITS_PER_UNIT; |
1914 | /* We could recurse on each byte but we clear whole bytes, so a simple |
1915 | memset will do. */ |
      memset (ptr, '\0', nbytes);
1917 | /* Clear the remaining sub-byte region if there is one. */ |
1918 | if (len % BITS_PER_UNIT != 0) |
	clear_bit_region (ptr + nbytes, 0, len % BITS_PER_UNIT);
1920 | } |
1921 | else |
1922 | gcc_unreachable (); |
1923 | } |
1924 | |
1925 | /* Write BITLEN bits of EXPR to the byte array PTR at |
1926 | bit position BITPOS. PTR should contain TOTAL_BYTES elements. |
1927 | Return true if the operation succeeded. */ |
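
/* As a small worked example (a sketch, little-endian, BITS_PER_UNIT
   == 8): writing the 4-bit constant 0x5 at BITPOS 10 encodes 0x05
   into the temporary buffer, shifts the buffer left by 10 % 8 == 2
   bits to obtain 0x14, clears bits 2..5 of ptr[1] and ORs the shifted
   byte in, leaving all other bits of PTR untouched.  */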
1928 | |
1929 | static bool |
1930 | encode_tree_to_bitpos (tree expr, unsigned char *ptr, int bitlen, int bitpos, |
1931 | unsigned int total_bytes) |
1932 | { |
1933 | unsigned int first_byte = bitpos / BITS_PER_UNIT; |
1934 | bool sub_byte_op_p = ((bitlen % BITS_PER_UNIT) |
1935 | || (bitpos % BITS_PER_UNIT) |
			|| !int_mode_for_size (bitlen, 0).exists ());
1937 | bool empty_ctor_p |
1938 | = (TREE_CODE (expr) == CONSTRUCTOR |
1939 | && CONSTRUCTOR_NELTS (expr) == 0 |
1940 | && TYPE_SIZE_UNIT (TREE_TYPE (expr)) |
1941 | && tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (expr)))); |
1942 | |
1943 | if (!sub_byte_op_p) |
1944 | { |
1945 | if (first_byte >= total_bytes) |
1946 | return false; |
1947 | total_bytes -= first_byte; |
1948 | if (empty_ctor_p) |
1949 | { |
1950 | unsigned HOST_WIDE_INT rhs_bytes |
1951 | = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (expr))); |
1952 | if (rhs_bytes > total_bytes) |
1953 | return false; |
	  memset (ptr + first_byte, '\0', rhs_bytes);
1955 | return true; |
1956 | } |
1957 | return native_encode_expr (expr, ptr + first_byte, total_bytes) != 0; |
1958 | } |
1959 | |
1960 | /* LITTLE-ENDIAN |
1961 | We are writing a non byte-sized quantity or at a position that is not |
1962 | at a byte boundary. |
1963 | |--------|--------|--------| ptr + first_byte |
1964 | ^ ^ |
1965 | xxx xxxxxxxx xxx< bp> |
1966 | |______EXPR____| |
1967 | |
1968 | First native_encode_expr EXPR into a temporary buffer and shift each |
1969 | byte in the buffer by 'bp' (carrying the bits over as necessary). |
1970 | |00000000|00xxxxxx|xxxxxxxx| << bp = |000xxxxx|xxxxxxxx|xxx00000| |
1971 | <------bitlen---->< bp> |
1972 | Then we clear the destination bits: |
1973 | |---00000|00000000|000-----| ptr + first_byte |
1974 | <-------bitlen--->< bp> |
1975 | |
1976 | Finally we ORR the bytes of the shifted EXPR into the cleared region: |
1977 | |---xxxxx||xxxxxxxx||xxx-----| ptr + first_byte. |
1978 | |
1979 | BIG-ENDIAN |
1980 | We are writing a non byte-sized quantity or at a position that is not |
1981 | at a byte boundary. |
1982 | ptr + first_byte |--------|--------|--------| |
1983 | ^ ^ |
1984 | <bp >xxx xxxxxxxx xxx |
1985 | |_____EXPR_____| |
1986 | |
1987 | First native_encode_expr EXPR into a temporary buffer and shift each |
1988 | byte in the buffer to the right by (carrying the bits over as necessary). |
1989 | We shift by as much as needed to align the most significant bit of EXPR |
1990 | with bitpos: |
1991 | |00xxxxxx|xxxxxxxx| >> 3 = |00000xxx|xxxxxxxx|xxxxx000| |
1992 | <---bitlen----> <bp ><-----bitlen-----> |
1993 | Then we clear the destination bits: |
1994 | ptr + first_byte |-----000||00000000||00000---| |
1995 | <bp ><-------bitlen-----> |
1996 | |
1997 | Finally we ORR the bytes of the shifted EXPR into the cleared region: |
1998 | ptr + first_byte |---xxxxx||xxxxxxxx||xxx-----|. |
1999 | The awkwardness comes from the fact that bitpos is counted from the |
2000 | most significant bit of a byte. */ |
2001 | |
2002 | /* We must be dealing with fixed-size data at this point, since the |
2003 | total size is also fixed. */ |
2004 | unsigned int byte_size; |
2005 | if (empty_ctor_p) |
2006 | { |
2007 | unsigned HOST_WIDE_INT rhs_bytes |
2008 | = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (expr))); |
2009 | if (rhs_bytes > total_bytes) |
2010 | return false; |
2011 | byte_size = rhs_bytes; |
2012 | } |
2013 | else |
2014 | { |
2015 | fixed_size_mode mode |
2016 | = as_a <fixed_size_mode> (TYPE_MODE (TREE_TYPE (expr))); |
2017 | byte_size |
2018 | = mode == BLKmode |
2019 | ? tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (expr))) |
2020 | : GET_MODE_SIZE (mode); |
2021 | } |
2022 | /* Allocate an extra byte so that we have space to shift into. */ |
2023 | byte_size++; |
2024 | unsigned char *tmpbuf = XALLOCAVEC (unsigned char, byte_size); |
  memset (tmpbuf, '\0', byte_size);
2026 | /* The store detection code should only have allowed constants that are |
2027 | accepted by native_encode_expr or empty ctors. */ |
2028 | if (!empty_ctor_p |
2029 | && native_encode_expr (expr, tmpbuf, byte_size - 1) == 0) |
2030 | gcc_unreachable (); |
2031 | |
2032 | /* The native_encode_expr machinery uses TYPE_MODE to determine how many |
2033 | bytes to write. This means it can write more than |
2034 | ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT bytes (for example |
2035 | write 8 bytes for a bitlen of 40). Skip the bytes that are not within |
2036 | bitlen and zero out the bits that are not relevant as well (that may |
2037 | contain a sign bit due to sign-extension). */ |
2038 | unsigned int padding |
2039 | = byte_size - ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT - 1; |
2040 | /* On big-endian the padding is at the 'front' so just skip the initial |
2041 | bytes. */ |
2042 | if (BYTES_BIG_ENDIAN) |
2043 | tmpbuf += padding; |
2044 | |
2045 | byte_size -= padding; |
2046 | |
2047 | if (bitlen % BITS_PER_UNIT != 0) |
2048 | { |
2049 | if (BYTES_BIG_ENDIAN) |
	clear_bit_region_be (tmpbuf, BITS_PER_UNIT - 1,
			     BITS_PER_UNIT - (bitlen % BITS_PER_UNIT));
      else
	clear_bit_region (tmpbuf, bitlen,
			  byte_size * BITS_PER_UNIT - bitlen);
2055 | } |
2056 | /* Left shifting relies on the last byte being clear if bitlen is |
2057 | a multiple of BITS_PER_UNIT, which might not be clear if |
2058 | there are padding bytes. */ |
2059 | else if (!BYTES_BIG_ENDIAN) |
2060 | tmpbuf[byte_size - 1] = '\0'; |
2061 | |
2062 | /* Clear the bit region in PTR where the bits from TMPBUF will be |
2063 | inserted into. */ |
2064 | if (BYTES_BIG_ENDIAN) |
    clear_bit_region_be (ptr + first_byte,
			 BITS_PER_UNIT - 1 - (bitpos % BITS_PER_UNIT), bitlen);
  else
    clear_bit_region (ptr + first_byte, bitpos % BITS_PER_UNIT, bitlen);
2069 | |
2070 | int shift_amnt; |
2071 | int bitlen_mod = bitlen % BITS_PER_UNIT; |
2072 | int bitpos_mod = bitpos % BITS_PER_UNIT; |
2073 | |
2074 | bool skip_byte = false; |
2075 | if (BYTES_BIG_ENDIAN) |
2076 | { |
2077 | /* BITPOS and BITLEN are exactly aligned and no shifting |
2078 | is necessary. */ |
2079 | if (bitpos_mod + bitlen_mod == BITS_PER_UNIT |
2080 | || (bitpos_mod == 0 && bitlen_mod == 0)) |
2081 | shift_amnt = 0; |
2082 | /* |. . . . . . . .| |
2083 | <bp > <blen >. |
2084 | We always shift right for BYTES_BIG_ENDIAN so shift the beginning |
2085 | of the value until it aligns with 'bp' in the next byte over. */ |
2086 | else if (bitpos_mod + bitlen_mod < BITS_PER_UNIT) |
2087 | { |
2088 | shift_amnt = bitlen_mod + bitpos_mod; |
2089 | skip_byte = bitlen_mod != 0; |
2090 | } |
2091 | /* |. . . . . . . .| |
2092 | <----bp---> |
2093 | <---blen---->. |
2094 | Shift the value right within the same byte so it aligns with 'bp'. */ |
2095 | else |
2096 | shift_amnt = bitlen_mod + bitpos_mod - BITS_PER_UNIT; |
2097 | } |
2098 | else |
2099 | shift_amnt = bitpos % BITS_PER_UNIT; |
2100 | |
2101 | /* Create the shifted version of EXPR. */ |
2102 | if (!BYTES_BIG_ENDIAN) |
2103 | { |
2104 | shift_bytes_in_array_left (tmpbuf, byte_size, shift_amnt); |
2105 | if (shift_amnt == 0) |
2106 | byte_size--; |
2107 | } |
2108 | else |
2109 | { |
2110 | gcc_assert (BYTES_BIG_ENDIAN); |
2111 | shift_bytes_in_array_right (tmpbuf, byte_size, shift_amnt); |
2112 | /* If shifting right forced us to move into the next byte skip the now |
2113 | empty byte. */ |
2114 | if (skip_byte) |
2115 | { |
2116 | tmpbuf++; |
2117 | byte_size--; |
2118 | } |
2119 | } |
2120 | |
2121 | /* Insert the bits from TMPBUF. */ |
2122 | for (unsigned int i = 0; i < byte_size; i++) |
2123 | ptr[first_byte + i] |= tmpbuf[i]; |
2124 | |
2125 | return true; |
2126 | } |
2127 | |
2128 | /* Sorting function for store_immediate_info objects. |
2129 | Sorts them by bitposition. */ |
2130 | |
2131 | static int |
2132 | sort_by_bitpos (const void *x, const void *y) |
2133 | { |
2134 | store_immediate_info *const *tmp = (store_immediate_info * const *) x; |
2135 | store_immediate_info *const *tmp2 = (store_immediate_info * const *) y; |
2136 | |
2137 | if ((*tmp)->bitpos < (*tmp2)->bitpos) |
2138 | return -1; |
2139 | else if ((*tmp)->bitpos > (*tmp2)->bitpos) |
2140 | return 1; |
2141 | else |
2142 | /* If they are the same let's use the order which is guaranteed to |
2143 | be different. */ |
2144 | return (*tmp)->order - (*tmp2)->order; |
2145 | } |
2146 | |
2147 | /* Sorting function for store_immediate_info objects. |
2148 | Sorts them by the order field. */ |
2149 | |
2150 | static int |
2151 | sort_by_order (const void *x, const void *y) |
2152 | { |
2153 | store_immediate_info *const *tmp = (store_immediate_info * const *) x; |
2154 | store_immediate_info *const *tmp2 = (store_immediate_info * const *) y; |
2155 | |
2156 | if ((*tmp)->order < (*tmp2)->order) |
2157 | return -1; |
2158 | else if ((*tmp)->order > (*tmp2)->order) |
2159 | return 1; |
2160 | |
2161 | gcc_unreachable (); |
2162 | } |
2163 | |
2164 | /* Initialize a merged_store_group object from a store_immediate_info |
2165 | object. */ |
2166 | |
2167 | merged_store_group::merged_store_group (store_immediate_info *info) |
2168 | { |
2169 | start = info->bitpos; |
2170 | width = info->bitsize; |
2171 | bitregion_start = info->bitregion_start; |
2172 | bitregion_end = info->bitregion_end; |
2173 | /* VAL has memory allocated for it in apply_stores once the group |
2174 | width has been finalized. */ |
2175 | val = NULL; |
2176 | mask = NULL; |
2177 | bit_insertion = info->rhs_code == BIT_INSERT_EXPR; |
2178 | string_concatenation = info->rhs_code == STRING_CST; |
2179 | only_constants = info->rhs_code == INTEGER_CST; |
2180 | consecutive = true; |
2181 | first_nonmergeable_order = ~0U; |
2182 | lp_nr = info->lp_nr; |
2183 | unsigned HOST_WIDE_INT align_bitpos = 0; |
  get_object_alignment_1 (gimple_assign_lhs (info->stmt),
2185 | &align, &align_bitpos); |
2186 | align_base = start - align_bitpos; |
2187 | for (int i = 0; i < 2; ++i) |
2188 | { |
2189 | store_operand_info &op = info->ops[i]; |
2190 | if (op.base_addr == NULL_TREE) |
2191 | { |
2192 | load_align[i] = 0; |
2193 | load_align_base[i] = 0; |
2194 | } |
2195 | else |
2196 | { |
2197 | get_object_alignment_1 (op.val, &load_align[i], &align_bitpos); |
2198 | load_align_base[i] = op.bitpos - align_bitpos; |
2199 | } |
2200 | } |
  stores.create (1);
  stores.safe_push (info);
2203 | last_stmt = info->stmt; |
2204 | last_order = info->order; |
2205 | first_stmt = last_stmt; |
2206 | first_order = last_order; |
2207 | buf_size = 0; |
2208 | } |
2209 | |
2210 | merged_store_group::~merged_store_group () |
2211 | { |
2212 | if (val) |
2213 | XDELETEVEC (val); |
2214 | } |
2215 | |
2216 | /* Return true if the store described by INFO can be merged into the group. */ |
2217 | |
2218 | bool |
2219 | merged_store_group::can_be_merged_into (store_immediate_info *info) |
2220 | { |
2221 | /* Do not merge bswap patterns. */ |
2222 | if (info->rhs_code == LROTATE_EXPR) |
2223 | return false; |
2224 | |
2225 | if (info->lp_nr != lp_nr) |
2226 | return false; |
2227 | |
2228 | /* The canonical case. */ |
2229 | if (info->rhs_code == stores[0]->rhs_code) |
2230 | return true; |
2231 | |
2232 | /* BIT_INSERT_EXPR is compatible with INTEGER_CST if no STRING_CST. */ |
2233 | if (info->rhs_code == BIT_INSERT_EXPR && stores[0]->rhs_code == INTEGER_CST) |
2234 | return !string_concatenation; |
2235 | |
2236 | if (stores[0]->rhs_code == BIT_INSERT_EXPR && info->rhs_code == INTEGER_CST) |
2237 | return !string_concatenation; |
2238 | |
2239 | /* We can turn MEM_REF into BIT_INSERT_EXPR for bit-field stores, but do it |
2240 | only for small regions since this can generate a lot of instructions. */ |
2241 | if (info->rhs_code == MEM_REF |
2242 | && (stores[0]->rhs_code == INTEGER_CST |
2243 | || stores[0]->rhs_code == BIT_INSERT_EXPR) |
2244 | && info->bitregion_start == stores[0]->bitregion_start |
2245 | && info->bitregion_end == stores[0]->bitregion_end |
2246 | && info->bitregion_end - info->bitregion_start <= MAX_FIXED_MODE_SIZE) |
2247 | return !string_concatenation; |
2248 | |
2249 | if (stores[0]->rhs_code == MEM_REF |
2250 | && (info->rhs_code == INTEGER_CST |
2251 | || info->rhs_code == BIT_INSERT_EXPR) |
2252 | && info->bitregion_start == stores[0]->bitregion_start |
2253 | && info->bitregion_end == stores[0]->bitregion_end |
2254 | && info->bitregion_end - info->bitregion_start <= MAX_FIXED_MODE_SIZE) |
2255 | return !string_concatenation; |
2256 | |
2257 | /* STRING_CST is compatible with INTEGER_CST if no BIT_INSERT_EXPR. */ |
2258 | if (info->rhs_code == STRING_CST |
2259 | && stores[0]->rhs_code == INTEGER_CST |
2260 | && stores[0]->bitsize == CHAR_BIT) |
2261 | return !bit_insertion; |
2262 | |
2263 | if (stores[0]->rhs_code == STRING_CST |
2264 | && info->rhs_code == INTEGER_CST |
2265 | && info->bitsize == CHAR_BIT) |
2266 | return !bit_insertion; |
2267 | |
2268 | return false; |
2269 | } |
2270 | |
2271 | /* Helper method for merge_into and merge_overlapping to do |
2272 | the common part. */ |
2273 | |
2274 | void |
2275 | merged_store_group::do_merge (store_immediate_info *info) |
2276 | { |
2277 | bitregion_start = MIN (bitregion_start, info->bitregion_start); |
2278 | bitregion_end = MAX (bitregion_end, info->bitregion_end); |
2279 | |
2280 | unsigned int this_align; |
2281 | unsigned HOST_WIDE_INT align_bitpos = 0; |
  get_object_alignment_1 (gimple_assign_lhs (info->stmt),
2283 | &this_align, &align_bitpos); |
2284 | if (this_align > align) |
2285 | { |
2286 | align = this_align; |
2287 | align_base = info->bitpos - align_bitpos; |
2288 | } |
2289 | for (int i = 0; i < 2; ++i) |
2290 | { |
2291 | store_operand_info &op = info->ops[i]; |
2292 | if (!op.base_addr) |
2293 | continue; |
2294 | |
2295 | get_object_alignment_1 (op.val, &this_align, &align_bitpos); |
2296 | if (this_align > load_align[i]) |
2297 | { |
2298 | load_align[i] = this_align; |
2299 | load_align_base[i] = op.bitpos - align_bitpos; |
2300 | } |
2301 | } |
2302 | |
2303 | gimple *stmt = info->stmt; |
  stores.safe_push (info);
2305 | if (info->order > last_order) |
2306 | { |
2307 | last_order = info->order; |
2308 | last_stmt = stmt; |
2309 | } |
2310 | else if (info->order < first_order) |
2311 | { |
2312 | first_order = info->order; |
2313 | first_stmt = stmt; |
2314 | } |
2315 | |
2316 | if (info->bitpos != start + width) |
2317 | consecutive = false; |
2318 | |
2319 | /* We need to use extraction if there is any bit-field. */ |
2320 | if (info->rhs_code == BIT_INSERT_EXPR) |
2321 | { |
2322 | bit_insertion = true; |
2323 | gcc_assert (!string_concatenation); |
2324 | } |
2325 | |
2326 | /* We want to use concatenation if there is any string. */ |
2327 | if (info->rhs_code == STRING_CST) |
2328 | { |
2329 | string_concatenation = true; |
2330 | gcc_assert (!bit_insertion); |
2331 | } |
2332 | |
2333 | /* But we cannot use it if we don't have consecutive stores. */ |
2334 | if (!consecutive) |
2335 | string_concatenation = false; |
2336 | |
2337 | if (info->rhs_code != INTEGER_CST) |
2338 | only_constants = false; |
2339 | } |
2340 | |
2341 | /* Merge a store recorded by INFO into this merged store. |
2342 | The store is not overlapping with the existing recorded |
2343 | stores. */ |
2344 | |
2345 | void |
2346 | merged_store_group::merge_into (store_immediate_info *info) |
2347 | { |
2348 | do_merge (info); |
2349 | |
2350 | /* Make sure we're inserting in the position we think we're inserting. */ |
2351 | gcc_assert (info->bitpos >= start + width |
2352 | && info->bitregion_start <= bitregion_end); |
2353 | |
2354 | width = info->bitpos + info->bitsize - start; |
2355 | } |
2356 | |
2357 | /* Merge a store described by INFO into this merged store. |
2358 | INFO overlaps in some way with the current store (i.e. it's not contiguous |
2359 | which is handled by merged_store_group::merge_into). */ |
2360 | |
2361 | void |
2362 | merged_store_group::merge_overlapping (store_immediate_info *info) |
2363 | { |
2364 | do_merge (info); |
2365 | |
2366 | /* If the store extends the size of the group, extend the width. */ |
2367 | if (info->bitpos + info->bitsize > start + width) |
2368 | width = info->bitpos + info->bitsize - start; |
2369 | } |
2370 | |
2371 | /* Go through all the recorded stores in this group in program order and |
2372 | apply their values to the VAL byte array to create the final merged |
2373 | value. Return true if the operation succeeded. */ |
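
/* For instance (a sketch, little-endian): after an 8-bit store of
   0xab at bit 0 and an 8-bit store of 0xcd at bit 8 of a 2-byte
   region, VAL contains { 0xab, 0xcd } and MASK is all zeros; any bits
   of the region that are never written stay set in MASK, marking bits
   the merged store must not change.  */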
2374 | |
2375 | bool |
2376 | merged_store_group::apply_stores () |
2377 | { |
2378 | store_immediate_info *info; |
2379 | unsigned int i; |
2380 | |
2381 | /* Make sure we have more than one store in the group, otherwise we cannot |
2382 | merge anything. */ |
2383 | if (bitregion_start % BITS_PER_UNIT != 0 |
2384 | || bitregion_end % BITS_PER_UNIT != 0 |
2385 | || stores.length () == 1) |
2386 | return false; |
2387 | |
2388 | buf_size = (bitregion_end - bitregion_start) / BITS_PER_UNIT; |
2389 | |
2390 | /* Really do string concatenation for large strings only. */ |
2391 | if (buf_size <= MOVE_MAX) |
2392 | string_concatenation = false; |
2393 | |
2394 | /* String concatenation only works for byte aligned start and end. */ |
2395 | if (start % BITS_PER_UNIT != 0 || width % BITS_PER_UNIT != 0) |
2396 | string_concatenation = false; |
2397 | |
2398 | /* Create a power-of-2-sized buffer for native_encode_expr. */ |
2399 | if (!string_concatenation) |
    buf_size = 1 << ceil_log2 (buf_size);
2401 | |
2402 | val = XNEWVEC (unsigned char, 2 * buf_size); |
2403 | mask = val + buf_size; |
  memset (val, 0, buf_size);
  memset (mask, ~0U, buf_size);
2406 | |
2407 | stores.qsort (sort_by_order); |
2408 | |
2409 | FOR_EACH_VEC_ELT (stores, i, info) |
2410 | { |
2411 | unsigned int pos_in_buffer = info->bitpos - bitregion_start; |
2412 | tree cst; |
2413 | if (info->ops[0].val && info->ops[0].base_addr == NULL_TREE) |
2414 | cst = info->ops[0].val; |
2415 | else if (info->ops[1].val && info->ops[1].base_addr == NULL_TREE) |
2416 | cst = info->ops[1].val; |
2417 | else |
2418 | cst = NULL_TREE; |
2419 | bool ret = true; |
2420 | if (cst && info->rhs_code != BIT_INSERT_EXPR) |
	ret = encode_tree_to_bitpos (cst, val, info->bitsize, pos_in_buffer,
				     buf_size);
      unsigned char *m = mask + (pos_in_buffer / BITS_PER_UNIT);
      if (BYTES_BIG_ENDIAN)
	clear_bit_region_be (m, (BITS_PER_UNIT - 1
				 - (pos_in_buffer % BITS_PER_UNIT)),
			     info->bitsize);
      else
	clear_bit_region (m, pos_in_buffer % BITS_PER_UNIT, info->bitsize);
2430 | if (cst && dump_file && (dump_flags & TDF_DETAILS)) |
2431 | { |
2432 | if (ret) |
2433 | { |
2434 | fputs (s: "After writing " , stream: dump_file); |
2435 | print_generic_expr (dump_file, cst, TDF_NONE); |
2436 | fprintf (stream: dump_file, format: " of size " HOST_WIDE_INT_PRINT_DEC |
2437 | " at position %d\n" , info->bitsize, pos_in_buffer); |
2438 | fputs (s: " the merged value contains " , stream: dump_file); |
2439 | dump_char_array (fd: dump_file, ptr: val, len: buf_size); |
2440 | fputs (s: " the merged mask contains " , stream: dump_file); |
2441 | dump_char_array (fd: dump_file, ptr: mask, len: buf_size); |
2442 | if (bit_insertion) |
2443 | fputs (s: " bit insertion is required\n" , stream: dump_file); |
2444 | if (string_concatenation) |
2445 | fputs (s: " string concatenation is required\n" , stream: dump_file); |
2446 | } |
2447 | else |
2448 | fprintf (stream: dump_file, format: "Failed to merge stores\n" ); |
2449 | } |
2450 | if (!ret) |
2451 | return false; |
2452 | } |
2453 | stores.qsort (sort_by_bitpos); |
2454 | return true; |
2455 | } |
2456 | |
2457 | /* Structure describing the store chain. */ |
2458 | |
2459 | class imm_store_chain_info |
2460 | { |
2461 | public: |
2462 | /* Doubly-linked list that imposes an order on chain processing. |
2463 | PNXP (prev's next pointer) points to the head of a list, or to |
2464 | the next field in the previous chain in the list. |
2465 | See pass_store_merging::m_stores_head for more rationale. */ |
2466 | imm_store_chain_info *next, **pnxp; |
2467 | tree base_addr; |
2468 | auto_vec<store_immediate_info *> m_store_info; |
2469 | auto_vec<merged_store_group *> m_merged_store_groups; |
2470 | |
2471 | imm_store_chain_info (imm_store_chain_info *&inspt, tree b_a) |
2472 | : next (inspt), pnxp (&inspt), base_addr (b_a) |
2473 | { |
2474 | inspt = this; |
2475 | if (next) |
2476 | { |
2477 | gcc_checking_assert (pnxp == next->pnxp); |
2478 | next->pnxp = &next; |
2479 | } |
2480 | } |
2481 | ~imm_store_chain_info () |
2482 | { |
2483 | *pnxp = next; |
2484 | if (next) |
2485 | { |
2486 | gcc_checking_assert (&next == next->pnxp); |
2487 | next->pnxp = pnxp; |
2488 | } |
2489 | } |
2490 | bool terminate_and_process_chain (); |
2491 | bool try_coalesce_bswap (merged_store_group *, unsigned int, unsigned int, |
2492 | unsigned int); |
2493 | bool coalesce_immediate_stores (); |
2494 | bool output_merged_store (merged_store_group *); |
2495 | bool output_merged_stores (); |
2496 | }; |
2497 | |
const pass_data pass_data_tree_store_merging = {
  GIMPLE_PASS, /* type */
  "store-merging", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_GIMPLE_STORE_MERGING, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};
2509 | |
2510 | class pass_store_merging : public gimple_opt_pass |
2511 | { |
2512 | public: |
2513 | pass_store_merging (gcc::context *ctxt) |
2514 | : gimple_opt_pass (pass_data_tree_store_merging, ctxt), m_stores_head (), |
2515 | m_n_chains (0), m_n_stores (0) |
2516 | { |
2517 | } |
2518 | |
2519 | /* Pass not supported for PDP-endian, nor for insane hosts or |
2520 | target character sizes where native_{encode,interpret}_expr |
2521 | doesn't work properly. */ |
2522 | bool |
2523 | gate (function *) final override |
2524 | { |
2525 | return flag_store_merging |
2526 | && BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN |
2527 | && CHAR_BIT == 8 |
2528 | && BITS_PER_UNIT == 8; |
2529 | } |
2530 | |
2531 | unsigned int execute (function *) final override; |
2532 | |
2533 | private: |
2534 | hash_map<tree_operand_hash, class imm_store_chain_info *> m_stores; |
2535 | |
2536 | /* Form a doubly-linked stack of the elements of m_stores, so that |
2537 | we can iterate over them in a predictable way. Using this order |
2538 | avoids extraneous differences in the compiler output just because |
2539 | of tree pointer variations (e.g. different chains end up in |
2540 | different positions of m_stores, so they are handled in different |
2541 | orders, so they allocate or release SSA names in different |
2542 | orders, and when they get reused, subsequent passes end up |
2543 | getting different SSA names, which may ultimately change |
2544 | decisions when going out of SSA). */ |
2545 | imm_store_chain_info *m_stores_head; |
2546 | |
2547 | /* The number of store chains currently tracked. */ |
2548 | unsigned m_n_chains; |
2549 | /* The number of stores currently tracked. */ |
2550 | unsigned m_n_stores; |
2551 | |
2552 | bool process_store (gimple *); |
2553 | bool terminate_and_process_chain (imm_store_chain_info *); |
2554 | bool terminate_all_aliasing_chains (imm_store_chain_info **, gimple *); |
2555 | bool terminate_and_process_all_chains (); |
2556 | }; // class pass_store_merging |
2557 | |
2558 | /* Terminate and process all recorded chains. Return true if any changes |
2559 | were made. */ |
2560 | |
2561 | bool |
2562 | pass_store_merging::terminate_and_process_all_chains () |
2563 | { |
2564 | bool ret = false; |
2565 | while (m_stores_head) |
2566 | ret |= terminate_and_process_chain (m_stores_head); |
2567 | gcc_assert (m_stores.is_empty ()); |
2568 | return ret; |
2569 | } |
2570 | |
2571 | /* Terminate all chains that are affected by the statement STMT. |
2572 | CHAIN_INFO is the chain we should ignore from the checks if |
2573 | non-NULL. Return true if any changes were made. */ |
2574 | |
2575 | bool |
2576 | pass_store_merging::terminate_all_aliasing_chains (imm_store_chain_info |
2577 | **chain_info, |
2578 | gimple *stmt) |
2579 | { |
2580 | bool ret = false; |
2581 | |
2582 | /* If the statement doesn't touch memory it can't alias. */ |
  if (!gimple_vuse (stmt))
2584 | return false; |
2585 | |
  tree store_lhs = gimple_store_p (stmt) ? gimple_get_lhs (stmt) : NULL_TREE;
2587 | ao_ref store_lhs_ref; |
2588 | ao_ref_init (&store_lhs_ref, store_lhs); |
2589 | for (imm_store_chain_info *next = m_stores_head, *cur = next; cur; cur = next) |
2590 | { |
2591 | next = cur->next; |
2592 | |
2593 | /* We already checked all the stores in chain_info and terminated the |
2594 | chain if necessary. Skip it here. */ |
2595 | if (chain_info && *chain_info == cur) |
2596 | continue; |
2597 | |
2598 | store_immediate_info *info; |
2599 | unsigned int i; |
2600 | FOR_EACH_VEC_ELT (cur->m_store_info, i, info) |
2601 | { |
	  tree lhs = gimple_assign_lhs (info->stmt);
2603 | ao_ref lhs_ref; |
2604 | ao_ref_init (&lhs_ref, lhs); |
2605 | if (ref_maybe_used_by_stmt_p (stmt, &lhs_ref) |
2606 | || stmt_may_clobber_ref_p_1 (stmt, &lhs_ref) |
2607 | || (store_lhs && refs_may_alias_p_1 (&store_lhs_ref, |
2608 | &lhs_ref, false))) |
2609 | { |
2610 | if (dump_file && (dump_flags & TDF_DETAILS)) |
2611 | { |
2612 | fprintf (stream: dump_file, format: "stmt causes chain termination:\n" ); |
2613 | print_gimple_stmt (dump_file, stmt, 0); |
2614 | } |
2615 | ret |= terminate_and_process_chain (cur); |
2616 | break; |
2617 | } |
2618 | } |
2619 | } |
2620 | |
2621 | return ret; |
2622 | } |
2623 | |
2624 | /* Helper function. Terminate the recorded chain storing to base object |
2625 | BASE. Return true if the merging and output was successful. The m_stores |
2626 | entry is removed after the processing in any case. */ |
2627 | |
2628 | bool |
2629 | pass_store_merging::terminate_and_process_chain (imm_store_chain_info *chain_info) |
2630 | { |
2631 | m_n_stores -= chain_info->m_store_info.length (); |
2632 | m_n_chains--; |
2633 | bool ret = chain_info->terminate_and_process_chain (); |
  m_stores.remove (chain_info->base_addr);
2635 | delete chain_info; |
2636 | return ret; |
2637 | } |
2638 | |
2639 | /* Return true if stmts in between FIRST (inclusive) and LAST (exclusive) |
2640 | may clobber REF. FIRST and LAST must have non-NULL vdef. We want to |
2641 | be able to sink load of REF across stores between FIRST and LAST, up |
2642 | to right before LAST. */ |
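
/* For instance (a sketch): with
     _1 = s.a;  t.a = _1;  u.x = 2;  _2 = s.b;  t.b = _2;
   sinking the load of s.b down to just before the last store is only
   valid if the intermediate u.x = 2 store can be shown not to clobber
   s.b; otherwise this function conservatively returns true.  */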
2643 | |
2644 | bool |
2645 | stmts_may_clobber_ref_p (gimple *first, gimple *last, tree ref) |
2646 | { |
2647 | ao_ref r; |
2648 | ao_ref_init (&r, ref); |
2649 | unsigned int count = 0; |
  tree vop = gimple_vdef (last);
2651 | gimple *stmt; |
2652 | |
2653 | /* Return true conservatively if the basic blocks are different. */ |
  if (gimple_bb (first) != gimple_bb (last))
2655 | return true; |
2656 | |
2657 | do |
2658 | { |
2659 | stmt = SSA_NAME_DEF_STMT (vop); |
2660 | if (stmt_may_clobber_ref_p_1 (stmt, &r)) |
2661 | return true; |
      if (gimple_store_p (stmt)
2663 | && refs_anti_dependent_p (ref, gimple_get_lhs (stmt))) |
2664 | return true; |
2665 | /* Avoid quadratic compile time by bounding the number of checks |
2666 | we perform. */ |
2667 | if (++count > MAX_STORE_ALIAS_CHECKS) |
2668 | return true; |
      vop = gimple_vuse (stmt);
2670 | } |
2671 | while (stmt != first); |
2672 | |
2673 | return false; |
2674 | } |
2675 | |
2676 | /* Return true if INFO->ops[IDX] is mergeable with the |
2677 | corresponding loads already in MERGED_STORE group. |
2678 | BASE_ADDR is the base address of the whole store group. */ |
2679 | |
2680 | bool |
2681 | compatible_load_p (merged_store_group *merged_store, |
2682 | store_immediate_info *info, |
2683 | tree base_addr, int idx) |
2684 | { |
2685 | store_immediate_info *infof = merged_store->stores[0]; |
2686 | if (!info->ops[idx].base_addr |
      || maybe_ne (info->ops[idx].bitpos - infof->ops[idx].bitpos,
		   info->bitpos - infof->bitpos)
      || !operand_equal_p (info->ops[idx].base_addr,
			   infof->ops[idx].base_addr, 0))
2691 | return false; |
2692 | |
2693 | store_immediate_info *infol = merged_store->stores.last (); |
  tree load_vuse = gimple_vuse (info->ops[idx].stmt);
2695 | /* In this case all vuses should be the same, e.g. |
2696 | _1 = s.a; _2 = s.b; _3 = _1 | 1; t.a = _3; _4 = _2 | 2; t.b = _4; |
2697 | or |
2698 | _1 = s.a; _2 = s.b; t.a = _1; t.b = _2; |
2699 | and we can emit the coalesced load next to any of those loads. */ |
  if (gimple_vuse (infof->ops[idx].stmt) == load_vuse
      && gimple_vuse (infol->ops[idx].stmt) == load_vuse)
2702 | return true; |
2703 | |
2704 | /* Otherwise, at least for now require that the load has the same |
2705 | vuse as the store. See following examples. */ |
  if (gimple_vuse (info->stmt) != load_vuse)
2707 | return false; |
2708 | |
  if (gimple_vuse (infof->stmt) != gimple_vuse (infof->ops[idx].stmt)
      || (infof != infol
	  && gimple_vuse (infol->stmt) != gimple_vuse (infol->ops[idx].stmt)))
2712 | return false; |
2713 | |
2714 | /* If the load is from the same location as the store, already |
2715 | the construction of the immediate chain info guarantees no intervening |
2716 | stores, so no further checks are needed. Example: |
2717 | _1 = s.a; _2 = _1 & -7; s.a = _2; _3 = s.b; _4 = _3 & -7; s.b = _4; */ |
2718 | if (known_eq (info->ops[idx].bitpos, info->bitpos) |
      && operand_equal_p (info->ops[idx].base_addr, base_addr, 0))
2720 | return true; |
2721 | |
2722 | /* Otherwise, we need to punt if any of the loads can be clobbered by any |
2723 | of the stores in the group, or any other stores in between those. |
2724 | Previous calls to compatible_load_p ensured that for all the |
2725 | merged_store->stores IDX loads, no stmts starting with |
2726 | merged_store->first_stmt and ending right before merged_store->last_stmt |
2727 | clobbers those loads. */ |
2728 | gimple *first = merged_store->first_stmt; |
2729 | gimple *last = merged_store->last_stmt; |
2730 | /* The stores are sorted by increasing store bitpos, so if info->stmt store |
2731 | comes before the so far first load, we'll be changing |
2732 | merged_store->first_stmt. In that case we need to give up if |
2733 | any of the earlier processed loads clobber with the stmts in the new |
2734 | range. */ |
2735 | if (info->order < merged_store->first_order) |
2736 | { |
2737 | for (store_immediate_info *infoc : merged_store->stores) |
	if (stmts_may_clobber_ref_p (info->stmt, first, infoc->ops[idx].val))
2739 | return false; |
2740 | first = info->stmt; |
2741 | } |
2742 | /* Similarly, we could change merged_store->last_stmt, so ensure |
2743 | in that case no stmts in the new range clobber any of the earlier |
2744 | processed loads. */ |
2745 | else if (info->order > merged_store->last_order) |
2746 | { |
2747 | for (store_immediate_info *infoc : merged_store->stores) |
	if (stmts_may_clobber_ref_p (last, info->stmt, infoc->ops[idx].val))
2749 | return false; |
2750 | last = info->stmt; |
2751 | } |
2752 | /* And finally, we'd be adding a new load to the set, ensure it isn't |
2753 | clobbered in the new range. */ |
  if (stmts_may_clobber_ref_p (first, last, info->ops[idx].val))
2755 | return false; |
2756 | |
2757 | /* Otherwise, we are looking for: |
2758 | _1 = s.a; _2 = _1 ^ 15; t.a = _2; _3 = s.b; _4 = _3 ^ 15; t.b = _4; |
2759 | or |
2760 | _1 = s.a; t.a = _1; _2 = s.b; t.b = _2; */ |
2761 | return true; |
2762 | } |
2763 | |
2764 | /* Add all refs loaded to compute VAL to REFS vector. */ |
2765 | |
2766 | void |
2767 | gather_bswap_load_refs (vec<tree> *refs, tree val) |
2768 | { |
2769 | if (TREE_CODE (val) != SSA_NAME) |
2770 | return; |
2771 | |
2772 | gimple *stmt = SSA_NAME_DEF_STMT (val); |
  if (!is_gimple_assign (stmt))
2774 | return; |
2775 | |
2776 | if (gimple_assign_load_p (stmt)) |
2777 | { |
      refs->safe_push (gimple_assign_rhs1 (stmt));
2779 | return; |
2780 | } |
2781 | |
  switch (gimple_assign_rhs_class (stmt))
2783 | { |
2784 | case GIMPLE_BINARY_RHS: |
      gather_bswap_load_refs (refs, gimple_assign_rhs2 (stmt));
2786 | /* FALLTHRU */ |
2787 | case GIMPLE_UNARY_RHS: |
      gather_bswap_load_refs (refs, gimple_assign_rhs1 (stmt));
2789 | break; |
2790 | default: |
2791 | gcc_unreachable (); |
2792 | } |
2793 | } |
2794 | |
2795 | /* Check if there are any stores in M_STORE_INFO after index I |
2796 | (where M_STORE_INFO must be sorted by sort_by_bitpos) that overlap |
2797 | a potential group ending with END that have their order |
2798 | smaller than LAST_ORDER. ALL_INTEGER_CST_P is true if |
2799 | all the stores already merged and the one under consideration |
2800 | have rhs_code of INTEGER_CST. Return true if there are no such stores. |
2801 | Consider: |
2802 | MEM[(long long int *)p_28] = 0; |
2803 | MEM[(long long int *)p_28 + 8B] = 0; |
2804 | MEM[(long long int *)p_28 + 16B] = 0; |
2805 | MEM[(long long int *)p_28 + 24B] = 0; |
2806 | _129 = (int) _130; |
2807 | MEM[(int *)p_28 + 8B] = _129; |
2808 | MEM[(int *)p_28].a = -1; |
2809 | We already have |
2810 | MEM[(long long int *)p_28] = 0; |
2811 | MEM[(int *)p_28].a = -1; |
2812 | stmts in the current group and need to consider if it is safe to |
2813 | add MEM[(long long int *)p_28 + 8B] = 0; store into the same group. |
2814 | There is an overlap between that store and the MEM[(int *)p_28 + 8B] = _129; |
2815 | store though, so if we add the MEM[(long long int *)p_28 + 8B] = 0; |
2816 | into the group and merging of those 3 stores is successful, merged |
2817 | stmts will be emitted at the latest store from that group, i.e. |
2818 | LAST_ORDER, which is the MEM[(int *)p_28].a = -1; store. |
2819 | The MEM[(int *)p_28 + 8B] = _129; store that originally follows |
2820 | the MEM[(long long int *)p_28 + 8B] = 0; would now be before it, |
2821 | so we need to refuse merging MEM[(long long int *)p_28 + 8B] = 0; |
2822 | into the group. That way it will be its own store group and will |
2823 | not be touched. If ALL_INTEGER_CST_P and there are overlapping |
2824 | INTEGER_CST stores, those are mergeable using merge_overlapping, |
2825 | so don't return false for those. |
2826 | |
2827 | Similarly, check stores from FIRST_EARLIER (inclusive) to END_EARLIER |
2828 | (exclusive), whether they don't overlap the bitrange START to END |
2829 | and have order in between FIRST_ORDER and LAST_ORDER. This is to |
2830 | prevent merging in cases like: |
2831 | MEM <char[12]> [&b + 8B] = {}; |
2832 | MEM[(short *) &b] = 5; |
2833 | _5 = *x_4(D); |
2834 | MEM <long long unsigned int> [&b + 2B] = _5; |
2835 | MEM[(char *)&b + 16B] = 88; |
2836 | MEM[(int *)&b + 20B] = 1; |
2837 | The = {} store comes in sort_by_bitpos before the = 88 store, and can't |
2838 | be merged with it, because the = _5 store overlaps these and is in between |
2839 | them in sort_by_order ordering. If it was merged, the merged store would |
2840 | go after the = _5 store and thus change behavior. */ |
2841 | |
2842 | static bool |
2843 | check_no_overlap (const vec<store_immediate_info *> &m_store_info, |
2844 | unsigned int i, |
2845 | bool all_integer_cst_p, unsigned int first_order, |
2846 | unsigned int last_order, unsigned HOST_WIDE_INT start, |
2847 | unsigned HOST_WIDE_INT end, unsigned int first_earlier, |
2848 | unsigned end_earlier) |
2849 | { |
2850 | unsigned int len = m_store_info.length (); |
2851 | for (unsigned int j = first_earlier; j < end_earlier; j++) |
2852 | { |
2853 | store_immediate_info *info = m_store_info[j]; |
2854 | if (info->order > first_order |
2855 | && info->order < last_order |
2856 | && info->bitpos + info->bitsize > start) |
2857 | return false; |
2858 | } |
2859 | for (++i; i < len; ++i) |
2860 | { |
2861 | store_immediate_info *info = m_store_info[i]; |
2862 | if (info->bitpos >= end) |
2863 | break; |
2864 | if (info->order < last_order |
2865 | && (!all_integer_cst_p || info->rhs_code != INTEGER_CST)) |
2866 | return false; |
2867 | } |
2868 | return true; |
2869 | } |
2870 | |
/* Return true if m_store_info[first] and at least one following store
   form a group that stores a try_size-bit value which is byte swapped
   from a memory load or from some value, or is the identity of some
   value.  This uses the bswap pass APIs.  */
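
/* For instance (a sketch, assuming a little-endian target and a
   uint32_t value data), the four single-byte stores
     p[0] = data >> 24;
     p[1] = data >> 16;
     p[2] = data >> 8;
     p[3] = data;
   together store a byte-swapped 32-bit value and can be coalesced
   into a single 4-byte store of __builtin_bswap32 (data).  */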
2875 | |
2876 | bool |
2877 | imm_store_chain_info::try_coalesce_bswap (merged_store_group *merged_store, |
2878 | unsigned int first, |
2879 | unsigned int try_size, |
2880 | unsigned int first_earlier) |
2881 | { |
2882 | unsigned int len = m_store_info.length (), last = first; |
2883 | unsigned HOST_WIDE_INT width = m_store_info[first]->bitsize; |
2884 | if (width >= try_size) |
2885 | return false; |
2886 | for (unsigned int i = first + 1; i < len; ++i) |
2887 | { |
2888 | if (m_store_info[i]->bitpos != m_store_info[first]->bitpos + width |
2889 | || m_store_info[i]->lp_nr != merged_store->lp_nr |
2890 | || m_store_info[i]->ins_stmt == NULL) |
2891 | return false; |
2892 | width += m_store_info[i]->bitsize; |
2893 | if (width >= try_size) |
2894 | { |
2895 | last = i; |
2896 | break; |
2897 | } |
2898 | } |
2899 | if (width != try_size) |
2900 | return false; |
2901 | |
2902 | bool allow_unaligned |
2903 | = !STRICT_ALIGNMENT && param_store_merging_allow_unaligned; |
2904 | /* Punt if the combined store would not be aligned and we need alignment. */ |
2905 | if (!allow_unaligned) |
2906 | { |
2907 | unsigned int align = merged_store->align; |
2908 | unsigned HOST_WIDE_INT align_base = merged_store->align_base; |
2909 | for (unsigned int i = first + 1; i <= last; ++i) |
2910 | { |
2911 | unsigned int this_align; |
2912 | unsigned HOST_WIDE_INT align_bitpos = 0; |
	  get_object_alignment_1 (gimple_assign_lhs (m_store_info[i]->stmt),
2914 | &this_align, &align_bitpos); |
2915 | if (this_align > align) |
2916 | { |
2917 | align = this_align; |
2918 | align_base = m_store_info[i]->bitpos - align_bitpos; |
2919 | } |
2920 | } |
2921 | unsigned HOST_WIDE_INT align_bitpos |
2922 | = (m_store_info[first]->bitpos - align_base) & (align - 1); |
2923 | if (align_bitpos) |
	align = least_bit_hwi (align_bitpos);
2925 | if (align < try_size) |
2926 | return false; |
2927 | } |
2928 | |
2929 | tree type; |
2930 | switch (try_size) |
2931 | { |
2932 | case 16: type = uint16_type_node; break; |
2933 | case 32: type = uint32_type_node; break; |
2934 | case 64: type = uint64_type_node; break; |
2935 | default: gcc_unreachable (); |
2936 | } |
2937 | struct symbolic_number n; |
2938 | gimple *ins_stmt = NULL; |
2939 | int vuse_store = -1; |
2940 | unsigned int first_order = merged_store->first_order; |
2941 | unsigned int last_order = merged_store->last_order; |
2942 | gimple *first_stmt = merged_store->first_stmt; |
2943 | gimple *last_stmt = merged_store->last_stmt; |
2944 | unsigned HOST_WIDE_INT end = merged_store->start + merged_store->width; |
2945 | store_immediate_info *infof = m_store_info[first]; |
2946 | |
2947 | for (unsigned int i = first; i <= last; ++i) |
2948 | { |
2949 | store_immediate_info *info = m_store_info[i]; |
2950 | struct symbolic_number this_n = info->n; |
2951 | this_n.type = type; |
2952 | if (!this_n.base_addr) |
2953 | this_n.range = try_size / BITS_PER_UNIT; |
2954 | else |
2955 | /* Update vuse in case it has changed by output_merged_stores. */ |
	this_n.vuse = gimple_vuse (info->ins_stmt);
2957 | unsigned int bitpos = info->bitpos - infof->bitpos; |
      if (!do_shift_rotate (LSHIFT_EXPR, &this_n,
2959 | BYTES_BIG_ENDIAN |
2960 | ? try_size - info->bitsize - bitpos |
2961 | : bitpos)) |
2962 | return false; |
2963 | if (this_n.base_addr && vuse_store) |
2964 | { |
2965 | unsigned int j; |
2966 | for (j = first; j <= last; ++j) |
	    if (this_n.vuse == gimple_vuse (m_store_info[j]->stmt))
2968 | break; |
2969 | if (j > last) |
2970 | { |
2971 | if (vuse_store == 1) |
2972 | return false; |
2973 | vuse_store = 0; |
2974 | } |
2975 | } |
2976 | if (i == first) |
2977 | { |
2978 | n = this_n; |
2979 | ins_stmt = info->ins_stmt; |
2980 | } |
2981 | else |
2982 | { |
2983 | if (n.base_addr && n.vuse != this_n.vuse) |
2984 | { |
2985 | if (vuse_store == 0) |
2986 | return false; |
2987 | vuse_store = 1; |
2988 | } |
2989 | if (info->order > last_order) |
2990 | { |
2991 | last_order = info->order; |
2992 | last_stmt = info->stmt; |
2993 | } |
2994 | else if (info->order < first_order) |
2995 | { |
2996 | first_order = info->order; |
2997 | first_stmt = info->stmt; |
2998 | } |
2999 | end = MAX (end, info->bitpos + info->bitsize); |
3000 | |
	  ins_stmt = perform_symbolic_merge (ins_stmt, &n, info->ins_stmt,
					     &this_n, &n, BIT_IOR_EXPR);
3003 | if (ins_stmt == NULL) |
3004 | return false; |
3005 | } |
3006 | } |
3007 | |
3008 | uint64_t cmpxchg, cmpnop; |
3009 | bool cast64_to_32; |
  find_bswap_or_nop_finalize (&n, &cmpxchg, &cmpnop, &cast64_to_32);
3011 | |
  /* A complete byte swap should make the symbolic number start with
     the largest digit in the highest order byte.  An unchanged symbolic
     number indicates a read with the same endianness as the target
     architecture.  */
3015 | if (n.n != cmpnop && n.n != cmpxchg) |
3016 | return false; |
3017 | |
3018 | /* For now. */ |
3019 | if (cast64_to_32) |
3020 | return false; |
3021 | |
3022 | if (n.base_addr == NULL_TREE && !is_gimple_val (n.src)) |
3023 | return false; |
3024 | |
  if (!check_no_overlap (m_store_info, last, false, first_order, last_order,
			 merged_store->start, end, first_earlier, first))
3027 | return false; |
3028 | |
3029 | /* Don't handle memory copy this way if normal non-bswap processing |
3030 | would handle it too. */ |
3031 | if (n.n == cmpnop && (unsigned) n.n_ops == last - first + 1) |
3032 | { |
3033 | unsigned int i; |
3034 | for (i = first; i <= last; ++i) |
3035 | if (m_store_info[i]->rhs_code != MEM_REF) |
3036 | break; |
3037 | if (i == last + 1) |
3038 | return false; |
3039 | } |
3040 | |
3041 | if (n.n == cmpxchg) |
3042 | switch (try_size) |
3043 | { |
3044 | case 16: |
3045 | /* Will emit LROTATE_EXPR. */ |
3046 | break; |
3047 | case 32: |
      if (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
	  && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing)
3050 | break; |
3051 | return false; |
3052 | case 64: |
3053 | if (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
3054 | && optab_handler (bswap_optab, DImode) != CODE_FOR_nothing)
3055 | break; |
3056 | return false; |
3057 | default: |
3058 | gcc_unreachable (); |
3059 | } |
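/* Note (illustrative): the 16-bit case needs no bswap optab or builtin
   because a 2-byte swap is simply a rotate by 8, conceptually
     x = (x << 8) | (x >> 8);
   for a 16-bit unsigned x, hence the LROTATE_EXPR emitted above.  */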
3060 | |
3061 | if (!allow_unaligned && n.base_addr) |
3062 | { |
3063 | unsigned int align = get_object_alignment (n.src); |
3064 | if (align < try_size) |
3065 | return false; |
3066 | } |
3067 | |
3068 | /* If each load has the vuse of the corresponding store, we need to
3069 | verify that the loads can be sunk right before the last store. */
3070 | if (vuse_store == 1) |
3071 | { |
3072 | auto_vec<tree, 64> refs; |
3073 | for (unsigned int i = first; i <= last; ++i) |
3074 | gather_bswap_load_refs (&refs,
3075 | gimple_assign_rhs1 (m_store_info[i]->stmt));
3076 | |
3077 | for (tree ref : refs) |
3078 | if (stmts_may_clobber_ref_p (first_stmt, last_stmt, ref))
3079 | return false; |
3080 | n.vuse = NULL_TREE; |
3081 | } |
3082 | |
3083 | infof->n = n; |
3084 | infof->ins_stmt = ins_stmt; |
3085 | for (unsigned int i = first; i <= last; ++i) |
3086 | { |
3087 | m_store_info[i]->rhs_code = n.n == cmpxchg ? LROTATE_EXPR : NOP_EXPR; |
3088 | m_store_info[i]->ops[0].base_addr = NULL_TREE; |
3089 | m_store_info[i]->ops[1].base_addr = NULL_TREE; |
3090 | if (i != first) |
3091 | merged_store->merge_into (m_store_info[i]);
3092 | } |
3093 | |
3094 | return true; |
3095 | } |
3096 | |
3097 | /* Go through the candidate stores recorded in m_store_info and merge them |
3098 | into merged_store_group objects recorded into m_merged_store_groups |
3099 | representing the widened stores. Return true if coalescing was successful |
3100 | and the number of widened stores is fewer than the original number |
3101 | of stores. */ |
3102 | |
3103 | bool |
3104 | imm_store_chain_info::coalesce_immediate_stores () |
3105 | { |
3106 | /* Anything less can't be processed. */ |
3107 | if (m_store_info.length () < 2) |
3108 | return false; |
3109 | |
3110 | if (dump_file && (dump_flags & TDF_DETAILS)) |
3111 | fprintf (dump_file, "Attempting to coalesce %u stores in chain\n",
3112 | m_store_info.length ()); |
3113 | |
3114 | store_immediate_info *info; |
3115 | unsigned int i, ignore = 0; |
3116 | unsigned int first_earlier = 0; |
3117 | unsigned int end_earlier = 0; |
3118 | |
3119 | /* Order the stores by the bitposition they write to. */ |
3120 | m_store_info.qsort (sort_by_bitpos); |
3121 | |
3122 | info = m_store_info[0]; |
3123 | merged_store_group *merged_store = new merged_store_group (info); |
3124 | if (dump_file && (dump_flags & TDF_DETAILS)) |
3125 | fputs ("New store group\n", dump_file);
3126 | |
3127 | FOR_EACH_VEC_ELT (m_store_info, i, info) |
3128 | { |
3129 | unsigned HOST_WIDE_INT new_bitregion_start, new_bitregion_end; |
3130 | |
3131 | if (i <= ignore) |
3132 | goto done; |
3133 | |
3134 | while (first_earlier < end_earlier |
3135 | && (m_store_info[first_earlier]->bitpos |
3136 | + m_store_info[first_earlier]->bitsize |
3137 | <= merged_store->start)) |
3138 | first_earlier++; |
3139 | |
3140 | /* First try to handle group of stores like: |
3141 | p[0] = data >> 24; |
3142 | p[1] = data >> 16; |
3143 | p[2] = data >> 8; |
3144 | p[3] = data; |
3145 | using the bswap framework. */ |
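/* For instance, on a little-endian target the four byte stores above
   would then typically collapse into a single word store of a
   byte-swapped value, conceptually (illustrative only):
     *(uint32_t *) p = __builtin_bswap32 (data);  */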
3146 | if (info->bitpos == merged_store->start + merged_store->width |
3147 | && merged_store->stores.length () == 1 |
3148 | && merged_store->stores[0]->ins_stmt != NULL |
3149 | && info->lp_nr == merged_store->lp_nr |
3150 | && info->ins_stmt != NULL) |
3151 | { |
3152 | unsigned int try_size; |
3153 | for (try_size = 64; try_size >= 16; try_size >>= 1) |
3154 | if (try_coalesce_bswap (merged_store, i - 1, try_size,
3155 | first_earlier)) |
3156 | break; |
3157 | |
3158 | if (try_size >= 16) |
3159 | { |
3160 | ignore = i + merged_store->stores.length () - 1; |
3161 | m_merged_store_groups.safe_push (merged_store);
3162 | if (ignore < m_store_info.length ()) |
3163 | { |
3164 | merged_store = new merged_store_group (m_store_info[ignore]); |
3165 | end_earlier = ignore; |
3166 | } |
3167 | else |
3168 | merged_store = NULL; |
3169 | goto done; |
3170 | } |
3171 | } |
3172 | |
3173 | new_bitregion_start |
3174 | = MIN (merged_store->bitregion_start, info->bitregion_start); |
3175 | new_bitregion_end |
3176 | = MAX (merged_store->bitregion_end, info->bitregion_end); |
3177 | |
3178 | if (info->order >= merged_store->first_nonmergeable_order |
3179 | || (((new_bitregion_end - new_bitregion_start + 1) / BITS_PER_UNIT) |
3180 | > (unsigned) param_store_merging_max_size)) |
3181 | ; |
3182 | |
3183 | /* |---store 1---| |
3184 | |---store 2---| |
3185 | Overlapping stores. */ |
3186 | else if (IN_RANGE (info->bitpos, merged_store->start, |
3187 | merged_store->start + merged_store->width - 1) |
3188 | /* |---store 1---||---store 2---| |
3189 | Handle also the consecutive INTEGER_CST stores case here, |
3190 | as we have here the code to deal with overlaps. */ |
3191 | || (info->bitregion_start <= merged_store->bitregion_end |
3192 | && info->rhs_code == INTEGER_CST |
3193 | && merged_store->only_constants |
3194 | && merged_store->can_be_merged_into (info))) |
3195 | { |
3196 | /* Only allow overlapping stores of constants. */ |
3197 | if (info->rhs_code == INTEGER_CST |
3198 | && merged_store->only_constants |
3199 | && info->lp_nr == merged_store->lp_nr) |
3200 | { |
3201 | unsigned int first_order |
3202 | = MIN (merged_store->first_order, info->order); |
3203 | unsigned int last_order |
3204 | = MAX (merged_store->last_order, info->order); |
3205 | unsigned HOST_WIDE_INT end |
3206 | = MAX (merged_store->start + merged_store->width, |
3207 | info->bitpos + info->bitsize); |
3208 | if (check_no_overlap (m_store_info, i, true, first_order,
3209 | last_order, merged_store->start, end,
3210 | first_earlier, end_earlier)) |
3211 | { |
3212 | /* check_no_overlap call above made sure there are no |
3213 | overlapping stores with non-INTEGER_CST rhs_code |
3214 | in between the first and last of the stores we've |
3215 | just merged. If there are any INTEGER_CST rhs_code |
3216 | stores in between, we need to merge_overlapping them |
3217 | even if in the sort_by_bitpos order there are other |
3218 | overlapping stores in between. Keep those stores as is. |
3219 | Example: |
3220 | MEM[(int *)p_28] = 0; |
3221 | MEM[(char *)p_28 + 3B] = 1; |
3222 | MEM[(char *)p_28 + 1B] = 2; |
3223 | MEM[(char *)p_28 + 2B] = MEM[(char *)p_28 + 6B]; |
3224 | We can't merge the zero store with the store of two and |
3225 | not merge anything else, because the store of one is |
3226 | in the original order in between those two, but in |
3227 | sort_by_bitpos order it comes after the last store that
3228 | we can't merge with them. We can merge the first 3 stores |
3229 | and keep the last store as is though. */ |
3230 | unsigned int len = m_store_info.length (); |
3231 | unsigned int try_order = last_order; |
3232 | unsigned int first_nonmergeable_order; |
3233 | unsigned int k; |
3234 | bool last_iter = false; |
3235 | int attempts = 0; |
3236 | do |
3237 | { |
3238 | unsigned int max_order = 0; |
3239 | unsigned int min_order = first_order; |
3240 | unsigned first_nonmergeable_int_order = ~0U; |
3241 | unsigned HOST_WIDE_INT this_end = end; |
3242 | k = i; |
3243 | first_nonmergeable_order = ~0U; |
3244 | for (unsigned int j = i + 1; j < len; ++j) |
3245 | { |
3246 | store_immediate_info *info2 = m_store_info[j]; |
3247 | if (info2->bitpos >= this_end) |
3248 | break; |
3249 | if (info2->order < try_order) |
3250 | { |
3251 | if (info2->rhs_code != INTEGER_CST |
3252 | || info2->lp_nr != merged_store->lp_nr) |
3253 | { |
3254 | /* Normally check_no_overlap makes sure this |
3255 | doesn't happen, but if end grows below, |
3256 | then we need to process more stores than |
3257 | check_no_overlap verified. Example: |
3258 | MEM[(int *)p_5] = 0; |
3259 | MEM[(short *)p_5 + 3B] = 1; |
3260 | MEM[(char *)p_5 + 4B] = _9; |
3261 | MEM[(char *)p_5 + 2B] = 2; */ |
3262 | k = 0; |
3263 | break; |
3264 | } |
3265 | k = j; |
3266 | min_order = MIN (min_order, info2->order); |
3267 | this_end = MAX (this_end, |
3268 | info2->bitpos + info2->bitsize); |
3269 | } |
3270 | else if (info2->rhs_code == INTEGER_CST |
3271 | && info2->lp_nr == merged_store->lp_nr |
3272 | && !last_iter) |
3273 | { |
3274 | max_order = MAX (max_order, info2->order + 1); |
3275 | first_nonmergeable_int_order |
3276 | = MIN (first_nonmergeable_int_order, |
3277 | info2->order); |
3278 | } |
3279 | else |
3280 | first_nonmergeable_order |
3281 | = MIN (first_nonmergeable_order, info2->order); |
3282 | } |
3283 | if (k > i |
3284 | && !check_no_overlap (m_store_info, len - 1, true,
3285 | min_order, try_order,
3286 | merged_store->start, this_end,
3287 | first_earlier, end_earlier)) |
3288 | k = 0; |
3289 | if (k == 0) |
3290 | { |
3291 | if (last_order == try_order) |
3292 | break; |
3293 | /* If this failed, but only because we grew |
3294 | try_order, retry with the last working one, |
3295 | so that we merge at least something. */ |
3296 | try_order = last_order; |
3297 | last_iter = true; |
3298 | continue; |
3299 | } |
3300 | last_order = try_order; |
3301 | /* Retry with a larger try_order to see if we could |
3302 | merge some further INTEGER_CST stores. */ |
3303 | if (max_order |
3304 | && (first_nonmergeable_int_order |
3305 | < first_nonmergeable_order)) |
3306 | { |
3307 | try_order = MIN (max_order, |
3308 | first_nonmergeable_order); |
3309 | try_order |
3310 | = MIN (try_order, |
3311 | merged_store->first_nonmergeable_order); |
3312 | if (try_order > last_order && ++attempts < 16) |
3313 | continue; |
3314 | } |
3315 | first_nonmergeable_order |
3316 | = MIN (first_nonmergeable_order, |
3317 | first_nonmergeable_int_order); |
3318 | end = this_end; |
3319 | break; |
3320 | } |
3321 | while (1); |
3322 | |
3323 | if (k != 0) |
3324 | { |
3325 | merged_store->merge_overlapping (info); |
3326 | |
3327 | merged_store->first_nonmergeable_order |
3328 | = MIN (merged_store->first_nonmergeable_order, |
3329 | first_nonmergeable_order); |
3330 | |
3331 | for (unsigned int j = i + 1; j <= k; j++) |
3332 | { |
3333 | store_immediate_info *info2 = m_store_info[j]; |
3334 | gcc_assert (info2->bitpos < end); |
3335 | if (info2->order < last_order) |
3336 | { |
3337 | gcc_assert (info2->rhs_code == INTEGER_CST); |
3338 | if (info != info2) |
3339 | merged_store->merge_overlapping (info2);
3340 | } |
3341 | /* Other stores are kept and not merged in any |
3342 | way. */ |
3343 | } |
3344 | ignore = k; |
3345 | goto done; |
3346 | } |
3347 | } |
3348 | } |
3349 | } |
3350 | /* |---store 1---||---store 2---| |
3351 | This store is consecutive to the previous one. |
3352 | Merge it into the current store group. There can be gaps in between |
3353 | the stores, but there can't be gaps in between bitregions. */ |
3354 | else if (info->bitregion_start <= merged_store->bitregion_end |
3355 | && merged_store->can_be_merged_into (info)) |
3356 | { |
3357 | store_immediate_info *infof = merged_store->stores[0]; |
3358 | |
3359 | /* All the rhs_code ops that take 2 operands are commutative;
3360 | swap the operands if that could make them compatible. */
3361 | if (infof->ops[0].base_addr |
3362 | && infof->ops[1].base_addr |
3363 | && info->ops[0].base_addr |
3364 | && info->ops[1].base_addr |
3365 | && known_eq (info->ops[1].bitpos - infof->ops[0].bitpos, |
3366 | info->bitpos - infof->bitpos) |
3367 | && operand_equal_p (info->ops[1].base_addr, |
3368 | infof->ops[0].base_addr, 0))
3369 | { |
3370 | std::swap (info->ops[0], info->ops[1]);
3371 | info->ops_swapped_p = true; |
3372 | } |
3373 | if (check_no_overlap (m_store_info, i, false,
3374 | MIN (merged_store->first_order, info->order),
3375 | MAX (merged_store->last_order, info->order),
3376 | merged_store->start,
3377 | MAX (merged_store->start + merged_store->width, |
3378 | info->bitpos + info->bitsize), |
3379 | first_earlier, end_earlier)) |
3380 | { |
3381 | /* Turn MEM_REF into BIT_INSERT_EXPR for bit-field stores. */ |
3382 | if (info->rhs_code == MEM_REF && infof->rhs_code != MEM_REF) |
3383 | { |
3384 | info->rhs_code = BIT_INSERT_EXPR; |
3385 | info->ops[0].val = gimple_assign_rhs1 (info->stmt);
3386 | info->ops[0].base_addr = NULL_TREE; |
3387 | } |
3388 | else if (infof->rhs_code == MEM_REF && info->rhs_code != MEM_REF) |
3389 | { |
3390 | for (store_immediate_info *infoj : merged_store->stores) |
3391 | { |
3392 | infoj->rhs_code = BIT_INSERT_EXPR; |
3393 | infoj->ops[0].val = gimple_assign_rhs1 (infoj->stmt);
3394 | infoj->ops[0].base_addr = NULL_TREE; |
3395 | } |
3396 | merged_store->bit_insertion = true; |
3397 | } |
3398 | if ((infof->ops[0].base_addr |
3399 | ? compatible_load_p (merged_store, info, base_addr, 0)
3400 | : !info->ops[0].base_addr)
3401 | && (infof->ops[1].base_addr
3402 | ? compatible_load_p (merged_store, info, base_addr, 1)
3403 | : !info->ops[1].base_addr)) |
3404 | { |
3405 | merged_store->merge_into (info); |
3406 | goto done; |
3407 | } |
3408 | } |
3409 | } |
3410 | |
3411 | /* |---store 1---| <gap> |---store 2---|. |
3412 | Gap between stores or the rhs not compatible. Start a new group. */ |
3413 | |
3414 | /* Try to apply all the stores recorded for the group to determine |
3415 | the bitpattern they write and discard it if that fails. |
3416 | This will also reject single-store groups. */ |
3417 | if (merged_store->apply_stores ()) |
3418 | m_merged_store_groups.safe_push (merged_store);
3419 | else |
3420 | delete merged_store; |
3421 | |
3422 | merged_store = new merged_store_group (info); |
3423 | end_earlier = i; |
3424 | if (dump_file && (dump_flags & TDF_DETAILS)) |
3425 | fputs ("New store group\n", dump_file);
3426 | |
3427 | done: |
3428 | if (dump_file && (dump_flags & TDF_DETAILS)) |
3429 | { |
3430 | fprintf (dump_file, "Store %u:\nbitsize:" HOST_WIDE_INT_PRINT_DEC
3431 | " bitpos:" HOST_WIDE_INT_PRINT_DEC " val:",
3432 | i, info->bitsize, info->bitpos);
3433 | print_generic_expr (dump_file, gimple_assign_rhs1 (info->stmt));
3434 | fputc ('\n', dump_file);
3435 | } |
3436 | } |
3437 | |
3438 | /* Record or discard the last store group. */ |
3439 | if (merged_store) |
3440 | { |
3441 | if (merged_store->apply_stores ()) |
3442 | m_merged_store_groups.safe_push (merged_store);
3443 | else |
3444 | delete merged_store; |
3445 | } |
3446 | |
3447 | gcc_assert (m_merged_store_groups.length () <= m_store_info.length ()); |
3448 | |
3449 | bool success |
3450 | = !m_merged_store_groups.is_empty () |
3451 | && m_merged_store_groups.length () < m_store_info.length (); |
3452 | |
3453 | if (success && dump_file) |
3454 | fprintf (dump_file, "Coalescing successful!\nMerged into %u stores\n",
3455 | m_merged_store_groups.length ()); |
3456 | |
3457 | return success; |
3458 | } |
3459 | |
3460 | /* Return the type to use for the merged stores or loads described by STMTS. |
3461 | This is needed to get the alias sets right. If IS_LOAD, look for rhs, |
3462 | otherwise lhs. Additionally set *CLIQUEP and *BASEP to MR_DEPENDENCE_* |
3463 | of the MEM_REFs if any. */ |
3464 | |
3465 | static tree |
3466 | get_alias_type_for_stmts (vec<gimple *> &stmts, bool is_load, |
3467 | unsigned short *cliquep, unsigned short *basep) |
3468 | { |
3469 | gimple *stmt; |
3470 | unsigned int i; |
3471 | tree type = NULL_TREE; |
3472 | tree ret = NULL_TREE; |
3473 | *cliquep = 0; |
3474 | *basep = 0; |
3475 | |
3476 | FOR_EACH_VEC_ELT (stmts, i, stmt) |
3477 | { |
3478 | tree ref = is_load ? gimple_assign_rhs1 (stmt)
3479 | : gimple_assign_lhs (stmt);
3480 | tree type1 = reference_alias_ptr_type (ref);
3481 | tree base = get_base_address (ref);
3482 | |
3483 | if (i == 0) |
3484 | { |
3485 | if (TREE_CODE (base) == MEM_REF) |
3486 | { |
3487 | *cliquep = MR_DEPENDENCE_CLIQUE (base); |
3488 | *basep = MR_DEPENDENCE_BASE (base); |
3489 | } |
3490 | ret = type = type1; |
3491 | continue; |
3492 | } |
3493 | if (!alias_ptr_types_compatible_p (type, type1)) |
3494 | ret = ptr_type_node; |
3495 | if (TREE_CODE (base) != MEM_REF |
3496 | || *cliquep != MR_DEPENDENCE_CLIQUE (base) |
3497 | || *basep != MR_DEPENDENCE_BASE (base)) |
3498 | { |
3499 | *cliquep = 0; |
3500 | *basep = 0; |
3501 | } |
3502 | } |
3503 | return ret; |
3504 | } |
3505 | |
3506 | /* Return the location_t information we can find among the statements |
3507 | in STMTS. */ |
3508 | |
3509 | static location_t |
3510 | get_location_for_stmts (vec<gimple *> &stmts) |
3511 | { |
3512 | for (gimple *stmt : stmts) |
3513 | if (gimple_has_location (stmt))
3514 | return gimple_location (stmt);
3515 | |
3516 | return UNKNOWN_LOCATION; |
3517 | } |
3518 | |
3519 | /* Used to describe a store resulting from splitting a wide store into
3520 | smaller regularly-sized stores in split_group. */
3521 | |
3522 | class split_store |
3523 | { |
3524 | public: |
3525 | unsigned HOST_WIDE_INT bytepos; |
3526 | unsigned HOST_WIDE_INT size; |
3527 | unsigned HOST_WIDE_INT align; |
3528 | auto_vec<store_immediate_info *> orig_stores; |
3529 | /* True if there is a single orig stmt covering the whole split store. */ |
3530 | bool orig; |
3531 | split_store (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT, |
3532 | unsigned HOST_WIDE_INT); |
3533 | }; |
3534 | |
3535 | /* Simple constructor. */ |
3536 | |
3537 | split_store::split_store (unsigned HOST_WIDE_INT bp, |
3538 | unsigned HOST_WIDE_INT sz, |
3539 | unsigned HOST_WIDE_INT al) |
3540 | : bytepos (bp), size (sz), align (al), orig (false) |
3541 | { |
3542 | orig_stores.create (0);
3543 | } |
3544 | |
3545 | /* Record all stores in GROUP that write to the region starting at BITPOS
3546 | and of size BITSIZE.  Record infos for such statements in STORES if
3547 | non-NULL.  The stores in GROUP must be sorted by bitposition.  Return
3548 | INFO if there is exactly one original store in the range (in that case
3549 | ignore clobber stmts, unless there are only clobber stmts). */
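/* Illustrative example of the contract above: with non-clobber stores
   covering bits [0, 16) and [16, 32), a query for BITPOS 0 and BITSIZE 32
   pushes both infos into STORES but returns NULL, while a query for
   BITPOS 16 and BITSIZE 16 returns the second store's info.  */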
3550 | |
3551 | static store_immediate_info * |
3552 | find_constituent_stores (class merged_store_group *group, |
3553 | vec<store_immediate_info *> *stores, |
3554 | unsigned int *first, |
3555 | unsigned HOST_WIDE_INT bitpos, |
3556 | unsigned HOST_WIDE_INT bitsize) |
3557 | { |
3558 | store_immediate_info *info, *ret = NULL; |
3559 | unsigned int i; |
3560 | bool second = false; |
3561 | bool update_first = true; |
3562 | unsigned HOST_WIDE_INT end = bitpos + bitsize; |
3563 | for (i = *first; group->stores.iterate (i, &info); ++i)
3564 | { |
3565 | unsigned HOST_WIDE_INT stmt_start = info->bitpos; |
3566 | unsigned HOST_WIDE_INT stmt_end = stmt_start + info->bitsize; |
3567 | if (stmt_end <= bitpos) |
3568 | { |
3569 | /* BITPOS passed to this function never decreases from within the |
3570 | same split_group call, so optimize and don't scan info records |
3571 | which are known to end before or at BITPOS next time. |
3572 | Only do it if all stores before this one also pass this. */ |
3573 | if (update_first) |
3574 | *first = i + 1; |
3575 | continue; |
3576 | } |
3577 | else |
3578 | update_first = false; |
3579 | |
3580 | /* The stores in GROUP are ordered by bitposition so if we're past |
3581 | the region for this group return early. */ |
3582 | if (stmt_start >= end) |
3583 | return ret; |
3584 | |
3585 | if (gimple_clobber_p (info->stmt))
3586 | {
3587 | if (stores)
3588 | stores->safe_push (info);
3589 | if (ret == NULL)
3590 | ret = info;
3591 | continue;
3592 | }
3593 | if (stores)
3594 | {
3595 | stores->safe_push (info);
3596 | if (ret && !gimple_clobber_p (ret->stmt))
3597 | {
3598 | ret = NULL;
3599 | second = true;
3600 | }
3601 | }
3602 | else if (ret && !gimple_clobber_p (ret->stmt))
3603 | return NULL; |
3604 | if (!second) |
3605 | ret = info; |
3606 | } |
3607 | return ret; |
3608 | } |
3609 | |
3610 | /* Return how many SSA_NAMEs used to compute the value to store in the
3611 | INFO store have multiple uses.  If any SSA_NAME has multiple uses, also
3612 | count the statements needed to compute it. */
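/* For example (illustrative GIMPLE, not from the source):
     _1 = *q;
     _2 = _1 ^ mask_3;
     *p = _2;
   if _1 has uses besides the BIT_XOR_EXPR, merging cannot simply delete
   its defining load, so such statements are counted here and charged
   against the merged sequence in split_group's cost estimate.  */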
3613 | |
3614 | static unsigned |
3615 | count_multiple_uses (store_immediate_info *info) |
3616 | { |
3617 | gimple *stmt = info->stmt; |
3618 | unsigned ret = 0; |
3619 | switch (info->rhs_code) |
3620 | { |
3621 | case INTEGER_CST: |
3622 | case STRING_CST: |
3623 | return 0; |
3624 | case BIT_AND_EXPR: |
3625 | case BIT_IOR_EXPR: |
3626 | case BIT_XOR_EXPR: |
3627 | if (info->bit_not_p) |
3628 | { |
3629 | if (!has_single_use (gimple_assign_rhs1 (stmt)))
3630 | ret = 1; /* Fall through below to return
3631 | the BIT_NOT_EXPR stmt and then
3632 | BIT_{AND,IOR,XOR}_EXPR and anything it
3633 | uses. */
3634 | else
3635 | /* After this, stmt is the BIT_NOT_EXPR. */
3636 | stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3637 | } |
3638 | if (!has_single_use (gimple_assign_rhs1 (stmt)))
3639 | { |
3640 | ret += 1 + info->ops[0].bit_not_p; |
3641 | if (info->ops[1].base_addr) |
3642 | ret += 1 + info->ops[1].bit_not_p; |
3643 | return ret + 1; |
3644 | } |
3645 | stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt)); |
3646 | /* stmt is now the BIT_*_EXPR. */ |
3647 | if (!has_single_use (gimple_assign_rhs1 (stmt)))
3648 | ret += 1 + info->ops[info->ops_swapped_p].bit_not_p;
3649 | else if (info->ops[info->ops_swapped_p].bit_not_p)
3650 | {
3651 | gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3652 | if (!has_single_use (gimple_assign_rhs1 (stmt2)))
3653 | ++ret; |
3654 | } |
3655 | if (info->ops[1].base_addr == NULL_TREE) |
3656 | { |
3657 | gcc_checking_assert (!info->ops_swapped_p); |
3658 | return ret; |
3659 | } |
3660 | if (!has_single_use (gimple_assign_rhs2 (stmt)))
3661 | ret += 1 + info->ops[1 - info->ops_swapped_p].bit_not_p;
3662 | else if (info->ops[1 - info->ops_swapped_p].bit_not_p)
3663 | {
3664 | gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));
3665 | if (!has_single_use (gimple_assign_rhs1 (stmt2)))
3666 | ++ret; |
3667 | } |
3668 | return ret; |
3669 | case MEM_REF: |
3670 | if (!has_single_use (gimple_assign_rhs1 (stmt)))
3671 | return 1 + info->ops[0].bit_not_p;
3672 | else if (info->ops[0].bit_not_p)
3673 | {
3674 | stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3675 | if (!has_single_use (gimple_assign_rhs1 (stmt)))
3676 | return 1; |
3677 | } |
3678 | return 0; |
3679 | case BIT_INSERT_EXPR: |
3680 | return has_single_use (gimple_assign_rhs1 (stmt)) ? 0 : 1;
3681 | default: |
3682 | gcc_unreachable (); |
3683 | } |
3684 | } |
3685 | |
3686 | /* Split a merged store described by GROUP by populating the SPLIT_STORES |
3687 | vector (if non-NULL) with split_store structs describing the byte offset |
3688 | (from the base), the bit size and alignment of each store as well as the |
3689 | original statements involved in each such split group. |
3690 | This is to separate the splitting strategy from the statement |
3691 | building/emission/linking done in output_merged_store. |
3692 | Return number of new stores. |
3693 | If ALLOW_UNALIGNED_STORE is false, then all stores must be aligned. |
3694 | If ALLOW_UNALIGNED_LOAD is false, then all loads must be aligned. |
3695 | BZERO_FIRST may be true only when the first store covers the whole group |
3696 | and clears it; if BZERO_FIRST is true, keep that first store in the set |
3697 | unmodified and emit further stores for the overrides only. |
3698 | If SPLIT_STORES is NULL, it is just a dry run to count number of |
3699 | new stores. */ |
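/* A sketch of the strategy with made-up numbers: a merged 6-byte region
   at a 4-byte-aligned address would typically be emitted as one 4-byte
   store followed by one 2-byte store; the loop below starts from
   MAX_STORE_BITSIZE, shrinks try_size for alignment and padding bytes,
   and repeats until the whole region is covered.  */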
3700 | |
3701 | static unsigned int |
3702 | split_group (merged_store_group *group, bool allow_unaligned_store, |
3703 | bool allow_unaligned_load, bool bzero_first, |
3704 | vec<split_store *> *split_stores, |
3705 | unsigned *total_orig, |
3706 | unsigned *total_new) |
3707 | { |
3708 | unsigned HOST_WIDE_INT pos = group->bitregion_start; |
3709 | unsigned HOST_WIDE_INT size = group->bitregion_end - pos; |
3710 | unsigned HOST_WIDE_INT bytepos = pos / BITS_PER_UNIT; |
3711 | unsigned HOST_WIDE_INT group_align = group->align; |
3712 | unsigned HOST_WIDE_INT align_base = group->align_base; |
3713 | unsigned HOST_WIDE_INT group_load_align = group_align; |
3714 | bool any_orig = false; |
3715 | |
3716 | gcc_assert ((size % BITS_PER_UNIT == 0) && (pos % BITS_PER_UNIT == 0)); |
3717 | |
3718 | /* For bswap framework using sets of stores, all the checking has been done |
3719 | earlier in try_coalesce_bswap and the result always needs to be emitted |
3720 | as a single store. Likewise for string concatenation. */ |
3721 | if (group->stores[0]->rhs_code == LROTATE_EXPR |
3722 | || group->stores[0]->rhs_code == NOP_EXPR |
3723 | || group->string_concatenation) |
3724 | { |
3725 | gcc_assert (!bzero_first); |
3726 | if (total_orig) |
3727 | { |
3728 | /* Avoid the old/new stmt count heuristics.  It should
3729 | always be beneficial. */
3730 | total_new[0] = 1; |
3731 | total_orig[0] = 2; |
3732 | } |
3733 | |
3734 | if (split_stores) |
3735 | { |
3736 | unsigned HOST_WIDE_INT align_bitpos |
3737 | = (group->start - align_base) & (group_align - 1); |
3738 | unsigned HOST_WIDE_INT align = group_align; |
3739 | if (align_bitpos) |
3740 | align = least_bit_hwi (align_bitpos);
3741 | bytepos = group->start / BITS_PER_UNIT;
3742 | split_store *store
3743 | = new split_store (bytepos, group->width, align);
3744 | unsigned int first = 0;
3745 | find_constituent_stores (group, &store->orig_stores,
3746 | &first, group->start, group->width);
3747 | split_stores->safe_push (store);
3748 | } |
3749 | |
3750 | return 1; |
3751 | } |
3752 | |
3753 | unsigned int ret = 0, first = 0; |
3754 | unsigned HOST_WIDE_INT try_pos = bytepos; |
3755 | |
3756 | if (total_orig) |
3757 | { |
3758 | unsigned int i; |
3759 | store_immediate_info *info = group->stores[0]; |
3760 | |
3761 | total_new[0] = 0; |
3762 | total_orig[0] = 1; /* The orig store. */ |
3763 | info = group->stores[0]; |
3764 | if (info->ops[0].base_addr) |
3765 | total_orig[0]++; |
3766 | if (info->ops[1].base_addr) |
3767 | total_orig[0]++; |
3768 | switch (info->rhs_code) |
3769 | { |
3770 | case BIT_AND_EXPR: |
3771 | case BIT_IOR_EXPR: |
3772 | case BIT_XOR_EXPR: |
3773 | total_orig[0]++; /* The orig BIT_*_EXPR stmt. */ |
3774 | break; |
3775 | default: |
3776 | break; |
3777 | } |
3778 | total_orig[0] *= group->stores.length (); |
3779 | |
3780 | FOR_EACH_VEC_ELT (group->stores, i, info) |
3781 | { |
3782 | total_new[0] += count_multiple_uses (info); |
3783 | total_orig[0] += (info->bit_not_p |
3784 | + info->ops[0].bit_not_p |
3785 | + info->ops[1].bit_not_p); |
3786 | } |
3787 | } |
3788 | |
3789 | if (!allow_unaligned_load) |
3790 | for (int i = 0; i < 2; ++i) |
3791 | if (group->load_align[i]) |
3792 | group_load_align = MIN (group_load_align, group->load_align[i]); |
3793 | |
3794 | if (bzero_first) |
3795 | { |
3796 | store_immediate_info *gstore; |
3797 | FOR_EACH_VEC_ELT (group->stores, first, gstore) |
3798 | if (!gimple_clobber_p (gstore->stmt))
3799 | break;
3800 | ++first;
3801 | ret = 1;
3802 | if (split_stores)
3803 | {
3804 | split_store *store
3805 | = new split_store (bytepos, gstore->bitsize, align_base);
3806 | store->orig_stores.safe_push (gstore);
3807 | store->orig = true;
3808 | any_orig = true;
3809 | split_stores->safe_push (store);
3810 | } |
3811 | } |
3812 | |
3813 | while (size > 0) |
3814 | { |
3815 | if ((allow_unaligned_store || group_align <= BITS_PER_UNIT) |
3816 | && (group->mask[try_pos - bytepos] == (unsigned char) ~0U |
3817 | || (bzero_first && group->val[try_pos - bytepos] == 0))) |
3818 | { |
3819 | /* Skip padding bytes. */ |
3820 | ++try_pos; |
3821 | size -= BITS_PER_UNIT; |
3822 | continue; |
3823 | } |
3824 | |
3825 | unsigned HOST_WIDE_INT try_bitpos = try_pos * BITS_PER_UNIT; |
3826 | unsigned int try_size = MAX_STORE_BITSIZE, nonmasked; |
3827 | unsigned HOST_WIDE_INT align_bitpos |
3828 | = (try_bitpos - align_base) & (group_align - 1); |
3829 | unsigned HOST_WIDE_INT align = group_align; |
3830 | bool found_orig = false; |
3831 | if (align_bitpos) |
3832 | align = least_bit_hwi (align_bitpos);
3833 | if (!allow_unaligned_store) |
3834 | try_size = MIN (try_size, align); |
3835 | if (!allow_unaligned_load) |
3836 | { |
3837 | /* If we can't do or don't want to do unaligned stores |
3838 | as well as loads, we need to take the loads into account |
3839 | as well. */ |
3840 | unsigned HOST_WIDE_INT load_align = group_load_align; |
3841 | align_bitpos = (try_bitpos - align_base) & (load_align - 1); |
3842 | if (align_bitpos) |
3843 | load_align = least_bit_hwi (align_bitpos);
3844 | for (int i = 0; i < 2; ++i) |
3845 | if (group->load_align[i]) |
3846 | { |
3847 | align_bitpos |
3848 | = known_alignment (try_bitpos
3849 | - group->stores[0]->bitpos
3850 | + group->stores[0]->ops[i].bitpos
3851 | - group->load_align_base[i]);
3852 | if (align_bitpos & (group_load_align - 1))
3853 | {
3854 | unsigned HOST_WIDE_INT a = least_bit_hwi (align_bitpos);
3855 | load_align = MIN (load_align, a); |
3856 | } |
3857 | } |
3858 | try_size = MIN (try_size, load_align); |
3859 | } |
3860 | store_immediate_info *info |
3861 | = find_constituent_stores (group, NULL, &first, try_bitpos, try_size);
3862 | if (info && !gimple_clobber_p (info->stmt))
3863 | { |
3864 | /* If there is just one original statement for the range, see if |
3865 | we can just reuse the original store which could be even larger |
3866 | than try_size. */ |
3867 | unsigned HOST_WIDE_INT stmt_end |
3868 | = ROUND_UP (info->bitpos + info->bitsize, BITS_PER_UNIT); |
3869 | info = find_constituent_stores (group, NULL, &first, try_bitpos,
3870 | stmt_end - try_bitpos);
3871 | if (info && info->bitpos >= try_bitpos) |
3872 | { |
3873 | store_immediate_info *info2 = NULL; |
3874 | unsigned int first_copy = first; |
3875 | if (info->bitpos > try_bitpos |
3876 | && stmt_end - try_bitpos <= try_size) |
3877 | { |
3878 | info2 = find_constituent_stores (group, NULL, &first_copy,
3879 | try_bitpos,
3880 | info->bitpos - try_bitpos);
3881 | gcc_assert (info2 == NULL || gimple_clobber_p (info2->stmt));
3882 | }
3883 | if (info2 == NULL && stmt_end - try_bitpos < try_size)
3884 | {
3885 | info2 = find_constituent_stores (group, NULL, &first_copy,
3886 | stmt_end,
3887 | (try_bitpos + try_size)
3888 | - stmt_end);
3889 | gcc_assert (info2 == NULL || gimple_clobber_p (info2->stmt));
3890 | } |
3891 | if (info2 == NULL) |
3892 | { |
3893 | try_size = stmt_end - try_bitpos; |
3894 | found_orig = true; |
3895 | goto found; |
3896 | } |
3897 | } |
3898 | } |
3899 | |
3900 | /* Approximate store bitsize for the case when there are no padding |
3901 | bits. */ |
3902 | while (try_size > size) |
3903 | try_size /= 2; |
3904 | /* Now look for whole padding bytes at the end of that bitsize. */ |
3905 | for (nonmasked = try_size / BITS_PER_UNIT; nonmasked > 0; --nonmasked) |
3906 | if (group->mask[try_pos - bytepos + nonmasked - 1] |
3907 | != (unsigned char) ~0U |
3908 | && (!bzero_first |
3909 | || group->val[try_pos - bytepos + nonmasked - 1] != 0)) |
3910 | break; |
3911 | if (nonmasked == 0 || (info && gimple_clobber_p (info->stmt)))
3912 | { |
3913 | /* If entire try_size range is padding, skip it. */ |
3914 | try_pos += try_size / BITS_PER_UNIT; |
3915 | size -= try_size; |
3916 | continue; |
3917 | } |
3918 | /* Otherwise try to decrease try_size if second half, last 3 quarters |
3919 | etc. are padding. */ |
3920 | nonmasked *= BITS_PER_UNIT; |
3921 | while (nonmasked <= try_size / 2) |
3922 | try_size /= 2; |
3923 | if (!allow_unaligned_store && group_align > BITS_PER_UNIT) |
3924 | { |
3925 | /* Now look for whole padding bytes at the start of that bitsize. */ |
3926 | unsigned int try_bytesize = try_size / BITS_PER_UNIT, masked; |
3927 | for (masked = 0; masked < try_bytesize; ++masked) |
3928 | if (group->mask[try_pos - bytepos + masked] != (unsigned char) ~0U |
3929 | && (!bzero_first |
3930 | || group->val[try_pos - bytepos + masked] != 0)) |
3931 | break; |
3932 | masked *= BITS_PER_UNIT; |
3933 | gcc_assert (masked < try_size); |
3934 | if (masked >= try_size / 2) |
3935 | { |
3936 | while (masked >= try_size / 2) |
3937 | { |
3938 | try_size /= 2; |
3939 | try_pos += try_size / BITS_PER_UNIT; |
3940 | size -= try_size; |
3941 | masked -= try_size; |
3942 | } |
3943 | /* Need to recompute the alignment, so just retry at the new |
3944 | position. */ |
3945 | continue; |
3946 | } |
3947 | } |
3948 | |
3949 | found: |
3950 | ++ret; |
3951 | |
3952 | if (split_stores) |
3953 | { |
3954 | split_store *store |
3955 | = new split_store (try_pos, try_size, align); |
3956 | info = find_constituent_stores (group, &store->orig_stores,
3957 | &first, try_bitpos, try_size);
3958 | if (info
3959 | && !gimple_clobber_p (info->stmt)
3960 | && info->bitpos >= try_bitpos |
3961 | && info->bitpos + info->bitsize <= try_bitpos + try_size |
3962 | && (store->orig_stores.length () == 1 |
3963 | || found_orig |
3964 | || (info->bitpos == try_bitpos |
3965 | && (info->bitpos + info->bitsize |
3966 | == try_bitpos + try_size)))) |
3967 | { |
3968 | store->orig = true; |
3969 | any_orig = true; |
3970 | } |
3971 | split_stores->safe_push (obj: store); |
3972 | } |
3973 | |
3974 | try_pos += try_size / BITS_PER_UNIT; |
3975 | size -= try_size; |
3976 | } |
3977 | |
3978 | if (total_orig) |
3979 | { |
3980 | unsigned int i; |
3981 | split_store *store; |
3982 | /* If we are reusing some original stores and any of the |
3983 | original SSA_NAMEs had multiple uses, we need to subtract |
3984 | those now before we add the new ones. */ |
3985 | if (total_new[0] && any_orig) |
3986 | { |
3987 | FOR_EACH_VEC_ELT (*split_stores, i, store) |
3988 | if (store->orig) |
3989 | total_new[0] -= count_multiple_uses (store->orig_stores[0]);
3990 | } |
3991 | total_new[0] += ret; /* The new store. */ |
3992 | store_immediate_info *info = group->stores[0]; |
3993 | if (info->ops[0].base_addr) |
3994 | total_new[0] += ret; |
3995 | if (info->ops[1].base_addr) |
3996 | total_new[0] += ret; |
3997 | switch (info->rhs_code) |
3998 | { |
3999 | case BIT_AND_EXPR: |
4000 | case BIT_IOR_EXPR: |
4001 | case BIT_XOR_EXPR: |
4002 | total_new[0] += ret; /* The new BIT_*_EXPR stmt. */ |
4003 | break; |
4004 | default: |
4005 | break; |
4006 | } |
4007 | FOR_EACH_VEC_ELT (*split_stores, i, store) |
4008 | { |
4009 | unsigned int j; |
4010 | bool bit_not_p[3] = { false, false, false }; |
4011 | /* If all orig_stores have certain bit_not_p set, then |
4012 | we'd use a BIT_NOT_EXPR stmt and need to account for it. |
4013 | If some orig_stores have certain bit_not_p set, then |
4014 | we'd use a BIT_XOR_EXPR with a mask and need to account for |
4015 | it. */ |
4016 | FOR_EACH_VEC_ELT (store->orig_stores, j, info) |
4017 | { |
4018 | if (info->ops[0].bit_not_p) |
4019 | bit_not_p[0] = true; |
4020 | if (info->ops[1].bit_not_p) |
4021 | bit_not_p[1] = true; |
4022 | if (info->bit_not_p) |
4023 | bit_not_p[2] = true; |
4024 | } |
4025 | total_new[0] += bit_not_p[0] + bit_not_p[1] + bit_not_p[2]; |
4026 | } |
4027 | |
4028 | } |
4029 | |
4030 | return ret; |
4031 | } |
4032 | |
4033 | /* Return the operation through which the operand IDX (if < 2) or |
4034 | result (IDX == 2) should be inverted. If NOP_EXPR, no inversion |
4035 | is done, if BIT_NOT_EXPR, all bits are inverted, if BIT_XOR_EXPR, |
4036 | the bits should be xored with mask. */ |
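/* Illustrative example: if only one of two merged byte stores had its
   value inverted, this returns BIT_XOR_EXPR with a mask covering just
   that store's bits, so the caller emits something like
     tmp_2 = tmp_1 ^ 0xff00;
   (which byte the mask covers depends on endianness and bit position).  */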
4037 | |
4038 | static enum tree_code |
4039 | invert_op (split_store *split_store, int idx, tree int_type, tree &mask) |
4040 | { |
4041 | unsigned int i; |
4042 | store_immediate_info *info; |
4043 | unsigned int cnt = 0; |
4044 | bool any_paddings = false; |
4045 | FOR_EACH_VEC_ELT (split_store->orig_stores, i, info) |
4046 | { |
4047 | bool bit_not_p = idx < 2 ? info->ops[idx].bit_not_p : info->bit_not_p; |
4048 | if (bit_not_p) |
4049 | { |
4050 | ++cnt; |
4051 | tree lhs = gimple_assign_lhs (info->stmt);
4052 | if (INTEGRAL_TYPE_P (TREE_TYPE (lhs)) |
4053 | && TYPE_PRECISION (TREE_TYPE (lhs)) < info->bitsize) |
4054 | any_paddings = true; |
4055 | } |
4056 | } |
4057 | mask = NULL_TREE; |
4058 | if (cnt == 0) |
4059 | return NOP_EXPR; |
4060 | if (cnt == split_store->orig_stores.length () && !any_paddings) |
4061 | return BIT_NOT_EXPR; |
4062 | |
4063 | unsigned HOST_WIDE_INT try_bitpos = split_store->bytepos * BITS_PER_UNIT; |
4064 | unsigned buf_size = split_store->size / BITS_PER_UNIT; |
4065 | unsigned char *buf |
4066 | = XALLOCAVEC (unsigned char, buf_size); |
4067 | memset (buf, ~0U, buf_size);
4068 | FOR_EACH_VEC_ELT (split_store->orig_stores, i, info) |
4069 | { |
4070 | bool bit_not_p = idx < 2 ? info->ops[idx].bit_not_p : info->bit_not_p; |
4071 | if (!bit_not_p) |
4072 | continue; |
4073 | /* Clear regions with bit_not_p and invert afterwards, rather than |
4074 | clear regions with !bit_not_p, so that gaps in between stores aren't |
4075 | set in the mask. */ |
4076 | unsigned HOST_WIDE_INT bitsize = info->bitsize; |
4077 | unsigned HOST_WIDE_INT prec = bitsize; |
4078 | unsigned int pos_in_buffer = 0; |
4079 | if (any_paddings) |
4080 | { |
4081 | tree lhs = gimple_assign_lhs (info->stmt);
4082 | if (INTEGRAL_TYPE_P (TREE_TYPE (lhs)) |
4083 | && TYPE_PRECISION (TREE_TYPE (lhs)) < bitsize) |
4084 | prec = TYPE_PRECISION (TREE_TYPE (lhs)); |
4085 | } |
4086 | if (info->bitpos < try_bitpos) |
4087 | { |
4088 | gcc_assert (info->bitpos + bitsize > try_bitpos); |
4089 | if (!BYTES_BIG_ENDIAN) |
4090 | { |
4091 | if (prec <= try_bitpos - info->bitpos) |
4092 | continue; |
4093 | prec -= try_bitpos - info->bitpos; |
4094 | } |
4095 | bitsize -= try_bitpos - info->bitpos; |
4096 | if (BYTES_BIG_ENDIAN && prec > bitsize) |
4097 | prec = bitsize; |
4098 | } |
4099 | else |
4100 | pos_in_buffer = info->bitpos - try_bitpos; |
4101 | if (prec < bitsize) |
4102 | { |
4103 | /* If this is a bool inversion, invert just the least significant |
4104 | prec bits rather than all bits of it. */ |
4105 | if (BYTES_BIG_ENDIAN) |
4106 | { |
4107 | pos_in_buffer += bitsize - prec; |
4108 | if (pos_in_buffer >= split_store->size) |
4109 | continue; |
4110 | } |
4111 | bitsize = prec; |
4112 | } |
4113 | if (pos_in_buffer + bitsize > split_store->size) |
4114 | bitsize = split_store->size - pos_in_buffer; |
4115 | unsigned char *p = buf + (pos_in_buffer / BITS_PER_UNIT); |
4116 | if (BYTES_BIG_ENDIAN) |
4117 | clear_bit_region_be (p, (BITS_PER_UNIT - 1
4118 | - (pos_in_buffer % BITS_PER_UNIT)), bitsize);
4119 | else
4120 | clear_bit_region (p, pos_in_buffer % BITS_PER_UNIT, bitsize);
4121 | } |
4122 | for (unsigned int i = 0; i < buf_size; ++i) |
4123 | buf[i] = ~buf[i]; |
4124 | mask = native_interpret_expr (int_type, buf, buf_size); |
4125 | return BIT_XOR_EXPR; |
4126 | } |
4127 | |
4128 | /* Given a merged store group GROUP output the widened version of it. |
4129 | The store chain is against the base object BASE. |
4130 | Try store sizes of at most MAX_STORE_BITSIZE bits wide and don't output |
4131 | unaligned stores for STRICT_ALIGNMENT targets or if it's too expensive. |
4132 | Make sure that the number of statements output is less than the number of |
4133 | original statements. If a better sequence is possible emit it and |
4134 | return true. */ |
4135 | |
4136 | bool |
4137 | imm_store_chain_info::output_merged_store (merged_store_group *group) |
4138 | { |
4139 | const unsigned HOST_WIDE_INT start_byte_pos |
4140 | = group->bitregion_start / BITS_PER_UNIT; |
4141 | unsigned int orig_num_stmts = group->stores.length (); |
4142 | if (orig_num_stmts < 2) |
4143 | return false; |
4144 | |
4145 | bool allow_unaligned_store |
4146 | = !STRICT_ALIGNMENT && param_store_merging_allow_unaligned; |
4147 | bool allow_unaligned_load = allow_unaligned_store; |
4148 | bool bzero_first = false; |
4149 | store_immediate_info *store; |
4150 | unsigned int num_clobber_stmts = 0; |
4151 | if (group->stores[0]->rhs_code == INTEGER_CST) |
4152 | { |
4153 | unsigned int i; |
4154 | FOR_EACH_VEC_ELT (group->stores, i, store) |
4155 | if (gimple_clobber_p (store->stmt))
4156 | num_clobber_stmts++; |
4157 | else if (TREE_CODE (gimple_assign_rhs1 (store->stmt)) == CONSTRUCTOR |
4158 | && CONSTRUCTOR_NELTS (gimple_assign_rhs1 (store->stmt)) == 0 |
4159 | && group->start == store->bitpos |
4160 | && group->width == store->bitsize |
4161 | && (group->start % BITS_PER_UNIT) == 0 |
4162 | && (group->width % BITS_PER_UNIT) == 0) |
4163 | { |
4164 | bzero_first = true; |
4165 | break; |
4166 | } |
4167 | else |
4168 | break; |
4169 | FOR_EACH_VEC_ELT_FROM (group->stores, i, store, i) |
4170 | if (gimple_clobber_p (store->stmt))
4171 | num_clobber_stmts++; |
4172 | if (num_clobber_stmts == orig_num_stmts) |
4173 | return false; |
4174 | orig_num_stmts -= num_clobber_stmts; |
4175 | } |
4176 | if (allow_unaligned_store || bzero_first) |
4177 | { |
4178 | /* If unaligned stores are allowed, see how many stores we'd emit |
4179 | for unaligned and how many stores we'd emit for aligned stores. |
4180 | Only use unaligned stores if it allows fewer stores than aligned. |
4181 | Similarly, if there is a whole region clear first, prefer expanding |
4182 | it together compared to expanding clear first followed by merged |
4183 | further stores. */ |
4184 | unsigned cnt[4] = { ~0U, ~0U, ~0U, ~0U }; |
4185 | int pass_min = 0; |
4186 | for (int pass = 0; pass < 4; ++pass) |
4187 | { |
4188 | if (!allow_unaligned_store && (pass & 1) != 0) |
4189 | continue; |
4190 | if (!bzero_first && (pass & 2) != 0) |
4191 | continue; |
4192 | cnt[pass] = split_group (group, (pass & 1) != 0,
4193 | allow_unaligned_load, (pass & 2) != 0,
4194 | NULL, NULL, NULL); |
4195 | if (cnt[pass] < cnt[pass_min]) |
4196 | pass_min = pass; |
4197 | } |
4198 | if ((pass_min & 1) == 0) |
4199 | allow_unaligned_store = false; |
4200 | if ((pass_min & 2) == 0) |
4201 | bzero_first = false; |
4202 | } |
4203 | |
4204 | auto_vec<class split_store *, 32> split_stores; |
4205 | split_store *split_store; |
4206 | unsigned total_orig, total_new, i; |
4207 | split_group (group, allow_unaligned_store, allow_unaligned_load, bzero_first, |
4208 | &split_stores, &total_orig, &total_new);
4209 | |
4210 | /* Determine if there is a clobber covering the whole group at the start, |
4211 | followed by proposed split stores that cover the whole group. In that |
4212 | case, prefer the transformation even if |
4213 | split_stores.length () == orig_num_stmts. */ |
4214 | bool clobber_first = false; |
4215 | if (num_clobber_stmts |
4216 | && gimple_clobber_p (group->stores[0]->stmt)
4217 | && group->start == group->stores[0]->bitpos |
4218 | && group->width == group->stores[0]->bitsize |
4219 | && (group->start % BITS_PER_UNIT) == 0 |
4220 | && (group->width % BITS_PER_UNIT) == 0) |
4221 | { |
4222 | clobber_first = true; |
4223 | unsigned HOST_WIDE_INT pos = group->start / BITS_PER_UNIT; |
4224 | FOR_EACH_VEC_ELT (split_stores, i, split_store) |
4225 | if (split_store->bytepos != pos) |
4226 | { |
4227 | clobber_first = false; |
4228 | break; |
4229 | } |
4230 | else |
4231 | pos += split_store->size / BITS_PER_UNIT; |
4232 | if (pos != (group->start + group->width) / BITS_PER_UNIT) |
4233 | clobber_first = false; |
4234 | } |
4235 | |
4236 | if (split_stores.length () >= orig_num_stmts + clobber_first) |
4237 | {
4239 | /* We didn't manage to reduce the number of statements. Bail out. */ |
4240 | if (dump_file && (dump_flags & TDF_DETAILS)) |
4241 | fprintf (dump_file, "Exceeded original number of stmts (%u)."
4242 | " Not profitable to emit new sequence.\n",
4243 | orig_num_stmts); |
4244 | FOR_EACH_VEC_ELT (split_stores, i, split_store) |
4245 | delete split_store; |
4246 | return false; |
4247 | } |
4248 | if (total_orig <= total_new) |
4249 | { |
4250 | /* If number of estimated new statements is above estimated original |
4251 | statements, bail out too. */ |
4252 | if (dump_file && (dump_flags & TDF_DETAILS)) |
4253 | fprintf (dump_file, "Estimated number of original stmts (%u)"
4254 | " not larger than estimated number of new"
4255 | " stmts (%u).\n",
4256 | total_orig, total_new); |
4257 | FOR_EACH_VEC_ELT (split_stores, i, split_store) |
4258 | delete split_store; |
4259 | return false; |
4260 | } |
4261 | if (group->stores[0]->rhs_code == INTEGER_CST) |
4262 | { |
4263 | bool all_orig = true; |
4264 | FOR_EACH_VEC_ELT (split_stores, i, split_store) |
4265 | if (!split_store->orig) |
4266 | { |
4267 | all_orig = false; |
4268 | break; |
4269 | } |
4270 | if (all_orig) |
4271 | { |
4272 | unsigned int cnt = split_stores.length (); |
4273 | store_immediate_info *store; |
4274 | FOR_EACH_VEC_ELT (group->stores, i, store) |
4275 | if (gimple_clobber_p (store->stmt))
4276 | ++cnt; |
4277 | /* Punt if we wouldn't make any real changes, i.e. keep all |
4278 | orig stmts + all clobbers. */ |
4279 | if (cnt == group->stores.length ()) |
4280 | { |
4281 | if (dump_file && (dump_flags & TDF_DETAILS)) |
4282 | fprintf (dump_file, "Exceeded original number of stmts (%u)."
4283 | " Not profitable to emit new sequence.\n",
4284 | orig_num_stmts); |
4285 | FOR_EACH_VEC_ELT (split_stores, i, split_store) |
4286 | delete split_store; |
4287 | return false; |
4288 | } |
4289 | } |
4290 | } |
4291 | |
4292 | gimple_stmt_iterator last_gsi = gsi_for_stmt (group->last_stmt); |
4293 | gimple_seq seq = NULL; |
4294 | tree last_vdef, new_vuse; |
4295 | last_vdef = gimple_vdef (group->last_stmt);
4296 | new_vuse = gimple_vuse (group->last_stmt);
4297 | tree bswap_res = NULL_TREE; |
4298 | |
4299 | /* Clobbers are not removed. */ |
4300 | if (gimple_clobber_p (group->last_stmt))
4301 | {
4302 | new_vuse = make_ssa_name (gimple_vop (cfun), group->last_stmt);
4303 | gimple_set_vdef (group->last_stmt, new_vuse);
4304 | } |
4305 | |
4306 | if (group->stores[0]->rhs_code == LROTATE_EXPR |
4307 | || group->stores[0]->rhs_code == NOP_EXPR) |
4308 | { |
4309 | tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type; |
4310 | gimple *ins_stmt = group->stores[0]->ins_stmt; |
4311 | struct symbolic_number *n = &group->stores[0]->n; |
4312 | bool bswap = group->stores[0]->rhs_code == LROTATE_EXPR; |
4313 | |
4314 | switch (n->range) |
4315 | { |
4316 | case 16: |
4317 | load_type = bswap_type = uint16_type_node; |
4318 | break; |
4319 | case 32: |
4320 | load_type = uint32_type_node; |
4321 | if (bswap) |
4322 | { |
4323 | fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
4324 | bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl))); |
4325 | } |
4326 | break; |
4327 | case 64: |
4328 | load_type = uint64_type_node; |
4329 | if (bswap) |
4330 | { |
4331 | fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
4332 | bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl))); |
4333 | } |
4334 | break; |
4335 | default: |
4336 | gcc_unreachable (); |
4337 | } |
4338 | |
4339 | /* If the loads each have the vuse of the corresponding store,
4340 | we've checked the aliasing already in try_coalesce_bswap and
4341 | we want to sink the load into seq.  So we need to use new_vuse
4342 | on the load. */
4343 | if (n->base_addr) |
4344 | { |
4345 | if (n->vuse == NULL) |
4346 | { |
4347 | n->vuse = new_vuse; |
4348 | ins_stmt = NULL; |
4349 | } |
4350 | else |
4351 | /* Update vuse in case it has changed by output_merged_stores. */ |
4352 | n->vuse = gimple_vuse (ins_stmt);
4353 | }
4354 | bswap_res = bswap_replace (gsi_start (seq), ins_stmt, fndecl,
4355 | bswap_type, load_type, n, bswap,
4356 | ~(uint64_t) 0, 0);
4357 | gcc_assert (bswap_res); |
4358 | } |
4359 | |
4360 | gimple *stmt = NULL; |
4361 | auto_vec<gimple *, 32> orig_stmts; |
4362 | gimple_seq this_seq; |
4363 | tree addr = force_gimple_operand_1 (unshare_expr (base_addr), &this_seq, |
4364 | is_gimple_mem_ref_addr, NULL_TREE); |
4365 | gimple_seq_add_seq_without_update (&seq, this_seq); |
4366 | |
4367 | tree load_addr[2] = { NULL_TREE, NULL_TREE }; |
4368 | gimple_seq load_seq[2] = { NULL, NULL }; |
4369 | gimple_stmt_iterator load_gsi[2] = { gsi_none (), gsi_none () }; |
4370 | for (int j = 0; j < 2; ++j) |
4371 | { |
4372 | store_operand_info &op = group->stores[0]->ops[j]; |
4373 | if (op.base_addr == NULL_TREE) |
4374 | continue; |
4375 | |
4376 | store_immediate_info *infol = group->stores.last (); |
4377 | if (gimple_vuse (op.stmt) == gimple_vuse (infol->ops[j].stmt))
4378 | { |
4379 | /* We can't pick the location randomly; while we've verified |
4380 | all the loads have the same vuse, they can still be in different
4381 | basic blocks and we need to pick the one from the last bb: |
4382 | int x = q[0]; |
4383 | if (x == N) return; |
4384 | int y = q[1]; |
4385 | p[0] = x; |
4386 | p[1] = y; |
4387 | otherwise if we put the wider load at the q[0] load, we might |
4388 | segfault if q[1] is not mapped. */ |
4389 | basic_block bb = gimple_bb (op.stmt);
4390 | gimple *ostmt = op.stmt; |
4391 | store_immediate_info *info; |
4392 | FOR_EACH_VEC_ELT (group->stores, i, info) |
4393 | { |
4394 | gimple *tstmt = info->ops[j].stmt; |
4395 | basic_block tbb = gimple_bb (tstmt);
4396 | if (dominated_by_p (CDI_DOMINATORS, tbb, bb)) |
4397 | { |
4398 | ostmt = tstmt; |
4399 | bb = tbb; |
4400 | } |
4401 | } |
4402 | load_gsi[j] = gsi_for_stmt (ostmt); |
4403 | load_addr[j] |
4404 | = force_gimple_operand_1 (unshare_expr (op.base_addr), |
4405 | &load_seq[j], is_gimple_mem_ref_addr, |
4406 | NULL_TREE); |
4407 | } |
4408 | else if (operand_equal_p (base_addr, op.base_addr, 0))
4409 | load_addr[j] = addr; |
4410 | else |
4411 | { |
4412 | load_addr[j] |
4413 | = force_gimple_operand_1 (unshare_expr (op.base_addr), |
4414 | &this_seq, is_gimple_mem_ref_addr, |
4415 | NULL_TREE); |
4416 | gimple_seq_add_seq_without_update (&seq, this_seq); |
4417 | } |
4418 | } |
4419 | |
4420 | FOR_EACH_VEC_ELT (split_stores, i, split_store) |
4421 | { |
4422 | const unsigned HOST_WIDE_INT try_size = split_store->size; |
4423 | const unsigned HOST_WIDE_INT try_pos = split_store->bytepos; |
4424 | const unsigned HOST_WIDE_INT try_bitpos = try_pos * BITS_PER_UNIT; |
4425 | const unsigned HOST_WIDE_INT try_align = split_store->align; |
4426 | const unsigned HOST_WIDE_INT try_offset = try_pos - start_byte_pos; |
4427 | tree dest, src; |
4428 | location_t loc; |
4429 | |
4430 | if (split_store->orig) |
4431 | { |
4432 | /* If there is just a single non-clobber constituent store |
4433 | which covers the whole area, just reuse the lhs and rhs. */ |
4434 | gimple *orig_stmt = NULL; |
4435 | store_immediate_info *store; |
4436 | unsigned int j; |
4437 | FOR_EACH_VEC_ELT (split_store->orig_stores, j, store) |
4438 | if (!gimple_clobber_p (store->stmt))
4439 | {
4440 | orig_stmt = store->stmt;
4441 | break;
4442 | }
4443 | dest = gimple_assign_lhs (orig_stmt);
4444 | src = gimple_assign_rhs1 (orig_stmt);
4445 | loc = gimple_location (orig_stmt);
4446 | } |
4447 | else |
4448 | { |
4449 | store_immediate_info *info; |
4450 | unsigned short clique, base; |
4451 | unsigned int k; |
4452 | FOR_EACH_VEC_ELT (split_store->orig_stores, k, info) |
4453 | orig_stmts.safe_push (info->stmt);
4454 | tree offset_type
4455 | = get_alias_type_for_stmts (orig_stmts, false, &clique, &base);
4456 | tree dest_type;
4457 | loc = get_location_for_stmts (orig_stmts);
4458 | orig_stmts.truncate (0);
4459 | |
4460 | if (group->string_concatenation) |
4461 | dest_type |
4462 | = build_array_type_nelts (char_type_node, |
4463 | try_size / BITS_PER_UNIT); |
4464 | else |
4465 | { |
4466 | dest_type = build_nonstandard_integer_type (try_size, UNSIGNED); |
4467 | dest_type = build_aligned_type (dest_type, try_align); |
4468 | } |
4469 | dest = fold_build2 (MEM_REF, dest_type, addr, |
4470 | build_int_cst (offset_type, try_pos)); |
4471 | if (TREE_CODE (dest) == MEM_REF) |
4472 | { |
4473 | MR_DEPENDENCE_CLIQUE (dest) = clique; |
4474 | MR_DEPENDENCE_BASE (dest) = base; |
4475 | } |
4476 | |
4477 | tree mask; |
4478 | if (bswap_res || group->string_concatenation) |
4479 | mask = integer_zero_node; |
4480 | else |
4481 | mask = native_interpret_expr (dest_type, |
4482 | group->mask + try_offset, |
4483 | group->buf_size); |
4484 | |
4485 | tree ops[2]; |
4486 | for (int j = 0; |
4487 | j < 1 + (split_store->orig_stores[0]->ops[1].val != NULL_TREE); |
4488 | ++j) |
4489 | { |
4490 | store_operand_info &op = split_store->orig_stores[0]->ops[j]; |
4491 | if (bswap_res) |
4492 | ops[j] = bswap_res; |
4493 | else if (group->string_concatenation) |
4494 | { |
4495 | ops[j] = build_string (try_size / BITS_PER_UNIT, |
4496 | (const char *) group->val + try_offset); |
4497 | TREE_TYPE (ops[j]) = dest_type; |
4498 | } |
4499 | else if (op.base_addr) |
4500 | { |
4501 | FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
4502 | orig_stmts.safe_push (info->ops[j].stmt);
4503 | 
4504 | offset_type = get_alias_type_for_stmts (orig_stmts, true,
4505 | &clique, &base);
4506 | location_t load_loc = get_location_for_stmts (orig_stmts);
4507 | orig_stmts.truncate (0);
4508 | |
4509 | unsigned HOST_WIDE_INT load_align = group->load_align[j]; |
4510 | unsigned HOST_WIDE_INT align_bitpos |
4511 | = known_alignment (try_bitpos
4512 | - split_store->orig_stores[0]->bitpos
4513 | + op.bitpos);
4514 | if (align_bitpos & (load_align - 1))
4515 | load_align = least_bit_hwi (align_bitpos);
4516 | |
4517 | tree load_int_type |
4518 | = build_nonstandard_integer_type (try_size, UNSIGNED); |
4519 | load_int_type |
4520 | = build_aligned_type (load_int_type, load_align); |
4521 | |
4522 | poly_uint64 load_pos |
4523 | = exact_div (try_bitpos
4524 | - split_store->orig_stores[0]->bitpos |
4525 | + op.bitpos, |
4526 | BITS_PER_UNIT); |
4527 | ops[j] = fold_build2 (MEM_REF, load_int_type, load_addr[j], |
4528 | build_int_cst (offset_type, load_pos)); |
4529 | if (TREE_CODE (ops[j]) == MEM_REF) |
4530 | { |
4531 | MR_DEPENDENCE_CLIQUE (ops[j]) = clique; |
4532 | MR_DEPENDENCE_BASE (ops[j]) = base; |
4533 | } |
4534 | if (!integer_zerop (mask)) |
4535 | { |
4536 | /* The load might load some bits (that will be masked |
4537 | off later on) uninitialized, avoid -W*uninitialized |
4538 | warnings in that case. */ |
4539 | suppress_warning (ops[j], OPT_Wuninitialized); |
4540 | } |
4541 | |
4542 | stmt = gimple_build_assign (make_ssa_name (dest_type), ops[j]);
4543 | gimple_set_location (stmt, load_loc);
4544 | if (gsi_bb (load_gsi[j]))
4545 | {
4546 | gimple_set_vuse (stmt, gimple_vuse (op.stmt));
4547 | gimple_seq_add_stmt_without_update (&load_seq[j], stmt);
4548 | }
4549 | else
4550 | {
4551 | gimple_set_vuse (stmt, new_vuse);
4552 | gimple_seq_add_stmt_without_update (&seq, stmt);
4553 | }
4554 | ops[j] = gimple_assign_lhs (stmt);
4555 | tree xor_mask;
4556 | enum tree_code inv_op
4557 | = invert_op (split_store, j, dest_type, xor_mask);
4558 | if (inv_op != NOP_EXPR)
4559 | {
4560 | stmt = gimple_build_assign (make_ssa_name (dest_type),
4561 | inv_op, ops[j], xor_mask);
4562 | gimple_set_location (stmt, load_loc);
4563 | ops[j] = gimple_assign_lhs (stmt);
4564 | 
4565 | if (gsi_bb (load_gsi[j]))
4566 | gimple_seq_add_stmt_without_update (&load_seq[j], |
4567 | stmt); |
4568 | else |
4569 | gimple_seq_add_stmt_without_update (&seq, stmt); |
4570 | } |
4571 | } |
4572 | else |
4573 | ops[j] = native_interpret_expr (dest_type, |
4574 | group->val + try_offset, |
4575 | group->buf_size); |
4576 | } |
4577 | |
4578 | switch (split_store->orig_stores[0]->rhs_code) |
4579 | { |
4580 | case BIT_AND_EXPR: |
4581 | case BIT_IOR_EXPR: |
4582 | case BIT_XOR_EXPR: |
4583 | FOR_EACH_VEC_ELT (split_store->orig_stores, k, info) |
4584 | { |
	      tree rhs1 = gimple_assign_rhs1 (info->stmt);
4586 | orig_stmts.safe_push (SSA_NAME_DEF_STMT (rhs1)); |
4587 | } |
4588 | location_t bit_loc; |
	  bit_loc = get_location_for_stmts (orig_stmts);
	  orig_stmts.truncate (0);
4591 | |
	  stmt
	    = gimple_build_assign (make_ssa_name (dest_type),
				   split_store->orig_stores[0]->rhs_code,
				   ops[0], ops[1]);
	  gimple_set_location (stmt, bit_loc);
4597 | /* If there is just one load and there is a separate |
4598 | load_seq[0], emit the bitwise op right after it. */ |
	  if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0]))
4600 | gimple_seq_add_stmt_without_update (&load_seq[0], stmt); |
4601 | /* Otherwise, if at least one load is in seq, we need to |
4602 | emit the bitwise op right before the store. If there |
	     are two loads and they are emitted somewhere else, it would
4604 | be better to emit the bitwise op as early as possible; |
4605 | we don't track where that would be possible right now |
4606 | though. */ |
4607 | else |
4608 | gimple_seq_add_stmt_without_update (&seq, stmt); |
	  src = gimple_assign_lhs (stmt);
	  tree xor_mask;
	  enum tree_code inv_op;
	  inv_op = invert_op (split_store, 2, dest_type, xor_mask);
4613 | if (inv_op != NOP_EXPR) |
4614 | { |
	      stmt = gimple_build_assign (make_ssa_name (dest_type),
					  inv_op, src, xor_mask);
	      gimple_set_location (stmt, bit_loc);
	      if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0]))
		gimple_seq_add_stmt_without_update (&load_seq[0], stmt);
	      else
		gimple_seq_add_stmt_without_update (&seq, stmt);
	      src = gimple_assign_lhs (stmt);
4623 | } |
4624 | break; |
4625 | case LROTATE_EXPR: |
4626 | case NOP_EXPR: |
4627 | src = ops[0]; |
4628 | if (!is_gimple_val (src)) |
4629 | { |
4630 | stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (src)), |
4631 | src); |
4632 | gimple_seq_add_stmt_without_update (&seq, stmt); |
	      src = gimple_assign_lhs (stmt);
4634 | } |
4635 | if (!useless_type_conversion_p (dest_type, TREE_TYPE (src))) |
4636 | { |
	      stmt = gimple_build_assign (make_ssa_name (dest_type),
					  NOP_EXPR, src);
	      gimple_seq_add_stmt_without_update (&seq, stmt);
	      src = gimple_assign_lhs (stmt);
	    }
	  inv_op = invert_op (split_store, 2, dest_type, xor_mask);
	  if (inv_op != NOP_EXPR)
	    {
	      stmt = gimple_build_assign (make_ssa_name (dest_type),
					  inv_op, src, xor_mask);
	      gimple_set_location (stmt, loc);
	      gimple_seq_add_stmt_without_update (&seq, stmt);
	      src = gimple_assign_lhs (stmt);
4650 | } |
4651 | break; |
4652 | default: |
4653 | src = ops[0]; |
4654 | break; |
4655 | } |
4656 | |
4657 | /* If bit insertion is required, we use the source as an accumulator |
4658 | into which the successive bit-field values are manually inserted. |
4659 | FIXME: perhaps use BIT_INSERT_EXPR instead in some cases? */ |
4660 | if (group->bit_insertion) |
4661 | FOR_EACH_VEC_ELT (split_store->orig_stores, k, info) |
4662 | if (info->rhs_code == BIT_INSERT_EXPR |
4663 | && info->bitpos < try_bitpos + try_size |
4664 | && info->bitpos + info->bitsize > try_bitpos) |
4665 | { |
4666 | /* Mask, truncate, convert to final type, shift and ior into |
4667 | the accumulator. Note that every step can be a no-op. */ |
4668 | const HOST_WIDE_INT start_gap = info->bitpos - try_bitpos; |
4669 | const HOST_WIDE_INT end_gap |
4670 | = (try_bitpos + try_size) - (info->bitpos + info->bitsize); |
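	    /* For instance, on a little-endian target, inserting a 5-bit
	       field at bit 3 of a 16-bit chunk gives start_gap 3 and
	       end_gap 8: the value is masked to 5 bits, converted to
	       DEST_TYPE and shifted left by start_gap before being IORed
	       into the accumulator.  */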
4671 | tree tem = info->ops[0].val; |
4672 | if (!INTEGRAL_TYPE_P (TREE_TYPE (tem))) |
4673 | { |
4674 | const unsigned HOST_WIDE_INT size |
4675 | = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (tem))); |
4676 | tree integer_type |
4677 | = build_nonstandard_integer_type (size, UNSIGNED); |
		tem = gimple_build (&seq, loc, VIEW_CONVERT_EXPR,
				    integer_type, tem);
4680 | } |
4681 | if (TYPE_PRECISION (TREE_TYPE (tem)) <= info->bitsize) |
4682 | { |
4683 | tree bitfield_type |
4684 | = build_nonstandard_integer_type (info->bitsize, |
4685 | UNSIGNED); |
		tem = gimple_convert (&seq, loc, bitfield_type, tem);
4687 | } |
4688 | else if ((BYTES_BIG_ENDIAN ? start_gap : end_gap) > 0) |
4689 | { |
		wide_int imask
		  = wi::mask (info->bitsize, false,
			      TYPE_PRECISION (TREE_TYPE (tem)));
		tem = gimple_build (&seq, loc,
				    BIT_AND_EXPR, TREE_TYPE (tem), tem,
				    wide_int_to_tree (TREE_TYPE (tem),
						      imask));
	      }
	    const HOST_WIDE_INT shift
	      = (BYTES_BIG_ENDIAN ? end_gap : start_gap);
	    if (shift < 0)
	      tem = gimple_build (&seq, loc,
				  RSHIFT_EXPR, TREE_TYPE (tem), tem,
				  build_int_cst (NULL_TREE, -shift));
	    tem = gimple_convert (&seq, loc, dest_type, tem);
	    if (shift > 0)
	      tem = gimple_build (&seq, loc,
				  LSHIFT_EXPR, dest_type, tem,
				  build_int_cst (NULL_TREE, shift));
	    src = gimple_build (&seq, loc,
				BIT_IOR_EXPR, dest_type, tem, src);
4711 | } |
4712 | |
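      /* A non-zero MASK means part of the destination must be preserved:
	 load the destination, keep the bits set in MASK, clear them in SRC
	 and IOR the two together, i.e. perform a read-modify-write of the
	 bit region.  */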
4713 | if (!integer_zerop (mask)) |
4714 | { |
	  tree tem = make_ssa_name (dest_type);
4716 | tree load_src = unshare_expr (dest); |
	  /* The load might load some or all bits uninitialized,
	     avoid -W*uninitialized warnings in that case.
	     As an optimization, if all the bits are provably
	     uninitialized (no stores at all yet, or the previous
	     store was a CLOBBER), we could replace the load with
	     e.g. 0 and avoid it altogether.  */
	  suppress_warning (load_src, OPT_Wuninitialized);
	  stmt = gimple_build_assign (tem, load_src);
	  gimple_set_location (stmt, loc);
	  gimple_set_vuse (stmt, new_vuse);
4727 | gimple_seq_add_stmt_without_update (&seq, stmt); |
4728 | |
4729 | /* FIXME: If there is a single chunk of zero bits in mask, |
4730 | perhaps use BIT_INSERT_EXPR instead? */ |
	  stmt = gimple_build_assign (make_ssa_name (dest_type),
				      BIT_AND_EXPR, tem, mask);
	  gimple_set_location (stmt, loc);
	  gimple_seq_add_stmt_without_update (&seq, stmt);
	  tem = gimple_assign_lhs (stmt);

	  if (TREE_CODE (src) == INTEGER_CST)
	    src = wide_int_to_tree (dest_type,
				    wi::bit_and_not (wi::to_wide (src),
						     wi::to_wide (mask)));
	  else
	    {
	      tree nmask
		= wide_int_to_tree (dest_type,
				    wi::bit_not (wi::to_wide (mask)));
	      stmt = gimple_build_assign (make_ssa_name (dest_type),
					  BIT_AND_EXPR, src, nmask);
	      gimple_set_location (stmt, loc);
	      gimple_seq_add_stmt_without_update (&seq, stmt);
	      src = gimple_assign_lhs (stmt);
	    }
	  stmt = gimple_build_assign (make_ssa_name (dest_type),
				      BIT_IOR_EXPR, tem, src);
	  gimple_set_location (stmt, loc);
	  gimple_seq_add_stmt_without_update (&seq, stmt);
	  src = gimple_assign_lhs (stmt);
4757 | } |
4758 | } |
4759 | |
4760 | stmt = gimple_build_assign (dest, src); |
      gimple_set_location (stmt, loc);
      gimple_set_vuse (stmt, new_vuse);
4763 | gimple_seq_add_stmt_without_update (&seq, stmt); |
4764 | |
4765 | if (group->lp_nr && stmt_could_throw_p (cfun, stmt)) |
4766 | add_stmt_to_eh_lp (stmt, group->lp_nr); |
4767 | |
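      /* Thread the virtual operands through the new sequence: every store
	 but the last defines a fresh VDEF which the next store uses, and
	 the last store reuses LAST_VDEF so that uses downstream of the
	 original sequence are unaffected.  */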
4768 | tree new_vdef; |
4769 | if (i < split_stores.length () - 1) |
	new_vdef = make_ssa_name (gimple_vop (cfun), stmt);
4771 | else |
4772 | new_vdef = last_vdef; |
4773 | |
      gimple_set_vdef (stmt, new_vdef);
4775 | SSA_NAME_DEF_STMT (new_vdef) = stmt; |
4776 | new_vuse = new_vdef; |
4777 | } |
4778 | |
4779 | FOR_EACH_VEC_ELT (split_stores, i, split_store) |
4780 | delete split_store; |
4781 | |
4782 | gcc_assert (seq); |
4783 | if (dump_file) |
4784 | { |
      fprintf (dump_file,
	       "New sequence of %u stores to replace old one of %u stores\n",
	       split_stores.length (), orig_num_stmts);
4788 | if (dump_flags & TDF_DETAILS) |
4789 | print_gimple_seq (dump_file, seq, 0, TDF_VOPS | TDF_MEMSYMS); |
4790 | } |
4791 | |
  if (gimple_clobber_p (group->last_stmt))
    update_stmt (group->last_stmt);
4794 | |
4795 | if (group->lp_nr > 0) |
4796 | { |
4797 | /* We're going to insert a sequence of (potentially) throwing stores |
4798 | into an active EH region. This means that we're going to create |
4799 | new basic blocks with EH edges pointing to the post landing pad |
4800 | and, therefore, to have to update its PHI nodes, if any. For the |
4801 | virtual PHI node, we're going to use the VDEFs created above, but |
4802 | for the other nodes, we need to record the original reaching defs. */ |
4803 | eh_landing_pad lp = get_eh_landing_pad_from_number (group->lp_nr); |
4804 | basic_block lp_bb = label_to_block (cfun, lp->post_landing_pad); |
      basic_block last_bb = gimple_bb (group->last_stmt);
4806 | edge last_edge = find_edge (last_bb, lp_bb); |
4807 | auto_vec<tree, 16> last_defs; |
4808 | gphi_iterator gpi; |
      for (gpi = gsi_start_phis (lp_bb); !gsi_end_p (gpi); gsi_next (&gpi))
4810 | { |
4811 | gphi *phi = gpi.phi (); |
4812 | tree last_def; |
	  if (virtual_operand_p (gimple_phi_result (phi)))
4814 | last_def = NULL_TREE; |
4815 | else |
	    last_def = gimple_phi_arg_def (phi, last_edge->dest_idx);
	  last_defs.safe_push (last_def);
4818 | } |
4819 | |
      /* Do the insertion.  Then, if new basic blocks have been created in the
	 process, rewind the chain of VDEFs created above to walk the new basic
	 blocks and update the corresponding arguments of the PHI nodes.  */
      update_modified_stmts (seq);
      if (gimple_find_sub_bbs (seq, &last_gsi))
	while (last_vdef != gimple_vuse (group->last_stmt))
4826 | { |
4827 | gimple *stmt = SSA_NAME_DEF_STMT (last_vdef); |
4828 | if (stmt_could_throw_p (cfun, stmt)) |
4829 | { |
	      edge new_edge = find_edge (gimple_bb (stmt), lp_bb);
4831 | unsigned int i; |
	      for (gpi = gsi_start_phis (lp_bb), i = 0;
		   !gsi_end_p (gpi);
		   gsi_next (&gpi), i++)
4835 | { |
4836 | gphi *phi = gpi.phi (); |
4837 | tree new_def; |
		  if (virtual_operand_p (gimple_phi_result (phi)))
4839 | new_def = last_vdef; |
4840 | else |
4841 | new_def = last_defs[i]; |
4842 | add_phi_arg (phi, new_def, new_edge, UNKNOWN_LOCATION); |
4843 | } |
4844 | } |
	    last_vdef = gimple_vuse (stmt);
4846 | } |
4847 | } |
4848 | else |
4849 | gsi_insert_seq_after (&last_gsi, seq, GSI_SAME_STMT); |
4850 | |
4851 | for (int j = 0; j < 2; ++j) |
4852 | if (load_seq[j]) |
4853 | gsi_insert_seq_after (&load_gsi[j], load_seq[j], GSI_SAME_STMT); |
4854 | |
4855 | return true; |
4856 | } |
4857 | |
4858 | /* Process the merged_store_group objects created in the coalescing phase. |
4859 | The stores are all against the base object BASE. |
4860 | Try to output the widened stores and delete the original statements if |
4861 | successful. Return true iff any changes were made. */ |
4862 | |
4863 | bool |
4864 | imm_store_chain_info::output_merged_stores () |
4865 | { |
4866 | unsigned int i; |
4867 | merged_store_group *merged_store; |
4868 | bool ret = false; |
4869 | FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_store) |
4870 | { |
      if (dbg_cnt (store_merging)
	  && output_merged_store (merged_store))
4873 | { |
4874 | unsigned int j; |
4875 | store_immediate_info *store; |
4876 | FOR_EACH_VEC_ELT (merged_store->stores, j, store) |
4877 | { |
4878 | gimple *stmt = store->stmt; |
4879 | gimple_stmt_iterator gsi = gsi_for_stmt (stmt); |
4880 | /* Don't remove clobbers, they are still useful even if |
4881 | everything is overwritten afterwards. */ |
	      if (gimple_clobber_p (stmt))
4883 | continue; |
4884 | gsi_remove (&gsi, true); |
4885 | if (store->lp_nr) |
4886 | remove_stmt_from_eh_lp (stmt); |
4887 | if (stmt != merged_store->last_stmt) |
4888 | { |
4889 | unlink_stmt_vdef (stmt); |
4890 | release_defs (stmt); |
4891 | } |
4892 | } |
4893 | ret = true; |
4894 | } |
4895 | } |
4896 | if (ret && dump_file) |
4897 | fprintf (stream: dump_file, format: "Merging successful!\n" ); |
4898 | |
4899 | return ret; |
4900 | } |
4901 | |
4902 | /* Coalesce the store_immediate_info objects recorded against the base object |
4903 | BASE in the first phase and output them. |
4904 | Delete the allocated structures. |
4905 | Return true if any changes were made. */ |
4906 | |
4907 | bool |
4908 | imm_store_chain_info::terminate_and_process_chain () |
4909 | { |
4910 | if (dump_file && (dump_flags & TDF_DETAILS)) |
4911 | fprintf (stream: dump_file, format: "Terminating chain with %u stores\n" , |
4912 | m_store_info.length ()); |
4913 | /* Process store chain. */ |
4914 | bool ret = false; |
4915 | if (m_store_info.length () > 1) |
4916 | { |
4917 | ret = coalesce_immediate_stores (); |
4918 | if (ret) |
4919 | ret = output_merged_stores (); |
4920 | } |
4921 | |
4922 | /* Delete all the entries we allocated ourselves. */ |
4923 | store_immediate_info *info; |
4924 | unsigned int i; |
4925 | FOR_EACH_VEC_ELT (m_store_info, i, info) |
4926 | delete info; |
4927 | |
4928 | merged_store_group *merged_info; |
4929 | FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_info) |
4930 | delete merged_info; |
4931 | |
4932 | return ret; |
4933 | } |
4934 | |
4935 | /* Return true iff LHS is a destination potentially interesting for |
4936 | store merging. In practice these are the codes that get_inner_reference |
4937 | can process. */ |
4938 | |
4939 | static bool |
4940 | lhs_valid_for_store_merging_p (tree lhs) |
4941 | { |
4942 | if (DECL_P (lhs)) |
4943 | return true; |
4944 | |
4945 | switch (TREE_CODE (lhs)) |
4946 | { |
4947 | case ARRAY_REF: |
4948 | case ARRAY_RANGE_REF: |
4949 | case BIT_FIELD_REF: |
4950 | case COMPONENT_REF: |
4951 | case MEM_REF: |
4952 | case VIEW_CONVERT_EXPR: |
4953 | return true; |
4954 | default: |
4955 | return false; |
4956 | } |
4957 | } |
4958 | |
4959 | /* Return true if the tree RHS is a constant we want to consider |
4960 | during store merging. In practice accept all codes that |
4961 | native_encode_expr accepts. */ |
4962 | |
4963 | static bool |
4964 | rhs_valid_for_store_merging_p (tree rhs) |
4965 | { |
4966 | unsigned HOST_WIDE_INT size; |
4967 | if (TREE_CODE (rhs) == CONSTRUCTOR |
4968 | && CONSTRUCTOR_NELTS (rhs) == 0 |
4969 | && TYPE_SIZE_UNIT (TREE_TYPE (rhs)) |
4970 | && tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (rhs)))) |
4971 | return true; |
  return (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (rhs))).is_constant (&size)
4973 | && native_encode_expr (rhs, NULL, size) != 0); |
4974 | } |
4975 | |
4976 | /* Adjust *PBITPOS, *PBITREGION_START and *PBITREGION_END by BYTE_OFF bytes |
4977 | and return true on success or false on failure. */ |
4978 | |
4979 | static bool |
4980 | adjust_bit_pos (poly_offset_int byte_off, |
4981 | poly_int64 *pbitpos, |
4982 | poly_uint64 *pbitregion_start, |
4983 | poly_uint64 *pbitregion_end) |
4984 | { |
4985 | poly_offset_int bit_off = byte_off << LOG2_BITS_PER_UNIT; |
4986 | bit_off += *pbitpos; |
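  /* For instance, a byte offset of 2 with *PBITPOS 3 yields a bit offset
     of 2 * BITS_PER_UNIT + 3, i.e. 19 on 8-bit-byte targets.  */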
4987 | |
  if (known_ge (bit_off, 0) && bit_off.to_shwi (pbitpos))
    {
      if (maybe_ne (*pbitregion_end, 0U))
	{
	  bit_off = byte_off << LOG2_BITS_PER_UNIT;
	  bit_off += *pbitregion_start;
	  if (bit_off.to_uhwi (pbitregion_start))
	    {
	      bit_off = byte_off << LOG2_BITS_PER_UNIT;
	      bit_off += *pbitregion_end;
	      if (!bit_off.to_uhwi (pbitregion_end))
4999 | *pbitregion_end = 0; |
5000 | } |
5001 | else |
5002 | *pbitregion_end = 0; |
5003 | } |
5004 | return true; |
5005 | } |
5006 | else |
5007 | return false; |
5008 | } |
5009 | |
5010 | /* If MEM is a memory reference usable for store merging (either as |
5011 | store destination or for loads), return the non-NULL base_addr |
5012 | and set *PBITSIZE, *PBITPOS, *PBITREGION_START and *PBITREGION_END. |
   Otherwise return NULL; *PBITPOS should still be valid even in that
   case.  */
5015 | |
5016 | static tree |
5017 | mem_valid_for_store_merging (tree mem, poly_uint64 *pbitsize, |
5018 | poly_uint64 *pbitpos, |
5019 | poly_uint64 *pbitregion_start, |
5020 | poly_uint64 *pbitregion_end) |
5021 | { |
5022 | poly_int64 bitsize, bitpos; |
5023 | poly_uint64 bitregion_start = 0, bitregion_end = 0; |
5024 | machine_mode mode; |
5025 | int unsignedp = 0, reversep = 0, volatilep = 0; |
5026 | tree offset; |
5027 | tree base_addr = get_inner_reference (mem, &bitsize, &bitpos, &offset, &mode, |
5028 | &unsignedp, &reversep, &volatilep); |
5029 | *pbitsize = bitsize; |
5030 | if (known_le (bitsize, 0)) |
5031 | return NULL_TREE; |
5032 | |
5033 | if (TREE_CODE (mem) == COMPONENT_REF |
5034 | && DECL_BIT_FIELD_TYPE (TREE_OPERAND (mem, 1))) |
5035 | { |
5036 | get_bit_range (&bitregion_start, &bitregion_end, mem, &bitpos, &offset); |
      if (maybe_ne (bitregion_end, 0U))
5038 | bitregion_end += 1; |
5039 | } |
5040 | |
5041 | if (reversep) |
5042 | return NULL_TREE; |
5043 | |
5044 | /* We do not want to rewrite TARGET_MEM_REFs. */ |
5045 | if (TREE_CODE (base_addr) == TARGET_MEM_REF) |
5046 | return NULL_TREE; |
5047 | /* In some cases get_inner_reference may return a |
5048 | MEM_REF [ptr + byteoffset]. For the purposes of this pass |
5049 | canonicalize the base_addr to MEM_REF [ptr] and take |
5050 | byteoffset into account in the bitpos. This occurs in |
5051 | PR 23684 and this way we can catch more chains. */ |
5052 | else if (TREE_CODE (base_addr) == MEM_REF) |
5053 | { |
      if (!adjust_bit_pos (mem_ref_offset (base_addr), &bitpos,
			   &bitregion_start, &bitregion_end))
5056 | return NULL_TREE; |
5057 | base_addr = TREE_OPERAND (base_addr, 0); |
5058 | } |
5059 | /* get_inner_reference returns the base object, get at its |
5060 | address now. */ |
5061 | else |
5062 | { |
      if (maybe_lt (bitpos, 0))
5064 | return NULL_TREE; |
5065 | base_addr = build_fold_addr_expr (base_addr); |
5066 | } |
5067 | |
5068 | if (offset) |
5069 | { |
5070 | /* If the access is variable offset then a base decl has to be |
5071 | address-taken to be able to emit pointer-based stores to it. |
5072 | ??? We might be able to get away with re-using the original |
5073 | base up to the first variable part and then wrapping that inside |
5074 | a BIT_FIELD_REF. */ |
      tree base = get_base_address (base_addr);
5076 | if (!base || (DECL_P (base) && !TREE_ADDRESSABLE (base))) |
5077 | return NULL_TREE; |
5078 | |
5079 | /* Similarly to above for the base, remove constant from the offset. */ |
5080 | if (TREE_CODE (offset) == PLUS_EXPR |
5081 | && TREE_CODE (TREE_OPERAND (offset, 1)) == INTEGER_CST |
	  && adjust_bit_pos (wi::to_poly_offset (TREE_OPERAND (offset, 1)),
			     &bitpos, &bitregion_start, &bitregion_end))
5084 | offset = TREE_OPERAND (offset, 0); |
5085 | |
5086 | base_addr = build2 (POINTER_PLUS_EXPR, TREE_TYPE (base_addr), |
5087 | base_addr, offset); |
5088 | } |
5089 | |
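  /* If the access is not a bit-field, the bit region is simply the access
     itself, padded out to byte boundaries.  */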
5090 | if (known_eq (bitregion_end, 0U)) |
5091 | { |
5092 | bitregion_start = round_down_to_byte_boundary (bitpos); |
5093 | bitregion_end = round_up_to_byte_boundary (bitpos + bitsize); |
5094 | } |
5095 | |
5096 | *pbitsize = bitsize; |
5097 | *pbitpos = bitpos; |
5098 | *pbitregion_start = bitregion_start; |
5099 | *pbitregion_end = bitregion_end; |
5100 | return base_addr; |
5101 | } |
5102 | |
5103 | /* Return true if STMT is a load that can be used for store merging. |
5104 | In that case fill in *OP. BITSIZE, BITPOS, BITREGION_START and |
5105 | BITREGION_END are properties of the corresponding store. */ |
5106 | |
5107 | static bool |
5108 | handled_load (gimple *stmt, store_operand_info *op, |
5109 | poly_uint64 bitsize, poly_uint64 bitpos, |
5110 | poly_uint64 bitregion_start, poly_uint64 bitregion_end) |
5111 | { |
  if (!is_gimple_assign (stmt))
    return false;
  if (gimple_assign_rhs_code (stmt) == BIT_NOT_EXPR)
    {
      tree rhs1 = gimple_assign_rhs1 (stmt);
5117 | if (TREE_CODE (rhs1) == SSA_NAME |
5118 | && handled_load (SSA_NAME_DEF_STMT (rhs1), op, bitsize, bitpos, |
5119 | bitregion_start, bitregion_end)) |
5120 | { |
	  /* Don't allow _1 = load; _2 = ~_1; _3 = ~_2; which should have
5122 | been optimized earlier, but if allowed here, would confuse the |
5123 | multiple uses counting. */ |
5124 | if (op->bit_not_p) |
5125 | return false; |
5126 | op->bit_not_p = !op->bit_not_p; |
5127 | return true; |
5128 | } |
5129 | return false; |
5130 | } |
  if (gimple_vuse (stmt)
      && gimple_assign_load_p (stmt)
      && !stmt_can_throw_internal (cfun, stmt)
      && !gimple_has_volatile_ops (stmt))
    {
      tree mem = gimple_assign_rhs1 (stmt);
      op->base_addr
	= mem_valid_for_store_merging (mem, &op->bitsize, &op->bitpos,
				       &op->bitregion_start,
				       &op->bitregion_end);
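      /* The load can feed the store only if it has the same bit size, lies
	 at a byte-aligned distance from it and its bit region covers at
	 least as much as the store's bit region on both sides.  */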
5141 | if (op->base_addr != NULL_TREE |
5142 | && known_eq (op->bitsize, bitsize) |
	  && multiple_p (op->bitpos - bitpos, BITS_PER_UNIT)
5144 | && known_ge (op->bitpos - op->bitregion_start, |
5145 | bitpos - bitregion_start) |
5146 | && known_ge (op->bitregion_end - op->bitpos, |
5147 | bitregion_end - bitpos)) |
5148 | { |
5149 | op->stmt = stmt; |
5150 | op->val = mem; |
5151 | op->bit_not_p = false; |
5152 | return true; |
5153 | } |
5154 | } |
5155 | return false; |
5156 | } |
5157 | |
5158 | /* Return the index number of the landing pad for STMT, if any. */ |
5159 | |
5160 | static int |
5161 | lp_nr_for_store (gimple *stmt) |
5162 | { |
5163 | if (!cfun->can_throw_non_call_exceptions || !cfun->eh) |
5164 | return 0; |
5165 | |
5166 | if (!stmt_could_throw_p (cfun, stmt)) |
5167 | return 0; |
5168 | |
5169 | return lookup_stmt_eh_lp (stmt); |
5170 | } |
5171 | |
5172 | /* Record the store STMT for store merging optimization if it can be |
5173 | optimized. Return true if any changes were made. */ |
5174 | |
5175 | bool |
5176 | pass_store_merging::process_store (gimple *stmt) |
5177 | { |
  tree lhs = gimple_assign_lhs (stmt);
  tree rhs = gimple_assign_rhs1 (stmt);
  poly_uint64 bitsize, bitpos = 0;
  poly_uint64 bitregion_start = 0, bitregion_end = 0;
  tree base_addr
    = mem_valid_for_store_merging (lhs, &bitsize, &bitpos,
				   &bitregion_start, &bitregion_end);
5185 | if (known_eq (bitsize, 0U)) |
5186 | return false; |
5187 | |
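  /* The store can only be handled if the base address was recognized and,
     unless the value is a constant or an empty CONSTRUCTOR, the access is
     no wider than the widest integer mode.  */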
5188 | bool invalid = (base_addr == NULL_TREE |
5189 | || (maybe_gt (bitsize, |
5190 | (unsigned int) MAX_BITSIZE_MODE_ANY_INT) |
5191 | && TREE_CODE (rhs) != INTEGER_CST |
5192 | && (TREE_CODE (rhs) != CONSTRUCTOR |
5193 | || CONSTRUCTOR_NELTS (rhs) != 0))); |
5194 | enum tree_code rhs_code = ERROR_MARK; |
5195 | bool bit_not_p = false; |
5196 | struct symbolic_number n; |
5197 | gimple *ins_stmt = NULL; |
5198 | store_operand_info ops[2]; |
5199 | if (invalid) |
5200 | ; |
5201 | else if (TREE_CODE (rhs) == STRING_CST) |
5202 | { |
5203 | rhs_code = STRING_CST; |
5204 | ops[0].val = rhs; |
5205 | } |
5206 | else if (rhs_valid_for_store_merging_p (rhs)) |
5207 | { |
5208 | rhs_code = INTEGER_CST; |
5209 | ops[0].val = rhs; |
5210 | } |
5211 | else if (TREE_CODE (rhs) == SSA_NAME) |
5212 | { |
5213 | gimple *def_stmt = SSA_NAME_DEF_STMT (rhs), *def_stmt1, *def_stmt2; |
      if (!is_gimple_assign (def_stmt))
	invalid = true;
      else if (handled_load (def_stmt, &ops[0], bitsize, bitpos,
			     bitregion_start, bitregion_end))
	rhs_code = MEM_REF;
      else if (gimple_assign_rhs_code (def_stmt) == BIT_NOT_EXPR)
	{
	  tree rhs1 = gimple_assign_rhs1 (def_stmt);
	  if (TREE_CODE (rhs1) == SSA_NAME
	      && is_gimple_assign (SSA_NAME_DEF_STMT (rhs1)))
	    {
	      bit_not_p = true;
	      def_stmt = SSA_NAME_DEF_STMT (rhs1);
	    }
	}

      if (rhs_code == ERROR_MARK && !invalid)
	switch ((rhs_code = gimple_assign_rhs_code (def_stmt)))
5232 | { |
5233 | case BIT_AND_EXPR: |
5234 | case BIT_IOR_EXPR: |
5235 | case BIT_XOR_EXPR: |
5236 | tree rhs1, rhs2; |
	    rhs1 = gimple_assign_rhs1 (def_stmt);
	    rhs2 = gimple_assign_rhs2 (def_stmt);
5239 | invalid = true; |
5240 | if (TREE_CODE (rhs1) != SSA_NAME) |
5241 | break; |
5242 | def_stmt1 = SSA_NAME_DEF_STMT (rhs1); |
	    if (!is_gimple_assign (def_stmt1)
		|| !handled_load (def_stmt1, &ops[0], bitsize, bitpos,
				  bitregion_start, bitregion_end))
	      break;
	    if (rhs_valid_for_store_merging_p (rhs2))
5248 | ops[1].val = rhs2; |
5249 | else if (TREE_CODE (rhs2) != SSA_NAME) |
5250 | break; |
5251 | else |
5252 | { |
5253 | def_stmt2 = SSA_NAME_DEF_STMT (rhs2); |
		if (!is_gimple_assign (def_stmt2))
		  break;
		else if (!handled_load (def_stmt2, &ops[1], bitsize, bitpos,
5257 | bitregion_start, bitregion_end)) |
5258 | break; |
5259 | } |
5260 | invalid = false; |
5261 | break; |
5262 | default: |
5263 | invalid = true; |
5264 | break; |
5265 | } |
5266 | |
5267 | unsigned HOST_WIDE_INT const_bitsize; |
      if (bitsize.is_constant (&const_bitsize)
	  && (const_bitsize % BITS_PER_UNIT) == 0
	  && const_bitsize <= 64
	  && multiple_p (bitpos, BITS_PER_UNIT))
	{
	  ins_stmt = find_bswap_or_nop_1 (def_stmt, &n, 12);
5274 | if (ins_stmt) |
5275 | { |
5276 | uint64_t nn = n.n; |
5277 | for (unsigned HOST_WIDE_INT i = 0; |
5278 | i < const_bitsize; |
5279 | i += BITS_PER_UNIT, nn >>= BITS_PER_MARKER) |
5280 | if ((nn & MARKER_MASK) == 0 |
5281 | || (nn & MARKER_MASK) == MARKER_BYTE_UNKNOWN) |
5282 | { |
5283 | ins_stmt = NULL; |
5284 | break; |
5285 | } |
5286 | if (ins_stmt) |
5287 | { |
5288 | if (invalid) |
5289 | { |
5290 | rhs_code = LROTATE_EXPR; |
5291 | ops[0].base_addr = NULL_TREE; |
5292 | ops[1].base_addr = NULL_TREE; |
5293 | } |
5294 | invalid = false; |
5295 | } |
5296 | } |
5297 | } |
5298 | |
5299 | if (invalid |
	  && bitsize.is_constant (&const_bitsize)
	  && ((const_bitsize % BITS_PER_UNIT) != 0
	      || !multiple_p (bitpos, BITS_PER_UNIT))
5303 | && const_bitsize <= MAX_FIXED_MODE_SIZE) |
5304 | { |
5305 | /* Bypass a conversion to the bit-field type. */ |
5306 | if (!bit_not_p |
	      && is_gimple_assign (def_stmt)
5308 | && CONVERT_EXPR_CODE_P (rhs_code)) |
5309 | { |
	      tree rhs1 = gimple_assign_rhs1 (def_stmt);
5311 | if (TREE_CODE (rhs1) == SSA_NAME |
5312 | && INTEGRAL_TYPE_P (TREE_TYPE (rhs1))) |
5313 | rhs = rhs1; |
5314 | } |
5315 | rhs_code = BIT_INSERT_EXPR; |
5316 | bit_not_p = false; |
5317 | ops[0].val = rhs; |
5318 | ops[0].base_addr = NULL_TREE; |
5319 | ops[1].base_addr = NULL_TREE; |
5320 | invalid = false; |
5321 | } |
5322 | } |
5323 | else |
5324 | invalid = true; |
5325 | |
5326 | unsigned HOST_WIDE_INT const_bitsize, const_bitpos; |
5327 | unsigned HOST_WIDE_INT const_bitregion_start, const_bitregion_end; |
5328 | if (invalid |
      || !bitsize.is_constant (&const_bitsize)
      || !bitpos.is_constant (&const_bitpos)
      || !bitregion_start.is_constant (&const_bitregion_start)
      || !bitregion_end.is_constant (&const_bitregion_end))
5333 | return terminate_all_aliasing_chains (NULL, stmt); |
5334 | |
5335 | if (!ins_stmt) |
    memset (&n, 0, sizeof (n));
5337 | |
5338 | class imm_store_chain_info **chain_info = NULL; |
5339 | bool ret = false; |
5340 | if (base_addr) |
    chain_info = m_stores.get (base_addr);
5342 | |
5343 | store_immediate_info *info; |
5344 | if (chain_info) |
5345 | { |
5346 | unsigned int ord = (*chain_info)->m_store_info.length (); |
5347 | info = new store_immediate_info (const_bitsize, const_bitpos, |
5348 | const_bitregion_start, |
5349 | const_bitregion_end, |
5350 | stmt, ord, rhs_code, n, ins_stmt, |
5351 | bit_not_p, lp_nr_for_store (stmt), |
5352 | ops[0], ops[1]); |
5353 | if (dump_file && (dump_flags & TDF_DETAILS)) |
5354 | { |
5355 | fprintf (stream: dump_file, format: "Recording immediate store from stmt:\n" ); |
5356 | print_gimple_stmt (dump_file, stmt, 0); |
5357 | } |
      (*chain_info)->m_store_info.safe_push (info);
5359 | m_n_stores++; |
5360 | ret |= terminate_all_aliasing_chains (chain_info, stmt); |
5361 | /* If we reach the limit of stores to merge in a chain terminate and |
5362 | process the chain now. */ |
5363 | if ((*chain_info)->m_store_info.length () |
5364 | == (unsigned int) param_max_stores_to_merge) |
5365 | { |
5366 | if (dump_file && (dump_flags & TDF_DETAILS)) |
	    fprintf (dump_file,
		     "Reached maximum number of statements to merge:\n");
	  ret |= terminate_and_process_chain (*chain_info);
5370 | } |
5371 | } |
5372 | else |
5373 | { |
5374 | /* Store aliases any existing chain? */ |
5375 | ret |= terminate_all_aliasing_chains (NULL, stmt); |
5376 | |
5377 | /* Start a new chain. */ |
5378 | class imm_store_chain_info *new_chain |
5379 | = new imm_store_chain_info (m_stores_head, base_addr); |
5380 | info = new store_immediate_info (const_bitsize, const_bitpos, |
5381 | const_bitregion_start, |
5382 | const_bitregion_end, |
5383 | stmt, 0, rhs_code, n, ins_stmt, |
5384 | bit_not_p, lp_nr_for_store (stmt), |
5385 | ops[0], ops[1]); |
      new_chain->m_store_info.safe_push (info);
      m_n_stores++;
      m_stores.put (base_addr, new_chain);
5389 | m_n_chains++; |
5390 | if (dump_file && (dump_flags & TDF_DETAILS)) |
5391 | { |
5392 | fprintf (stream: dump_file, format: "Starting active chain number %u with statement:\n" , |
5393 | m_n_chains); |
5394 | print_gimple_stmt (dump_file, stmt, 0); |
5395 | fprintf (stream: dump_file, format: "The base object is:\n" ); |
5396 | print_generic_expr (dump_file, base_addr); |
5397 | fprintf (stream: dump_file, format: "\n" ); |
5398 | } |
5399 | } |
5400 | |
5401 | /* Prune oldest chains so that after adding the chain or store above |
5402 | we're again within the limits set by the params. */ |
5403 | if (m_n_chains > (unsigned)param_max_store_chains_to_track |
5404 | || m_n_stores > (unsigned)param_max_stores_to_track) |
5405 | { |
5406 | if (dump_file && (dump_flags & TDF_DETAILS)) |
5407 | fprintf (stream: dump_file, format: "Too many chains (%u > %d) or stores (%u > %d), " |
5408 | "terminating oldest chain(s).\n" , m_n_chains, |
5409 | param_max_store_chains_to_track, m_n_stores, |
5410 | param_max_stores_to_track); |
5411 | imm_store_chain_info **e = &m_stores_head; |
5412 | unsigned idx = 0; |
5413 | unsigned n_stores = 0; |
5414 | while (*e) |
5415 | { |
5416 | if (idx >= (unsigned)param_max_store_chains_to_track |
5417 | || (n_stores + (*e)->m_store_info.length () |
5418 | > (unsigned)param_max_stores_to_track)) |
	    ret |= terminate_and_process_chain (*e);
5420 | else |
5421 | { |
5422 | n_stores += (*e)->m_store_info.length (); |
5423 | e = &(*e)->next; |
5424 | ++idx; |
5425 | } |
5426 | } |
5427 | } |
5428 | |
5429 | return ret; |
5430 | } |
5431 | |
5432 | /* Return true if STMT is a store valid for store merging. */ |
5433 | |
5434 | static bool |
5435 | store_valid_for_store_merging_p (gimple *stmt) |
5436 | { |
  return gimple_assign_single_p (stmt)
	 && gimple_vdef (stmt)
	 && lhs_valid_for_store_merging_p (gimple_assign_lhs (stmt))
	 && (!gimple_has_volatile_ops (stmt) || gimple_clobber_p (stmt));
5441 | } |
5442 | |
5443 | enum basic_block_status { BB_INVALID, BB_VALID, BB_EXTENDED_VALID }; |
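/* BB_VALID means the basic block is worth processing on its own;
   BB_EXTENDED_VALID means that, in addition, chains still open at the end
   of the block may be extended into the fallthrough successor (only
   relevant with -fnon-call-exceptions); BB_INVALID means the block is not
   worth processing.  */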
5444 | |
5445 | /* Return the status of basic block BB wrt store merging. */ |
5446 | |
5447 | static enum basic_block_status |
5448 | get_status_for_store_merging (basic_block bb) |
5449 | { |
5450 | unsigned int num_statements = 0; |
5451 | unsigned int num_constructors = 0; |
5452 | gimple_stmt_iterator gsi; |
5453 | edge e; |
5454 | gimple *last_stmt = NULL; |
5455 | |
  for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);

      if (is_gimple_debug (stmt))
5461 | continue; |
5462 | |
5463 | last_stmt = stmt; |
5464 | |
5465 | if (store_valid_for_store_merging_p (stmt) && ++num_statements >= 2) |
5466 | break; |
5467 | |
      if (is_gimple_assign (stmt)
	  && gimple_assign_rhs_code (stmt) == CONSTRUCTOR)
	{
	  tree rhs = gimple_assign_rhs1 (stmt);
	  if (VECTOR_TYPE_P (TREE_TYPE (rhs))
	      && INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (rhs)))
	      && gimple_assign_lhs (stmt) != NULL_TREE)
5475 | { |
5476 | HOST_WIDE_INT sz |
5477 | = int_size_in_bytes (TREE_TYPE (rhs)) * BITS_PER_UNIT; |
5478 | if (sz == 16 || sz == 32 || sz == 64) |
5479 | { |
5480 | num_constructors = 1; |
5481 | break; |
5482 | } |
5483 | } |
5484 | } |
5485 | } |
5486 | |
5487 | if (num_statements == 0 && num_constructors == 0) |
5488 | return BB_INVALID; |
5489 | |
5490 | if (cfun->can_throw_non_call_exceptions && cfun->eh |
      && store_valid_for_store_merging_p (last_stmt)
      && (e = find_fallthru_edge (bb->succs))
5493 | && e->dest == bb->next_bb) |
5494 | return BB_EXTENDED_VALID; |
5495 | |
5496 | return (num_statements >= 2 || num_constructors) ? BB_VALID : BB_INVALID; |
5497 | } |
5498 | |
5499 | /* Entry point for the pass. Go over each basic block recording chains of |
5500 | immediate stores. Upon encountering a terminating statement (as defined |
5501 | by stmt_terminates_chain_p) process the recorded stores and emit the widened |
5502 | variants. */ |
5503 | |
5504 | unsigned int |
5505 | pass_store_merging::execute (function *fun) |
5506 | { |
5507 | basic_block bb; |
5508 | hash_set<gimple *> orig_stmts; |
5509 | bool changed = false, open_chains = false; |
5510 | |
5511 | /* If the function can throw and catch non-call exceptions, we'll be trying |
5512 | to merge stores across different basic blocks so we need to first unsplit |
5513 | the EH edges in order to streamline the CFG of the function. */ |
5514 | if (cfun->can_throw_non_call_exceptions && cfun->eh) |
5515 | unsplit_eh_edges (); |
5516 | |
5517 | calculate_dominance_info (CDI_DOMINATORS); |
5518 | |
5519 | FOR_EACH_BB_FN (bb, fun) |
5520 | { |
5521 | const basic_block_status bb_status = get_status_for_store_merging (bb); |
5522 | gimple_stmt_iterator gsi; |
5523 | |
5524 | if (open_chains && (bb_status == BB_INVALID || !single_pred_p (bb))) |
5525 | { |
5526 | changed |= terminate_and_process_all_chains (); |
5527 | open_chains = false; |
5528 | } |
5529 | |
5530 | if (bb_status == BB_INVALID) |
5531 | continue; |
5532 | |
5533 | if (dump_file && (dump_flags & TDF_DETAILS)) |
5534 | fprintf (stream: dump_file, format: "Processing basic block <%d>:\n" , bb->index); |
5535 | |
      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); )
	{
	  gimple *stmt = gsi_stmt (gsi);
	  gsi_next (&gsi);

	  if (is_gimple_debug (stmt))
	    continue;

	  if (gimple_has_volatile_ops (stmt) && !gimple_clobber_p (stmt))
5545 | { |
5546 | /* Terminate all chains. */ |
5547 | if (dump_file && (dump_flags & TDF_DETAILS)) |
5548 | fprintf (stream: dump_file, format: "Volatile access terminates " |
5549 | "all chains\n" ); |
5550 | changed |= terminate_and_process_all_chains (); |
5551 | open_chains = false; |
5552 | continue; |
5553 | } |
5554 | |
	  if (is_gimple_assign (stmt)
	      && gimple_assign_rhs_code (stmt) == CONSTRUCTOR
	      && maybe_optimize_vector_constructor (stmt))
5558 | continue; |
5559 | |
5560 | if (store_valid_for_store_merging_p (stmt)) |
5561 | changed |= process_store (stmt); |
5562 | else |
5563 | changed |= terminate_all_aliasing_chains (NULL, stmt); |
5564 | } |
5565 | |
5566 | if (bb_status == BB_EXTENDED_VALID) |
5567 | open_chains = true; |
5568 | else |
5569 | { |
5570 | changed |= terminate_and_process_all_chains (); |
5571 | open_chains = false; |
5572 | } |
5573 | } |
5574 | |
5575 | if (open_chains) |
5576 | changed |= terminate_and_process_all_chains (); |
5577 | |
5578 | /* If the function can throw and catch non-call exceptions and something |
5579 | changed during the pass, then the CFG has (very likely) changed too. */ |
5580 | if (cfun->can_throw_non_call_exceptions && cfun->eh && changed) |
5581 | { |
5582 | free_dominance_info (CDI_DOMINATORS); |
5583 | return TODO_cleanup_cfg; |
5584 | } |
5585 | |
5586 | return 0; |
5587 | } |
5588 | |
5589 | } // anon namespace |
5590 | |
5591 | /* Construct and return a store merging pass object. */ |
5592 | |
5593 | gimple_opt_pass * |
5594 | make_pass_store_merging (gcc::context *ctxt) |
5595 | { |
5596 | return new pass_store_merging (ctxt); |
5597 | } |
5598 | |
5599 | #if CHECKING_P |
5600 | |
5601 | namespace selftest { |
5602 | |
5603 | /* Selftests for store merging helpers. */ |
5604 | |
/* Assert that all elements of the byte arrays X and Y, both of length N,
   are equal.  */
5607 | |
5608 | static void |
5609 | verify_array_eq (unsigned char *x, unsigned char *y, unsigned int n) |
5610 | { |
5611 | for (unsigned int i = 0; i < n; i++) |
5612 | { |
5613 | if (x[i] != y[i]) |
5614 | { |
5615 | fprintf (stderr, format: "Arrays do not match. X:\n" ); |
5616 | dump_char_array (stderr, ptr: x, len: n); |
5617 | fprintf (stderr, format: "Y:\n" ); |
5618 | dump_char_array (stderr, ptr: y, len: n); |
5619 | } |
5620 | ASSERT_EQ (x[i], y[i]); |
5621 | } |
5622 | } |
5623 | |
5624 | /* Test shift_bytes_in_array_left and that it carries bits across between |
5625 | bytes correctly. */ |
5626 | |
5627 | static void |
5628 | verify_shift_bytes_in_array_left (void) |
5629 | { |
5630 | /* byte 1 | byte 0 |
5631 | 00011111 | 11100000. */ |
5632 | unsigned char orig[2] = { 0xe0, 0x1f }; |
5633 | unsigned char in[2]; |
  memcpy (in, orig, sizeof orig);
5635 | |
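  /* With the least significant byte first, { 0xe0, 0x1f } is 0x1fe0 and
     shifting it left by 2 gives 0x7f80, i.e. { 0x80, 0x7f }.  */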
5636 | unsigned char expected[2] = { 0x80, 0x7f }; |
5637 | shift_bytes_in_array_left (in, sizeof (in), 2); |
  verify_array_eq (in, expected, sizeof (in));
5639 | |
  memcpy (in, orig, sizeof orig);
  memcpy (expected, orig, sizeof orig);
  /* Check that shifting by zero doesn't change anything.  */
  shift_bytes_in_array_left (in, sizeof (in), 0);
  verify_array_eq (in, expected, sizeof (in));
5645 | |
5646 | } |
5647 | |
5648 | /* Test shift_bytes_in_array_right and that it carries bits across between |
5649 | bytes correctly. */ |
5650 | |
5651 | static void |
5652 | verify_shift_bytes_in_array_right (void) |
5653 | { |
5654 | /* byte 1 | byte 0 |
5655 | 00011111 | 11100000. */ |
5656 | unsigned char orig[2] = { 0x1f, 0xe0}; |
5657 | unsigned char in[2]; |
  memcpy (in, orig, sizeof orig);
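  /* The right-shift variant treats element 0 as the most significant byte,
     so { 0x1f, 0xe0 } is 0x1fe0 and shifting it right by 2 gives 0x07f8,
     i.e. { 0x07, 0xf8 }.  */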
5659 | unsigned char expected[2] = { 0x07, 0xf8}; |
5660 | shift_bytes_in_array_right (in, sizeof (in), 2); |
  verify_array_eq (in, expected, sizeof (in));
5662 | |
  memcpy (in, orig, sizeof orig);
  memcpy (expected, orig, sizeof orig);
  /* Check that shifting by zero doesn't change anything.  */
  shift_bytes_in_array_right (in, sizeof (in), 0);
  verify_array_eq (in, expected, sizeof (in));
5668 | } |
5669 | |
5670 | /* Test clear_bit_region that it clears exactly the bits asked and |
5671 | nothing more. */ |
5672 | |
5673 | static void |
5674 | verify_clear_bit_region (void) |
5675 | { |
5676 | /* Start with all bits set and test clearing various patterns in them. */ |
5677 | unsigned char orig[3] = { 0xff, 0xff, 0xff}; |
5678 | unsigned char in[3]; |
5679 | unsigned char expected[3]; |
  memcpy (in, orig, sizeof in);

  /* Check zeroing out all the bits.  */
  clear_bit_region (in, 0, 3 * BITS_PER_UNIT);
  expected[0] = expected[1] = expected[2] = 0;
  verify_array_eq (in, expected, sizeof in);

  memcpy (in, orig, sizeof in);
  /* Leave the first and last bits intact.  */
  clear_bit_region (in, 1, 3 * BITS_PER_UNIT - 2);
  expected[0] = 0x1;
  expected[1] = 0;
  expected[2] = 0x80;
  verify_array_eq (in, expected, sizeof in);
5694 | } |
5695 | |
5696 | /* Test clear_bit_region_be that it clears exactly the bits asked and |
5697 | nothing more. */ |
5698 | |
5699 | static void |
5700 | verify_clear_bit_region_be (void) |
5701 | { |
5702 | /* Start with all bits set and test clearing various patterns in them. */ |
5703 | unsigned char orig[3] = { 0xff, 0xff, 0xff}; |
5704 | unsigned char in[3]; |
5705 | unsigned char expected[3]; |
  memcpy (in, orig, sizeof in);

  /* Check zeroing out all the bits.  */
  clear_bit_region_be (in, BITS_PER_UNIT - 1, 3 * BITS_PER_UNIT);
  expected[0] = expected[1] = expected[2] = 0;
  verify_array_eq (in, expected, sizeof in);

  memcpy (in, orig, sizeof in);
  /* Leave the first and last bits intact.  */
  clear_bit_region_be (in, BITS_PER_UNIT - 2, 3 * BITS_PER_UNIT - 2);
  expected[0] = 0x80;
  expected[1] = 0;
  expected[2] = 0x1;
  verify_array_eq (in, expected, sizeof in);
5720 | } |
5721 | |
5722 | |
5723 | /* Run all of the selftests within this file. */ |
5724 | |
5725 | void |
5726 | store_merging_cc_tests (void) |
5727 | { |
5728 | verify_shift_bytes_in_array_left (); |
5729 | verify_shift_bytes_in_array_right (); |
5730 | verify_clear_bit_region (); |
5731 | verify_clear_bit_region_be (); |
5732 | } |
5733 | |
5734 | } // namespace selftest |
5735 | #endif /* CHECKING_P. */ |
5736 | |