// Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#ifndef RUNTIME_VM_COMPILER_BACKEND_FLOW_GRAPH_COMPILER_H_
#define RUNTIME_VM_COMPILER_BACKEND_FLOW_GRAPH_COMPILER_H_

#include "vm/compiler/runtime_api.h"
#if defined(DART_PRECOMPILED_RUNTIME)
#error "AOT runtime should not use compiler sources (including header files)"
#endif  // defined(DART_PRECOMPILED_RUNTIME)

#include <functional>

#include "vm/allocation.h"
#include "vm/code_descriptors.h"
#include "vm/compiler/assembler/assembler.h"
#include "vm/compiler/backend/code_statistics.h"
#include "vm/compiler/backend/il.h"
#include "vm/compiler/backend/locations.h"
#include "vm/runtime_entry.h"

namespace dart {

// Forward declarations.
class CatchEntryMovesMapBuilder;
class Code;
class DeoptInfoBuilder;
class FlowGraph;
class FlowGraphCompiler;
class Function;
template <typename T>
class GrowableArray;
class ParsedFunction;
class SpeculativeInliningPolicy;

namespace compiler {
struct TableSelector;
}

// Used in methods which need conditional access to a temporary register.
// May only be used to allocate a single temporary register.
class TemporaryRegisterAllocator : public ValueObject {
 public:
  virtual ~TemporaryRegisterAllocator() {}
  virtual Register AllocateTemporary() = 0;
  virtual void ReleaseTemporary() = 0;
};

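// A TemporaryRegisterAllocator that always hands out the single, fixed
// register it was constructed with.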
class ConstantTemporaryAllocator : public TemporaryRegisterAllocator {
 public:
  explicit ConstantTemporaryAllocator(Register tmp) : tmp_(tmp) {}

  Register AllocateTemporary() override { return tmp_; }
  void ReleaseTemporary() override {}

 private:
  Register const tmp_;
};

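// A TemporaryRegisterAllocator for code paths that are known to never need a
// temporary register; any request for one hits UNREACHABLE().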
class NoTemporaryAllocator : public TemporaryRegisterAllocator {
 public:
  Register AllocateTemporary() override { UNREACHABLE(); }
  void ReleaseTemporary() override { UNREACHABLE(); }
};

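// Illustrative sketch (not part of this header): a caller that has a known
// scratch register available could emit a generic move via
//
//   ConstantTemporaryAllocator temp_alloc(TMP);
//   compiler->EmitMove(dst_location, src_location, &temp_alloc);
//
// whereas a caller that can prove no temporary is needed would pass a
// NoTemporaryAllocator instead. `compiler`, `dst_location`, and
// `src_location` are hypothetical names; EmitMove is declared further down
// in this file.
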
// Describes a deoptimization point after a call (lazy deoptimization).
// For deoptimization before an instruction, use CompilerDeoptInfoWithStub.
class CompilerDeoptInfo : public ZoneAllocated {
 public:
  CompilerDeoptInfo(intptr_t deopt_id,
                    ICData::DeoptReasonId reason,
                    uint32_t flags,
                    Environment* deopt_env)
      : pc_offset_(-1),
        deopt_id_(deopt_id),
        reason_(reason),
        flags_(flags),
        deopt_env_(deopt_env) {
    ASSERT(deopt_env != nullptr);
  }
  virtual ~CompilerDeoptInfo() {}

  TypedDataPtr CreateDeoptInfo(FlowGraphCompiler* compiler,
                               DeoptInfoBuilder* builder,
                               const Array& deopt_table);

  // No code needs to be generated.
  virtual void GenerateCode(FlowGraphCompiler* compiler, intptr_t stub_ix) {}

  intptr_t pc_offset() const { return pc_offset_; }
  void set_pc_offset(intptr_t offset) { pc_offset_ = offset; }

  intptr_t deopt_id() const { return deopt_id_; }
  ICData::DeoptReasonId reason() const { return reason_; }
  uint32_t flags() const { return flags_; }
  const Environment* deopt_env() const { return deopt_env_; }

 private:
  void EmitMaterializations(Environment* env, DeoptInfoBuilder* builder);

  void AllocateOutgoingArguments(Environment* env);

  intptr_t pc_offset_;
  const intptr_t deopt_id_;
  const ICData::DeoptReasonId reason_;
  const uint32_t flags_;
  Environment* deopt_env_;

  DISALLOW_COPY_AND_ASSIGN(CompilerDeoptInfo);
};

class CompilerDeoptInfoWithStub : public CompilerDeoptInfo {
 public:
  CompilerDeoptInfoWithStub(intptr_t deopt_id,
                            ICData::DeoptReasonId reason,
                            uint32_t flags,
                            Environment* deopt_env)
      : CompilerDeoptInfo(deopt_id, reason, flags, deopt_env), entry_label_() {
    ASSERT(reason != ICData::kDeoptAtCall);
  }

  compiler::Label* entry_label() { return &entry_label_; }

  // Implementation is in the architecture-specific file.
  virtual void GenerateCode(FlowGraphCompiler* compiler, intptr_t stub_ix);

  const char* Name() const {
    const char* kFormat = "Deopt stub for id %d, reason: %s";
    const intptr_t len = Utils::SNPrint(nullptr, 0, kFormat, deopt_id(),
                                        DeoptReasonToCString(reason())) +
                         1;
    char* chars = Thread::Current()->zone()->Alloc<char>(len);
    Utils::SNPrint(chars, len, kFormat, deopt_id(),
                   DeoptReasonToCString(reason()));
    return chars;
  }

 private:
  compiler::Label entry_label_;

  DISALLOW_COPY_AND_ASSIGN(CompilerDeoptInfoWithStub);
};

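// Out-of-line code generated for an instruction's slow path. The inline
// (fast path) code typically jumps to entry_label() and the slow path jumps
// back to exit_label() when it rejoins the normal continuation.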
class SlowPathCode : public ZoneAllocated {
 public:
  explicit SlowPathCode(Instruction* instruction)
      : instruction_(instruction), entry_label_(), exit_label_() {}
  virtual ~SlowPathCode() {}

  Instruction* instruction() const { return instruction_; }
  compiler::Label* entry_label() { return &entry_label_; }
  compiler::Label* exit_label() { return &exit_label_; }

  void GenerateCode(FlowGraphCompiler* compiler) {
    EmitNativeCode(compiler);
    ASSERT(entry_label_.IsBound());
  }

 private:
  virtual void EmitNativeCode(FlowGraphCompiler* compiler) = 0;

  Instruction* instruction_;
  compiler::Label entry_label_;
  compiler::Label exit_label_;

  DISALLOW_COPY_AND_ASSIGN(SlowPathCode);
};

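// Convenience subclass that remembers the concrete instruction type, so that
// instruction() returns a T* instead of a plain Instruction*.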
template <typename T>
class TemplateSlowPathCode : public SlowPathCode {
 public:
  explicit TemplateSlowPathCode(T* instruction) : SlowPathCode(instruction) {}

  T* instruction() const {
    return static_cast<T*>(SlowPathCode::instruction());
  }
};

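// Slow path used when inline allocation of a box object of class `cls`
// fails; the allocated box is left in `result`.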
class BoxAllocationSlowPath : public TemplateSlowPathCode<Instruction> {
 public:
  BoxAllocationSlowPath(Instruction* instruction,
                        const Class& cls,
                        Register result)
      : TemplateSlowPathCode(instruction), cls_(cls), result_(result) {}

  virtual void EmitNativeCode(FlowGraphCompiler* compiler);

  static void Allocate(FlowGraphCompiler* compiler,
                       Instruction* instruction,
                       const Class& cls,
                       Register result,
                       Register temp);

 private:
  const Class& cls_;
  const Register result_;
};

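// Slow path for DoubleToIntegerInstr when the inline conversion cannot be
// used; `value_reg` holds the double input.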
class DoubleToIntegerSlowPath
    : public TemplateSlowPathCode<DoubleToIntegerInstr> {
 public:
  DoubleToIntegerSlowPath(DoubleToIntegerInstr* instruction,
                          FpuRegister value_reg)
      : TemplateSlowPathCode(instruction), value_reg_(value_reg) {}

  virtual void EmitNativeCode(FlowGraphCompiler* compiler);

 private:
  FpuRegister value_reg_;
};

// Slow path code which calls runtime entry to throw an exception.
class ThrowErrorSlowPathCode : public TemplateSlowPathCode<Instruction> {
 public:
  ThrowErrorSlowPathCode(Instruction* instruction,
                         const RuntimeEntry& runtime_entry)
      : TemplateSlowPathCode(instruction), runtime_entry_(runtime_entry) {}

  // This name appears in disassembly.
  virtual const char* name() = 0;

  // Subclasses can override these methods to customize slow path code.
  virtual void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) {}
  virtual void AddMetadataForRuntimeCall(FlowGraphCompiler* compiler) {}
  virtual void PushArgumentsForRuntimeCall(FlowGraphCompiler* compiler) {}

  // Returns number of arguments for runtime call (if shared stub is not used).
  virtual intptr_t GetNumberOfArgumentsForRuntimeCall() { return 0; }

  virtual void EmitSharedStubCall(FlowGraphCompiler* compiler,
                                  bool save_fpu_registers) {
    UNREACHABLE();
  }

  virtual void EmitNativeCode(FlowGraphCompiler* compiler);

 private:
  const RuntimeEntry& runtime_entry_;
};

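// Slow path for CheckNullInstr: throws the error selected by the
// instruction's exception type (see GetRuntimeEntry below).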
class NullErrorSlowPath : public ThrowErrorSlowPathCode {
 public:
  explicit NullErrorSlowPath(CheckNullInstr* instruction)
      : ThrowErrorSlowPathCode(instruction,
                               GetRuntimeEntry(instruction->exception_type())) {
  }

  CheckNullInstr::ExceptionType exception_type() const {
    return instruction()->AsCheckNull()->exception_type();
  }

  const char* name() override;

  void EmitSharedStubCall(FlowGraphCompiler* compiler,
                          bool save_fpu_registers) override;

  void AddMetadataForRuntimeCall(FlowGraphCompiler* compiler) override {
    CheckNullInstr::AddMetadataForRuntimeCall(instruction()->AsCheckNull(),
                                              compiler);
  }

  static CodePtr GetStub(FlowGraphCompiler* compiler,
                         CheckNullInstr::ExceptionType exception_type,
                         bool save_fpu_registers);

 private:
  static const RuntimeEntry& GetRuntimeEntry(
      CheckNullInstr::ExceptionType exception_type);
};

class RangeErrorSlowPath : public ThrowErrorSlowPathCode {
 public:
  explicit RangeErrorSlowPath(GenericCheckBoundInstr* instruction)
      : ThrowErrorSlowPathCode(instruction, kRangeErrorRuntimeEntry) {}
  virtual const char* name() { return "check bound"; }

  virtual intptr_t GetNumberOfArgumentsForRuntimeCall() {
    return 2;  // length and index
  }

  virtual void PushArgumentsForRuntimeCall(FlowGraphCompiler* compiler);

  virtual void EmitSharedStubCall(FlowGraphCompiler* compiler,
                                  bool save_fpu_registers);
};

class WriteErrorSlowPath : public ThrowErrorSlowPathCode {
 public:
  explicit WriteErrorSlowPath(CheckWritableInstr* instruction)
      : ThrowErrorSlowPathCode(instruction, kWriteErrorRuntimeEntry) {}
  virtual const char* name() { return "check writable"; }

  virtual void EmitSharedStubCall(FlowGraphCompiler* compiler,
                                  bool save_fpu_registers);
};

class LateInitializationErrorSlowPath : public ThrowErrorSlowPathCode {
 public:
  explicit LateInitializationErrorSlowPath(Instruction* instruction)
      : ThrowErrorSlowPathCode(instruction,
                               kLateFieldNotInitializedErrorRuntimeEntry) {
    ASSERT(instruction->IsLoadField() || instruction->IsLoadStaticField());
  }
  virtual const char* name() { return "late initialization error"; }

  virtual intptr_t GetNumberOfArgumentsForRuntimeCall() {
    return 1;  // field
  }

  virtual void PushArgumentsForRuntimeCall(FlowGraphCompiler* compiler);

  virtual void EmitSharedStubCall(FlowGraphCompiler* compiler,
                                  bool save_fpu_registers);

 private:
  FieldPtr OriginalField() const {
    return instruction()->IsLoadField()
               ? instruction()->AsLoadField()->slot().field().Original()
               : instruction()->AsLoadStaticField()->field().Original();
  }
};

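// Translates a FlowGraph into machine code for one function via the
// architecture-specific assembler, while collecting the metadata the runtime
// needs alongside the instructions (PC descriptors, stack maps, exception
// handlers, deoptimization and catch-entry-move information).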
class FlowGraphCompiler : public ValueObject {
 private:
  class BlockInfo : public ZoneAllocated {
   public:
    BlockInfo()
        : block_label_(),
          jump_label_(&block_label_),
          next_nonempty_label_(nullptr),
          is_marked_(false) {}

    // The label to jump to when control is transferred to this block. For
    // nonempty blocks it is the label of the block itself. For empty
    // blocks it is the label of the first nonempty successor block.
    compiler::Label* jump_label() const { return jump_label_; }
    void set_jump_label(compiler::Label* label) { jump_label_ = label; }

    // The label of the first nonempty block after this one in the block
    // order, or nullptr if there is no nonempty block following this one.
    compiler::Label* next_nonempty_label() const {
      return next_nonempty_label_;
    }
    void set_next_nonempty_label(compiler::Label* label) {
      next_nonempty_label_ = label;
    }

    bool WasCompacted() const { return jump_label_ != &block_label_; }

    // Block compaction is recursive. Block info for already-compacted
    // blocks is marked so as to avoid cycles in the graph.
    bool is_marked() const { return is_marked_; }
    void mark() { is_marked_ = true; }

   private:
    compiler::Label block_label_;

    compiler::Label* jump_label_;
    compiler::Label* next_nonempty_label_;

    bool is_marked_;
  };

 public:
  FlowGraphCompiler(compiler::Assembler* assembler,
                    FlowGraph* flow_graph,
                    const ParsedFunction& parsed_function,
                    bool is_optimizing,
                    SpeculativeInliningPolicy* speculative_policy,
                    const GrowableArray<const Function*>& inline_id_to_function,
                    const GrowableArray<TokenPosition>& inline_id_to_token_pos,
                    const GrowableArray<intptr_t>& caller_inline_id,
                    ZoneGrowableArray<const ICData*>* deopt_id_to_ic_data,
                    CodeStatistics* stats = nullptr);

  void ArchSpecificInitialization();

  ~FlowGraphCompiler();

  static bool SupportsUnboxedDoubles();
  static bool SupportsUnboxedSimd128();
  static bool CanConvertInt64ToDouble();

  // Accessors.
  compiler::Assembler* assembler() const { return assembler_; }
  const ParsedFunction& parsed_function() const { return parsed_function_; }
  const Function& function() const { return parsed_function_.function(); }
  const GrowableArray<BlockEntryInstr*>& block_order() const {
    return block_order_;
  }
  const GrowableArray<const compiler::TableSelector*>&
  dispatch_table_call_targets() const {
    return dispatch_table_call_targets_;
  }

  // If 'ForcedOptimization()' returns 'true', we are compiling in optimized
  // mode for a function which cannot deoptimize. Certain optimizations, e.g.
  // speculative optimizations and call patching, are disabled.
  bool ForcedOptimization() const { return function().ForceOptimize(); }

  const FlowGraph& flow_graph() const {
    return intrinsic_mode() ? *intrinsic_flow_graph_ : flow_graph_;
  }

  BlockEntryInstr* current_block() const { return current_block_; }
  void set_current_block(BlockEntryInstr* value) { current_block_ = value; }

  Instruction* current_instruction() const { return current_instruction_; }

  bool CanOptimize() const;
  bool CanOptimizeFunction() const;
  bool CanOSRFunction() const;
  bool is_optimizing() const { return is_optimizing_; }

  void InsertBSSRelocation(BSS::Relocation reloc);
  void LoadBSSEntry(BSS::Relocation relocation, Register dst, Register tmp);

  // The function was fully intrinsified, so the body is unreachable.
  //
  // We still need to compile the body in unoptimized mode because the
  // 'ICData's are added to the function's 'ic_data_array_' when instance
  // calls are compiled.
  bool skip_body_compilation() const {
    return fully_intrinsified_ && is_optimizing();
  }

  void EnterIntrinsicMode();
  void ExitIntrinsicMode();
  bool intrinsic_mode() const { return intrinsic_mode_; }

  void set_intrinsic_flow_graph(const FlowGraph& flow_graph) {
    intrinsic_flow_graph_ = &flow_graph;
  }

  void set_intrinsic_slow_path_label(compiler::Label* label) {
    ASSERT(intrinsic_slow_path_label_ == nullptr || label == nullptr);
    intrinsic_slow_path_label_ = label;
  }
  compiler::Label* intrinsic_slow_path_label() const {
    ASSERT(intrinsic_slow_path_label_ != nullptr);
    return intrinsic_slow_path_label_;
  }

  bool ForceSlowPathForStackOverflow() const;

  const GrowableArray<BlockInfo*>& block_info() const { return block_info_; }

  void StatsBegin(Instruction* instr) {
    if (stats_ != nullptr) stats_->Begin(instr);
  }

  void StatsEnd(Instruction* instr) {
    if (stats_ != nullptr) stats_->End(instr);
  }

  void SpecialStatsBegin(intptr_t tag) {
    if (stats_ != nullptr) stats_->SpecialBegin(tag);
  }

  void SpecialStatsEnd(intptr_t tag) {
    if (stats_ != nullptr) stats_->SpecialEnd(tag);
  }

  GrowableArray<const Field*>& used_static_fields() {
    return used_static_fields_;
  }

  // The constructor is lightweight; major initialization work should occur
  // here. This makes it easier to measure the time spent in the compiler.
  void InitCompiler();

  void CompileGraph();

  void EmitPrologue();

  void VisitBlocks();

  void EmitFunctionEntrySourcePositionDescriptorIfNeeded();

  // Bail out of the flow graph compiler. Does not return to the caller.
  void Bailout(const char* reason);

  // Returns 'true' if regular code generation should be skipped.
  bool TryIntrinsify();

  // Emits code for a generic move from a location 'src' to a location 'dst'.
  //
  // Note that Location does not include a size (that can only be deduced from
  // a Representation), so these moves might overapproximate the size needed
  // to move. The maximal overapproximation is moving 8 bytes instead of 4 on
  // 64 bit architectures. This overapproximation is not a problem, because
  // the Dart calling convention only uses word-sized stack slots.
  //
  // TODO(dartbug.com/40400): Express this in terms of EmitMove(NativeLocation,
  // NativeLocation) to remove code duplication.
  void EmitMove(Location dst, Location src, TemporaryRegisterAllocator* temp);

  // Emits code for a move from a location `src` to a location `dst`.
  //
  // Takes into account the payload and container representations of `dst` and
  // `src` to do the smallest move possible, and sign (or zero) extend or
  // truncate if needed.
  //
  // Makes use of TMP, FpuTMP, and `temp`.
  void EmitNativeMove(const compiler::ffi::NativeLocation& dst,
                      const compiler::ffi::NativeLocation& src,
                      TemporaryRegisterAllocator* temp);

  // Helper method to move from a Location to a NativeLocation.
  void EmitMoveToNative(const compiler::ffi::NativeLocation& dst,
                        Location src_loc,
                        Representation src_type,
                        TemporaryRegisterAllocator* temp);

  // Helper method to move from a NativeLocation to a Location.
  void EmitMoveFromNative(Location dst_loc,
                          Representation dst_type,
                          const compiler::ffi::NativeLocation& src,
                          TemporaryRegisterAllocator* temp);

  bool CheckAssertAssignableTypeTestingABILocations(
      const LocationSummary& locs);

  void GenerateAssertAssignable(CompileType* receiver_type,
                                const InstructionSource& source,
                                intptr_t deopt_id,
                                Environment* env,
                                const String& dst_name,
                                LocationSummary* locs);

#if !defined(TARGET_ARCH_IA32)
  void GenerateCallerChecksForAssertAssignable(CompileType* receiver_type,
                                               const AbstractType& dst_type,
                                               compiler::Label* done);

  void GenerateTTSCall(const InstructionSource& source,
                       intptr_t deopt_id,
                       Environment* env,
                       Register reg_with_type,
                       const AbstractType& dst_type,
                       const String& dst_name,
                       LocationSummary* locs);

  static void GenerateIndirectTTSCall(compiler::Assembler* assembler,
                                      Register reg_with_type,
                                      intptr_t sub_type_cache_index);
#endif

  void GenerateStubCall(const InstructionSource& source,
                        const Code& stub,
                        UntaggedPcDescriptors::Kind kind,
                        LocationSummary* locs,
                        intptr_t deopt_id,
                        Environment* env);

  void GenerateNonLazyDeoptableStubCall(const InstructionSource& source,
                                        const Code& stub,
                                        UntaggedPcDescriptors::Kind kind,
                                        LocationSummary* locs);

  void GeneratePatchableCall(const InstructionSource& source,
                             const Code& stub,
                             UntaggedPcDescriptors::Kind kind,
                             LocationSummary* locs);

  void GenerateDartCall(intptr_t deopt_id,
                        const InstructionSource& source,
                        const Code& stub,
                        UntaggedPcDescriptors::Kind kind,
                        LocationSummary* locs,
                        Code::EntryKind entry_kind = Code::EntryKind::kNormal);

  void GenerateStaticDartCall(
      intptr_t deopt_id,
      const InstructionSource& source,
      UntaggedPcDescriptors::Kind kind,
      LocationSummary* locs,
      const Function& target,
      Code::EntryKind entry_kind = Code::EntryKind::kNormal);

  void GenerateInstanceOf(const InstructionSource& source,
                          intptr_t deopt_id,
                          Environment* env,
                          const AbstractType& type,
                          LocationSummary* locs);

  void GenerateInstanceCall(intptr_t deopt_id,
                            const InstructionSource& source,
                            LocationSummary* locs,
                            const ICData& ic_data,
                            Code::EntryKind entry_kind,
                            bool receiver_can_be_smi);

  void GenerateStaticCall(
      intptr_t deopt_id,
      const InstructionSource& source,
      const Function& function,
      ArgumentsInfo args_info,
      LocationSummary* locs,
      const ICData& ic_data_in,
      ICData::RebindRule rebind_rule,
      Code::EntryKind entry_kind = Code::EntryKind::kNormal);

  void GenerateNumberTypeCheck(Register kClassIdReg,
                               const AbstractType& type,
                               compiler::Label* is_instance_lbl,
                               compiler::Label* is_not_instance_lbl);
  void GenerateStringTypeCheck(Register kClassIdReg,
                               compiler::Label* is_instance_lbl,
                               compiler::Label* is_not_instance_lbl);
  void GenerateListTypeCheck(Register kClassIdReg,
                             compiler::Label* is_instance_lbl);

  // Returns true if no further checks are necessary, but the code coming after
  // the emitted code here is still required to do a runtime call (for the
  // negative case of throwing an exception).
  bool GenerateSubtypeRangeCheck(Register class_id_reg,
                                 const Class& type_class,
                                 compiler::Label* is_subtype_lbl);

  // We test up to 4 different cid ranges; if we would need to test more in
  // order to get a definite answer, we fall back to the old mechanism (namely
  // going through the subtyping cache).
  static constexpr intptr_t kMaxNumberOfCidRangesToTest = 4;

  // If [fall_through_if_inside] is `true`, then [outside_range_lbl] must be
  // supplied, since it will be jumped to in the last case if the cid is
  // outside the range.
  //
  // Returns whether [class_id_reg] is clobbered by the check.
  static bool GenerateCidRangesCheck(
      compiler::Assembler* assembler,
      Register class_id_reg,
      const CidRangeVector& cid_ranges,
      compiler::Label* inside_range_lbl,
      compiler::Label* outside_range_lbl = nullptr,
      bool fall_through_if_inside = false);

  void EmitOptimizedInstanceCall(
      const Code& stub,
      const ICData& ic_data,
      intptr_t deopt_id,
      const InstructionSource& source,
      LocationSummary* locs,
      Code::EntryKind entry_kind = Code::EntryKind::kNormal);

  void EmitInstanceCallJIT(const Code& stub,
                           const ICData& ic_data,
                           intptr_t deopt_id,
                           const InstructionSource& source,
                           LocationSummary* locs,
                           Code::EntryKind entry_kind);

  void EmitPolymorphicInstanceCall(const PolymorphicInstanceCallInstr* call,
                                   const CallTargets& targets,
                                   ArgumentsInfo args_info,
                                   intptr_t deopt_id,
                                   const InstructionSource& source,
                                   LocationSummary* locs,
                                   bool complete,
                                   intptr_t total_call_count,
                                   bool receiver_can_be_smi = true);

  void EmitMegamorphicInstanceCall(const ICData& icdata,
                                   intptr_t deopt_id,
                                   const InstructionSource& source,
                                   LocationSummary* locs) {
    const String& name = String::Handle(icdata.target_name());
    const Array& arguments_descriptor =
        Array::Handle(icdata.arguments_descriptor());
    EmitMegamorphicInstanceCall(name, arguments_descriptor, deopt_id, source,
                                locs);
  }

  void EmitMegamorphicInstanceCall(const String& function_name,
                                   const Array& arguments_descriptor,
                                   intptr_t deopt_id,
                                   const InstructionSource& source,
                                   LocationSummary* locs);

  void EmitInstanceCallAOT(
      const ICData& ic_data,
      intptr_t deopt_id,
      const InstructionSource& source,
      LocationSummary* locs,
      Code::EntryKind entry_kind = Code::EntryKind::kNormal,
      bool receiver_can_be_smi = true);

  void EmitTestAndCall(const CallTargets& targets,
                       const String& function_name,
                       ArgumentsInfo args_info,
                       compiler::Label* failed,
                       compiler::Label* match_found,
                       intptr_t deopt_id,
                       const InstructionSource& source_index,
                       LocationSummary* locs,
                       bool complete,
                       intptr_t total_ic_calls,
                       Code::EntryKind entry_kind = Code::EntryKind::kNormal);

  void EmitDispatchTableCall(int32_t selector_offset,
                             const Array& arguments_descriptor);

  Condition EmitEqualityRegConstCompare(Register reg,
                                        const Object& obj,
                                        bool needs_number_check,
                                        const InstructionSource& source,
                                        intptr_t deopt_id);
  Condition EmitEqualityRegRegCompare(Register left,
                                      Register right,
                                      bool needs_number_check,
                                      const InstructionSource& source,
                                      intptr_t deopt_id);
  Condition EmitBoolTest(Register value, BranchLabels labels, bool invert);

  bool NeedsEdgeCounter(BlockEntryInstr* block);

  void EmitEdgeCounter(intptr_t edge_id);

  void RecordCatchEntryMoves(Environment* env);

  void EmitCallToStub(const Code& stub);
  void EmitJumpToStub(const Code& stub);
  void EmitTailCallToStub(const Code& stub);

  void EmitDropArguments(intptr_t count);

  // Emits the following metadata for the current PC:
  //
  //   * Attaches current try index
  //   * Attaches stackmaps
  //   * Attaches catch entry moves (in AOT)
  //   * Deoptimization information (in JIT)
  //
  // If [env] is not `nullptr` it will be used instead of the
  // `pending_deoptimization_env`.
  void EmitCallsiteMetadata(const InstructionSource& source,
                            intptr_t deopt_id,
                            UntaggedPcDescriptors::Kind kind,
                            LocationSummary* locs,
                            Environment* env);

  void EmitYieldPositionMetadata(const InstructionSource& source,
                                 intptr_t yield_index);

  void EmitComment(Instruction* instr);

  // Returns stack size (number of variables on stack for unoptimized
  // code, or number of spill slots for optimized code).
  intptr_t StackSize() const;

  // Returns the number of extra stack slots used during an OSR entry
  // (values for all [ParameterInstr]s, representing local variables
  // and expression stack values, are already on the stack).
  intptr_t ExtraStackSlotsOnOsrEntry() const;

#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
  // Changes the base register of this Location if this allows us to utilize
  // a better addressing mode. For RISC-V, this is the wider range of
  // compressed instructions available for SP-relative loads compared to
  // FP-relative loads. Assumes `StackSize` accounts for everything at the
  // point of use.
  Location RebaseIfImprovesAddressing(Location loc) const;
#endif  // defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)

  // Returns assembler label associated with the given block entry.
  compiler::Label* GetJumpLabel(BlockEntryInstr* block_entry) const;
  bool WasCompacted(BlockEntryInstr* block_entry) const;

  // Returns the label of the fall-through of the current block.
  compiler::Label* NextNonEmptyLabel() const;

  // Returns true if there is a next block after the current one in
  // the block order and if it is the given block.
  bool CanFallThroughTo(BlockEntryInstr* block_entry) const;

  // Return true-, false- and fall-through label for a branch instruction.
  BranchLabels CreateBranchLabels(BranchInstr* branch) const;

  void AddExceptionHandler(CatchBlockEntryInstr* entry);
  void SetNeedsStackTrace(intptr_t try_index);
  void AddCurrentDescriptor(UntaggedPcDescriptors::Kind kind,
                            intptr_t deopt_id,
                            const InstructionSource& source);
  void AddDescriptor(
      UntaggedPcDescriptors::Kind kind,
      intptr_t pc_offset,
      intptr_t deopt_id,
      const InstructionSource& source,
      intptr_t try_index,
      intptr_t yield_index = UntaggedPcDescriptors::kInvalidYieldIndex);

  // Add NullCheck information for the current PC.
  void AddNullCheck(const InstructionSource& source, const String& name);

  void RecordSafepoint(LocationSummary* locs,
                       intptr_t slow_path_argument_count = 0);

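  // Illustrative sketch (hypothetical register and deopt reason): instruction
  // code generators typically request a deopt target and branch to it when a
  // speculative check fails, e.g.
  //
  //   compiler::Label* deopt =
  //       compiler->AddDeoptStub(instr->deopt_id(), ICData::kDeoptCheckSmi);
  //   __ BranchIfNotSmi(value_reg, deopt);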
  compiler::Label* AddDeoptStub(intptr_t deopt_id,
                                ICData::DeoptReasonId reason,
                                uint32_t flags = 0);

  CompilerDeoptInfo* AddDeoptIndexAtCall(intptr_t deopt_id, Environment* env);
  CompilerDeoptInfo* AddSlowPathDeoptInfo(intptr_t deopt_id, Environment* env);

  void AddSlowPathCode(SlowPathCode* slow_path);

  void FinalizeExceptionHandlers(const Code& code);
  void FinalizePcDescriptors(const Code& code);
  ArrayPtr CreateDeoptInfo(compiler::Assembler* assembler);
  void FinalizeStackMaps(const Code& code);
  void FinalizeVarDescriptors(const Code& code);
  void FinalizeCatchEntryMovesMap(const Code& code);
  void FinalizeStaticCallTargetsTable(const Code& code);
  void FinalizeCodeSourceMap(const Code& code);

  const Class& double_class() const { return double_class_; }
  const Class& mint_class() const { return mint_class_; }
  const Class& float32x4_class() const { return float32x4_class_; }
  const Class& float64x2_class() const { return float64x2_class_; }
  const Class& int32x4_class() const { return int32x4_class_; }

  const Class& BoxClassFor(Representation rep);

  void SaveLiveRegisters(LocationSummary* locs);
  void RestoreLiveRegisters(LocationSummary* locs);
#if defined(DEBUG)
  void ClobberDeadTempRegisters(LocationSummary* locs);
#endif

  // Returns a new environment based on [env] which accounts for the new
  // locations of values in the slow path call.
  Environment* SlowPathEnvironmentFor(Instruction* inst,
                                      intptr_t num_slow_path_args) {
    if (inst->env() == nullptr && is_optimizing()) {
      if (pending_deoptimization_env_ == nullptr) {
        return nullptr;
      }
      return SlowPathEnvironmentFor(pending_deoptimization_env_, inst->locs(),
                                    num_slow_path_args);
    }
    return SlowPathEnvironmentFor(inst->env(), inst->locs(),
                                  num_slow_path_args);
  }

  Environment* SlowPathEnvironmentFor(Environment* env,
                                      LocationSummary* locs,
                                      intptr_t num_slow_path_args);

  intptr_t CurrentTryIndex() const {
    if (current_block_ == nullptr) {
      return kInvalidTryIndex;
    }
    return current_block_->try_index();
  }

  bool may_reoptimize() const { return may_reoptimize_; }

  // Use in unoptimized compilation to preserve/reuse ICData.
  //
  // If [binary_smi_target] is non-null and we have to create the ICData, the
  // ICData will get an (kSmiCid, kSmiCid, binary_smi_target) entry.
  const ICData* GetOrAddInstanceCallICData(intptr_t deopt_id,
                                           const String& target_name,
                                           const Array& arguments_descriptor,
                                           intptr_t num_args_tested,
                                           const AbstractType& receiver_type,
                                           const Function& binary_smi_target);

  const ICData* GetOrAddStaticCallICData(intptr_t deopt_id,
                                         const Function& target,
                                         const Array& arguments_descriptor,
                                         intptr_t num_args_tested,
                                         ICData::RebindRule rebind_rule);

  static const CallTargets* ResolveCallTargetsForReceiverCid(
      intptr_t cid,
      const String& selector,
      const Array& args_desc_array);

  const ZoneGrowableArray<const ICData*>& deopt_id_to_ic_data() const {
    return *deopt_id_to_ic_data_;
  }

  Thread* thread() const { return thread_; }
  IsolateGroup* isolate_group() const { return thread_->isolate_group(); }
  Zone* zone() const { return zone_; }

  void AddStubCallTarget(const Code& code);
  void AddDispatchTableCallTarget(const compiler::TableSelector* selector);

  ArrayPtr edge_counters_array() const { return edge_counters_array_.ptr(); }

  ArrayPtr InliningIdToFunction() const;

  void BeginCodeSourceRange(const InstructionSource& source);
  void EndCodeSourceRange(const InstructionSource& source);

  static bool LookupMethodFor(int class_id,
                              const String& name,
                              const ArgumentsDescriptor& args_desc,
                              Function* fn_return,
                              bool* class_is_abstract_return = nullptr);

  // Returns new class-id bias.
  //
  // TODO(kustermann): We should move this code out of the [FlowGraphCompiler]!
  static int EmitTestAndCallCheckCid(compiler::Assembler* assembler,
                                     compiler::Label* label,
                                     Register class_id_reg,
                                     const CidRangeValue& range,
                                     int bias,
                                     bool jump_on_miss = true);

  bool IsEmptyBlock(BlockEntryInstr* block) const;

 private:
  friend class BoxInt64Instr;            // For AddPcRelativeCallStubTarget().
  friend class CheckNullInstr;           // For AddPcRelativeCallStubTarget().
  friend class NullErrorSlowPath;        // For AddPcRelativeCallStubTarget().
  friend class CheckStackOverflowInstr;  // For AddPcRelativeCallStubTarget().
  friend class StoreIndexedInstr;        // For AddPcRelativeCallStubTarget().
  friend class StoreFieldInstr;          // For AddPcRelativeCallStubTarget().
  friend class CheckStackOverflowSlowPath;  // For pending_deoptimization_env_.
  friend class GraphIntrinsicCodeGenScope;  // For optimizing_.

  // Architecture specific implementation of simple native moves.
  void EmitNativeMoveArchitecture(const compiler::ffi::NativeLocation& dst,
                                  const compiler::ffi::NativeLocation& src);

  void EmitFrameEntry();

  bool TryIntrinsifyHelper();
  void AddPcRelativeCallTarget(const Function& function,
                               Code::EntryKind entry_kind);
  void AddPcRelativeCallStubTarget(const Code& stub_code);
  void AddPcRelativeTailCallStubTarget(const Code& stub_code);
  void AddPcRelativeTTSCallTypeTarget(const AbstractType& type);
  void AddStaticCallTarget(const Function& function,
                           Code::EntryKind entry_kind);

  void GenerateDeferredCode();

  void EmitInstructionPrologue(Instruction* instr);
  void EmitInstructionEpilogue(Instruction* instr);

  // Emit code to load a Value into register 'dst'.
  void LoadValue(Register dst, Value* value);

  void EmitOptimizedStaticCall(
      const Function& function,
      const Array& arguments_descriptor,
      intptr_t size_with_type_args,
      intptr_t deopt_id,
      const InstructionSource& source,
      LocationSummary* locs,
      Code::EntryKind entry_kind = Code::EntryKind::kNormal);

  void EmitUnoptimizedStaticCall(
      intptr_t size_with_type_args,
      intptr_t deopt_id,
      const InstructionSource& source,
      LocationSummary* locs,
      const ICData& ic_data,
      Code::EntryKind entry_kind = Code::EntryKind::kNormal);

  // Helper for TestAndCall that calculates a good bias that
  // allows more compact instructions to be emitted.
  intptr_t ComputeGoodBiasForCidComparison(const CallTargets& sorted,
                                           intptr_t max_immediate);

  // More helpers for EmitTestAndCall.

  static Register EmitTestCidRegister();

  void EmitTestAndCallLoadReceiver(intptr_t count_without_type_args,
                                   const Array& arguments_descriptor);

  void EmitTestAndCallSmiBranch(compiler::Label* label, bool jump_if_smi);

  void EmitTestAndCallLoadCid(Register class_id_reg);

  // Type checking helper methods.
  void CheckClassIds(Register class_id_reg,
                     const GrowableArray<intptr_t>& class_ids,
                     compiler::Label* is_instance_lbl,
                     compiler::Label* is_not_instance_lbl);

  SubtypeTestCachePtr GenerateInlineInstanceof(
      const InstructionSource& source,
      const AbstractType& type,
      compiler::Label* is_instance_lbl,
      compiler::Label* is_not_instance_lbl);

  SubtypeTestCachePtr GenerateInstantiatedTypeWithArgumentsTest(
      const InstructionSource& source,
      const AbstractType& dst_type,
      compiler::Label* is_instance_lbl,
      compiler::Label* is_not_instance_lbl);

  bool GenerateInstantiatedTypeNoArgumentsTest(
      const InstructionSource& source,
      const AbstractType& dst_type,
      compiler::Label* is_instance_lbl,
      compiler::Label* is_not_instance_lbl);

  SubtypeTestCachePtr GenerateUninstantiatedTypeTest(
      const InstructionSource& source,
      const AbstractType& dst_type,
      compiler::Label* is_instance_lbl,
      compiler::Label* is_not_instance_label);

  SubtypeTestCachePtr GenerateFunctionTypeTest(
      const InstructionSource& source,
      const AbstractType& dst_type,
      compiler::Label* is_instance_lbl,
      compiler::Label* is_not_instance_label);

  SubtypeTestCachePtr GenerateSubtype1TestCacheLookup(
      const InstructionSource& source,
      const Class& type_class,
      compiler::Label* is_instance_lbl,
      compiler::Label* is_not_instance_lbl);

  enum class TypeTestStubKind {
    // Just check the instance cid (no closures).
    kTestTypeOneArg = 1,
    // Also check the instance type arguments.
    kTestTypeTwoArgs = 2,
    // Also check the instantiator type arguments for the destination type.
    kTestTypeThreeArgs = 3,
    // Also check the function type arguments for the destination type.
    kTestTypeFourArgs = 4,
    // Also check the parent function and delayed type arguments for a closure.
    kTestTypeSixArgs = 6,
    // Also check the destination type, as it is not known at compile time.
    kTestTypeSevenArgs = 7,
  };

  static_assert(static_cast<intptr_t>(TypeTestStubKind::kTestTypeSevenArgs) ==
                    SubtypeTestCache::kMaxInputs,
                "Need to adjust kTestTypeMaxArgs");
  static constexpr TypeTestStubKind kTestTypeMaxArgs =
      TypeTestStubKind::kTestTypeSevenArgs;

  // Returns the number of used inputs for a given type test stub kind.
  intptr_t UsedInputsForTTSKind(TypeTestStubKind kind) {
    return static_cast<intptr_t>(kind);
  }

  // Returns type test stub kind for a type test against type parameter type.
  TypeTestStubKind GetTypeTestStubKindForTypeParameter(
      const TypeParameter& type_param);

  // Takes input from TypeTestABI registers (or stack on IA32), see
  // StubCodeCompiler::GenerateSubtypeNTestCacheStub for caller-save registers.
  SubtypeTestCachePtr GenerateCallSubtypeTestStub(
      TypeTestStubKind test_kind,
      compiler::Label* is_instance_lbl,
      compiler::Label* is_not_instance_lbl);

  void GenerateBoolToJump(Register bool_reg,
                          compiler::Label* is_true,
                          compiler::Label* is_false);

  void GenerateMethodExtractorIntrinsic(const Function& extracted_method,
                                        intptr_t type_arguments_field_offset);

  void GenerateGetterIntrinsic(const Function& accessor, const Field& field);

  // Perform a greedy local register allocation. Consider all registers free.
  void AllocateRegistersLocally(Instruction* instr);

  // Map a block number in a forward iteration into the block number in the
  // corresponding reverse iteration. Used to obtain an index into
  // block_order for reverse iterations.
  intptr_t reverse_index(intptr_t index) const {
    return block_order_.length() - index - 1;
  }

  void set_current_instruction(Instruction* current_instruction) {
    current_instruction_ = current_instruction;
  }

  void CompactBlock(BlockEntryInstr* block);
  void CompactBlocks();

  bool IsListClass(const Class& cls) const {
    return cls.ptr() == list_class_.ptr();
  }

  void EmitSourceLine(Instruction* instr);

  intptr_t GetOptimizationThreshold() const;

#if defined(DEBUG)
  void FrameStateUpdateWith(Instruction* instr);
  void FrameStatePush(Definition* defn);
  void FrameStatePop(intptr_t count);
  bool FrameStateIsSafeToCall();
  void FrameStateClear();
#endif

  // Returns true if instruction lookahead (window size one)
  // is amenable to a peephole optimization.
  bool IsPeephole(Instruction* instr) const;

#if defined(DEBUG)
  bool CanCallDart() const {
    return current_instruction_ == nullptr ||
           current_instruction_->CanCallDart();
  }
#else
  bool CanCallDart() const { return true; }
#endif

  bool CanPcRelativeCall(const Function& target) const;
  bool CanPcRelativeCall(const Code& target) const;
  bool CanPcRelativeCall(const AbstractType& target) const;

  // This struct contains either a function or code; the other one is nullptr.
  class StaticCallsStruct : public ZoneAllocated {
   public:
    Code::CallKind call_kind;
    Code::CallEntryPoint entry_point;
    const intptr_t offset;
    const Function* function;      // Can be nullptr.
    const Code* code;              // Can be nullptr.
    const AbstractType* dst_type;  // Can be nullptr.
    StaticCallsStruct(Code::CallKind call_kind,
                      Code::CallEntryPoint entry_point,
                      intptr_t offset_arg,
                      const Function* function_arg,
                      const Code* code_arg,
                      const AbstractType* dst_type)
        : call_kind(call_kind),
          entry_point(entry_point),
          offset(offset_arg),
          function(function_arg),
          code(code_arg),
          dst_type(dst_type) {
      DEBUG_ASSERT(function == nullptr ||
                   function->IsNotTemporaryScopedHandle());
      DEBUG_ASSERT(code == nullptr || code->IsNotTemporaryScopedHandle());
      DEBUG_ASSERT(dst_type == nullptr ||
                   dst_type->IsNotTemporaryScopedHandle());
      ASSERT(code == nullptr || dst_type == nullptr);
    }

   private:
    DISALLOW_COPY_AND_ASSIGN(StaticCallsStruct);
  };

  Thread* thread_;
  Zone* zone_;
  compiler::Assembler* assembler_;
  const ParsedFunction& parsed_function_;
  const FlowGraph& flow_graph_;
  const FlowGraph* intrinsic_flow_graph_ = nullptr;
  const GrowableArray<BlockEntryInstr*>& block_order_;

#if defined(DEBUG)
  GrowableArray<Representation> frame_state_;
#endif

  // Compiler-specific per-block state. Indexed by postorder block number
  // for convenience. This is not the block's index in the block order,
  // which is reverse postorder.
  BlockEntryInstr* current_block_;
  ExceptionHandlerList* exception_handlers_list_;
  DescriptorList* pc_descriptors_list_;
  CompressedStackMapsBuilder* compressed_stackmaps_builder_;
  CodeSourceMapBuilder* code_source_map_builder_;
  CatchEntryMovesMapBuilder* catch_entry_moves_maps_builder_;
  GrowableArray<BlockInfo*> block_info_;
  GrowableArray<CompilerDeoptInfo*> deopt_infos_;
  GrowableArray<SlowPathCode*> slow_path_code_;
  // Fields that were referenced by generated code.
  // This list is needed by the precompiler to ensure they are retained.
  GrowableArray<const Field*> used_static_fields_;
  // Stores static call targets as well as stub targets.
  // TODO(srdjan): Evaluate if we should store allocation stub targets into a
  // separate table?
  GrowableArray<StaticCallsStruct*> static_calls_target_table_;
  // The table selectors of all dispatch table calls in the current function.
  GrowableArray<const compiler::TableSelector*> dispatch_table_call_targets_;
  GrowableArray<IndirectGotoInstr*> indirect_gotos_;
  bool is_optimizing_;
  SpeculativeInliningPolicy* speculative_policy_;
  // Set to true if optimized code has IC calls.
  bool may_reoptimize_;
  // True while emitting intrinsic code.
  bool intrinsic_mode_;
  compiler::Label* intrinsic_slow_path_label_ = nullptr;
  bool fully_intrinsified_ = false;
  CodeStatistics* stats_;

  // The definition whose value is supposed to be at the top of the
  // expression stack. Used by peephole optimization (window size one)
  // to eliminate redundant push/pop pairs.
  Definition* top_of_stack_ = nullptr;

  const Class& double_class_;
  const Class& mint_class_;
  const Class& float32x4_class_;
  const Class& float64x2_class_;
  const Class& int32x4_class_;
  const Class& list_class_;

  // Currently instructions generate deopt stubs internally by calling
  // AddDeoptStub. To communicate the deoptimization environment that should
  // be used when deoptimizing, we store it in this variable.
  // In the future, AddDeoptStub should be moved out of the instruction
  // template.
  Environment* pending_deoptimization_env_;

  ZoneGrowableArray<const ICData*>* deopt_id_to_ic_data_;
  Array& edge_counters_array_;

  // Instruction currently running EmitNativeCode().
  Instruction* current_instruction_ = nullptr;

  DISALLOW_COPY_AND_ASSIGN(FlowGraphCompiler);
};

}  // namespace dart

#endif  // RUNTIME_VM_COMPILER_BACKEND_FLOW_GRAPH_COMPILER_H_