/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef INTEL_ENGINE_PM_H
#define INTEL_ENGINE_PM_H

#include "i915_drv.h"
#include "i915_request.h"
#include "intel_engine_types.h"
#include "intel_wakeref.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"

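/*
 * Check whether the engine currently holds an active wakeref, i.e. has not
 * been parked. This is only a point-in-time sample and takes no reference
 * of its own.
 */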
static inline bool
intel_engine_pm_is_awake(const struct intel_engine_cs *engine)
{
	return intel_wakeref_is_active(&engine->wakeref);
}

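/*
 * Take an additional wakeref on an engine that is already awake; the caller
 * must already hold a reference so the engine cannot be parked concurrently.
 */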
static inline void __intel_engine_pm_get(struct intel_engine_cs *engine)
{
	__intel_wakeref_get(&engine->wakeref);
}

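/*
 * Acquire a wakeref on the engine, waking it (and its GT) if it is
 * currently parked.
 */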
static inline void intel_engine_pm_get(struct intel_engine_cs *engine)
{
	intel_wakeref_get(&engine->wakeref);
}

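/*
 * Acquire a wakeref only if the engine is already awake; returns false
 * without waking the engine otherwise.
 */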
static inline bool intel_engine_pm_get_if_awake(struct intel_engine_cs *engine)
{
	return intel_wakeref_get_if_active(&engine->wakeref);
}

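/*
 * Annotate that this path may acquire an engine wakeref, without actually
 * taking one; for a virtual engine the annotation covers every physical
 * engine in its mask.
 */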
static inline void intel_engine_pm_might_get(struct intel_engine_cs *engine)
{
	if (!intel_engine_is_virtual(engine)) {
		intel_wakeref_might_get(&engine->wakeref);
	} else {
		struct intel_gt *gt = engine->gt;
		struct intel_engine_cs *tengine;
		intel_engine_mask_t tmp, mask = engine->mask;

		for_each_engine_masked(tengine, gt, mask, tmp)
			intel_wakeref_might_get(&tengine->wakeref);
	}
	intel_gt_pm_might_get(engine->gt);
}

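/*
 * Release a wakeref on the engine; dropping the last reference allows the
 * engine to be parked again.
 */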
static inline void intel_engine_pm_put(struct intel_engine_cs *engine)
{
	intel_wakeref_put(&engine->wakeref);
}

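/*
 * Release a wakeref from a context that cannot park the engine directly;
 * if this is the last reference, the final put is deferred to a worker.
 */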
static inline void intel_engine_pm_put_async(struct intel_engine_cs *engine)
{
	intel_wakeref_put_async(&engine->wakeref);
}

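/*
 * Release a wakeref but hold off any resulting parking of the engine by
 * @delay.
 */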
static inline void intel_engine_pm_put_delay(struct intel_engine_cs *engine,
					     unsigned long delay)
{
	intel_wakeref_put_delay(&engine->wakeref, delay);
}

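/*
 * Flush a pending release: wait for any concurrent park/unpark of the
 * engine to complete.
 */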
static inline void intel_engine_pm_flush(struct intel_engine_cs *engine)
{
	intel_wakeref_unlock_wait(&engine->wakeref);
}

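/*
 * Annotate that this path may release an engine wakeref; the counterpart
 * to intel_engine_pm_might_get().
 */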
static inline void intel_engine_pm_might_put(struct intel_engine_cs *engine)
{
	if (!intel_engine_is_virtual(engine)) {
		intel_wakeref_might_put(&engine->wakeref);
	} else {
		struct intel_gt *gt = engine->gt;
		struct intel_engine_cs *tengine;
		intel_engine_mask_t tmp, mask = engine->mask;

		for_each_engine_masked(tengine, gt, mask, tmp)
			intel_wakeref_might_put(&tengine->wakeref);
	}
	intel_gt_pm_might_put(engine->gt);
}

static inline struct i915_request *
intel_engine_create_kernel_request(struct intel_engine_cs *engine)
{
	struct i915_request *rq;

	/*
	 * The engine->kernel_context is special as it is used inside
	 * the engine-pm barrier (see __engine_park()), circumventing
	 * the usual mutexes and relying on the engine-pm barrier
	 * instead. So whenever we use the engine->kernel_context
	 * outside of the barrier, we must manually handle the
	 * engine wakeref to serialise with the use inside.
	 */
	intel_engine_pm_get(engine);
	rq = i915_request_create(engine->kernel_context);
	intel_engine_pm_put(engine);

	return rq;
}

void intel_engine_init__pm(struct intel_engine_cs *engine);

void intel_engine_reset_pinned_contexts(struct intel_engine_cs *engine);

#endif /* INTEL_ENGINE_PM_H */