/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Eddie Dong <eddie.dong@intel.com>
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Changbin Du <changbin.du@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

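/*
 * Engine mmio context switch: save and restore the tracked engine
 * registers, the MOCS tables and any pending TLB invalidation when an
 * engine is handed over between vGPUs, or between a vGPU and the host.
 */
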
#include "i915_drv.h"
#include "i915_reg.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_ring.h"
#include "gvt.h"
#include "trace.h"

#define GEN9_MOCS_SIZE 64

/* An mmio tracked and switched per engine during a vGPU context switch. */
struct engine_mmio {
	enum intel_engine_id id;	/* engine that owns the register */
	i915_reg_t reg;
	u32 mask;		/* write-enable mask for masked registers, 0 if unused */
	bool in_context;	/* register lives in the context state image */
	u32 value;		/* host value saved when the engine is taken from the host */
};

/* The raw mmio offset is appended to each entry for convenience. */
static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
	{RCS0, RING_MODE_GEN7(RENDER_RING_BASE), 0xffff, false}, /* 0x229c */
	{RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
	{RCS0, HWSTAM, 0x0, false}, /* 0x2098 */
	{RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
	{RCS0, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
	{RCS0, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
	{RCS0, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
	{RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
	{RCS0, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
	{RCS0, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */

	{BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
	{BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
	{BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
	{BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
	{BCS0, RING_EXCC(BLT_RING_BASE), 0xffff, false}, /* 0x22028 */
	{RCS0, INVALID_MMIO_REG, 0, false } /* Terminated */
};

static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
	{RCS0, RING_MODE_GEN7(RENDER_RING_BASE), 0xffff, false}, /* 0x229c */
	{RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
	{RCS0, HWSTAM, 0x0, false}, /* 0x2098 */
	{RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
	{RCS0, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
	{RCS0, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
	{RCS0, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
	{RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
	{RCS0, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
	{RCS0, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */

	{RCS0, GEN8_PRIVATE_PAT_LO, 0, false}, /* 0x40e0 */
	{RCS0, GEN8_PRIVATE_PAT_HI, 0, false}, /* 0x40e4 */
	{RCS0, GEN8_CS_CHICKEN1, 0xffff, true}, /* 0x2580 */
	{RCS0, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */
	{RCS0, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */
	{RCS0, _MMIO(0xb118), 0, false}, /* GEN8_L3SQCREG4 */
	{RCS0, _MMIO(0xb11c), 0, false}, /* GEN9_SCRATCH1 */
	{RCS0, GEN9_SCRATCH_LNCF1, 0, false}, /* 0xb008 */
	{RCS0, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */
	{RCS0, _MMIO(0xe180), 0xffff, true}, /* HALF_SLICE_CHICKEN2 */
	{RCS0, _MMIO(0xe184), 0xffff, true}, /* GEN8_HALF_SLICE_CHICKEN3 */
	{RCS0, _MMIO(0xe188), 0xffff, true}, /* GEN9_HALF_SLICE_CHICKEN5 */
	{RCS0, _MMIO(0xe194), 0xffff, true}, /* GEN9_HALF_SLICE_CHICKEN7 */
	{RCS0, _MMIO(0xe4f0), 0xffff, true}, /* GEN8_ROW_CHICKEN */
	{RCS0, TRVATTL3PTRDW(0), 0, true}, /* 0x4de0 */
	{RCS0, TRVATTL3PTRDW(1), 0, true}, /* 0x4de4 */
	{RCS0, TRNULLDETCT, 0, true}, /* 0x4de8 */
	{RCS0, TRINVTILEDETCT, 0, true}, /* 0x4dec */
	{RCS0, TRVADR, 0, true}, /* 0x4df0 */
	{RCS0, TRTTE, 0, true}, /* 0x4df4 */
	{RCS0, _MMIO(0x4dfc), 0, true},

	{BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
	{BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
	{BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
	{BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
	{BCS0, RING_EXCC(BLT_RING_BASE), 0xffff, false}, /* 0x22028 */

	{VCS1, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */

	{VECS0, RING_EXCC(VEBOX_RING_BASE), 0xffff, false}, /* 0x1a028 */

	{RCS0, GEN8_HDC_CHICKEN1, 0xffff, true}, /* 0x7304 */
	{RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
	{RCS0, GEN7_UCGCTL4, 0x0, false}, /* 0x940c */
	{RCS0, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */

	{RCS0, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
	{RCS0, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
	{RCS0, _MMIO(0x20D8), 0xffff, true}, /* 0x20d8 */

	{RCS0, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
	{RCS0, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
	{RCS0, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */
	{RCS0, INVALID_MMIO_REG, 0, false } /* Terminated */
};

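/*
 * Host-side snapshot of the gen9 MOCS programming, filled lazily by
 * load_render_mocs() the first time an engine is taken away from the host.
 * There are GEN9_MOCS_SIZE (64) control entries per engine; the shared l3cc
 * table packs two 16-bit entries per 32-bit register, so it needs only
 * GEN9_MOCS_SIZE / 2 slots.
 */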
static struct {
	bool initialized;
	u32 control_table[I915_NUM_ENGINES][GEN9_MOCS_SIZE];
	u32 l3cc_table[GEN9_MOCS_SIZE / 2];
} gen9_render_mocs;

static u32 gen9_mocs_mmio_offset_list[] = {
	[RCS0]  = 0xc800,
	[VCS0]  = 0xc900,
	[VCS1]  = 0xca00,
	[BCS0]  = 0xcc00,
	[VECS0] = 0xcb00,
};

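/*
 * Read back the host's MOCS control table for each present engine, plus the
 * shared l3cc table at 0xb020, so they can be restored when an engine is
 * handed back to the host. Raw _fw accessors are used for speed; the caller
 * already holds forcewake, since intel_gvt_switch_mmio() wraps the whole
 * switch in FORCEWAKE_ALL.
 */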
static void load_render_mocs(const struct intel_engine_cs *engine)
{
	struct intel_gvt *gvt = engine->i915->gvt;
	struct intel_uncore *uncore = engine->uncore;
	u32 cnt = gvt->engine_mmio_list.mocs_mmio_offset_list_cnt;
	u32 *regs = gvt->engine_mmio_list.mocs_mmio_offset_list;
	i915_reg_t offset;
	int ring_id, i;

	/* Platform doesn't have mocs mmios. */
	if (!regs)
		return;

	for (ring_id = 0; ring_id < cnt; ring_id++) {
		if (!HAS_ENGINE(engine->gt, ring_id))
			continue;

		offset.reg = regs[ring_id];
		for (i = 0; i < GEN9_MOCS_SIZE; i++) {
			gen9_render_mocs.control_table[ring_id][i] =
				intel_uncore_read_fw(uncore, offset);
			offset.reg += 4;
		}
	}

	offset.reg = 0xb020;
	for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
		gen9_render_mocs.l3cc_table[i] =
			intel_uncore_read_fw(uncore, offset);
		offset.reg += 4;
	}
	gen9_render_mocs.initialized = true;
}

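/*
 * The helpers below rebuild register state for a "context restore
 * inhibited" context by emitting MI_LOAD_REGISTER_IMM (LRI) packets into
 * the request's ring: one header dword followed by <offset, value> dword
 * pairs, so restoring count registers takes count * 2 + 2 dwords including
 * the trailing MI_NOOP that keeps the emission an even number of dwords.
 * For masked registers the value is OR'ed with (mask << 16), so that only
 * the masked bits take effect on the hardware.
 */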
static int
restore_context_mmio_for_inhibit(struct intel_vgpu *vgpu,
				 struct i915_request *req)
{
	u32 *cs;
	int ret;
	struct engine_mmio *mmio;
	struct intel_gvt *gvt = vgpu->gvt;
	int ring_id = req->engine->id;
	int count = gvt->engine_mmio_list.ctx_mmio_count[ring_id];

	if (count == 0)
		return 0;

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	cs = intel_ring_begin(req, count * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(count);
	for (mmio = gvt->engine_mmio_list.mmio;
	     i915_mmio_reg_valid(mmio->reg); mmio++) {
		if (mmio->id != ring_id || !mmio->in_context)
			continue;

		*cs++ = i915_mmio_reg_offset(mmio->reg);
		*cs++ = vgpu_vreg_t(vgpu, mmio->reg) | (mmio->mask << 16);
		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
			     *(cs-2), *(cs-1), vgpu->id, ring_id);
	}

	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	return 0;
}

static int
restore_render_mocs_control_for_inhibit(struct intel_vgpu *vgpu,
					struct i915_request *req)
{
	unsigned int index;
	u32 *cs;

	cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE);

	for (index = 0; index < GEN9_MOCS_SIZE; index++) {
		*cs++ = i915_mmio_reg_offset(GEN9_GFX_MOCS(index));
		*cs++ = vgpu_vreg_t(vgpu, GEN9_GFX_MOCS(index));
		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
			     *(cs-2), *(cs-1), vgpu->id, req->engine->id);
	}

	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}

static int
restore_render_mocs_l3cc_for_inhibit(struct intel_vgpu *vgpu,
				     struct i915_request *req)
{
	unsigned int index;
	u32 *cs;

	cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE / 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE / 2);

	for (index = 0; index < GEN9_MOCS_SIZE / 2; index++) {
		*cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(index));
		*cs++ = vgpu_vreg_t(vgpu, GEN9_LNCFCMOCS(index));
		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
			     *(cs-2), *(cs-1), vgpu->id, req->engine->id);
	}

	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}

/*
 * Use LRI commands to initialize the mmio registers that live in the
 * context state image of an inhibit context: the tracked in-context
 * engine mmios, the render MOCS control table and the render MOCS l3cc
 * table.
 */
int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
				       struct i915_request *req)
{
	int ret;
	u32 *cs;

	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	ret = restore_context_mmio_for_inhibit(vgpu, req);
	if (ret)
		goto out;

	/* only the render engine carries MOCS registers in its context */
	if (req->engine->id != RCS0)
		goto out;

	ret = restore_render_mocs_control_for_inhibit(vgpu, req);
	if (ret)
		goto out;

	ret = restore_render_mocs_l3cc_for_inhibit(vgpu, req);
	if (ret)
		goto out;

out:
	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return ret;
}

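/*
 * Per-engine TLB invalidation control registers on gen8+ (these offsets
 * correspond to the GEN8_RTCR family). Writing 1 requests an invalidation
 * and the hardware clears the register back to 0 once it completes, which
 * handle_tlb_pending_event() below polls for.
 */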
static u32 gen8_tlb_mmio_offset_list[] = {
	[RCS0]  = 0x4260,
	[VCS0]  = 0x4264,
	[VCS1]  = 0x4268,
	[BCS0]  = 0x426c,
	[VECS0] = 0x4270,
};

static void handle_tlb_pending_event(struct intel_vgpu *vgpu,
				     const struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	struct intel_vgpu_submission *s = &vgpu->submission;
	u32 *regs = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list;
	u32 cnt = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list_cnt;
	enum forcewake_domains fw;
	i915_reg_t reg;

	if (!regs)
		return;

	if (drm_WARN_ON(&engine->i915->drm, engine->id >= cnt))
		return;

	if (!test_and_clear_bit(engine->id, (void *)s->tlb_handle_pending))
		return;

	reg = _MMIO(regs[engine->id]);

	/*
	 * WaForceWakeRenderDuringMmioTLBInvalidate:skl
	 * We need to hold render forcewake while invalidating the RCS TLB;
	 * otherwise the device can enter RC6 and interrupt the invalidation.
	 */
	fw = intel_uncore_forcewake_for_reg(uncore, reg,
					    FW_REG_READ | FW_REG_WRITE);
	if (engine->id == RCS0 && GRAPHICS_VER(engine->i915) >= 9)
		fw |= FORCEWAKE_RENDER;

	intel_uncore_forcewake_get(uncore, fw);

	intel_uncore_write_fw(uncore, reg, 0x1);

	if (wait_for_atomic(intel_uncore_read_fw(uncore, reg) == 0, 50))
		gvt_vgpu_err("timeout in invalidating ring %s tlb\n",
			     engine->name);
	else
		vgpu_vreg_t(vgpu, reg) = 0;

	intel_uncore_forcewake_put(uncore, fw);

	gvt_dbg_core("invalidate TLB for ring %s\n", engine->name);
}

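/*
 * Switch the MOCS programming between two owners of an engine. Old and new
 * values come from each side's virtual registers, with the host snapshot
 * captured by load_render_mocs() standing in for a NULL owner; only entries
 * whose value actually changes are written, to keep switch-time mmio
 * traffic low. On gen9 the render engine's MOCS live in the context image
 * and are handled by the inhibit-context LRI path instead, hence the RCS0
 * early return below.
 */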
static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
			const struct intel_engine_cs *engine)
{
	u32 regs[] = {
		[RCS0]  = 0xc800,
		[VCS0]  = 0xc900,
		[VCS1]  = 0xca00,
		[BCS0]  = 0xcc00,
		[VECS0] = 0xcb00,
	};
	struct intel_uncore *uncore = engine->uncore;
	i915_reg_t offset, l3_offset;
	u32 old_v, new_v;
	int i;

	if (drm_WARN_ON(&engine->i915->drm, engine->id >= ARRAY_SIZE(regs)))
		return;

	if (engine->id == RCS0 && GRAPHICS_VER(engine->i915) == 9)
		return;

	if (!pre && !gen9_render_mocs.initialized)
		load_render_mocs(engine);

	offset.reg = regs[engine->id];
	for (i = 0; i < GEN9_MOCS_SIZE; i++) {
		if (pre)
			old_v = vgpu_vreg_t(pre, offset);
		else
			old_v = gen9_render_mocs.control_table[engine->id][i];
		if (next)
			new_v = vgpu_vreg_t(next, offset);
		else
			new_v = gen9_render_mocs.control_table[engine->id][i];

		if (old_v != new_v)
			intel_uncore_write_fw(uncore, offset, new_v);

		offset.reg += 4;
	}

	if (engine->id == RCS0) {
		l3_offset.reg = 0xb020;
		for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
			if (pre)
				old_v = vgpu_vreg_t(pre, l3_offset);
			else
				old_v = gen9_render_mocs.l3cc_table[i];
			if (next)
				new_v = vgpu_vreg_t(next, l3_offset);
			else
				new_v = gen9_render_mocs.l3cc_table[i];

			if (old_v != new_v)
				intel_uncore_write_fw(uncore, l3_offset, new_v);

			l3_offset.reg += 4;
		}
	}
}

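/*
 * CTX_CONTEXT_CONTROL is a masked register whose value sits at dword 0x03
 * of the logical ring context state. _MASKED_BIT_ENABLE(x) expands to
 * ((x) << 16 | (x)), and CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT is bit 0, so
 * the inhibit mask works out to 0x00010001: both the value bit and its
 * write-enable bit must be set in the saved image for the context to count
 * as inhibited.
 */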
#define CTX_CONTEXT_CONTROL_VAL	0x03

bool is_inhibit_context(struct intel_context *ce)
{
	const u32 *reg_state = ce->lrc_reg_state;
	u32 inhibit_mask =
		_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);

	return inhibit_mask ==
		(reg_state[CTX_CONTEXT_CONTROL_VAL] & inhibit_mask);
}

/* Switch the ring (engine) mmio values between two owners (context). */
static void switch_mmio(struct intel_vgpu *pre,
			struct intel_vgpu *next,
			const struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	struct intel_vgpu_submission *s;
	struct engine_mmio *mmio;
	u32 old_v, new_v;

	if (GRAPHICS_VER(engine->i915) >= 9)
		switch_mocs(pre, next, engine);

	for (mmio = engine->i915->gvt->engine_mmio_list.mmio;
	     i915_mmio_reg_valid(mmio->reg); mmio++) {
		if (mmio->id != engine->id)
			continue;
		/*
		 * No need to save or restore an mmio that lives in the
		 * context state image on gen9: it is initialized by LRI
		 * commands and saved/restored together with the context.
		 */
		if (GRAPHICS_VER(engine->i915) == 9 && mmio->in_context)
			continue;

		/* save the outgoing owner's value */
		if (pre) {
			vgpu_vreg_t(pre, mmio->reg) =
				intel_uncore_read_fw(uncore, mmio->reg);
			if (mmio->mask)
				vgpu_vreg_t(pre, mmio->reg) &=
					~(mmio->mask << 16);
			old_v = vgpu_vreg_t(pre, mmio->reg);
		} else {
			old_v = mmio->value =
				intel_uncore_read_fw(uncore, mmio->reg);
		}

		/* restore the incoming owner's value */
		if (next) {
			s = &next->submission;
			/*
			 * No need to restore an in-context mmio unless this
			 * is an inhibit context; otherwise the context image
			 * restores it by itself.
			 */
			if (mmio->in_context &&
			    !is_inhibit_context(s->shadow[engine->id]))
				continue;

			if (mmio->mask)
				new_v = vgpu_vreg_t(next, mmio->reg) |
					(mmio->mask << 16);
			else
				new_v = vgpu_vreg_t(next, mmio->reg);
		} else {
			if (mmio->in_context)
				continue;
			if (mmio->mask)
				new_v = mmio->value | (mmio->mask << 16);
			else
				new_v = mmio->value;
		}

		intel_uncore_write_fw(uncore, mmio->reg, new_v);

		trace_render_mmio(pre ? pre->id : 0,
				  next ? next->id : 0,
				  "switch",
				  i915_mmio_reg_offset(mmio->reg),
				  old_v, new_v);
	}

	if (next)
		handle_tlb_pending_event(next, engine);
}

/**
 * intel_gvt_switch_mmio - switch the mmio context of a specific engine
 * @pre: the last vGPU that owned the engine
 * @next: the vGPU to switch to
 * @engine: the engine
 *
 * A NULL @pre indicates that the engine was owned by the host; a NULL
 * @next indicates that we are switching to a host workload.
 */
void intel_gvt_switch_mmio(struct intel_vgpu *pre,
			   struct intel_vgpu *next,
			   const struct intel_engine_cs *engine)
{
	if (WARN(!pre && !next, "switch ring %s from host to HOST\n",
		 engine->name))
		return;

	gvt_dbg_render("switch ring %s from %s to %s\n", engine->name,
		       pre ? "vGPU" : "host", next ? "vGPU" : "HOST");

	/*
	 * We use the raw mmio access wrappers to improve performance of
	 * batched mmio reads/writes, so forcewake must be handled manually.
	 */
	intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
	switch_mmio(pre, next, engine);
	intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
}

/**
 * intel_gvt_init_engine_mmio_context - initialize the engine mmio list
 * @gvt: GVT device
 */
void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
{
	struct engine_mmio *mmio;

	if (GRAPHICS_VER(gvt->gt->i915) >= 9) {
		gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
		gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list;
		gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list);
		gvt->engine_mmio_list.mocs_mmio_offset_list = gen9_mocs_mmio_offset_list;
		gvt->engine_mmio_list.mocs_mmio_offset_list_cnt = ARRAY_SIZE(gen9_mocs_mmio_offset_list);
	} else {
		gvt->engine_mmio_list.mmio = gen8_engine_mmio_list;
		gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list;
		gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list);
	}

	for (mmio = gvt->engine_mmio_list.mmio;
	     i915_mmio_reg_valid(mmio->reg); mmio++) {
		if (mmio->in_context) {
			gvt->engine_mmio_list.ctx_mmio_count[mmio->id]++;
			intel_gvt_mmio_set_sr_in_ctx(gvt, mmio->reg.reg);
		}
	}
}