1 | /* |
2 | * Copyright 2016 Advanced Micro Devices, Inc. |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice shall be included in |
12 | * all copies or substantial portions of the Software. |
13 | * |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | * |
22 | * Author: Monk.liu@amd.com |
23 | */ |
24 | #ifndef AMDGPU_VIRT_H |
25 | #define AMDGPU_VIRT_H |
26 | |
27 | #include "amdgv_sriovmsg.h" |
28 | |
/* virtualization capability bits kept in amdgpu_virt.caps */
#define AMDGPU_SRIOV_CAPS_SRIOV_VBIOS (1 << 0) /* vBIOS is sr-iov ready */
#define AMDGPU_SRIOV_CAPS_ENABLE_IOV (1 << 1) /* sr-iov is enabled on this GPU */
#define AMDGPU_SRIOV_CAPS_IS_VF (1 << 2) /* this GPU is a virtual function */
#define AMDGPU_PASSTHROUGH_MODE (1 << 3) /* the whole GPU is passed through to the VM */
#define AMDGPU_SRIOV_CAPS_RUNTIME (1 << 4) /* is out of full access mode */
#define AMDGPU_VF_MMIO_ACCESS_PROTECT (1 << 5) /* MMIO write access is not allowed in sriov runtime */
35 | |
/* flags for indirect register access path supported by rlcg for sriov;
 * the operation type is encoded in the top nibble (bits [31:28])
 */
#define AMDGPU_RLCG_GC_WRITE_LEGACY (0x8 << 28)
#define AMDGPU_RLCG_GC_WRITE (0x0 << 28)
#define AMDGPU_RLCG_GC_READ (0x1 << 28)
#define AMDGPU_RLCG_MMHUB_WRITE (0x2 << 28)

/* error code for indirect register access path supported by rlcg for sriov */
#define AMDGPU_RLCG_VFGATE_DISABLED 0x4000000
#define AMDGPU_RLCG_WRONG_OPERATION_TYPE 0x2000000
#define AMDGPU_RLCG_REG_NOT_IN_RANGE 0x1000000

/* low 20 bits of the scratch1 value carry a register address */
#define AMDGPU_RLCG_SCRATCH1_ADDRESS_MASK 0xFFFFF

/* IOV function identifier register offsets (dword offsets) */
/* all asic after AI use this offset */
#define mmRCC_IOV_FUNC_IDENTIFIER 0xDE5
/* tonga/fiji use this offset */
#define mmBIF_IOV_FUNC_IDENTIFIER 0x1503
53 | |
/* SR-IOV operating mode of the device, as seen by the guest driver */
enum amdgpu_sriov_vf_mode {
	SRIOV_VF_MODE_BARE_METAL = 0,	/* not running as a VF */
	SRIOV_VF_MODE_ONE_VF = 1,	/* single-VF configuration */
	SRIOV_VF_MODE_MULTI_VF = 2,	/* multi-VF configuration */
};
59 | |
/* a BO-backed table with both CPU and GPU addresses cached
 * (allocated/freed via amdgpu_virt_alloc_mm_table()/amdgpu_virt_free_mm_table())
 */
struct amdgpu_mm_table {
	struct amdgpu_bo *bo;	/* backing buffer object */
	uint32_t *cpu_addr;	/* CPU-side mapping of @bo */
	uint64_t gpu_addr;	/* GPU address of @bo */
};
65 | |
/* number of slots in each array of amdgpu_vf_error_buffer */
#define AMDGPU_VF_ERROR_ENTRY_SIZE 16

/* struct error_entry - amdgpu VF error information.
 * NOTE(review): read_count/write_count suggest the arrays are used as a
 * ring buffer — confirm against the producer/consumer code.
 */
struct amdgpu_vf_error_buffer {
	struct mutex lock;	/* serializes access to the fields below */
	int read_count;		/* entries consumed so far */
	int write_count;	/* entries recorded so far */
	uint16_t code[AMDGPU_VF_ERROR_ENTRY_SIZE];	/* error codes */
	uint16_t flags[AMDGPU_VF_ERROR_ENTRY_SIZE];	/* per-entry flags */
	uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE];	/* per-entry payload */
};
77 | |
78 | enum idh_request; |
79 | |
80 | /** |
81 | * struct amdgpu_virt_ops - amdgpu device virt operations |
82 | */ |
83 | struct amdgpu_virt_ops { |
84 | int (*req_full_gpu)(struct amdgpu_device *adev, bool init); |
85 | int (*rel_full_gpu)(struct amdgpu_device *adev, bool init); |
86 | int (*req_init_data)(struct amdgpu_device *adev); |
87 | int (*reset_gpu)(struct amdgpu_device *adev); |
88 | int (*wait_reset)(struct amdgpu_device *adev); |
89 | void (*trans_msg)(struct amdgpu_device *adev, enum idh_request req, |
90 | u32 data1, u32 data2, u32 data3); |
91 | void (*ras_poison_handler)(struct amdgpu_device *adev); |
92 | }; |
93 | |
94 | /* |
95 | * Firmware Reserve Frame buffer |
96 | */ |
97 | struct amdgpu_virt_fw_reserve { |
98 | struct amd_sriov_msg_pf2vf_info_header *p_pf2vf; |
99 | struct amd_sriov_msg_vf2pf_info_header *p_vf2pf; |
100 | unsigned int checksum_key; |
101 | }; |
102 | |
103 | /* |
104 | * Legacy GIM header |
105 | * |
106 | * Defination between PF and VF |
107 | * Structures forcibly aligned to 4 to keep the same style as PF. |
108 | */ |
109 | #define AMDGIM_DATAEXCHANGE_OFFSET (64 * 1024) |
110 | |
111 | #define AMDGIM_GET_STRUCTURE_RESERVED_SIZE(total, u8, u16, u32, u64) \ |
112 | (total - (((u8)+3) / 4 + ((u16)+1) / 2 + (u32) + (u64)*2)) |
113 | |
/* feature bits advertised by GIM (the host driver); uniform (1 << n) form */
enum AMDGIM_FEATURE_FLAG {
	/* GIM supports feature of Error log collecting */
	AMDGIM_FEATURE_ERROR_LOG_COLLECT = (1 << 0),
	/* GIM supports feature of loading uCodes */
	AMDGIM_FEATURE_GIM_LOAD_UCODES = (1 << 1),
	/* VRAM LOST by GIM */
	AMDGIM_FEATURE_GIM_FLR_VRAMLOST = (1 << 2),
	/* MM bandwidth */
	AMDGIM_FEATURE_GIM_MM_BW_MGR = (1 << 3),
	/* PP ONE VF MODE in GIM */
	AMDGIM_FEATURE_PP_ONE_VF = (1 << 4),
	/* Indirect Reg Access enabled */
	AMDGIM_FEATURE_INDIRECT_REG_ACCESS = (1 << 5),
	/* AV1 Support MODE */
	AMDGIM_FEATURE_AV1_SUPPORT = (1 << 6),
	/* VCN RB decouple */
	AMDGIM_FEATURE_VCN_RB_DECOUPLE = (1 << 7),
};
132 | |
/* per-block flags (amdgpu_virt.reg_access) selecting the indirect
 * register programming path; tested by amdgpu_sriov_reg_indirect_*()
 */
enum AMDGIM_REG_ACCESS_FLAG {
	/* Use PSP to program IH_RB_CNTL */
	AMDGIM_FEATURE_IH_REG_PSP_EN = (1 << 0),
	/* Use RLC to program MMHUB regs */
	AMDGIM_FEATURE_MMHUB_REG_RLC_EN = (1 << 1),
	/* Use RLC to program GC regs */
	AMDGIM_FEATURE_GC_REG_RLC_EN = (1 << 2),
};
141 | |
142 | struct amdgim_pf2vf_info_v1 { |
143 | /* header contains size and version */ |
144 | struct amd_sriov_msg_pf2vf_info_header ; |
145 | /* max_width * max_height */ |
146 | unsigned int uvd_enc_max_pixels_count; |
147 | /* 16x16 pixels/sec, codec independent */ |
148 | unsigned int uvd_enc_max_bandwidth; |
149 | /* max_width * max_height */ |
150 | unsigned int vce_enc_max_pixels_count; |
151 | /* 16x16 pixels/sec, codec independent */ |
152 | unsigned int vce_enc_max_bandwidth; |
153 | /* MEC FW position in kb from the start of visible frame buffer */ |
154 | unsigned int mecfw_kboffset; |
155 | /* The features flags of the GIM driver supports. */ |
156 | unsigned int feature_flags; |
157 | /* use private key from mailbox 2 to create chueksum */ |
158 | unsigned int checksum; |
159 | } __aligned(4); |
160 | |
161 | struct amdgim_vf2pf_info_v1 { |
162 | /* header contains size and version */ |
163 | struct amd_sriov_msg_vf2pf_info_header ; |
164 | /* driver version */ |
165 | char driver_version[64]; |
166 | /* driver certification, 1=WHQL, 0=None */ |
167 | unsigned int driver_cert; |
168 | /* guest OS type and version: need a define */ |
169 | unsigned int os_info; |
170 | /* in the unit of 1M */ |
171 | unsigned int fb_usage; |
172 | /* guest gfx engine usage percentage */ |
173 | unsigned int gfx_usage; |
174 | /* guest gfx engine health percentage */ |
175 | unsigned int gfx_health; |
176 | /* guest compute engine usage percentage */ |
177 | unsigned int compute_usage; |
178 | /* guest compute engine health percentage */ |
179 | unsigned int compute_health; |
180 | /* guest vce engine usage percentage. 0xffff means N/A. */ |
181 | unsigned int vce_enc_usage; |
182 | /* guest vce engine health percentage. 0xffff means N/A. */ |
183 | unsigned int vce_enc_health; |
184 | /* guest uvd engine usage percentage. 0xffff means N/A. */ |
185 | unsigned int uvd_enc_usage; |
186 | /* guest uvd engine usage percentage. 0xffff means N/A. */ |
187 | unsigned int uvd_enc_health; |
188 | unsigned int checksum; |
189 | } __aligned(4); |
190 | |
191 | struct amdgim_vf2pf_info_v2 { |
192 | /* header contains size and version */ |
193 | struct amd_sriov_msg_vf2pf_info_header ; |
194 | uint32_t checksum; |
195 | /* driver version */ |
196 | uint8_t driver_version[64]; |
197 | /* driver certification, 1=WHQL, 0=None */ |
198 | uint32_t driver_cert; |
199 | /* guest OS type and version: need a define */ |
200 | uint32_t os_info; |
201 | /* in the unit of 1M */ |
202 | uint32_t fb_usage; |
203 | /* guest gfx engine usage percentage */ |
204 | uint32_t gfx_usage; |
205 | /* guest gfx engine health percentage */ |
206 | uint32_t gfx_health; |
207 | /* guest compute engine usage percentage */ |
208 | uint32_t compute_usage; |
209 | /* guest compute engine health percentage */ |
210 | uint32_t compute_health; |
211 | /* guest vce engine usage percentage. 0xffff means N/A. */ |
212 | uint32_t vce_enc_usage; |
213 | /* guest vce engine health percentage. 0xffff means N/A. */ |
214 | uint32_t vce_enc_health; |
215 | /* guest uvd engine usage percentage. 0xffff means N/A. */ |
216 | uint32_t uvd_enc_usage; |
217 | /* guest uvd engine usage percentage. 0xffff means N/A. */ |
218 | uint32_t uvd_enc_health; |
219 | uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0, (12 + sizeof(struct amd_sriov_msg_vf2pf_info_header)/sizeof(uint32_t)), 0)]; |
220 | } __aligned(4); |
221 | |
/* bookkeeping for RAS bad-page handling on a VF */
struct amdgpu_virt_ras_err_handler_data {
	/* point to bad page records array */
	struct eeprom_table_record *bps;
	/* point to reserved bo array */
	struct amdgpu_bo **bps_bo;
	/* the count of entries */
	int count;
	/* last reserved entry's index + 1 */
	int last_reserved;
};
232 | |
/* GPU virtualization */
struct amdgpu_virt {
	uint32_t caps;		/* AMDGPU_SRIOV_CAPS_* / AMDGPU_PASSTHROUGH_MODE bits */
	struct amdgpu_bo *csa_obj;	/* CSA buffer object */
	void *csa_cpu_addr;	/* CPU mapping of @csa_obj */
	bool chained_ib_support;
	uint32_t reg_val_offs;	/* NOTE(review): looks like a scratch slot for KIQ reg reads — confirm */
	struct amdgpu_irq_src ack_irq;	/* host ack interrupt source */
	struct amdgpu_irq_src rcv_irq;	/* host message interrupt source */
	struct work_struct flr_work;	/* FLR (function level reset) worker */
	struct amdgpu_mm_table mm_table;
	const struct amdgpu_virt_ops *ops;	/* guest<->host control backend */
	struct amdgpu_vf_error_buffer vf_errors;
	struct amdgpu_virt_fw_reserve fw_reserve;	/* PF/VF exchange pointers */
	uint32_t gim_feature;	/* AMDGIM_FEATURE_FLAG bits from the host */
	uint32_t reg_access_mode;
	int req_init_data_ver;
	bool tdr_debug;		/* see amdgpu_sriov_is_debug()/is_normal() */
	struct amdgpu_virt_ras_err_handler_data *virt_eh_data;
	bool ras_init_done;
	uint32_t reg_access;	/* AMDGIM_REG_ACCESS_FLAG bits */

	/* vf2pf message */
	struct delayed_work vf2pf_work;
	uint32_t vf2pf_update_interval_ms;

	/* multimedia bandwidth config */
	bool is_mm_bw_enabled;
	uint32_t decode_max_dimension_pixels;
	uint32_t decode_max_frame_pixels;
	uint32_t encode_max_dimension_pixels;
	uint32_t encode_max_frame_pixels;

	/* the ucode id to signal the autoload */
	uint32_t autoload_ucode_id;
};
269 | |
270 | struct amdgpu_video_codec_info; |
271 | |
/* predicates over amdgpu_virt.caps */
#define amdgpu_sriov_enabled(adev) \
	((adev)->virt.caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV)

#define amdgpu_sriov_vf(adev) \
	((adev)->virt.caps & AMDGPU_SRIOV_CAPS_IS_VF)

#define amdgpu_sriov_bios(adev) \
	((adev)->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS)

#define amdgpu_sriov_runtime(adev) \
	((adev)->virt.caps & AMDGPU_SRIOV_CAPS_RUNTIME)

/* a VF that still holds full (exclusive) access */
#define amdgpu_sriov_fullaccess(adev) \
	(amdgpu_sriov_vf((adev)) && !amdgpu_sriov_runtime((adev)))

/* host granted indirect register access (gim_feature) */
#define amdgpu_sriov_reg_indirect_en(adev) \
	(amdgpu_sriov_vf((adev)) && \
	((adev)->virt.gim_feature & (AMDGIM_FEATURE_INDIRECT_REG_ACCESS)))

/* per-block indirect access enables (reg_access) */
#define amdgpu_sriov_reg_indirect_ih(adev) \
	(amdgpu_sriov_vf((adev)) && \
	((adev)->virt.reg_access & (AMDGIM_FEATURE_IH_REG_PSP_EN)))

#define amdgpu_sriov_reg_indirect_mmhub(adev) \
	(amdgpu_sriov_vf((adev)) && \
	((adev)->virt.reg_access & (AMDGIM_FEATURE_MMHUB_REG_RLC_EN)))

#define amdgpu_sriov_reg_indirect_gc(adev) \
	(amdgpu_sriov_vf((adev)) && \
	((adev)->virt.reg_access & (AMDGIM_FEATURE_GC_REG_RLC_EN)))

/* RLCG error reporting applies when RLC programs MMHUB or GC regs */
#define amdgpu_sriov_rlcg_error_report_enabled(adev) \
	(amdgpu_sriov_reg_indirect_mmhub(adev) || amdgpu_sriov_reg_indirect_gc(adev))

#define amdgpu_passthrough(adev) \
	((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE)

#define amdgpu_sriov_vf_mmio_access_protection(adev) \
	((adev)->virt.caps & AMDGPU_VF_MMIO_ACCESS_PROTECT)
311 | |
/*
 * is_virtual_machine - best-effort detection of running under a hypervisor.
 *
 * x86: check the CPUID hypervisor feature bit.
 * arm64: if the kernel is not running in hyp mode, a hypervisor sits above it.
 * Other architectures: assume bare metal.
 */
static inline bool is_virtual_machine(void)
{
	bool hypervisor_present = false;

#if defined(CONFIG_X86)
	hypervisor_present = boot_cpu_has(X86_FEATURE_HYPERVISOR);
#elif defined(CONFIG_ARM64)
	hypervisor_present = !is_kernel_in_hyp_mode();
#endif

	return hypervisor_present;
}
322 | |
/* host advertises PP one-VF mode */
#define amdgpu_sriov_is_pp_one_vf(adev) \
	((adev)->virt.gim_feature & AMDGIM_FEATURE_PP_ONE_VF)
/* TDR debug mode active and no reset in flight */
#define amdgpu_sriov_is_debug(adev) \
	((!amdgpu_in_reset(adev)) && adev->virt.tdr_debug)
/* normal operation: no reset in flight and TDR debug off */
#define amdgpu_sriov_is_normal(adev) \
	((!amdgpu_in_reset(adev)) && (!adev->virt.tdr_debug))
/* host advertises AV1 support */
#define amdgpu_sriov_is_av1_support(adev) \
	((adev)->virt.gim_feature & AMDGIM_FEATURE_AV1_SUPPORT)
/* host advertises VCN ring-buffer decoupling */
#define amdgpu_sriov_is_vcn_rb_decouple(adev) \
	((adev)->virt.gim_feature & AMDGIM_FEATURE_VCN_RB_DECOUPLE)
333 | bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev); |
334 | void amdgpu_virt_init_setting(struct amdgpu_device *adev); |
335 | void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev, |
336 | uint32_t reg0, uint32_t rreg1, |
337 | uint32_t ref, uint32_t mask); |
338 | int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init); |
339 | int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init); |
340 | int amdgpu_virt_reset_gpu(struct amdgpu_device *adev); |
341 | void amdgpu_virt_request_init_data(struct amdgpu_device *adev); |
342 | int amdgpu_virt_wait_reset(struct amdgpu_device *adev); |
343 | int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev); |
344 | void amdgpu_virt_free_mm_table(struct amdgpu_device *adev); |
345 | void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev); |
346 | void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev); |
347 | void amdgpu_virt_exchange_data(struct amdgpu_device *adev); |
348 | void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev); |
349 | void amdgpu_detect_virtualization(struct amdgpu_device *adev); |
350 | |
351 | bool amdgpu_virt_can_access_debugfs(struct amdgpu_device *adev); |
352 | int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev); |
353 | void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev); |
354 | |
355 | enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev); |
356 | |
357 | void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev, |
358 | struct amdgpu_video_codec_info *encode, uint32_t encode_array_size, |
359 | struct amdgpu_video_codec_info *decode, uint32_t decode_array_size); |
360 | void amdgpu_sriov_wreg(struct amdgpu_device *adev, |
361 | u32 offset, u32 value, |
362 | u32 acc_flags, u32 hwip, u32 xcc_id); |
363 | u32 amdgpu_sriov_rreg(struct amdgpu_device *adev, |
364 | u32 offset, u32 acc_flags, u32 hwip, u32 xcc_id); |
365 | bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, |
366 | uint32_t ucode_id); |
367 | void amdgpu_virt_post_reset(struct amdgpu_device *adev); |
368 | #endif |
369 | |