/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/nospec.h>

#include "i915_drv.h"
#include "i915_perf.h"
#include "i915_query.h"
#include "gt/intel_engine_user.h"
#include <uapi/drm/i915_drm.h>

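/*
 * Common helper for the query-item size protocol: a zero length asks for the
 * number of bytes the query needs, a too-short length is rejected, and
 * otherwise the userspace header is copied in for further validation.
 */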
static int copy_query_item(void *query_hdr, size_t query_sz,
			   u32 total_length,
			   struct drm_i915_query_item *query_item)
{
	if (query_item->length == 0)
		return total_length;

	if (query_item->length < total_length)
		return -EINVAL;

	if (copy_from_user(query_hdr, u64_to_user_ptr(query_item->data_ptr),
			   query_sz))
		return -EFAULT;

	return 0;
}

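/*
 * Write a topology blob to userspace: a struct drm_i915_query_topology_info
 * header immediately followed by the slice mask, the per-slice subslice masks
 * and the per-subslice EU masks, at the offsets and strides recorded in the
 * header.
 */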
static int fill_topology_info(const struct sseu_dev_info *sseu,
			      struct drm_i915_query_item *query_item,
			      intel_sseu_ss_mask_t subslice_mask)
{
	struct drm_i915_query_topology_info topo;
	u32 slice_length, subslice_length, eu_length, total_length;
	int ss_stride = GEN_SSEU_STRIDE(sseu->max_subslices);
	int eu_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice);
	int ret;

	BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask));

	if (sseu->max_slices == 0)
		return -ENODEV;

	slice_length = sizeof(sseu->slice_mask);
	subslice_length = sseu->max_slices * ss_stride;
	eu_length = sseu->max_slices * sseu->max_subslices * eu_stride;
	total_length = sizeof(topo) + slice_length + subslice_length +
		       eu_length;

	ret = copy_query_item(&topo, sizeof(topo), total_length, query_item);

	if (ret != 0)
		return ret;

	memset(&topo, 0, sizeof(topo));
	topo.max_slices = sseu->max_slices;
	topo.max_subslices = sseu->max_subslices;
	topo.max_eus_per_subslice = sseu->max_eus_per_subslice;

	topo.subslice_offset = slice_length;
	topo.subslice_stride = ss_stride;
	topo.eu_offset = slice_length + subslice_length;
	topo.eu_stride = eu_stride;

	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr),
			 &topo, sizeof(topo)))
		return -EFAULT;

	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo)),
			 &sseu->slice_mask, slice_length))
		return -EFAULT;

	if (intel_sseu_copy_ssmask_to_user(u64_to_user_ptr(query_item->data_ptr +
							   sizeof(topo) + slice_length),
					   sseu))
		return -EFAULT;

	if (intel_sseu_copy_eumask_to_user(u64_to_user_ptr(query_item->data_ptr +
							   sizeof(topo) +
							   slice_length + subslice_length),
					   sseu))
		return -EFAULT;

	return total_length;
}

static int query_topology_info(struct drm_i915_private *dev_priv,
			       struct drm_i915_query_item *query_item)
{
	const struct sseu_dev_info *sseu = &to_gt(dev_priv)->info.sseu;

	if (query_item->flags != 0)
		return -EINVAL;

	return fill_topology_info(sseu, query_item, sseu->subslice_mask);
}

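/*
 * DRM_I915_QUERY_GEOMETRY_SUBSLICES: the target engine is packed into the
 * query item's flags field as a struct i915_engine_class_instance, and only
 * the render engine is accepted. The reply reuses the topology layout above
 * with the geometry subslice mask.
 */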
static int query_geometry_subslices(struct drm_i915_private *i915,
				    struct drm_i915_query_item *query_item)
{
	const struct sseu_dev_info *sseu;
	struct intel_engine_cs *engine;
	struct i915_engine_class_instance classinstance;

	if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
		return -ENODEV;

	classinstance = *((struct i915_engine_class_instance *)&query_item->flags);

	engine = intel_engine_lookup_user(i915, (u8)classinstance.engine_class,
					  (u8)classinstance.engine_instance);

	if (!engine)
		return -EINVAL;

	if (engine->class != RENDER_CLASS)
		return -EINVAL;

	sseu = &engine->gt->info.sseu;

	return fill_topology_info(sseu, query_item, sseu->geometry_subslice_mask);
}

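/*
 * DRM_I915_QUERY_ENGINE_INFO: walk the uabi engine list twice, once to size
 * the reply and once to fill in a struct drm_i915_engine_info per engine.
 * Userspace must pass num_engines and the reserved fields as zero.
 */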
static int
query_engine_info(struct drm_i915_private *i915,
		  struct drm_i915_query_item *query_item)
{
	struct drm_i915_query_engine_info __user *query_ptr =
		u64_to_user_ptr(query_item->data_ptr);
	struct drm_i915_engine_info __user *info_ptr;
	struct drm_i915_query_engine_info query;
	struct drm_i915_engine_info info = { };
	unsigned int num_uabi_engines = 0;
	struct intel_engine_cs *engine;
	int len, ret;

	if (query_item->flags)
		return -EINVAL;

	for_each_uabi_engine(engine, i915)
		num_uabi_engines++;

	len = struct_size(query_ptr, engines, num_uabi_engines);

	ret = copy_query_item(&query, sizeof(query), len, query_item);
	if (ret != 0)
		return ret;

	if (query.num_engines || query.rsvd[0] || query.rsvd[1] ||
	    query.rsvd[2])
		return -EINVAL;

	info_ptr = &query_ptr->engines[0];

	for_each_uabi_engine(engine, i915) {
		info.engine.engine_class = engine->uabi_class;
		info.engine.engine_instance = engine->uabi_instance;
		info.flags = I915_ENGINE_INFO_HAS_LOGICAL_INSTANCE;
		info.capabilities = engine->uabi_capabilities;
		info.logical_instance = ilog2(engine->logical_mask);

		if (copy_to_user(info_ptr, &info, sizeof(info)))
			return -EFAULT;

		query.num_engines++;
		info_ptr++;
	}

	if (copy_to_user(query_ptr, &query, sizeof(query)))
		return -EFAULT;

	return len;
}

static int can_copy_perf_config_registers_or_number(u32 user_n_regs,
						    u64 user_regs_ptr,
						    u32 kernel_n_regs)
{
	/*
	 * We'll just put the number of registers, and won't copy the
	 * register.
	 */
	if (user_n_regs == 0)
		return 0;

	if (user_n_regs < kernel_n_regs)
		return -EINVAL;

	return 0;
}

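/*
 * Copy a register list to userspace as (mmio offset, value) u32 pairs, or
 * report only the number of registers when the caller asked for none. The
 * writes are batched under user_write_access_begin()/end() using
 * unsafe_put_user().
 */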
static int copy_perf_config_registers_or_number(const struct i915_oa_reg *kernel_regs,
						u32 kernel_n_regs,
						u64 user_regs_ptr,
						u32 *user_n_regs)
{
	u32 __user *p = u64_to_user_ptr(user_regs_ptr);
	u32 r;

	if (*user_n_regs == 0) {
		*user_n_regs = kernel_n_regs;
		return 0;
	}

	*user_n_regs = kernel_n_regs;

	if (!user_write_access_begin(p, 2 * sizeof(u32) * kernel_n_regs))
		return -EFAULT;

	for (r = 0; r < kernel_n_regs; r++, p += 2) {
		unsafe_put_user(i915_mmio_reg_offset(kernel_regs[r].addr),
				p, Efault);
		unsafe_put_user(kernel_regs[r].value, p + 1, Efault);
	}
	user_write_access_end();
	return 0;
Efault:
	user_write_access_end();
	return -EFAULT;
}

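/*
 * DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_{UUID,ID}: the userspace buffer holds a
 * struct drm_i915_query_perf_config immediately followed by a struct
 * drm_i915_perf_oa_config. The OA configuration is looked up either by its
 * UUID string or by its numeric id, and its boolean/flex/mux register lists
 * are copied back according to the sizes userspace advertised.
 */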
static int query_perf_config_data(struct drm_i915_private *i915,
				  struct drm_i915_query_item *query_item,
				  bool use_uuid)
{
	struct drm_i915_query_perf_config __user *user_query_config_ptr =
		u64_to_user_ptr(query_item->data_ptr);
	struct drm_i915_perf_oa_config __user *user_config_ptr =
		u64_to_user_ptr(query_item->data_ptr +
				sizeof(struct drm_i915_query_perf_config));
	struct drm_i915_perf_oa_config user_config;
	struct i915_perf *perf = &i915->perf;
	struct i915_oa_config *oa_config;
	char uuid[UUID_STRING_LEN + 1];
	u64 config_id;
	u32 flags, total_size;
	int ret;

	if (!perf->i915)
		return -ENODEV;

	total_size =
		sizeof(struct drm_i915_query_perf_config) +
		sizeof(struct drm_i915_perf_oa_config);

	if (query_item->length == 0)
		return total_size;

	if (query_item->length < total_size) {
		drm_dbg(&i915->drm,
			"Invalid query config data item size=%u expected=%u\n",
			query_item->length, total_size);
		return -EINVAL;
	}

	if (get_user(flags, &user_query_config_ptr->flags))
		return -EFAULT;

	if (flags != 0)
		return -EINVAL;

	if (use_uuid) {
		struct i915_oa_config *tmp;
		int id;

		BUILD_BUG_ON(sizeof(user_query_config_ptr->uuid) >= sizeof(uuid));

		memset(&uuid, 0, sizeof(uuid));
		if (copy_from_user(uuid, user_query_config_ptr->uuid,
				   sizeof(user_query_config_ptr->uuid)))
			return -EFAULT;

		oa_config = NULL;
		rcu_read_lock();
		idr_for_each_entry(&perf->metrics_idr, tmp, id) {
			if (!strcmp(tmp->uuid, uuid)) {
				oa_config = i915_oa_config_get(tmp);
				break;
			}
		}
		rcu_read_unlock();
	} else {
		if (get_user(config_id, &user_query_config_ptr->config))
			return -EFAULT;

		oa_config = i915_perf_get_oa_config(perf, config_id);
	}
	if (!oa_config)
		return -ENOENT;

	if (copy_from_user(&user_config, user_config_ptr, sizeof(user_config))) {
		ret = -EFAULT;
		goto out;
	}

	ret = can_copy_perf_config_registers_or_number(user_config.n_boolean_regs,
						       user_config.boolean_regs_ptr,
						       oa_config->b_counter_regs_len);
	if (ret)
		goto out;

	ret = can_copy_perf_config_registers_or_number(user_config.n_flex_regs,
						       user_config.flex_regs_ptr,
						       oa_config->flex_regs_len);
	if (ret)
		goto out;

	ret = can_copy_perf_config_registers_or_number(user_config.n_mux_regs,
						       user_config.mux_regs_ptr,
						       oa_config->mux_regs_len);
	if (ret)
		goto out;

	ret = copy_perf_config_registers_or_number(oa_config->b_counter_regs,
						   oa_config->b_counter_regs_len,
						   user_config.boolean_regs_ptr,
						   &user_config.n_boolean_regs);
	if (ret)
		goto out;

	ret = copy_perf_config_registers_or_number(oa_config->flex_regs,
						   oa_config->flex_regs_len,
						   user_config.flex_regs_ptr,
						   &user_config.n_flex_regs);
	if (ret)
		goto out;

	ret = copy_perf_config_registers_or_number(oa_config->mux_regs,
						   oa_config->mux_regs_len,
						   user_config.mux_regs_ptr,
						   &user_config.n_mux_regs);
	if (ret)
		goto out;

	memcpy(user_config.uuid, oa_config->uuid, sizeof(user_config.uuid));

	if (copy_to_user(user_config_ptr, &user_config, sizeof(user_config))) {
		ret = -EFAULT;
		goto out;
	}

	ret = total_size;

out:
	i915_oa_config_put(oa_config);
	return ret;
}

static size_t sizeof_perf_config_list(size_t count)
{
	return sizeof(struct drm_i915_query_perf_config) + sizeof(u64) * count;
}

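/*
 * Size of the perf config list reply: every user-registered OA config in the
 * metrics IDR plus one extra slot for the kernel's built-in test config,
 * which is reported with id 1 and does not live in the IDR.
 */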
static size_t sizeof_perf_metrics(struct i915_perf *perf)
{
	struct i915_oa_config *tmp;
	size_t i;
	int id;

	i = 1;
	rcu_read_lock();
	idr_for_each_entry(&perf->metrics_idr, tmp, id)
		i++;
	rcu_read_unlock();

	return sizeof_perf_config_list(i);
}

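/*
 * DRM_I915_QUERY_PERF_CONFIG_LIST: return the number of OA configurations in
 * the config field, followed by an array of u64 config ids that can be passed
 * back to DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID.
 */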
static int query_perf_config_list(struct drm_i915_private *i915,
				  struct drm_i915_query_item *query_item)
{
	struct drm_i915_query_perf_config __user *user_query_config_ptr =
		u64_to_user_ptr(query_item->data_ptr);
	struct i915_perf *perf = &i915->perf;
	u64 *oa_config_ids = NULL;
	int alloc, n_configs;
	u32 flags;
	int ret;

	if (!perf->i915)
		return -ENODEV;

	if (query_item->length == 0)
		return sizeof_perf_metrics(perf);

	if (get_user(flags, &user_query_config_ptr->flags))
		return -EFAULT;

	if (flags != 0)
		return -EINVAL;

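	/*
	 * Gather the config ids under RCU. New configs can be registered
	 * concurrently, so size the id array, walk the IDR, and retry with a
	 * larger allocation whenever more entries were found than fitted.
	 */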
	n_configs = 1;
	do {
		struct i915_oa_config *tmp;
		u64 *ids;
		int id;

		ids = krealloc(oa_config_ids,
			       n_configs * sizeof(*oa_config_ids),
			       GFP_KERNEL);
		if (!ids)
			return -ENOMEM;

		alloc = fetch_and_zero(&n_configs);

		ids[n_configs++] = 1ull; /* reserved for test_config */
		rcu_read_lock();
		idr_for_each_entry(&perf->metrics_idr, tmp, id) {
			if (n_configs < alloc)
				ids[n_configs] = id;
			n_configs++;
		}
		rcu_read_unlock();

		oa_config_ids = ids;
	} while (n_configs > alloc);

	if (query_item->length < sizeof_perf_config_list(n_configs)) {
		drm_dbg(&i915->drm,
			"Invalid query config list item size=%u expected=%zu\n",
			query_item->length,
			sizeof_perf_config_list(n_configs));
		kfree(oa_config_ids);
		return -EINVAL;
	}

	if (put_user(n_configs, &user_query_config_ptr->config)) {
		kfree(oa_config_ids);
		return -EFAULT;
	}

	ret = copy_to_user(user_query_config_ptr + 1,
			   oa_config_ids,
			   n_configs * sizeof(*oa_config_ids));
	kfree(oa_config_ids);
	if (ret)
		return -EFAULT;

	return sizeof_perf_config_list(n_configs);
}

static int query_perf_config(struct drm_i915_private *i915,
			     struct drm_i915_query_item *query_item)
{
	switch (query_item->flags) {
	case DRM_I915_QUERY_PERF_CONFIG_LIST:
		return query_perf_config_list(i915, query_item);
	case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID:
		return query_perf_config_data(i915, query_item, true);
	case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID:
		return query_perf_config_data(i915, query_item, false);
	default:
		return -EINVAL;
	}
}

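/*
 * DRM_I915_QUERY_MEMORY_REGIONS: report each non-private memory region with
 * its class/instance, probed size and CPU-visible size. Unallocated sizes are
 * only exposed to perfmon-capable callers; everyone else just gets the probed
 * sizes echoed back.
 */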
static int query_memregion_info(struct drm_i915_private *i915,
				struct drm_i915_query_item *query_item)
{
	struct drm_i915_query_memory_regions __user *query_ptr =
		u64_to_user_ptr(query_item->data_ptr);
	struct drm_i915_memory_region_info __user *info_ptr =
		&query_ptr->regions[0];
	struct drm_i915_memory_region_info info = { };
	struct drm_i915_query_memory_regions query;
	struct intel_memory_region *mr;
	u32 total_length;
	int ret, id, i;

	if (query_item->flags != 0)
		return -EINVAL;

	total_length = sizeof(query);
	for_each_memory_region(mr, i915, id) {
		if (mr->private)
			continue;

		total_length += sizeof(info);
	}

	ret = copy_query_item(&query, sizeof(query), total_length, query_item);
	if (ret != 0)
		return ret;

	if (query.num_regions)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(query.rsvd); i++) {
		if (query.rsvd[i])
			return -EINVAL;
	}

	for_each_memory_region(mr, i915, id) {
		if (mr->private)
			continue;

		info.region.memory_class = mr->type;
		info.region.memory_instance = mr->instance;
		info.probed_size = mr->total;

		if (mr->type == INTEL_MEMORY_LOCAL)
			info.probed_cpu_visible_size = mr->io_size;
		else
			info.probed_cpu_visible_size = mr->total;

		if (perfmon_capable()) {
			intel_memory_region_avail(mr,
						  &info.unallocated_size,
						  &info.unallocated_cpu_visible_size);
		} else {
			info.unallocated_size = info.probed_size;
			info.unallocated_cpu_visible_size =
				info.probed_cpu_visible_size;
		}

		if (__copy_to_user(info_ptr, &info, sizeof(info)))
			return -EFAULT;

		query.num_regions++;
		info_ptr++;
	}

	if (__copy_to_user(query_ptr, &query, sizeof(query)))
		return -EFAULT;

	return total_length;
}

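/*
 * DRM_I915_QUERY_HWCONFIG_BLOB: hand back the raw hardware configuration
 * table cached in gt->info.hwconfig (on recent platforms this is believed to
 * be the blob retrieved from the GuC firmware); -ENODEV if the platform does
 * not provide one.
 */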
static int query_hwconfig_blob(struct drm_i915_private *i915,
			       struct drm_i915_query_item *query_item)
{
	struct intel_gt *gt = to_gt(i915);
	struct intel_hwconfig *hwconfig = &gt->info.hwconfig;

	if (!hwconfig->size || !hwconfig->ptr)
		return -ENODEV;

	if (query_item->length == 0)
		return hwconfig->size;

	if (query_item->length < hwconfig->size)
		return -EINVAL;

	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr),
			 hwconfig->ptr, hwconfig->size))
		return -EFAULT;

	return hwconfig->size;
}

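/*
 * Dispatch table for i915_query_ioctl(): entry N handles query_id N + 1, so
 * the ordering here must match the DRM_I915_QUERY_* definitions in the uapi
 * header.
 */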
static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv,
					struct drm_i915_query_item *query_item) = {
	query_topology_info,
	query_engine_info,
	query_perf_config,
	query_memregion_info,
	query_hwconfig_blob,
	query_geometry_subslices,
};

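/*
 * DRM_IOCTL_I915_QUERY entry point: walk the userspace array of query items,
 * dispatch each one by query_id (Spectre-hardened with array_index_nospec),
 * and write the function's return value (required size, bytes written, or a
 * negative error code) back into the item's length field when it differs.
 */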
int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_query *args = data;
	struct drm_i915_query_item __user *user_item_ptr =
		u64_to_user_ptr(args->items_ptr);
	u32 i;

	if (args->flags != 0)
		return -EINVAL;

	for (i = 0; i < args->num_items; i++, user_item_ptr++) {
		struct drm_i915_query_item item;
		unsigned long func_idx;
		int ret;

		if (copy_from_user(&item, user_item_ptr, sizeof(item)))
			return -EFAULT;

		if (item.query_id == 0)
			return -EINVAL;

		if (overflows_type(item.query_id - 1, unsigned long))
			return -EINVAL;

		func_idx = item.query_id - 1;

		ret = -EINVAL;
		if (func_idx < ARRAY_SIZE(i915_query_funcs)) {
			func_idx = array_index_nospec(func_idx,
						      ARRAY_SIZE(i915_query_funcs));
			ret = i915_query_funcs[func_idx](dev_priv, &item);
		}

		/* Only write the length back to userspace if they differ. */
		if (ret != item.length && put_user(ret, &user_item_ptr->length))
			return -EFAULT;
	}

	return 0;
}