// SPDX-License-Identifier: MIT
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 */

#include <linux/device.h>
#include <linux/tee.h>
#include <linux/tee_drv.h>
#include <linux/psp-tee.h>
#include <linux/slab.h>
#include <linux/psp.h>
#include "amdtee_if.h"
#include "amdtee_private.h"

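/*
 * Convert TEE client parameters to the AMD-TEE format passed to the PSP.
 * Value parameters are copied into 'val' (field 'c' is discarded with a
 * warning) and memory references are translated to the buffer id of the
 * registered shared memory plus offset and size.
 */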
static int tee_params_to_amd_params(struct tee_param *tee, u32 count,
				    struct tee_operation *amd)
{
	int i, ret = 0;
	u32 type;

	if (!count)
		return 0;

	if (!tee || !amd || count > TEE_MAX_PARAMS)
		return -EINVAL;

	amd->param_types = 0;
	for (i = 0; i < count; i++) {
		/* AMD TEE does not support meta parameter */
		if (tee[i].attr > TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT)
			return -EINVAL;

		amd->param_types |= ((tee[i].attr & 0xF) << i * 4);
	}

	for (i = 0; i < count; i++) {
		type = TEE_PARAM_TYPE_GET(amd->param_types, i);
		pr_debug("%s: type[%d] = 0x%x\n", __func__, i, type);

		if (type == TEE_OP_PARAM_TYPE_INVALID)
			return -EINVAL;

		if (type == TEE_OP_PARAM_TYPE_NONE)
			continue;

		/* It is assumed that all values are within 2^32-1 */
		if (type > TEE_OP_PARAM_TYPE_VALUE_INOUT) {
			u32 buf_id = get_buffer_id(tee[i].u.memref.shm);

			amd->params[i].mref.buf_id = buf_id;
			amd->params[i].mref.offset = tee[i].u.memref.shm_offs;
			amd->params[i].mref.size = tee[i].u.memref.size;
			pr_debug("%s: bufid[%d] = 0x%x, offset[%d] = 0x%x, size[%d] = 0x%x\n",
				 __func__,
				 i, amd->params[i].mref.buf_id,
				 i, amd->params[i].mref.offset,
				 i, amd->params[i].mref.size);
		} else {
			if (tee[i].u.value.c)
				pr_warn("%s: Discarding value c\n", __func__);

			amd->params[i].val.a = tee[i].u.value.a;
			amd->params[i].val.b = tee[i].u.value.b;
			pr_debug("%s: a[%d] = 0x%x, b[%d] = 0x%x\n", __func__,
				 i, amd->params[i].val.a,
				 i, amd->params[i].val.b);
		}
	}
	return ret;
}

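/*
 * Copy output and inout parameters returned by the PSP back into the TEE
 * client parameters. Input-only and 'none' parameters are left untouched.
 */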
static int amd_params_to_tee_params(struct tee_param *tee, u32 count,
				    struct tee_operation *amd)
{
	int i, ret = 0;
	u32 type;

	if (!count)
		return 0;

	if (!tee || !amd || count > TEE_MAX_PARAMS)
		return -EINVAL;

	/* Assumes amd->param_types is valid */
	for (i = 0; i < count; i++) {
		type = TEE_PARAM_TYPE_GET(amd->param_types, i);
		pr_debug("%s: type[%d] = 0x%x\n", __func__, i, type);

		if (type == TEE_OP_PARAM_TYPE_INVALID ||
		    type > TEE_OP_PARAM_TYPE_MEMREF_INOUT)
			return -EINVAL;

		if (type == TEE_OP_PARAM_TYPE_NONE ||
		    type == TEE_OP_PARAM_TYPE_VALUE_INPUT ||
		    type == TEE_OP_PARAM_TYPE_MEMREF_INPUT)
			continue;

		/*
		 * It is assumed that buf_id remains unchanged for
		 * both open_session and invoke_cmd call
		 */
		if (type > TEE_OP_PARAM_TYPE_MEMREF_INPUT) {
			tee[i].u.memref.shm_offs = amd->params[i].mref.offset;
			tee[i].u.memref.size = amd->params[i].mref.size;
			pr_debug("%s: bufid[%d] = 0x%x, offset[%d] = 0x%x, size[%d] = 0x%x\n",
				 __func__,
				 i, amd->params[i].mref.buf_id,
				 i, amd->params[i].mref.offset,
				 i, amd->params[i].mref.size);
		} else {
			/* field 'c' not supported by AMD TEE */
			tee[i].u.value.a = amd->params[i].val.a;
			tee[i].u.value.b = amd->params[i].val.b;
			tee[i].u.value.c = 0;
			pr_debug("%s: a[%d] = 0x%x, b[%d] = 0x%x\n",
				 __func__,
				 i, amd->params[i].val.a,
				 i, amd->params[i].val.b);
		}
	}
	return ret;
}

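/*
 * List of loaded Trusted Applications with a per-TA reference count,
 * serialized by ta_refcount_mutex.
 */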
static DEFINE_MUTEX(ta_refcount_mutex);
static LIST_HEAD(ta_list);

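/*
 * Take a reference on the TA identified by ta_handle, allocating a new
 * tracking entry on first use. Returns the new reference count, or 0 if
 * the allocation failed. Must be called with ta_refcount_mutex held.
 */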
static u32 get_ta_refcount(u32 ta_handle)
{
	struct amdtee_ta_data *ta_data;
	u32 count = 0;

	/* Caller must hold a mutex */
	list_for_each_entry(ta_data, &ta_list, list_node)
		if (ta_data->ta_handle == ta_handle)
			return ++ta_data->refcount;

	ta_data = kzalloc(sizeof(*ta_data), GFP_KERNEL);
	if (ta_data) {
		ta_data->ta_handle = ta_handle;
		ta_data->refcount = 1;
		count = ta_data->refcount;
		list_add(&ta_data->list_node, &ta_list);
	}

	return count;
}

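/*
 * Drop a reference on the TA identified by ta_handle and return the
 * remaining count. The tracking entry is freed when the count reaches
 * zero. Must be called with ta_refcount_mutex held.
 */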
static u32 put_ta_refcount(u32 ta_handle)
{
	struct amdtee_ta_data *ta_data;
	u32 count = 0;

	/* Caller must hold a mutex */
	list_for_each_entry(ta_data, &ta_list, list_node)
		if (ta_data->ta_handle == ta_handle) {
			count = --ta_data->refcount;
			if (count == 0) {
				list_del(&ta_data->list_node);
				kfree(ta_data);
				break;
			}
		}

	return count;
}

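/*
 * Unload a Trusted Application from the PSP once its last reference is
 * dropped. Returns -EBUSY while other sessions still reference the TA or
 * if the firmware reports a failure.
 */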
int handle_unload_ta(u32 ta_handle)
{
	struct tee_cmd_unload_ta cmd = {0};
	u32 status, count;
	int ret;

	if (!ta_handle)
		return -EINVAL;

	mutex_lock(&ta_refcount_mutex);

	count = put_ta_refcount(ta_handle);

	if (count) {
		pr_debug("unload ta: not unloading %u count %u\n",
			 ta_handle, count);
		ret = -EBUSY;
		goto unlock;
	}

	cmd.ta_handle = ta_handle;

	ret = psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA, (void *)&cmd,
				  sizeof(cmd), &status);
	if (!ret && status != 0) {
		pr_err("unload ta: status = 0x%x\n", status);
		ret = -EBUSY;
	} else {
		pr_debug("unloaded ta handle %u\n", ta_handle);
	}

unlock:
	mutex_unlock(&ta_refcount_mutex);
	return ret;
}

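/* Close the session identified by ta_handle and the PSP session info. */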
int handle_close_session(u32 ta_handle, u32 info)
{
	struct tee_cmd_close_session cmd = {0};
	u32 status;
	int ret;

	if (ta_handle == 0)
		return -EINVAL;

	cmd.ta_handle = ta_handle;
	cmd.session_info = info;

	ret = psp_tee_process_cmd(TEE_CMD_ID_CLOSE_SESSION, (void *)&cmd,
				  sizeof(cmd), &status);
	if (!ret && status != 0) {
		pr_err("close session: status = 0x%x\n", status);
		ret = -EBUSY;
	}

	return ret;
}

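/*
 * Unmap a shared memory buffer previously registered with the PSP. Any
 * failure is not reported to the caller.
 */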
void handle_unmap_shmem(u32 buf_id)
{
	struct tee_cmd_unmap_shared_mem cmd = {0};
	u32 status;
	int ret;

	cmd.buf_id = buf_id;

	ret = psp_tee_process_cmd(TEE_CMD_ID_UNMAP_SHARED_MEM, (void *)&cmd,
				  sizeof(cmd), &status);
	if (!ret)
		pr_debug("unmap shared memory: buf_id %u status = 0x%x\n",
			 buf_id, status);
}

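/*
 * Invoke a command in an open session: convert the parameters, submit
 * TEE_CMD_ID_INVOKE_CMD to the PSP and copy output parameters back.
 * arg->ret and arg->ret_origin carry the TEE client API result.
 */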
int handle_invoke_cmd(struct tee_ioctl_invoke_arg *arg, u32 sinfo,
		      struct tee_param *p)
{
	struct tee_cmd_invoke_cmd cmd = {0};
	int ret;

	if (!arg || (!p && arg->num_params))
		return -EINVAL;

	arg->ret_origin = TEEC_ORIGIN_COMMS;

	if (arg->session == 0) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return -EINVAL;
	}

	ret = tee_params_to_amd_params(p, arg->num_params, &cmd.op);
	if (ret) {
		pr_err("invalid params, abort invoke command\n");
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return ret;
	}

	cmd.ta_handle = get_ta_handle(arg->session);
	cmd.cmd_id = arg->func;
	cmd.session_info = sinfo;

	ret = psp_tee_process_cmd(TEE_CMD_ID_INVOKE_CMD, (void *)&cmd,
				  sizeof(cmd), &arg->ret);
	if (ret) {
		arg->ret = TEEC_ERROR_COMMUNICATION;
	} else {
		ret = amd_params_to_tee_params(p, arg->num_params, &cmd.op);
		if (unlikely(ret)) {
			pr_err("invoke command: failed to copy output\n");
			arg->ret = TEEC_ERROR_GENERIC;
			return ret;
		}
		arg->ret_origin = cmd.return_origin;
		pr_debug("invoke command: RO = 0x%x ret = 0x%x\n",
			 arg->ret_origin, arg->ret);
	}

	return ret;
}

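/*
 * Register a list of page-aligned kernel buffers with the PSP. On success
 * the firmware-assigned buffer id is returned through buf_id for use in
 * later memref parameters.
 */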
int handle_map_shmem(u32 count, struct shmem_desc *start, u32 *buf_id)
{
	struct tee_cmd_map_shared_mem *cmd;
	phys_addr_t paddr;
	int ret, i;
	u32 status;

	if (!count || !start || !buf_id)
		return -EINVAL;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	/* Size must be page aligned */
	for (i = 0; i < count; i++) {
		if (!start[i].kaddr || (start[i].size & (PAGE_SIZE - 1))) {
			ret = -EINVAL;
			goto free_cmd;
		}

		if ((u64)start[i].kaddr & (PAGE_SIZE - 1)) {
			pr_err("map shared memory: page unaligned. addr 0x%llx\n",
			       (u64)start[i].kaddr);
			ret = -EINVAL;
			goto free_cmd;
		}
	}

	cmd->sg_list.count = count;

	/* Create buffer list */
	for (i = 0; i < count; i++) {
		paddr = __psp_pa(start[i].kaddr);
		cmd->sg_list.buf[i].hi_addr = upper_32_bits(paddr);
		cmd->sg_list.buf[i].low_addr = lower_32_bits(paddr);
		cmd->sg_list.buf[i].size = start[i].size;
		cmd->sg_list.size += cmd->sg_list.buf[i].size;

		pr_debug("buf[%d]:hi addr = 0x%x\n", i,
			 cmd->sg_list.buf[i].hi_addr);
		pr_debug("buf[%d]:low addr = 0x%x\n", i,
			 cmd->sg_list.buf[i].low_addr);
		pr_debug("buf[%d]:size = 0x%x\n", i, cmd->sg_list.buf[i].size);
		pr_debug("list size = 0x%x\n", cmd->sg_list.size);
	}

	*buf_id = 0;

	ret = psp_tee_process_cmd(TEE_CMD_ID_MAP_SHARED_MEM, (void *)cmd,
				  sizeof(*cmd), &status);
	if (!ret && !status) {
		*buf_id = cmd->buf_id;
		pr_debug("mapped buffer ID = 0x%x\n", *buf_id);
	} else {
		pr_err("map shared memory: status = 0x%x\n", status);
		ret = -ENOMEM;
	}

free_cmd:
	kfree(cmd);

	return ret;
}

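/*
 * Open a session with a loaded TA: convert the parameters, submit
 * TEE_CMD_ID_OPEN_SESSION to the PSP and return the firmware session
 * info through 'info' on success.
 */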
int handle_open_session(struct tee_ioctl_open_session_arg *arg, u32 *info,
			struct tee_param *p)
{
	struct tee_cmd_open_session cmd = {0};
	int ret;

	if (!arg || !info || (!p && arg->num_params))
		return -EINVAL;

	arg->ret_origin = TEEC_ORIGIN_COMMS;

	if (arg->session == 0) {
		arg->ret = TEEC_ERROR_GENERIC;
		return -EINVAL;
	}

	ret = tee_params_to_amd_params(p, arg->num_params, &cmd.op);
	if (ret) {
		pr_err("invalid params, abort open session\n");
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return ret;
	}

	cmd.ta_handle = get_ta_handle(arg->session);
	*info = 0;

	ret = psp_tee_process_cmd(TEE_CMD_ID_OPEN_SESSION, (void *)&cmd,
				  sizeof(cmd), &arg->ret);
	if (ret) {
		arg->ret = TEEC_ERROR_COMMUNICATION;
	} else {
		ret = amd_params_to_tee_params(p, arg->num_params, &cmd.op);
		if (unlikely(ret)) {
			pr_err("open session: failed to copy output\n");
			arg->ret = TEEC_ERROR_GENERIC;
			return ret;
		}
		arg->ret_origin = cmd.return_origin;
		*info = cmd.session_info;
		pr_debug("open session: session info = 0x%x\n", *info);
	}

	pr_debug("open session: ret = 0x%x RO = 0x%x\n", arg->ret,
		 arg->ret_origin);

	return ret;
}

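/*
 * Load a TA binary into the PSP. The blob must be page aligned. A per-TA
 * reference count is taken so the TA is unloaded only when the last
 * session using it goes away; if the refcount entry cannot be allocated,
 * the TA is unloaded again and TEEC_ERROR_OUT_OF_MEMORY is reported.
 */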
int handle_load_ta(void *data, u32 size, struct tee_ioctl_open_session_arg *arg)
{
	struct tee_cmd_unload_ta unload_cmd = {};
	struct tee_cmd_load_ta load_cmd = {};
	phys_addr_t blob;
	int ret;

	if (size == 0 || !data || !arg)
		return -EINVAL;

	blob = __psp_pa(data);
	if (blob & (PAGE_SIZE - 1)) {
		pr_err("load TA: page unaligned. blob 0x%llx\n", blob);
		return -EINVAL;
	}

	load_cmd.hi_addr = upper_32_bits(blob);
	load_cmd.low_addr = lower_32_bits(blob);
	load_cmd.size = size;

	mutex_lock(&ta_refcount_mutex);

	ret = psp_tee_process_cmd(TEE_CMD_ID_LOAD_TA, (void *)&load_cmd,
				  sizeof(load_cmd), &arg->ret);
	if (ret) {
		arg->ret_origin = TEEC_ORIGIN_COMMS;
		arg->ret = TEEC_ERROR_COMMUNICATION;
	} else {
		arg->ret_origin = load_cmd.return_origin;

		if (arg->ret == TEEC_SUCCESS) {
			ret = get_ta_refcount(load_cmd.ta_handle);
			if (!ret) {
				arg->ret_origin = TEEC_ORIGIN_COMMS;
				arg->ret = TEEC_ERROR_OUT_OF_MEMORY;

				/* Unload the TA on error */
				unload_cmd.ta_handle = load_cmd.ta_handle;
				psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA,
						    (void *)&unload_cmd,
						    sizeof(unload_cmd), &ret);
			} else {
				set_session_id(load_cmd.ta_handle, 0, &arg->session);
			}
		}
	}
	mutex_unlock(&ta_refcount_mutex);

	pr_debug("load TA: TA handle = 0x%x, RO = 0x%x, ret = 0x%x\n",
		 load_cmd.ta_handle, arg->ret_origin, arg->ret);

	return 0;
}

