// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2021, 2023 Linaro Limited
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
#include "optee_private.h"

#define MAX_ARG_PARAM_COUNT	6

/*
 * How much memory we allocate for each entry. This doesn't have to be a
 * single page, but it makes sense to keep it at least a multiple of the
 * page size.
 */
#define SHM_ENTRY_SIZE		PAGE_SIZE

/*
 * We need to have a compile time constant to be able to determine the
 * maximum needed size of the bit field.
 */
#define MIN_ARG_SIZE		OPTEE_MSG_GET_ARG_SIZE(MAX_ARG_PARAM_COUNT)
#define MAX_ARG_COUNT_PER_ENTRY	(SHM_ENTRY_SIZE / MIN_ARG_SIZE)

/*
 * Shared memory for argument structs is cached here. The number of
 * argument structs that can fit is determined at runtime depending on the
 * needed RPC parameter count reported by secure world
 * (optee->rpc_param_count).
 */
struct optee_shm_arg_entry {
	struct list_head list_node;
	struct tee_shm *shm;
	DECLARE_BITMAP(map, MAX_ARG_COUNT_PER_ENTRY);
};

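/*
 * Initializes a queue for tracking waiters and, when thread_count is
 * non-zero, the number of free secure world threads.
 */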
void optee_cq_init(struct optee_call_queue *cq, int thread_count)
{
	mutex_init(&cq->mutex);
	INIT_LIST_HEAD(&cq->waiters);

	/*
	 * If cq->total_thread_count is 0 then we're not trying to keep
	 * track of how many free threads we have, instead we're relying on
	 * the secure world to tell us when we're out of threads and have
	 * to wait for another thread to become available.
	 */
	cq->total_thread_count = thread_count;
	cq->free_thread_count = thread_count;
}

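/*
 * Adds a waiter to the call queue and, if free threads are tracked,
 * waits until a secure world thread is available. One thread is held
 * back for system sessions whenever such a session has been requested.
 */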
void optee_cq_wait_init(struct optee_call_queue *cq,
			struct optee_call_waiter *w, bool sys_thread)
{
	unsigned int free_thread_threshold;
	bool need_wait = false;

	memset(w, 0, sizeof(*w));

	/*
	 * We're preparing to make a call to secure world. In case we can't
	 * allocate a thread in secure world we'll end up waiting in
	 * optee_cq_wait_for_completion().
	 *
	 * Normally if there's no contention in secure world the call will
	 * complete and we can cleanup directly with optee_cq_wait_final().
	 */
	mutex_lock(&cq->mutex);

	/*
	 * We add ourselves to the queue, but we don't wait. This
	 * guarantees that we don't lose a completion if secure world
	 * returns busy and another thread just exited and tries to
	 * complete someone.
	 */
	init_completion(&w->c);
	list_add_tail(&w->list_node, &cq->waiters);
	w->sys_thread = sys_thread;

	if (cq->total_thread_count) {
		if (sys_thread || !cq->sys_thread_req_count)
			free_thread_threshold = 0;
		else
			free_thread_threshold = 1;

		if (cq->free_thread_count > free_thread_threshold)
			cq->free_thread_count--;
		else
			need_wait = true;
	}

	mutex_unlock(&cq->mutex);

	while (need_wait) {
		optee_cq_wait_for_completion(cq, w);
		mutex_lock(&cq->mutex);

		if (sys_thread || !cq->sys_thread_req_count)
			free_thread_threshold = 0;
		else
			free_thread_threshold = 1;

		if (cq->free_thread_count > free_thread_threshold) {
			cq->free_thread_count--;
			need_wait = false;
		}

		mutex_unlock(&cq->mutex);
	}
}

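/*
 * Sleeps until woken by a task returning from secure world, then moves
 * the waiter to the end of the queue so it doesn't block other waiters.
 */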
void optee_cq_wait_for_completion(struct optee_call_queue *cq,
				  struct optee_call_waiter *w)
{
	wait_for_completion(&w->c);

	mutex_lock(&cq->mutex);

	/* Move to end of list to get out of the way for other waiters */
	list_del(&w->list_node);
	reinit_completion(&w->c);
	list_add_tail(&w->list_node, &cq->waiters);

	mutex_unlock(&cq->mutex);
}

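/* Wakes one waiter, preferring a waiting system session if there is one */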
static void optee_cq_complete_one(struct optee_call_queue *cq)
{
	struct optee_call_waiter *w;

	/* Wake a waiting system session if any, prior to a normal session */
	list_for_each_entry(w, &cq->waiters, list_node) {
		if (w->sys_thread && !completion_done(&w->c)) {
			complete(&w->c);
			return;
		}
	}

	list_for_each_entry(w, &cq->waiters, list_node) {
		if (!completion_done(&w->c)) {
			complete(&w->c);
			break;
		}
	}
}

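/*
 * Removes the waiter from the call queue and releases the secure world
 * thread that was used for the call.
 */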
void optee_cq_wait_final(struct optee_call_queue *cq,
			 struct optee_call_waiter *w)
{
	/*
	 * We're done with the call to secure world. The thread in secure
	 * world that was used for this call is now available for some
	 * other task to use.
	 */
	mutex_lock(&cq->mutex);

	/* Get out of the list */
	list_del(&w->list_node);

	cq->free_thread_count++;

	/* Wake up one waiting task, if any */
	optee_cq_complete_one(cq);

	/*
	 * If our completion is done we've received a completion from
	 * another task that was just done with its call to secure world.
	 * Since yet another thread is now available in secure world, wake
	 * up another waiting task if there is one.
	 */
	if (completion_done(&w->c))
		optee_cq_complete_one(cq);

	mutex_unlock(&cq->mutex);
}

/* Count registered system sessions to decide whether to reserve a system thread */
static bool optee_cq_incr_sys_thread_count(struct optee_call_queue *cq)
{
	if (cq->total_thread_count <= 1)
		return false;

	mutex_lock(&cq->mutex);
	cq->sys_thread_req_count++;
	mutex_unlock(&cq->mutex);

	return true;
}

static void optee_cq_decr_sys_thread_count(struct optee_call_queue *cq)
{
	mutex_lock(&cq->mutex);
	cq->sys_thread_req_count--;
	/* If there's someone waiting, let it resume */
	optee_cq_complete_one(cq);
	mutex_unlock(&cq->mutex);
}

/* Requires the filpstate mutex to be held */
static struct optee_session *find_session(struct optee_context_data *ctxdata,
					  u32 session_id)
{
	struct optee_session *sess;

	list_for_each_entry(sess, &ctxdata->sess_list, list_node)
		if (sess->session_id == session_id)
			return sess;

	return NULL;
}

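/* Initializes the cache of shared memory for argument structs */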
void optee_shm_arg_cache_init(struct optee *optee, u32 flags)
{
	INIT_LIST_HEAD(&optee->shm_arg_cache.shm_args);
	mutex_init(&optee->shm_arg_cache.mutex);
	optee->shm_arg_cache.flags = flags;
}

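/* Frees all shared memory cached for argument structs */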
void optee_shm_arg_cache_uninit(struct optee *optee)
{
	struct list_head *head = &optee->shm_arg_cache.shm_args;
	struct optee_shm_arg_entry *entry;

	mutex_destroy(&optee->shm_arg_cache.mutex);
	while (!list_empty(head)) {
		entry = list_first_entry(head, struct optee_shm_arg_entry,
					 list_node);
		list_del(&entry->list_node);
		if (find_first_bit(entry->map, MAX_ARG_COUNT_PER_ENTRY) !=
		     MAX_ARG_COUNT_PER_ENTRY) {
			pr_err("Freeing non-free entry\n");
		}
		tee_shm_free(entry->shm);
		kfree(entry);
	}
}

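/*
 * Returns the size needed for an argument struct with the maximum
 * parameter count, including room for RPC arguments when secure world
 * expects them in the same shared memory buffer.
 */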
size_t optee_msg_arg_size(size_t rpc_param_count)
{
	size_t sz = OPTEE_MSG_GET_ARG_SIZE(MAX_ARG_PARAM_COUNT);

	if (rpc_param_count)
		sz += OPTEE_MSG_GET_ARG_SIZE(rpc_param_count);

	return sz;
}

/**
 * optee_get_msg_arg() - Provide shared memory for argument struct
 * @ctx:	Caller TEE context
 * @num_params:	Number of parameters to store
 * @entry_ret:	Entry pointer, needed when freeing the buffer
 * @shm_ret:	Shared memory buffer
 * @offs_ret:	Offset of argument struct in shared memory buffer
 *
 * @returns a pointer to the argument struct in memory, else an ERR_PTR
 */
struct optee_msg_arg *optee_get_msg_arg(struct tee_context *ctx,
					size_t num_params,
					struct optee_shm_arg_entry **entry_ret,
					struct tee_shm **shm_ret,
					u_int *offs_ret)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	size_t sz = optee_msg_arg_size(optee->rpc_param_count);
	struct optee_shm_arg_entry *entry;
	struct optee_msg_arg *ma;
	size_t args_per_entry;
	u_long bit;
	u_int offs;
	void *res;

	if (num_params > MAX_ARG_PARAM_COUNT)
		return ERR_PTR(-EINVAL);

	if (optee->shm_arg_cache.flags & OPTEE_SHM_ARG_SHARED)
		args_per_entry = SHM_ENTRY_SIZE / sz;
	else
		args_per_entry = 1;

	mutex_lock(&optee->shm_arg_cache.mutex);
	list_for_each_entry(entry, &optee->shm_arg_cache.shm_args, list_node) {
		bit = find_first_zero_bit(entry->map, MAX_ARG_COUNT_PER_ENTRY);
		if (bit < args_per_entry)
			goto have_entry;
	}

	/*
	 * No entry was found, let's allocate a new one.
	 */
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		res = ERR_PTR(-ENOMEM);
		goto out;
	}

	if (optee->shm_arg_cache.flags & OPTEE_SHM_ARG_ALLOC_PRIV)
		res = tee_shm_alloc_priv_buf(ctx, SHM_ENTRY_SIZE);
	else
		res = tee_shm_alloc_kernel_buf(ctx, SHM_ENTRY_SIZE);

	if (IS_ERR(res)) {
		kfree(entry);
		goto out;
	}
	entry->shm = res;
	list_add(&entry->list_node, &optee->shm_arg_cache.shm_args);
	bit = 0;

have_entry:
	offs = bit * sz;
	res = tee_shm_get_va(entry->shm, offs);
	if (IS_ERR(res))
		goto out;
	ma = res;
	set_bit(bit, entry->map);
	memset(ma, 0, sz);
	ma->num_params = num_params;
	*entry_ret = entry;
	*shm_ret = entry->shm;
	*offs_ret = offs;
out:
	mutex_unlock(&optee->shm_arg_cache.mutex);
	return res;
}

/**
 * optee_free_msg_arg() - Free previously obtained shared memory
 * @ctx:	Caller TEE context
 * @entry:	Pointer returned when the shared memory was obtained
 * @offs:	Offset of shared memory buffer to free
 *
 * This function frees the shared memory obtained with optee_get_msg_arg().
 */
void optee_free_msg_arg(struct tee_context *ctx,
			struct optee_shm_arg_entry *entry, u_int offs)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	size_t sz = optee_msg_arg_size(optee->rpc_param_count);
	u_long bit;

	if (offs > SHM_ENTRY_SIZE || offs % sz) {
		pr_err("Invalid offs %u\n", offs);
		return;
	}
	bit = offs / sz;

	mutex_lock(&optee->shm_arg_cache.mutex);

	if (!test_bit(bit, entry->map))
		pr_err("Bit pos %lu is already free\n", bit);
	clear_bit(bit, entry->map);

	mutex_unlock(&optee->shm_arg_cache.mutex);
}

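/*
 * Opens a session towards the trusted application identified by
 * arg->uuid. The first two message parameters are meta parameters
 * carrying the client UUID and login method.
 */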
int optee_open_session(struct tee_context *ctx,
		       struct tee_ioctl_open_session_arg *arg,
		       struct tee_param *param)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_context_data *ctxdata = ctx->data;
	struct optee_shm_arg_entry *entry;
	struct tee_shm *shm;
	struct optee_msg_arg *msg_arg;
	struct optee_session *sess = NULL;
	uuid_t client_uuid;
	u_int offs;
	int rc;

	/* +2 for the meta parameters added below */
	msg_arg = optee_get_msg_arg(ctx, arg->num_params + 2,
				    &entry, &shm, &offs);
	if (IS_ERR(msg_arg))
		return PTR_ERR(msg_arg);

	msg_arg->cmd = OPTEE_MSG_CMD_OPEN_SESSION;
	msg_arg->cancel_id = arg->cancel_id;

	/*
	 * Initialize and add the meta parameters needed when opening a
	 * session.
	 */
	msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
				  OPTEE_MSG_ATTR_META;
	msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
				  OPTEE_MSG_ATTR_META;
	memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid));
	msg_arg->params[1].u.value.c = arg->clnt_login;

	rc = tee_session_calc_client_uuid(&client_uuid, arg->clnt_login,
					  arg->clnt_uuid);
	if (rc)
		goto out;
	export_uuid(msg_arg->params[1].u.octets, &client_uuid);

	rc = optee->ops->to_msg_param(optee, msg_arg->params + 2,
				      arg->num_params, param);
	if (rc)
		goto out;

	sess = kzalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		rc = -ENOMEM;
		goto out;
	}

	if (optee->ops->do_call_with_arg(ctx, shm, offs,
					 sess->use_sys_thread)) {
		msg_arg->ret = TEEC_ERROR_COMMUNICATION;
		msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
	}

	if (msg_arg->ret == TEEC_SUCCESS) {
		/* A new session has been created, add it to the list. */
		sess->session_id = msg_arg->session;
		mutex_lock(&ctxdata->mutex);
		list_add(&sess->list_node, &ctxdata->sess_list);
		mutex_unlock(&ctxdata->mutex);
	} else {
		kfree(sess);
	}

	if (optee->ops->from_msg_param(optee, param, arg->num_params,
				       msg_arg->params + 2)) {
		arg->ret = TEEC_ERROR_COMMUNICATION;
		arg->ret_origin = TEEC_ORIGIN_COMMS;
		/* Close session again to avoid leakage */
		optee_close_session(ctx, msg_arg->session);
	} else {
		arg->session = msg_arg->session;
		arg->ret = msg_arg->ret;
		arg->ret_origin = msg_arg->ret_origin;
	}
out:
	optee_free_msg_arg(ctx, entry, offs);

	return rc;
}

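/*
 * Marks the session to be executed on a reserved system thread, provided
 * one can be set aside in the call queue.
 */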
int optee_system_session(struct tee_context *ctx, u32 session)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_context_data *ctxdata = ctx->data;
	struct optee_session *sess;
	int rc = -EINVAL;

	mutex_lock(&ctxdata->mutex);

	sess = find_session(ctxdata, session);
	if (sess && (sess->use_sys_thread ||
		     optee_cq_incr_sys_thread_count(&optee->call_queue))) {
		sess->use_sys_thread = true;
		rc = 0;
	}

	mutex_unlock(&ctxdata->mutex);

	return rc;
}

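/*
 * Asks secure world to close the session and drops the system thread
 * reservation if the session used one.
 */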
int optee_close_session_helper(struct tee_context *ctx, u32 session,
			       bool system_thread)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_shm_arg_entry *entry;
	struct optee_msg_arg *msg_arg;
	struct tee_shm *shm;
	u_int offs;

	msg_arg = optee_get_msg_arg(ctx, 0, &entry, &shm, &offs);
	if (IS_ERR(msg_arg))
		return PTR_ERR(msg_arg);

	msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
	msg_arg->session = session;
	optee->ops->do_call_with_arg(ctx, shm, offs, system_thread);

	optee_free_msg_arg(ctx, entry, offs);

	if (system_thread)
		optee_cq_decr_sys_thread_count(&optee->call_queue);

	return 0;
}

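/*
 * Removes the session from the context's list of sessions before closing
 * it in secure world.
 */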
int optee_close_session(struct tee_context *ctx, u32 session)
{
	struct optee_context_data *ctxdata = ctx->data;
	struct optee_session *sess;
	bool system_thread;

	/* Check that the session is valid and remove it from the list */
	mutex_lock(&ctxdata->mutex);
	sess = find_session(ctxdata, session);
	if (sess)
		list_del(&sess->list_node);
	mutex_unlock(&ctxdata->mutex);
	if (!sess)
		return -EINVAL;
	system_thread = sess->use_sys_thread;
	kfree(sess);

	return optee_close_session_helper(ctx, session, system_thread);
}

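/*
 * Invokes a function in the trusted application that the session was
 * opened towards.
 */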
int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
		      struct tee_param *param)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_context_data *ctxdata = ctx->data;
	struct optee_shm_arg_entry *entry;
	struct optee_msg_arg *msg_arg;
	struct optee_session *sess;
	struct tee_shm *shm;
	bool system_thread;
	u_int offs;
	int rc;

	/* Check that the session is valid */
	mutex_lock(&ctxdata->mutex);
	sess = find_session(ctxdata, arg->session);
	if (sess)
		system_thread = sess->use_sys_thread;
	mutex_unlock(&ctxdata->mutex);
	if (!sess)
		return -EINVAL;

	msg_arg = optee_get_msg_arg(ctx, arg->num_params,
				    &entry, &shm, &offs);
	if (IS_ERR(msg_arg))
		return PTR_ERR(msg_arg);
	msg_arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND;
	msg_arg->func = arg->func;
	msg_arg->session = arg->session;
	msg_arg->cancel_id = arg->cancel_id;

	rc = optee->ops->to_msg_param(optee, msg_arg->params, arg->num_params,
				      param);
	if (rc)
		goto out;

	if (optee->ops->do_call_with_arg(ctx, shm, offs, system_thread)) {
		msg_arg->ret = TEEC_ERROR_COMMUNICATION;
		msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
	}

	if (optee->ops->from_msg_param(optee, param, arg->num_params,
				       msg_arg->params)) {
		msg_arg->ret = TEEC_ERROR_COMMUNICATION;
		msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
	}

	arg->ret = msg_arg->ret;
	arg->ret_origin = msg_arg->ret_origin;
out:
	optee_free_msg_arg(ctx, entry, offs);
	return rc;
}

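/*
 * Asks secure world to cancel an ongoing request identified by the
 * cancel_id given when the request was made.
 */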
int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_context_data *ctxdata = ctx->data;
	struct optee_shm_arg_entry *entry;
	struct optee_msg_arg *msg_arg;
	struct optee_session *sess;
	bool system_thread;
	struct tee_shm *shm;
	u_int offs;

	/* Check that the session is valid */
	mutex_lock(&ctxdata->mutex);
	sess = find_session(ctxdata, session);
	if (sess)
		system_thread = sess->use_sys_thread;
	mutex_unlock(&ctxdata->mutex);
	if (!sess)
		return -EINVAL;

	msg_arg = optee_get_msg_arg(ctx, 0, &entry, &shm, &offs);
	if (IS_ERR(msg_arg))
		return PTR_ERR(msg_arg);

	msg_arg->cmd = OPTEE_MSG_CMD_CANCEL;
	msg_arg->session = session;
	msg_arg->cancel_id = cancel_id;
	optee->ops->do_call_with_arg(ctx, shm, offs, system_thread);

	optee_free_msg_arg(ctx, entry, offs);
	return 0;
}

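/*
 * Returns true when the page protection selects cacheable normal memory,
 * the only memory type suitable for sharing with OP-TEE.
 */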
static bool is_normal_memory(pgprot_t p)
{
#if defined(CONFIG_ARM)
	return (((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEALLOC) ||
		((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEBACK));
#elif defined(CONFIG_ARM64)
	return (pgprot_val(p) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL);
#else
#error "Unsupported architecture"
#endif
}

static int __check_mem_type(struct mm_struct *mm, unsigned long start,
			    unsigned long end)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, start);

	for_each_vma_range(vmi, vma, end) {
		if (!is_normal_memory(vma->vm_page_prot))
			return -EINVAL;
	}

	return 0;
}

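/*
 * Checks that the pages in the range are of a memory type that can be
 * registered with OP-TEE, i.e. mapped as normal (cacheable) memory.
 */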
int optee_check_mem_type(unsigned long start, size_t num_pages)
{
	struct mm_struct *mm = current->mm;
	int rc;

	/*
	 * Allow kernel addresses to be registered with OP-TEE, as kernel
	 * pages are configured as normal memory only.
	 */
	if (virt_addr_valid((void *)start) || is_vmalloc_addr((void *)start))
		return 0;

	mmap_read_lock(mm);
	rc = __check_mem_type(mm, start, start + num_pages * PAGE_SIZE);
	mmap_read_unlock(mm);

	return rc;
}

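/* Passes a single parameter-less command to secure world */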
static int simple_call_with_arg(struct tee_context *ctx, u32 cmd)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_shm_arg_entry *entry;
	struct optee_msg_arg *msg_arg;
	struct tee_shm *shm;
	u_int offs;

	msg_arg = optee_get_msg_arg(ctx, 0, &entry, &shm, &offs);
	if (IS_ERR(msg_arg))
		return PTR_ERR(msg_arg);

	msg_arg->cmd = cmd;
	optee->ops->do_call_with_arg(ctx, shm, offs, false);

	optee_free_msg_arg(ctx, entry, offs);
	return 0;
}

int optee_do_bottom_half(struct tee_context *ctx)
{
	return simple_call_with_arg(ctx, OPTEE_MSG_CMD_DO_BOTTOM_HALF);
}

int optee_stop_async_notif(struct tee_context *ctx)
{
	return simple_call_with_arg(ctx, OPTEE_MSG_CMD_STOP_ASYNC_NOTIF);
}