// SPDX-License-Identifier: GPL-2.0-only
/*
 * Arm Firmware Framework for ARMv8-A (FFA) interface driver
 *
 * The Arm FFA specification[1] describes a software architecture that
 * leverages the virtualization extension to isolate software images
 * provided by an ecosystem of vendors from each other and describes
 * interfaces that standardize communication between the various software
 * images, including communication between images in the Secure world and
 * Normal world. Any hypervisor could use the FFA interfaces to enable
 * communication between the VMs it manages.
 *
 * The hypervisor, a.k.a. the partition manager in FFA terminology, can
 * assign system resources (memory regions, devices, CPU cycles) to the
 * partitions and manage isolation amongst them.
 *
 * [1] https://developer.arm.com/docs/den0077/latest
 *
 * Copyright (C) 2021 ARM Ltd.
 */

#define DRIVER_NAME "ARM FF-A"
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

#include <linux/acpi.h>
#include <linux/arm_ffa.h>
#include <linux/bitfield.h>
#include <linux/cpuhotplug.h>
#include <linux/device.h>
#include <linux/hashtable.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/of_irq.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/uuid.h>
#include <linux/xarray.h>

#include "common.h"
#define FFA_DRIVER_VERSION	FFA_VERSION_1_1
#define FFA_MIN_VERSION		FFA_VERSION_1_0

#define SENDER_ID_MASK		GENMASK(31, 16)
#define RECEIVER_ID_MASK	GENMASK(15, 0)
#define SENDER_ID(x)		((u16)(FIELD_GET(SENDER_ID_MASK, (x))))
#define RECEIVER_ID(x)		((u16)(FIELD_GET(RECEIVER_ID_MASK, (x))))
#define PACK_TARGET_INFO(s, r)		\
	(FIELD_PREP(SENDER_ID_MASK, (s)) | FIELD_PREP(RECEIVER_ID_MASK, (r)))

/*
 * Keep the RX/TX buffer size at 4K for now; 64K may be preferred to keep
 * it at least one page in a 64K PAGE_SIZE configuration.
 */
#define RXTX_BUFFER_SIZE	SZ_4K

#define FFA_MAX_NOTIFICATIONS		64

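/* Conduit used to invoke the FF-A ABIs, set up by ffa_transport_init() */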
static ffa_fn *invoke_ffa_fn;

static const int ffa_linux_errmap[] = {
	/* better than switch case as long as return values are contiguous */
	0,		/* FFA_RET_SUCCESS */
	-EOPNOTSUPP,	/* FFA_RET_NOT_SUPPORTED */
	-EINVAL,	/* FFA_RET_INVALID_PARAMETERS */
	-ENOMEM,	/* FFA_RET_NO_MEMORY */
	-EBUSY,		/* FFA_RET_BUSY */
	-EINTR,		/* FFA_RET_INTERRUPTED */
	-EACCES,	/* FFA_RET_DENIED */
	-EAGAIN,	/* FFA_RET_RETRY */
	-ECANCELED,	/* FFA_RET_ABORTED */
	-ENODATA,	/* FFA_RET_NO_DATA */
};

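/*
 * Negated FF-A error codes index the table above, e.g. FFA_RET_BUSY (-4)
 * maps to -EBUSY; anything out of range falls back to -EINVAL.
 */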
static inline int ffa_to_linux_errno(int errno)
{
	int err_idx = -errno;

	if (err_idx >= 0 && err_idx < ARRAY_SIZE(ffa_linux_errmap))
		return ffa_linux_errmap[err_idx];
	return -EINVAL;
}

struct ffa_pcpu_irq {
	struct ffa_drv_info *info;
};

struct ffa_drv_info {
	u32 version;
	u16 vm_id;
	struct mutex rx_lock; /* lock to protect Rx buffer */
	struct mutex tx_lock; /* lock to protect Tx buffer */
	void *rx_buffer;
	void *tx_buffer;
	bool mem_ops_native;
	bool bitmap_created;
	bool notif_enabled;
	unsigned int sched_recv_irq;
	unsigned int cpuhp_state;
	struct ffa_pcpu_irq __percpu *irq_pcpu;
	struct workqueue_struct *notif_pcpu_wq;
	struct work_struct notif_pcpu_work;
	struct work_struct irq_work;
	struct xarray partition_info;
	DECLARE_HASHTABLE(notifier_hash, ilog2(FFA_MAX_NOTIFICATIONS));
	struct mutex notify_lock; /* lock to protect notifier hashtable */
};

static struct ffa_drv_info *drv_info;
static void ffa_partitions_cleanup(void);

/*
 * The driver must be able to support all the versions from the earliest
 * supported FFA_MIN_VERSION to the latest supported FFA_DRIVER_VERSION.
 * The specification states that if firmware supports an FFA implementation
 * that is incompatible with and at a greater version number than specified
 * by the caller (FFA_DRIVER_VERSION passed as parameter to FFA_VERSION),
 * it must return the NOT_SUPPORTED error code.
 */
static u32 ffa_compatible_version_find(u32 version)
{
	u16 major = FFA_MAJOR_VERSION(version), minor = FFA_MINOR_VERSION(version);
	u16 drv_major = FFA_MAJOR_VERSION(FFA_DRIVER_VERSION);
	u16 drv_minor = FFA_MINOR_VERSION(FFA_DRIVER_VERSION);

	if ((major < drv_major) || (major == drv_major && minor <= drv_minor))
		return version;

	pr_info("Firmware version higher than driver version, downgrading\n");
	return FFA_DRIVER_VERSION;
}

static int ffa_version_check(u32 *version)
{
	ffa_value_t ver;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_VERSION, .a1 = FFA_DRIVER_VERSION,
		      }, &ver);

	if (ver.a0 == FFA_RET_NOT_SUPPORTED) {
		pr_info("FFA_VERSION returned not supported\n");
		return -EOPNOTSUPP;
	}

	if (ver.a0 < FFA_MIN_VERSION) {
		pr_err("Incompatible v%d.%d! Earliest supported v%d.%d\n",
		       FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0),
		       FFA_MAJOR_VERSION(FFA_MIN_VERSION),
		       FFA_MINOR_VERSION(FFA_MIN_VERSION));
		return -EINVAL;
	}

	pr_info("Driver version %d.%d\n", FFA_MAJOR_VERSION(FFA_DRIVER_VERSION),
		FFA_MINOR_VERSION(FFA_DRIVER_VERSION));
	pr_info("Firmware version %d.%d found\n", FFA_MAJOR_VERSION(ver.a0),
		FFA_MINOR_VERSION(ver.a0));
	*version = ffa_compatible_version_find(ver.a0);

	return 0;
}

static int ffa_rx_release(void)
{
	ffa_value_t ret;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_RX_RELEASE,
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	/* check for ret.a0 == FFA_RX_RELEASE ? */

	return 0;
}

static int ffa_rxtx_map(phys_addr_t tx_buf, phys_addr_t rx_buf, u32 pg_cnt)
{
	ffa_value_t ret;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_FN_NATIVE(RXTX_MAP),
		      .a1 = tx_buf, .a2 = rx_buf, .a3 = pg_cnt,
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	return 0;
}

static int ffa_rxtx_unmap(u16 vm_id)
{
	ffa_value_t ret;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_RXTX_UNMAP, .a1 = PACK_TARGET_INFO(vm_id, 0),
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	return 0;
}

#define PARTITION_INFO_GET_RETURN_COUNT_ONLY	BIT(0)

/* buffer must be sizeof(struct ffa_partition_info) * num_partitions */
static int
__ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
			 struct ffa_partition_info *buffer, int num_partitions)
{
	int idx, count, flags = 0, sz, buf_sz;
	ffa_value_t partition_info;

	if (drv_info->version > FFA_VERSION_1_0 &&
	    (!buffer || !num_partitions)) /* Just get the count for now */
		flags = PARTITION_INFO_GET_RETURN_COUNT_ONLY;

	mutex_lock(&drv_info->rx_lock);
	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_PARTITION_INFO_GET,
		      .a1 = uuid0, .a2 = uuid1, .a3 = uuid2, .a4 = uuid3,
		      .a5 = flags,
		      }, &partition_info);

	if (partition_info.a0 == FFA_ERROR) {
		mutex_unlock(&drv_info->rx_lock);
		return ffa_to_linux_errno((int)partition_info.a2);
	}

	count = partition_info.a2;

	if (drv_info->version > FFA_VERSION_1_0) {
		buf_sz = sz = partition_info.a3;
		if (sz > sizeof(*buffer))
			buf_sz = sizeof(*buffer);
	} else {
		/* FFA_VERSION_1_0 lacks the size in the response */
		buf_sz = sz = 8;
	}

	if (buffer && count <= num_partitions)
		for (idx = 0; idx < count; idx++)
			memcpy(buffer + idx, drv_info->rx_buffer + idx * sz,
			       buf_sz);

	ffa_rx_release();

	mutex_unlock(&drv_info->rx_lock);

	return count;
}

/* buffer is allocated here and the caller must free it if returned count > 0 */
static int
ffa_partition_probe(const uuid_t *uuid, struct ffa_partition_info **buffer)
{
	int count;
	u32 uuid0_4[4];
	struct ffa_partition_info *pbuf;

	export_uuid((u8 *)uuid0_4, uuid);
	count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1], uuid0_4[2],
					 uuid0_4[3], NULL, 0);
	if (count <= 0)
		return count;

	pbuf = kcalloc(count, sizeof(*pbuf), GFP_KERNEL);
	if (!pbuf)
		return -ENOMEM;

	count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1], uuid0_4[2],
					 uuid0_4[3], pbuf, count);
	if (count <= 0)
		kfree(pbuf);
	else
		*buffer = pbuf;

	return count;
}

#define VM_ID_MASK	GENMASK(15, 0)
static int ffa_id_get(u16 *vm_id)
{
	ffa_value_t id;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_ID_GET,
		      }, &id);

	if (id.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)id.a2);

	*vm_id = FIELD_GET(VM_ID_MASK, (id.a2));

	return 0;
}

static int ffa_msg_send_direct_req(u16 src_id, u16 dst_id, bool mode_32bit,
				   struct ffa_send_direct_data *data)
{
	u32 req_id, resp_id, src_dst_ids = PACK_TARGET_INFO(src_id, dst_id);
	ffa_value_t ret;

	if (mode_32bit) {
		req_id = FFA_MSG_SEND_DIRECT_REQ;
		resp_id = FFA_MSG_SEND_DIRECT_RESP;
	} else {
		req_id = FFA_FN_NATIVE(MSG_SEND_DIRECT_REQ);
		resp_id = FFA_FN_NATIVE(MSG_SEND_DIRECT_RESP);
	}

	invoke_ffa_fn((ffa_value_t){
		      .a0 = req_id, .a1 = src_dst_ids, .a2 = 0,
		      .a3 = data->data0, .a4 = data->data1, .a5 = data->data2,
		      .a6 = data->data3, .a7 = data->data4,
		      }, &ret);

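	/*
	 * Keep resuming the target partition with FFA_RUN while it reports
	 * it was interrupted before producing the direct response.
	 */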
	while (ret.a0 == FFA_INTERRUPT)
		invoke_ffa_fn((ffa_value_t){
			      .a0 = FFA_RUN, .a1 = ret.a1,
			      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	if (ret.a0 == resp_id) {
		data->data0 = ret.a3;
		data->data1 = ret.a4;
		data->data2 = ret.a5;
		data->data3 = ret.a6;
		data->data4 = ret.a7;
		return 0;
	}

	return -EINVAL;
}

static int ffa_mem_first_frag(u32 func_id, phys_addr_t buf, u32 buf_sz,
			      u32 frag_len, u32 len, u64 *handle)
{
	ffa_value_t ret;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = func_id, .a1 = len, .a2 = frag_len,
		      .a3 = buf, .a4 = buf_sz,
		      }, &ret);

	while (ret.a0 == FFA_MEM_OP_PAUSE)
		invoke_ffa_fn((ffa_value_t){
			      .a0 = FFA_MEM_OP_RESUME,
			      .a1 = ret.a1, .a2 = ret.a2,
			      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	if (ret.a0 == FFA_SUCCESS) {
		if (handle)
			*handle = PACK_HANDLE(ret.a2, ret.a3);
	} else if (ret.a0 == FFA_MEM_FRAG_RX) {
		if (handle)
			*handle = PACK_HANDLE(ret.a1, ret.a2);
	} else {
		return -EOPNOTSUPP;
	}

	return frag_len;
}

static int ffa_mem_next_frag(u64 handle, u32 frag_len)
{
	ffa_value_t ret;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_MEM_FRAG_TX,
		      .a1 = HANDLE_LOW(handle), .a2 = HANDLE_HIGH(handle),
		      .a3 = frag_len,
		      }, &ret);

	while (ret.a0 == FFA_MEM_OP_PAUSE)
		invoke_ffa_fn((ffa_value_t){
			      .a0 = FFA_MEM_OP_RESUME,
			      .a1 = ret.a1, .a2 = ret.a2,
			      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	if (ret.a0 == FFA_MEM_FRAG_RX)
		return ret.a3;
	else if (ret.a0 == FFA_SUCCESS)
		return 0;

	return -EOPNOTSUPP;
}

static int
ffa_transmit_fragment(u32 func_id, phys_addr_t buf, u32 buf_sz, u32 frag_len,
		      u32 len, u64 *handle, bool first)
{
	if (!first)
		return ffa_mem_next_frag(*handle, frag_len);

	return ffa_mem_first_frag(func_id, buf, buf_sz, frag_len, len, handle);
}

static u32 ffa_get_num_pages_sg(struct scatterlist *sg)
{
	u32 num_pages = 0;

	do {
		num_pages += sg->length / FFA_PAGE_SIZE;
	} while ((sg = sg_next(sg)));

	return num_pages;
}

static u16 ffa_memory_attributes_get(u32 func_id)
{
	/*
	 * For the memory lend or donate operation, if the receiver is a PE or
	 * a proxy endpoint, the owner/sender must not specify the attributes
	 */
	if (func_id == FFA_FN_NATIVE(MEM_LEND) ||
	    func_id == FFA_MEM_LEND)
		return 0;

	return FFA_MEM_NORMAL | FFA_MEM_WRITE_BACK | FFA_MEM_INNER_SHAREABLE;
}

static int
ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
		       struct ffa_mem_ops_args *args)
{
	int rc = 0;
	bool first = true;
	u32 composite_offset;
	phys_addr_t addr = 0;
	struct ffa_mem_region *mem_region = buffer;
	struct ffa_composite_mem_region *composite;
	struct ffa_mem_region_addr_range *constituents;
	struct ffa_mem_region_attributes *ep_mem_access;
	u32 idx, frag_len, length, buf_sz = 0, num_entries = sg_nents(args->sg);

	mem_region->tag = args->tag;
	mem_region->flags = args->flags;
	mem_region->sender_id = drv_info->vm_id;
	mem_region->attributes = ffa_memory_attributes_get(func_id);
	ep_mem_access = buffer +
			ffa_mem_desc_offset(buffer, 0, drv_info->version);
	composite_offset = ffa_mem_desc_offset(buffer, args->nattrs,
					       drv_info->version);

	for (idx = 0; idx < args->nattrs; idx++, ep_mem_access++) {
		ep_mem_access->receiver = args->attrs[idx].receiver;
		ep_mem_access->attrs = args->attrs[idx].attrs;
		ep_mem_access->composite_off = composite_offset;
		ep_mem_access->flag = 0;
		ep_mem_access->reserved = 0;
	}
	mem_region->handle = 0;
	mem_region->ep_count = args->nattrs;
	if (drv_info->version <= FFA_VERSION_1_0) {
		mem_region->ep_mem_size = 0;
	} else {
		mem_region->ep_mem_size = sizeof(*ep_mem_access);
		mem_region->ep_mem_offset = sizeof(*mem_region);
		memset(mem_region->reserved, 0, 12);
	}

	composite = buffer + composite_offset;
	composite->total_pg_cnt = ffa_get_num_pages_sg(args->sg);
	composite->addr_range_cnt = num_entries;
	composite->reserved = 0;

	length = composite_offset + CONSTITUENTS_OFFSET(num_entries);
	frag_len = composite_offset + CONSTITUENTS_OFFSET(0);
	if (frag_len > max_fragsize)
		return -ENXIO;

	if (!args->use_txbuf) {
		addr = virt_to_phys(buffer);
		buf_sz = max_fragsize / FFA_PAGE_SIZE;
	}

	constituents = buffer + frag_len;
	idx = 0;
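	/*
	 * Append constituents until the fragment buffer fills up; transmit
	 * the full fragment and continue filling from the start of the
	 * buffer for the next one.
	 */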
	do {
		if (frag_len == max_fragsize) {
			rc = ffa_transmit_fragment(func_id, addr, buf_sz,
						   frag_len, length,
						   &args->g_handle, first);
			if (rc < 0)
				return -ENXIO;

			first = false;
			idx = 0;
			frag_len = 0;
			constituents = buffer;
		}

		if ((void *)constituents - buffer > max_fragsize) {
			pr_err("Memory Region Fragment > Tx Buffer size\n");
			return -EFAULT;
		}

		constituents->address = sg_phys(args->sg);
		constituents->pg_cnt = args->sg->length / FFA_PAGE_SIZE;
		constituents->reserved = 0;
		constituents++;
		frag_len += sizeof(struct ffa_mem_region_addr_range);
	} while ((args->sg = sg_next(args->sg)));

	return ffa_transmit_fragment(func_id, addr, buf_sz, frag_len,
				     length, &args->g_handle, first);
}

static int ffa_memory_ops(u32 func_id, struct ffa_mem_ops_args *args)
{
	int ret;
	void *buffer;

	if (!args->use_txbuf) {
		buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL);
		if (!buffer)
			return -ENOMEM;
	} else {
		buffer = drv_info->tx_buffer;
		mutex_lock(&drv_info->tx_lock);
	}

	ret = ffa_setup_and_transmit(func_id, buffer, RXTX_BUFFER_SIZE, args);

	if (args->use_txbuf)
		mutex_unlock(&drv_info->tx_lock);
	else
		free_pages_exact(buffer, RXTX_BUFFER_SIZE);

	return ret < 0 ? ret : 0;
}

static int ffa_memory_reclaim(u64 g_handle, u32 flags)
{
	ffa_value_t ret;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_MEM_RECLAIM,
		      .a1 = HANDLE_LOW(g_handle), .a2 = HANDLE_HIGH(g_handle),
		      .a3 = flags,
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	return 0;
}

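/*
 * input_props is only meaningful when querying a function ID (an SMCCC
 * fast-call ID); for feature IDs it must be zero.
 */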
static int ffa_features(u32 func_feat_id, u32 input_props,
			u32 *if_props_1, u32 *if_props_2)
{
	ffa_value_t id;

	if (!ARM_SMCCC_IS_FAST_CALL(func_feat_id) && input_props) {
		pr_err("%s: Invalid Parameters: %x, %x", __func__,
		       func_feat_id, input_props);
		return ffa_to_linux_errno(FFA_RET_INVALID_PARAMETERS);
	}

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_FEATURES, .a1 = func_feat_id, .a2 = input_props,
		      }, &id);

	if (id.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)id.a2);

	if (if_props_1)
		*if_props_1 = id.a2;
	if (if_props_2)
		*if_props_2 = id.a3;

	return 0;
}

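/*
 * The bitmap create ABI takes the receiver's vCPU count; use nr_cpu_ids to
 * cover all possible vCPUs of this VM.
 */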
static int ffa_notification_bitmap_create(void)
{
	ffa_value_t ret;
	u16 vcpu_count = nr_cpu_ids;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_NOTIFICATION_BITMAP_CREATE,
		      .a1 = drv_info->vm_id, .a2 = vcpu_count,
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	return 0;
}

static int ffa_notification_bitmap_destroy(void)
{
	ffa_value_t ret;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_NOTIFICATION_BITMAP_DESTROY,
		      .a1 = drv_info->vm_id,
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	return 0;
}

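/*
 * Notification bitmaps are 64 bits wide but the ABIs pass them as two
 * 32-bit register halves; the macros below split and reassemble them.
 */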
#define NOTIFICATION_LOW_MASK		GENMASK(31, 0)
#define NOTIFICATION_HIGH_MASK		GENMASK(63, 32)
#define NOTIFICATION_BITMAP_HIGH(x)	\
		((u32)(FIELD_GET(NOTIFICATION_HIGH_MASK, (x))))
#define NOTIFICATION_BITMAP_LOW(x)	\
		((u32)(FIELD_GET(NOTIFICATION_LOW_MASK, (x))))
#define PACK_NOTIFICATION_BITMAP(low, high)	\
	(FIELD_PREP(NOTIFICATION_LOW_MASK, (low)) |	\
	 FIELD_PREP(NOTIFICATION_HIGH_MASK, (high)))

#define RECEIVER_VCPU_MASK		GENMASK(31, 16)
#define PACK_NOTIFICATION_GET_RECEIVER_INFO(vcpu_r, r)	\
	(FIELD_PREP(RECEIVER_VCPU_MASK, (vcpu_r)) |	\
	 FIELD_PREP(RECEIVER_ID_MASK, (r)))

#define NOTIFICATION_INFO_GET_MORE_PEND_MASK	BIT(0)
#define NOTIFICATION_INFO_GET_ID_COUNT		GENMASK(11, 7)
#define ID_LIST_MASK_64				GENMASK(51, 12)
#define ID_LIST_MASK_32				GENMASK(31, 12)
#define MAX_IDS_64				20
#define MAX_IDS_32				10

#define PER_VCPU_NOTIFICATION_FLAG		BIT(0)
#define SECURE_PARTITION_BITMAP			BIT(0)
#define NON_SECURE_VM_BITMAP			BIT(1)
#define SPM_FRAMEWORK_BITMAP			BIT(2)
#define NS_HYP_FRAMEWORK_BITMAP			BIT(3)

static int ffa_notification_bind_common(u16 dst_id, u64 bitmap,
					u32 flags, bool is_bind)
{
	ffa_value_t ret;
	u32 func, src_dst_ids = PACK_TARGET_INFO(dst_id, drv_info->vm_id);

	func = is_bind ? FFA_NOTIFICATION_BIND : FFA_NOTIFICATION_UNBIND;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = func, .a1 = src_dst_ids, .a2 = flags,
		      .a3 = NOTIFICATION_BITMAP_LOW(bitmap),
		      .a4 = NOTIFICATION_BITMAP_HIGH(bitmap),
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);
	else if (ret.a0 != FFA_SUCCESS)
		return -EINVAL;

	return 0;
}

static
int ffa_notification_set(u16 src_id, u16 dst_id, u32 flags, u64 bitmap)
{
	ffa_value_t ret;
	u32 src_dst_ids = PACK_TARGET_INFO(dst_id, src_id);

	invoke_ffa_fn((ffa_value_t) {
		      .a0 = FFA_NOTIFICATION_SET, .a1 = src_dst_ids, .a2 = flags,
		      .a3 = NOTIFICATION_BITMAP_LOW(bitmap),
		      .a4 = NOTIFICATION_BITMAP_HIGH(bitmap),
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);
	else if (ret.a0 != FFA_SUCCESS)
		return -EINVAL;

	return 0;
}

struct ffa_notify_bitmaps {
	u64 sp_map;
	u64 vm_map;
	u64 arch_map;
};

static int ffa_notification_get(u32 flags, struct ffa_notify_bitmaps *notify)
{
	ffa_value_t ret;
	u16 src_id = drv_info->vm_id;
	u16 cpu_id = smp_processor_id();
	u32 rec_vcpu_ids = PACK_NOTIFICATION_GET_RECEIVER_INFO(cpu_id, src_id);

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_NOTIFICATION_GET, .a1 = rec_vcpu_ids, .a2 = flags,
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);
	else if (ret.a0 != FFA_SUCCESS)
		return -EINVAL; /* Something else went wrong. */

	notify->sp_map = PACK_NOTIFICATION_BITMAP(ret.a2, ret.a3);
	notify->vm_map = PACK_NOTIFICATION_BITMAP(ret.a4, ret.a5);
	notify->arch_map = PACK_NOTIFICATION_BITMAP(ret.a6, ret.a7);

	return 0;
}

struct ffa_dev_part_info {
	ffa_sched_recv_cb callback;
	void *cb_data;
	rwlock_t rw_lock;
};

static void __do_sched_recv_cb(u16 part_id, u16 vcpu, bool is_per_vcpu)
{
	struct ffa_dev_part_info *partition;
	ffa_sched_recv_cb callback;
	void *cb_data;

	partition = xa_load(&drv_info->partition_info, part_id);
	if (!partition) {
		pr_err("%s: Invalid partition ID 0x%x\n", __func__, part_id);
		return;
	}

	read_lock(&partition->rw_lock);
	callback = partition->callback;
	cb_data = partition->cb_data;
	read_unlock(&partition->rw_lock);

	if (callback)
		callback(vcpu, is_per_vcpu, cb_data);
}

static void ffa_notification_info_get(void)
{
	int idx, list, max_ids, lists_cnt, ids_processed, ids_count[MAX_IDS_64];
	bool is_64b_resp;
	ffa_value_t ret;
	u64 id_list;

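	/*
	 * Each ID list in the response begins with a partition ID; lists
	 * with more than one entry carry the vCPU IDs that have pending
	 * per-vCPU notifications for that partition.
	 */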
	do {
		invoke_ffa_fn((ffa_value_t){
			      .a0 = FFA_FN_NATIVE(NOTIFICATION_INFO_GET),
			      }, &ret);

		if (ret.a0 != FFA_FN_NATIVE(SUCCESS) && ret.a0 != FFA_SUCCESS) {
			if (ret.a2 != FFA_RET_NO_DATA)
				pr_err("Notification Info fetch failed: 0x%lx (0x%lx)",
				       ret.a0, ret.a2);
			return;
		}

		is_64b_resp = (ret.a0 == FFA_FN64_SUCCESS);

		ids_processed = 0;
		lists_cnt = FIELD_GET(NOTIFICATION_INFO_GET_ID_COUNT, ret.a2);
		if (is_64b_resp) {
			max_ids = MAX_IDS_64;
			id_list = FIELD_GET(ID_LIST_MASK_64, ret.a2);
		} else {
			max_ids = MAX_IDS_32;
			id_list = FIELD_GET(ID_LIST_MASK_32, ret.a2);
		}

		for (idx = 0; idx < lists_cnt; idx++, id_list >>= 2)
			ids_count[idx] = (id_list & 0x3) + 1;

		/* Process IDs */
		for (list = 0; list < lists_cnt; list++) {
			u16 vcpu_id, part_id, *packed_id_list = (u16 *)&ret.a3;

			if (ids_processed >= max_ids - 1)
				break;

			part_id = packed_id_list[ids_processed++];

			if (ids_count[list] == 1) { /* Global Notification */
				__do_sched_recv_cb(part_id, 0, false);
				continue;
			}

			/* Per vCPU Notification */
			for (idx = 0; idx < ids_count[list]; idx++) {
				if (ids_processed >= max_ids - 1)
					break;

				vcpu_id = packed_id_list[ids_processed++];

				__do_sched_recv_cb(part_id, vcpu_id, true);
			}
		}
	} while (ret.a2 & NOTIFICATION_INFO_GET_MORE_PEND_MASK);
}

static int ffa_run(struct ffa_device *dev, u16 vcpu)
{
	ffa_value_t ret;
	u32 target = dev->vm_id << 16 | vcpu;

	invoke_ffa_fn((ffa_value_t){ .a0 = FFA_RUN, .a1 = target, }, &ret);

	while (ret.a0 == FFA_INTERRUPT)
		invoke_ffa_fn((ffa_value_t){ .a0 = FFA_RUN, .a1 = ret.a1, },
			      &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	return 0;
}

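/*
 * Probe FFA_FEATURES for the native (64-bit) memory ABIs; prefer them over
 * the 32-bit variants when either MEM_LEND or MEM_SHARE reports support.
 */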
static void ffa_set_up_mem_ops_native_flag(void)
{
	if (!ffa_features(FFA_FN_NATIVE(MEM_LEND), 0, NULL, NULL) ||
	    !ffa_features(FFA_FN_NATIVE(MEM_SHARE), 0, NULL, NULL))
		drv_info->mem_ops_native = true;
}

static u32 ffa_api_version_get(void)
{
	return drv_info->version;
}

static int ffa_partition_info_get(const char *uuid_str,
				  struct ffa_partition_info *buffer)
{
	int count;
	uuid_t uuid;
	struct ffa_partition_info *pbuf;

	if (uuid_parse(uuid_str, &uuid)) {
		pr_err("invalid uuid (%s)\n", uuid_str);
		return -ENODEV;
	}

	count = ffa_partition_probe(&uuid, &pbuf);
	if (count <= 0)
		return -ENOENT;

	memcpy(buffer, pbuf, sizeof(*pbuf) * count);
	kfree(pbuf);
	return 0;
}

static void ffa_mode_32bit_set(struct ffa_device *dev)
{
	dev->mode_32bit = true;
}

static int ffa_sync_send_receive(struct ffa_device *dev,
				 struct ffa_send_direct_data *data)
{
	return ffa_msg_send_direct_req(drv_info->vm_id, dev->vm_id,
				       dev->mode_32bit, data);
}

static int ffa_memory_share(struct ffa_mem_ops_args *args)
{
	if (drv_info->mem_ops_native)
		return ffa_memory_ops(FFA_FN_NATIVE(MEM_SHARE), args);

	return ffa_memory_ops(FFA_MEM_SHARE, args);
}

static int ffa_memory_lend(struct ffa_mem_ops_args *args)
{
	/* Note that upon a successful MEM_LEND request the caller
	 * must ensure that the memory region specified is not accessed
	 * until a successful MEM_RECLAIM call has been made.
	 * On systems with a hypervisor present this will be enforced,
	 * however on systems without a hypervisor the responsibility
	 * falls to the calling kernel driver to prevent access.
	 */
	if (drv_info->mem_ops_native)
		return ffa_memory_ops(FFA_FN_NATIVE(MEM_LEND), args);

	return ffa_memory_ops(FFA_MEM_LEND, args);
}

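/* Bit 15 of an FF-A partition ID is set for secure partitions (SPs) */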
#define FFA_SECURE_PARTITION_ID_FLAG	BIT(15)

#define ffa_notifications_disabled()	(!drv_info->notif_enabled)

enum notify_type {
	NON_SECURE_VM,
	SECURE_PARTITION,
	FRAMEWORK,
};

struct notifier_cb_info {
	struct hlist_node hnode;
	ffa_notifier_cb cb;
	void *cb_data;
	enum notify_type type;
};

static int ffa_sched_recv_cb_update(u16 part_id, ffa_sched_recv_cb callback,
				    void *cb_data, bool is_registration)
{
	struct ffa_dev_part_info *partition;
	bool cb_valid;

	if (ffa_notifications_disabled())
		return -EOPNOTSUPP;

	partition = xa_load(&drv_info->partition_info, part_id);
	if (!partition) {
		pr_err("%s: Invalid partition ID 0x%x\n", __func__, part_id);
		return -EINVAL;
	}

	write_lock(&partition->rw_lock);

	cb_valid = !!partition->callback;
	if (!(is_registration ^ cb_valid)) {
		write_unlock(&partition->rw_lock);
		return -EINVAL;
	}

	partition->callback = callback;
	partition->cb_data = cb_data;

	write_unlock(&partition->rw_lock);
	return 0;
}

static int ffa_sched_recv_cb_register(struct ffa_device *dev,
				      ffa_sched_recv_cb cb, void *cb_data)
{
	return ffa_sched_recv_cb_update(dev->vm_id, cb, cb_data, true);
}

static int ffa_sched_recv_cb_unregister(struct ffa_device *dev)
{
	return ffa_sched_recv_cb_update(dev->vm_id, NULL, NULL, false);
}

static int ffa_notification_bind(u16 dst_id, u64 bitmap, u32 flags)
{
	return ffa_notification_bind_common(dst_id, bitmap, flags, true);
}

static int ffa_notification_unbind(u16 dst_id, u64 bitmap)
{
	return ffa_notification_bind_common(dst_id, bitmap, 0, false);
}

/* Should be called while the notify_lock is taken */
static struct notifier_cb_info *
notifier_hash_node_get(u16 notify_id, enum notify_type type)
{
	struct notifier_cb_info *node;

	hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id)
		if (type == node->type)
			return node;

	return NULL;
}

static int
update_notifier_cb(int notify_id, enum notify_type type, ffa_notifier_cb cb,
		   void *cb_data, bool is_registration)
{
	struct notifier_cb_info *cb_info = NULL;
	bool cb_found;

	cb_info = notifier_hash_node_get(notify_id, type);
	cb_found = !!cb_info;

	if (!(is_registration ^ cb_found))
		return -EINVAL;

	if (is_registration) {
		cb_info = kzalloc(sizeof(*cb_info), GFP_KERNEL);
		if (!cb_info)
			return -ENOMEM;

		cb_info->type = type;
		cb_info->cb = cb;
		cb_info->cb_data = cb_data;

		hash_add(drv_info->notifier_hash, &cb_info->hnode, notify_id);
	} else {
		hash_del(&cb_info->hnode);
	}

	return 0;
}

static enum notify_type ffa_notify_type_get(u16 vm_id)
{
	if (vm_id & FFA_SECURE_PARTITION_ID_FLAG)
		return SECURE_PARTITION;
	else
		return NON_SECURE_VM;
}

static int ffa_notify_relinquish(struct ffa_device *dev, int notify_id)
{
	int rc;
	enum notify_type type = ffa_notify_type_get(dev->vm_id);

	if (ffa_notifications_disabled())
		return -EOPNOTSUPP;

	if (notify_id >= FFA_MAX_NOTIFICATIONS)
		return -EINVAL;

	mutex_lock(&drv_info->notify_lock);

	rc = update_notifier_cb(notify_id, type, NULL, NULL, false);
	if (rc) {
		pr_err("Could not unregister notification callback\n");
		mutex_unlock(&drv_info->notify_lock);
		return rc;
	}

	rc = ffa_notification_unbind(dev->vm_id, BIT(notify_id));

	mutex_unlock(&drv_info->notify_lock);

	return rc;
}

static int ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
			      ffa_notifier_cb cb, void *cb_data, int notify_id)
{
	int rc;
	u32 flags = 0;
	enum notify_type type = ffa_notify_type_get(dev->vm_id);

	if (ffa_notifications_disabled())
		return -EOPNOTSUPP;

	if (notify_id >= FFA_MAX_NOTIFICATIONS)
		return -EINVAL;

	mutex_lock(&drv_info->notify_lock);

	if (is_per_vcpu)
		flags = PER_VCPU_NOTIFICATION_FLAG;

	rc = ffa_notification_bind(dev->vm_id, BIT(notify_id), flags);
	if (rc) {
		mutex_unlock(&drv_info->notify_lock);
		return rc;
	}

	rc = update_notifier_cb(notify_id, type, cb, cb_data, true);
	if (rc) {
		pr_err("Failed to register callback for %d - %d\n",
		       notify_id, rc);
		ffa_notification_unbind(dev->vm_id, BIT(notify_id));
	}
	mutex_unlock(&drv_info->notify_lock);

	return rc;
}

static int ffa_notify_send(struct ffa_device *dev, int notify_id,
			   bool is_per_vcpu, u16 vcpu)
{
	u32 flags = 0;

	if (ffa_notifications_disabled())
		return -EOPNOTSUPP;

	if (is_per_vcpu)
		flags |= (PER_VCPU_NOTIFICATION_FLAG | vcpu << 16);

	return ffa_notification_set(dev->vm_id, drv_info->vm_id, flags,
				    BIT(notify_id));
}

static void handle_notif_callbacks(u64 bitmap, enum notify_type type)
{
	int notify_id;
	struct notifier_cb_info *cb_info = NULL;

	for (notify_id = 0; notify_id <= FFA_MAX_NOTIFICATIONS && bitmap;
	     notify_id++, bitmap >>= 1) {
		if (!(bitmap & 1))
			continue;

		mutex_lock(&drv_info->notify_lock);
		cb_info = notifier_hash_node_get(notify_id, type);
		mutex_unlock(&drv_info->notify_lock);

		if (cb_info && cb_info->cb)
			cb_info->cb(notify_id, cb_info->cb_data);
	}
}

static void notif_pcpu_irq_work_fn(struct work_struct *work)
{
	int rc;
	struct ffa_notify_bitmaps bitmaps;

	rc = ffa_notification_get(SECURE_PARTITION_BITMAP |
				  SPM_FRAMEWORK_BITMAP, &bitmaps);
	if (rc) {
		pr_err("Failed to retrieve notifications with %d!\n", rc);
		return;
	}

	handle_notif_callbacks(bitmaps.vm_map, NON_SECURE_VM);
	handle_notif_callbacks(bitmaps.sp_map, SECURE_PARTITION);
	handle_notif_callbacks(bitmaps.arch_map, FRAMEWORK);
}

static void
ffa_self_notif_handle(u16 vcpu, bool is_per_vcpu, void *cb_data)
{
	struct ffa_drv_info *info = cb_data;

	if (!is_per_vcpu)
		notif_pcpu_irq_work_fn(&info->notif_pcpu_work);
	else
		queue_work_on(vcpu, info->notif_pcpu_wq,
			      &info->notif_pcpu_work);
}

static const struct ffa_info_ops ffa_drv_info_ops = {
	.api_version_get = ffa_api_version_get,
	.partition_info_get = ffa_partition_info_get,
};

static const struct ffa_msg_ops ffa_drv_msg_ops = {
	.mode_32bit_set = ffa_mode_32bit_set,
	.sync_send_receive = ffa_sync_send_receive,
};

static const struct ffa_mem_ops ffa_drv_mem_ops = {
	.memory_reclaim = ffa_memory_reclaim,
	.memory_share = ffa_memory_share,
	.memory_lend = ffa_memory_lend,
};

static const struct ffa_cpu_ops ffa_drv_cpu_ops = {
	.run = ffa_run,
};

static const struct ffa_notifier_ops ffa_drv_notifier_ops = {
	.sched_recv_cb_register = ffa_sched_recv_cb_register,
	.sched_recv_cb_unregister = ffa_sched_recv_cb_unregister,
	.notify_request = ffa_notify_request,
	.notify_relinquish = ffa_notify_relinquish,
	.notify_send = ffa_notify_send,
};

static const struct ffa_ops ffa_drv_ops = {
	.info_ops = &ffa_drv_info_ops,
	.msg_ops = &ffa_drv_msg_ops,
	.mem_ops = &ffa_drv_mem_ops,
	.cpu_ops = &ffa_drv_cpu_ops,
	.notifier_ops = &ffa_drv_notifier_ops,
};

void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid)
{
	int count, idx;
	struct ffa_partition_info *pbuf, *tpbuf;

	/*
	 * FF-A v1.1 provides the UUID for each partition as part of the
	 * discovery API. The discovered UUID is then populated in the
	 * device's UUID, so there is no need to copy it from the driver
	 * table.
	 */
	if (drv_info->version > FFA_VERSION_1_0)
		return;

	count = ffa_partition_probe(uuid, &pbuf);
	if (count <= 0)
		return;

	for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++)
		if (tpbuf->id == ffa_dev->vm_id)
			uuid_copy(&ffa_dev->uuid, uuid);
	kfree(pbuf);
}

static int ffa_setup_partitions(void)
{
	int count, idx, ret;
	uuid_t uuid;
	struct ffa_device *ffa_dev;
	struct ffa_dev_part_info *info;
	struct ffa_partition_info *pbuf, *tpbuf;

	count = ffa_partition_probe(&uuid_null, &pbuf);
	if (count <= 0) {
		pr_info("%s: No partitions found, error %d\n", __func__, count);
		return -EINVAL;
	}

	xa_init(&drv_info->partition_info);
	for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++) {
		import_uuid(&uuid, (u8 *)tpbuf->uuid);

		/*
		 * Note that if the UUID is uuid_null, ffa_device_match()
		 * will need to find the UUID of this partition ID with the
		 * help of ffa_device_match_uuid(). FF-A v1.1 and above
		 * provides the UUID here for each partition as part of the
		 * discovery API and the same is passed along.
		 */
		ffa_dev = ffa_device_register(&uuid, tpbuf->id, &ffa_drv_ops);
		if (!ffa_dev) {
			pr_err("%s: failed to register partition ID 0x%x\n",
			       __func__, tpbuf->id);
			continue;
		}

		if (drv_info->version > FFA_VERSION_1_0 &&
		    !(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC))
			ffa_mode_32bit_set(ffa_dev);

		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info) {
			ffa_device_unregister(ffa_dev);
			continue;
		}
		rwlock_init(&info->rw_lock);
		ret = xa_insert(&drv_info->partition_info, tpbuf->id,
				info, GFP_KERNEL);
		if (ret) {
			pr_err("%s: failed to save partition ID 0x%x - ret:%d\n",
			       __func__, tpbuf->id, ret);
			ffa_device_unregister(ffa_dev);
			kfree(info);
		}
	}

	kfree(pbuf);

	/* Allocate for the host */
	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		pr_err("%s: failed to alloc Host partition ID 0x%x. Abort.\n",
		       __func__, drv_info->vm_id);
		/* Already registered devices are freed on bus_exit */
		ffa_partitions_cleanup();
		return -ENOMEM;
	}

	rwlock_init(&info->rw_lock);
	ret = xa_insert(&drv_info->partition_info, drv_info->vm_id,
			info, GFP_KERNEL);
	if (ret) {
		pr_err("%s: failed to save Host partition ID 0x%x - ret:%d. Abort.\n",
		       __func__, drv_info->vm_id, ret);
		kfree(info);
		/* Already registered devices are freed on bus_exit */
		ffa_partitions_cleanup();
	}

	return ret;
}

static void ffa_partitions_cleanup(void)
{
	struct ffa_dev_part_info *info;
	unsigned long idx;

	xa_for_each(&drv_info->partition_info, idx, info) {
		xa_erase(&drv_info->partition_info, idx);
		kfree(info);
	}

	xa_destroy(&drv_info->partition_info);
}

/* FFA FEATURE IDs */
#define FFA_FEAT_NOTIFICATION_PENDING_INT	(1)
#define FFA_FEAT_SCHEDULE_RECEIVER_INT		(2)
#define FFA_FEAT_MANAGED_EXIT_INT		(3)

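/*
 * The schedule receiver interrupt fires per CPU; defer the actual
 * FFA_NOTIFICATION_INFO_GET handling to the workqueue.
 */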
static irqreturn_t irq_handler(int irq, void *irq_data)
{
	struct ffa_pcpu_irq *pcpu = irq_data;
	struct ffa_drv_info *info = pcpu->info;

	queue_work(info->notif_pcpu_wq, &info->irq_work);

	return IRQ_HANDLED;
}

static void ffa_sched_recv_irq_work_fn(struct work_struct *work)
{
	ffa_notification_info_get();
}

static int ffa_sched_recv_irq_map(void)
{
	int ret, irq, sr_intid;

	/* The returned sr_intid is assumed to be an SGI donated to the NS world */
	ret = ffa_features(FFA_FEAT_SCHEDULE_RECEIVER_INT, 0, &sr_intid, NULL);
	if (ret < 0) {
		if (ret != -EOPNOTSUPP)
			pr_err("Failed to retrieve scheduler Rx interrupt\n");
		return ret;
	}

	if (acpi_disabled) {
		struct of_phandle_args oirq = {};
		struct device_node *gic;

		/* Only GICv3 supported currently with the device tree */
		gic = of_find_compatible_node(NULL, NULL, "arm,gic-v3");
		if (!gic)
			return -ENXIO;

		oirq.np = gic;
		oirq.args_count = 1;
		oirq.args[0] = sr_intid;
		irq = irq_create_of_mapping(&oirq);
		of_node_put(gic);
#ifdef CONFIG_ACPI
	} else {
		irq = acpi_register_gsi(NULL, sr_intid, ACPI_EDGE_SENSITIVE,
					ACPI_ACTIVE_HIGH);
#endif
	}

	if (irq <= 0) {
		pr_err("Failed to create IRQ mapping!\n");
		return -ENODATA;
	}

	return irq;
}

static void ffa_sched_recv_irq_unmap(void)
{
	if (drv_info->sched_recv_irq) {
		irq_dispose_mapping(drv_info->sched_recv_irq);
		drv_info->sched_recv_irq = 0;
	}
}

static int ffa_cpuhp_pcpu_irq_enable(unsigned int cpu)
{
	enable_percpu_irq(drv_info->sched_recv_irq, IRQ_TYPE_NONE);
	return 0;
}

static int ffa_cpuhp_pcpu_irq_disable(unsigned int cpu)
{
	disable_percpu_irq(drv_info->sched_recv_irq);
	return 0;
}

static void ffa_uninit_pcpu_irq(void)
{
	if (drv_info->cpuhp_state) {
		cpuhp_remove_state(drv_info->cpuhp_state);
		drv_info->cpuhp_state = 0;
	}

	if (drv_info->notif_pcpu_wq) {
		destroy_workqueue(drv_info->notif_pcpu_wq);
		drv_info->notif_pcpu_wq = NULL;
	}

	if (drv_info->sched_recv_irq)
		free_percpu_irq(drv_info->sched_recv_irq, drv_info->irq_pcpu);

	if (drv_info->irq_pcpu) {
		free_percpu(drv_info->irq_pcpu);
		drv_info->irq_pcpu = NULL;
	}
}

static int ffa_init_pcpu_irq(unsigned int irq)
{
	struct ffa_pcpu_irq __percpu *irq_pcpu;
	int ret, cpu;

	irq_pcpu = alloc_percpu(struct ffa_pcpu_irq);
	if (!irq_pcpu)
		return -ENOMEM;

	for_each_present_cpu(cpu)
		per_cpu_ptr(irq_pcpu, cpu)->info = drv_info;

	drv_info->irq_pcpu = irq_pcpu;

	ret = request_percpu_irq(irq, irq_handler, "ARM-FFA", irq_pcpu);
	if (ret) {
		pr_err("Error registering notification IRQ %d: %d\n", irq, ret);
		return ret;
	}

	INIT_WORK(&drv_info->irq_work, ffa_sched_recv_irq_work_fn);
	INIT_WORK(&drv_info->notif_pcpu_work, notif_pcpu_irq_work_fn);
	drv_info->notif_pcpu_wq = create_workqueue("ffa_pcpu_irq_notification");
	if (!drv_info->notif_pcpu_wq)
		return -EINVAL;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ffa/pcpu-irq:starting",
				ffa_cpuhp_pcpu_irq_enable,
				ffa_cpuhp_pcpu_irq_disable);

	if (ret < 0)
		return ret;

	drv_info->cpuhp_state = ret;
	return 0;
}

static void ffa_notifications_cleanup(void)
{
	ffa_uninit_pcpu_irq();
	ffa_sched_recv_irq_unmap();

	if (drv_info->bitmap_created) {
		ffa_notification_bitmap_destroy();
		drv_info->bitmap_created = false;
	}
	drv_info->notif_enabled = false;
}

static void ffa_notifications_setup(void)
{
	int ret, irq;

	ret = ffa_features(FFA_NOTIFICATION_BITMAP_CREATE, 0, NULL, NULL);
	if (ret) {
1446 | pr_info("Notifications not supported, continuing with it ..\n" ); |
		return;
	}

	ret = ffa_notification_bitmap_create();
	if (ret) {
		pr_info("Notification bitmap create error %d\n", ret);
		return;
	}
	drv_info->bitmap_created = true;

	irq = ffa_sched_recv_irq_map();
	if (irq <= 0) {
		ret = irq;
		goto cleanup;
	}

	drv_info->sched_recv_irq = irq;

	ret = ffa_init_pcpu_irq(irq);
	if (ret)
		goto cleanup;

	hash_init(drv_info->notifier_hash);
	mutex_init(&drv_info->notify_lock);

	drv_info->notif_enabled = true;
	return;
cleanup:
	pr_info("Notification setup failed %d, not enabled\n", ret);
	ffa_notifications_cleanup();
}

static int __init ffa_init(void)
{
	int ret;

	ret = ffa_transport_init(&invoke_ffa_fn);
	if (ret)
		return ret;

	ret = arm_ffa_bus_init();
	if (ret)
		return ret;

	drv_info = kzalloc(sizeof(*drv_info), GFP_KERNEL);
	if (!drv_info) {
		ret = -ENOMEM;
		goto ffa_bus_exit;
	}

	ret = ffa_version_check(&drv_info->version);
	if (ret)
		goto free_drv_info;

	if (ffa_id_get(&drv_info->vm_id)) {
		pr_err("failed to obtain VM id for self\n");
		ret = -ENODEV;
		goto free_drv_info;
	}

	drv_info->rx_buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL);
	if (!drv_info->rx_buffer) {
		ret = -ENOMEM;
		goto free_pages;
	}

	drv_info->tx_buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL);
	if (!drv_info->tx_buffer) {
		ret = -ENOMEM;
		goto free_pages;
	}

	ret = ffa_rxtx_map(virt_to_phys(drv_info->tx_buffer),
			   virt_to_phys(drv_info->rx_buffer),
			   RXTX_BUFFER_SIZE / FFA_PAGE_SIZE);
	if (ret) {
		pr_err("failed to register FFA RxTx buffers\n");
		goto free_pages;
	}

	mutex_init(&drv_info->rx_lock);
	mutex_init(&drv_info->tx_lock);

	ffa_set_up_mem_ops_native_flag();

	ffa_notifications_setup();

	ret = ffa_setup_partitions();
	if (ret) {
		pr_err("failed to setup partitions\n");
		goto cleanup_notifs;
	}

	ret = ffa_sched_recv_cb_update(drv_info->vm_id, ffa_self_notif_handle,
				       drv_info, true);
	if (ret)
		pr_info("Failed to register driver sched callback %d\n", ret);

	return 0;

cleanup_notifs:
	ffa_notifications_cleanup();
free_pages:
	if (drv_info->tx_buffer)
		free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE);
	free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE);
free_drv_info:
	kfree(drv_info);
ffa_bus_exit:
	arm_ffa_bus_exit();
	return ret;
}
subsys_initcall(ffa_init);

static void __exit ffa_exit(void)
{
	ffa_notifications_cleanup();
	ffa_partitions_cleanup();
	ffa_rxtx_unmap(drv_info->vm_id);
	free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE);
	free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE);
	kfree(drv_info);
	arm_ffa_bus_exit();
}
module_exit(ffa_exit);

MODULE_ALIAS("arm-ffa");
MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("Arm FF-A interface driver");
MODULE_LICENSE("GPL v2");