// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SPU file system -- file contents
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 */

#undef DEBUG

#include <linux/coredump.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

#include <asm/io.h>
#include <asm/time.h>
#include <asm/spu.h>
#include <asm/spu_info.h>
#include <linux/uaccess.h>

#include "spufs.h"
#include "sputrace.h"

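/*
 * Several problem-state segments are only mappable as separate 4k
 * files when the kernel itself uses 4k pages; with 64k pages a single
 * mapping covers the whole area and the per-segment mmap handlers
 * compile out (see the SPUFS_MMAP_4K blocks below).
 */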
#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)

/* Simple attribute files */
struct spufs_attr {
	int (*get)(void *, u64 *);
	int (*set)(void *, u64);
	char get_buf[24];	/* enough to store a u64 and "\n\0" */
	char set_buf[24];
	void *data;
	const char *fmt;	/* format for read operation */
	struct mutex mutex;	/* protects access to these buffers */
};

static int spufs_attr_open(struct inode *inode, struct file *file,
		int (*get)(void *, u64 *), int (*set)(void *, u64),
		const char *fmt)
{
	struct spufs_attr *attr;

	attr = kmalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	attr->get = get;
	attr->set = set;
	attr->data = inode->i_private;
	attr->fmt = fmt;
	mutex_init(&attr->mutex);
	file->private_data = attr;

	return nonseekable_open(inode, file);
}

static int spufs_attr_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static ssize_t spufs_attr_read(struct file *file, char __user *buf,
		size_t len, loff_t *ppos)
{
	struct spufs_attr *attr;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->get)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	if (*ppos) {		/* continued read */
		size = strlen(attr->get_buf);
	} else {		/* first read */
		u64 val;
		ret = attr->get(attr->data, &val);
		if (ret)
			goto out;

		size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
				 attr->fmt, (unsigned long long)val);
	}

	ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}

static ssize_t spufs_attr_write(struct file *file, const char __user *buf,
		size_t len, loff_t *ppos)
{
	struct spufs_attr *attr;
	u64 val;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->set)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	ret = -EFAULT;
	size = min(sizeof(attr->set_buf) - 1, len);
	if (copy_from_user(attr->set_buf, buf, size))
		goto out;

	ret = len; /* claim we got the whole input */
	attr->set_buf[size] = '\0';
	val = simple_strtol(attr->set_buf, NULL, 0);
	attr->set(attr->data, val);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}

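/*
 * dump_emit() reports success as a bool; translate that into the
 * ssize_t convention (bytes emitted or -EIO) that the *_dump
 * callbacks below rely on.
 */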
static ssize_t spufs_dump_emit(struct coredump_params *cprm, void *buf,
		size_t size)
{
	if (!dump_emit(cprm, buf, size))
		return -EIO;
	return size;
}

#define DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt)	\
static int __fops ## _open(struct inode *inode, struct file *file)	\
{									\
	__simple_attr_check_format(__fmt, 0ull);			\
	return spufs_attr_open(inode, file, __get, __set, __fmt);	\
}									\
static const struct file_operations __fops = {				\
	.open	 = __fops ## _open,					\
	.release = spufs_attr_release,					\
	.read	 = spufs_attr_read,					\
	.write	 = spufs_attr_write,					\
	.llseek  = generic_file_llseek,					\
};

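/*
 * Illustrative use of the macro above (a hypothetical attribute; the
 * real users are the DEFINE_SPUFS_ATTRIBUTE() wrappers further down):
 *
 *	DEFINE_SPUFS_SIMPLE_ATTRIBUTE(spufs_foo_ops, spufs_foo_get,
 *				      spufs_foo_set, "0x%llx\n");
 *
 * This expands to an open helper plus a const struct file_operations
 * named spufs_foo_ops wired up to the spufs_attr_* handlers.
 */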

static int
spufs_mem_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->local_store = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static int
spufs_mem_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->local_store = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t
spufs_mem_dump(struct spu_context *ctx, struct coredump_params *cprm)
{
	return spufs_dump_emit(cprm, ctx->ops->get_ls(ctx), LS_SIZE);
}

static ssize_t
spufs_mem_read(struct file *file, char __user *buffer,
				size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ret = simple_read_from_buffer(buffer, size, pos, ctx->ops->get_ls(ctx),
				      LS_SIZE);
	spu_release(ctx);

	return ret;
}

static ssize_t
spufs_mem_write(struct file *file, const char __user *buffer,
					size_t size, loff_t *ppos)
{
	struct spu_context *ctx = file->private_data;
	char *local_store;
	loff_t pos = *ppos;
	int ret;

	if (pos > LS_SIZE)
		return -EFBIG;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	local_store = ctx->ops->get_ls(ctx);
	size = simple_write_to_buffer(local_store, LS_SIZE, ppos, buffer, size);
	spu_release(ctx);

	return size;
}

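/*
 * Fault handler for mappings of local store: while the context is
 * saved we hand out cacheable pages of the vmalloc'ed copy in the
 * CSA; while it is running we map the physical local store
 * write-combining.
 */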
static vm_fault_t
spufs_mem_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long pfn, offset;
	vm_fault_t ret;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= LS_SIZE)
		return VM_FAULT_SIGBUS;

	pr_debug("spufs_mem_mmap_fault address=0x%lx, offset=0x%lx\n",
			vmf->address, offset);

	if (spu_acquire(ctx))
		return VM_FAULT_NOPAGE;

	if (ctx->state == SPU_STATE_SAVED) {
		vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
		pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
	} else {
		vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
		pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
	}
	ret = vmf_insert_pfn(vma, vmf->address, pfn);

	spu_release(ctx);

	return ret;
}

static int spufs_mem_mmap_access(struct vm_area_struct *vma,
				unsigned long address,
				void *buf, int len, int write)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long offset = address - vma->vm_start;
	char *local_store;

	if (write && !(vma->vm_flags & VM_WRITE))
		return -EACCES;
	if (spu_acquire(ctx))
		return -EINTR;
	if ((offset + len) > vma->vm_end)
		len = vma->vm_end - offset;
	local_store = ctx->ops->get_ls(ctx);
	if (write)
		memcpy_toio(local_store + offset, buf, len);
	else
		memcpy_fromio(buf, local_store + offset, len);
	spu_release(ctx);
	return len;
}

static const struct vm_operations_struct spufs_mem_mmap_vmops = {
	.fault = spufs_mem_mmap_fault,
	.access = spufs_mem_mmap_access,
};

static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vm_flags_set(vma, VM_IO | VM_PFNMAP);
	vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);

	vma->vm_ops = &spufs_mem_mmap_vmops;
	return 0;
}

static const struct file_operations spufs_mem_fops = {
	.open	 = spufs_mem_open,
	.release = spufs_mem_release,
	.read	 = spufs_mem_read,
	.write	 = spufs_mem_write,
	.llseek  = generic_file_llseek,
	.mmap	 = spufs_mem_mmap,
};
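
/*
 * A minimal userspace sketch of the mapping this enables (illustrative
 * only; assumes an SPU context directory created with spu_create(2)
 * under a spufs mount at /spu):
 *
 *	int fd = open("/spu/ctx/mem", O_RDWR);
 *	void *ls = mmap(NULL, LS_SIZE, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, 0);
 *
 * MAP_SHARED is required, see spufs_mem_mmap() above.
 */

/*
 * Common fault handler for all mappings of the problem state area
 * (cntl, signal1/2, mss, psmap and mfc below): while the context is
 * saved we sleep until it is loaded again, then insert the PFN of the
 * requested problem-state page.
 */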
static vm_fault_t spufs_ps_fault(struct vm_fault *vmf,
				 unsigned long ps_offs,
				 unsigned long ps_size)
{
	struct spu_context *ctx = vmf->vma->vm_file->private_data;
	unsigned long area, offset = vmf->pgoff << PAGE_SHIFT;
	int err = 0;
	vm_fault_t ret = VM_FAULT_NOPAGE;

	spu_context_nospu_trace(spufs_ps_fault__enter, ctx);

	if (offset >= ps_size)
		return VM_FAULT_SIGBUS;

	if (fatal_signal_pending(current))
		return VM_FAULT_SIGBUS;

	/*
	 * Because we release the mmap_lock, the context may be destroyed while
	 * we're in spu_wait. Grab an extra reference so it isn't destroyed
	 * in the meantime.
	 */
	get_spu_context(ctx);

	/*
	 * We have to wait for context to be loaded before we have
	 * pages to hand out to the user, but we don't want to wait
	 * with the mmap_lock held.
	 * It is possible to drop the mmap_lock here, but then we need
	 * to return VM_FAULT_NOPAGE because the mappings may have
	 * changed.
	 */
	if (spu_acquire(ctx))
		goto refault;

	if (ctx->state == SPU_STATE_SAVED) {
		mmap_read_unlock(current->mm);
		spu_context_nospu_trace(spufs_ps_fault__sleep, ctx);
		err = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
		spu_context_trace(spufs_ps_fault__wake, ctx, ctx->spu);
		mmap_read_lock(current->mm);
	} else {
		area = ctx->spu->problem_phys + ps_offs;
		ret = vmf_insert_pfn(vmf->vma, vmf->address,
				     (area + offset) >> PAGE_SHIFT);
		spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu);
	}

	if (!err)
		spu_release(ctx);

refault:
	put_spu_context(ctx);
	return ret;
}

#if SPUFS_MMAP_4K
static vm_fault_t spufs_cntl_mmap_fault(struct vm_fault *vmf)
{
	return spufs_ps_fault(vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);
}

static const struct vm_operations_struct spufs_cntl_mmap_vmops = {
	.fault = spufs_cntl_mmap_fault,
};

/*
 * mmap support for problem state control area [0x4000 - 0x4fff].
 */
static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vm_flags_set(vma, VM_IO | VM_PFNMAP);
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_cntl_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_cntl_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_cntl_get(void *data, u64 *val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	*val = ctx->ops->status_read(ctx);
	spu_release(ctx);

	return 0;
}

static int spufs_cntl_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->runcntl_write(ctx, val);
	spu_release(ctx);

	return 0;
}

static int spufs_cntl_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->cntl = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return simple_attr_open(inode, file, spufs_cntl_get,
				spufs_cntl_set, "0x%08lx");
}

static int
spufs_cntl_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	simple_attr_release(inode, file);

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->cntl = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_cntl_fops = {
	.open = spufs_cntl_open,
	.release = spufs_cntl_release,
	.read = simple_attr_read,
	.write = simple_attr_write,
	.mmap = spufs_cntl_mmap,
};

static int
spufs_regs_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;
	return 0;
}

static ssize_t
spufs_regs_dump(struct spu_context *ctx, struct coredump_params *cprm)
{
	return spufs_dump_emit(cprm, ctx->csa.lscsa->gprs,
			       sizeof(ctx->csa.lscsa->gprs));
}

static ssize_t
spufs_regs_read(struct file *file, char __user *buffer,
		size_t size, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	/* pre-check for file position: if we'd return EOF, there's no point
	 * causing a deschedule */
	if (*pos >= sizeof(ctx->csa.lscsa->gprs))
		return 0;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = simple_read_from_buffer(buffer, size, pos, ctx->csa.lscsa->gprs,
				      sizeof(ctx->csa.lscsa->gprs));
	spu_release_saved(ctx);
	return ret;
}

static ssize_t
spufs_regs_write(struct file *file, const char __user *buffer,
		 size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	if (*pos >= sizeof(lscsa->gprs))
		return -EFBIG;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;

	size = simple_write_to_buffer(lscsa->gprs, sizeof(lscsa->gprs), pos,
				      buffer, size);

	spu_release_saved(ctx);
	return size;
}

static const struct file_operations spufs_regs_fops = {
	.open	 = spufs_regs_open,
	.read	 = spufs_regs_read,
	.write	 = spufs_regs_write,
	.llseek  = generic_file_llseek,
};

static ssize_t
spufs_fpcr_dump(struct spu_context *ctx, struct coredump_params *cprm)
{
	return spufs_dump_emit(cprm, &ctx->csa.lscsa->fpcr,
			       sizeof(ctx->csa.lscsa->fpcr));
}

static ssize_t
spufs_fpcr_read(struct file *file, char __user * buffer,
		size_t size, loff_t * pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = simple_read_from_buffer(buffer, size, pos, &ctx->csa.lscsa->fpcr,
				      sizeof(ctx->csa.lscsa->fpcr));
	spu_release_saved(ctx);
	return ret;
}

static ssize_t
spufs_fpcr_write(struct file *file, const char __user * buffer,
		 size_t size, loff_t * pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	if (*pos >= sizeof(lscsa->fpcr))
		return -EFBIG;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;

	size = simple_write_to_buffer(&lscsa->fpcr, sizeof(lscsa->fpcr), pos,
				      buffer, size);

	spu_release_saved(ctx);
	return size;
}

static const struct file_operations spufs_fpcr_fops = {
	.open = spufs_regs_open,
	.read = spufs_fpcr_read,
	.write = spufs_fpcr_write,
	.llseek = generic_file_llseek,
};

/* generic open function for all pipe-like files */
static int spufs_pipe_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;

	return stream_open(inode, file);
}

/*
 * Read as many bytes from the mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 */
static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_data, __user *udata = (void __user *)buf;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	count = spu_acquire(ctx);
	if (count)
		return count;

	for (count = 0; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->mbox_read(ctx, &mbox_data);
		if (ret == 0)
			break;

		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = put_user(mbox_data, udata);
		if (ret) {
			if (!count)
				count = -EFAULT;
			break;
		}
	}
	spu_release(ctx);

	if (!count)
		count = -EAGAIN;

	return count;
}

static const struct file_operations spufs_mbox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_read,
};
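
/*
 * Note that reads from the mbox file never sleep: when the mailbox is
 * empty the read returns -EAGAIN regardless of O_NONBLOCK, unlike the
 * ibox below, which can wait for data to arrive.
 */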

static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 mbox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;

	spu_release(ctx);

	if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_mbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_stat_read,
};

/* low-level ibox access function */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
{
	return ctx->ops->ibox_read(ctx, data);
}

/* interrupt-level ibox callback function. */
void spufs_ibox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (ctx)
		wake_up_all(&ctx->ibox_wq);
}

/*
 * Read as many bytes from the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * any data is available, but return when we have been able to
 * read something.
 */
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_data, __user *udata = (void __user *)buf;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	count = spu_acquire(ctx);
	if (count)
		goto out;

	/* wait only for the first element */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_ibox_read(ctx, &ibox_data)) {
			count = -EAGAIN;
			goto out_unlock;
		}
	} else {
		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
		if (count)
			goto out;
	}

	/* if we can't write at all, return -EFAULT */
	count = put_user(ibox_data, udata);
	if (count)
		goto out_unlock;

	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->ibox_read(ctx, &ibox_data);
		if (ret == 0)
			break;
		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = put_user(ibox_data, udata);
		if (ret)
			break;
	}

out_unlock:
	spu_release(ctx);
out:
	return count;
}

static __poll_t spufs_ibox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	__poll_t mask;

	poll_wait(file, &ctx->ibox_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	mask = ctx->ops->mbox_stat_poll(ctx, EPOLLIN | EPOLLRDNORM);
	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_ibox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_read,
	.poll	= spufs_ibox_poll,
};

static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 ibox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_ibox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_stat_read,
};

/* low-level mailbox write */
size_t spu_wbox_write(struct spu_context *ctx, u32 data)
{
	return ctx->ops->wbox_write(ctx, data);
}

/* interrupt-level wbox callback function. */
void spufs_wbox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (ctx)
		wake_up_all(&ctx->wbox_wq);
}

/*
 * Write as many bytes to the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - the mailbox is full
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * space is available, but return when we have been able to
 * write something.
 */
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_data, __user *udata = (void __user *)buf;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (get_user(wbox_data, udata))
		return -EFAULT;

	count = spu_acquire(ctx);
	if (count)
		goto out;

	/*
	 * make sure we can at least write one element, by waiting
	 * in case of !O_NONBLOCK
	 */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_wbox_write(ctx, wbox_data)) {
			count = -EAGAIN;
			goto out_unlock;
		}
	} else {
		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
		if (count)
			goto out;
	}

	/* write as much as possible */
	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = get_user(wbox_data, udata);
		if (ret)
			break;

		ret = spu_wbox_write(ctx, wbox_data);
		if (ret == 0)
			break;
	}

out_unlock:
	spu_release(ctx);
out:
	return count;
}

static __poll_t spufs_wbox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	__poll_t mask;

	poll_wait(file, &ctx->wbox_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	mask = ctx->ops->mbox_stat_poll(ctx, EPOLLOUT | EPOLLWRNORM);
	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_wbox_fops = {
	.open	= spufs_pipe_open,
	.write	= spufs_wbox_write,
	.poll	= spufs_wbox_poll,
};
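
/*
 * As with the ibox read above, a wbox write without O_NONBLOCK sleeps
 * until at least one element could be queued, then stuffs in as many
 * further elements as fit without sleeping again.
 */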

static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 wbox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_wbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_wbox_stat_read,
};

static int spufs_signal1_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal1 = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_signal1_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal1 = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t spufs_signal1_dump(struct spu_context *ctx,
		struct coredump_params *cprm)
{
	if (!ctx->csa.spu_chnlcnt_RW[3])
		return 0;
	return spufs_dump_emit(cprm, &ctx->csa.spu_chnldata_RW[3],
			       sizeof(ctx->csa.spu_chnldata_RW[3]));
}

static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
			size_t len)
{
	if (len < sizeof(ctx->csa.spu_chnldata_RW[3]))
		return -EINVAL;
	if (!ctx->csa.spu_chnlcnt_RW[3])
		return 0;
	if (copy_to_user(buf, &ctx->csa.spu_chnldata_RW[3],
			 sizeof(ctx->csa.spu_chnldata_RW[3])))
		return -EFAULT;
	return sizeof(ctx->csa.spu_chnldata_RW[3]);
}

static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_signal1_read(ctx, buf, len);
	spu_release_saved(ctx);

	return ret;
}

static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	ssize_t ret;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal1_write(ctx, data);
	spu_release(ctx);

	return 4;
}

static vm_fault_t
spufs_signal1_mmap_fault(struct vm_fault *vmf)
{
#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
	return spufs_ps_fault(vmf, 0x14000, SPUFS_SIGNAL_MAP_SIZE);
#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_fault(vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
#else
#error unsupported page size
#endif
}

static const struct vm_operations_struct spufs_signal1_mmap_vmops = {
	.fault = spufs_signal1_mmap_fault,
};

static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vm_flags_set(vma, VM_IO | VM_PFNMAP);
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_signal1_mmap_vmops;
	return 0;
}

static const struct file_operations spufs_signal1_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.read = spufs_signal1_read,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
};

static const struct file_operations spufs_signal1_nosched_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
};

static int spufs_signal2_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal2 = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_signal2_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal2 = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t spufs_signal2_dump(struct spu_context *ctx,
		struct coredump_params *cprm)
{
	if (!ctx->csa.spu_chnlcnt_RW[4])
		return 0;
	return spufs_dump_emit(cprm, &ctx->csa.spu_chnldata_RW[4],
			       sizeof(ctx->csa.spu_chnldata_RW[4]));
}

static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
			size_t len)
{
	if (len < sizeof(ctx->csa.spu_chnldata_RW[4]))
		return -EINVAL;
	if (!ctx->csa.spu_chnlcnt_RW[4])
		return 0;
	if (copy_to_user(buf, &ctx->csa.spu_chnldata_RW[4],
			 sizeof(ctx->csa.spu_chnldata_RW[4])))
		return -EFAULT;
	return sizeof(ctx->csa.spu_chnldata_RW[4]);
}

static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_signal2_read(ctx, buf, len);
	spu_release_saved(ctx);

	return ret;
}

static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	ssize_t ret;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal2_write(ctx, data);
	spu_release(ctx);

	return 4;
}

#if SPUFS_MMAP_4K
static vm_fault_t
spufs_signal2_mmap_fault(struct vm_fault *vmf)
{
#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
	return spufs_ps_fault(vmf, 0x1c000, SPUFS_SIGNAL_MAP_SIZE);
#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_fault(vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
#else
#error unsupported page size
#endif
}

static const struct vm_operations_struct spufs_signal2_mmap_vmops = {
	.fault = spufs_signal2_mmap_fault,
};

static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vm_flags_set(vma, VM_IO | VM_PFNMAP);
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_signal2_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_signal2_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static const struct file_operations spufs_signal2_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.read = spufs_signal2_read,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
};

static const struct file_operations spufs_signal2_nosched_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
};

/*
 * This is a wrapper around DEFINE_SIMPLE_ATTRIBUTE which does the
 * work of acquiring (or not) the SPU context before calling through
 * to the actual get routine. The set routine is called directly.
 */
#define SPU_ATTR_NOACQUIRE	0
#define SPU_ATTR_ACQUIRE	1
#define SPU_ATTR_ACQUIRE_SAVED	2

#define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire)	\
static int __##__get(void *data, u64 *val)				\
{									\
	struct spu_context *ctx = data;					\
	int ret = 0;							\
									\
	if (__acquire == SPU_ATTR_ACQUIRE) {				\
		ret = spu_acquire(ctx);					\
		if (ret)						\
			return ret;					\
		*val = __get(ctx);					\
		spu_release(ctx);					\
	} else if (__acquire == SPU_ATTR_ACQUIRE_SAVED)	{		\
		ret = spu_acquire_saved(ctx);				\
		if (ret)						\
			return ret;					\
		*val = __get(ctx);					\
		spu_release_saved(ctx);					\
	} else								\
		*val = __get(ctx);					\
									\
	return 0;							\
}									\
DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt);

static int spufs_signal1_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal1_type_set(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_signal1_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal1_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
		       spufs_signal1_type_set, "%llu\n", SPU_ATTR_ACQUIRE);


static int spufs_signal2_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal2_type_set(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_signal2_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal2_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
		       spufs_signal2_type_set, "%llu\n", SPU_ATTR_ACQUIRE);

#if SPUFS_MMAP_4K
static vm_fault_t
spufs_mss_mmap_fault(struct vm_fault *vmf)
{
	return spufs_ps_fault(vmf, 0x0000, SPUFS_MSS_MAP_SIZE);
}

static const struct vm_operations_struct spufs_mss_mmap_vmops = {
	.fault = spufs_mss_mmap_fault,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vm_flags_set(vma, VM_IO | VM_PFNMAP);
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_mss_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mss_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_mss_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	file->private_data = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!i->i_openers++)
		ctx->mss = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_mss_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mss = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_mss_fops = {
	.open	 = spufs_mss_open,
	.release = spufs_mss_release,
	.mmap	 = spufs_mss_mmap,
};

static vm_fault_t
spufs_psmap_mmap_fault(struct vm_fault *vmf)
{
	return spufs_ps_fault(vmf, 0x0000, SPUFS_PS_MAP_SIZE);
}

static const struct vm_operations_struct spufs_psmap_mmap_vmops = {
	.fault = spufs_psmap_mmap_fault,
};

/*
 * mmap support for full problem state area [0x00000 - 0x1ffff].
 */
static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vm_flags_set(vma, VM_IO | VM_PFNMAP);
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_psmap_mmap_vmops;
	return 0;
}

static int spufs_psmap_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = i->i_ctx;
	if (!i->i_openers++)
		ctx->psmap = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_psmap_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->psmap = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_psmap_fops = {
	.open	 = spufs_psmap_open,
	.release = spufs_psmap_release,
	.mmap	 = spufs_psmap_mmap,
};


#if SPUFS_MMAP_4K
static vm_fault_t
spufs_mfc_mmap_fault(struct vm_fault *vmf)
{
	return spufs_ps_fault(vmf, 0x3000, SPUFS_MFC_MAP_SIZE);
}

static const struct vm_operations_struct spufs_mfc_mmap_vmops = {
	.fault = spufs_mfc_mmap_fault,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vm_flags_set(vma, VM_IO | VM_PFNMAP);
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_mfc_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mfc_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_mfc_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	/* we don't want to deal with DMA into other processes */
	if (ctx->owner != current->mm)
		return -EINVAL;

	if (atomic_read(&inode->i_count) != 1)
		return -EBUSY;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->mfc = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_mfc_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mfc = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

/* interrupt-level mfc callback function. */
void spufs_mfc_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (ctx)
		wake_up_all(&ctx->mfc_wq);
}

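/*
 * Check whether one of the tag groups we are waiting for has
 * completed: returns 1 with the completed groups in *status, or 0
 * after (re-)arming the completion interrupt.  This is the condition
 * polled by spufs_wait() in spufs_mfc_read() below.
 */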
static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
{
	/* see if at least one tag group has completed */
	/* FIXME we need locking around tagwait */
	*status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
	ctx->tagwait &= ~*status;
	if (*status)
		return 1;

	/* enable interrupt waiting for any tag group,
	   may silently fail if interrupts are already enabled */
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
	return 0;
}

static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret = -EINVAL;
	u32 status;

	if (size != 4)
		goto out;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (file->f_flags & O_NONBLOCK) {
		status = ctx->ops->read_mfc_tagstatus(ctx);
		if (!(status & ctx->tagwait))
			ret = -EAGAIN;
		else
			/* XXX(hch): shouldn't we clear ret here? */
			ctx->tagwait &= ~status;
	} else {
		ret = spufs_wait(ctx->mfc_wq,
			   spufs_read_mfc_tagstatus(ctx, &status));
		if (ret)
			goto out;
	}
	spu_release(ctx);

	ret = 4;
	if (copy_to_user(buffer, &status, 4))
		ret = -EFAULT;

out:
	return ret;
}

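/*
 * Sanity-check a DMA command before it is queued: only the plain
 * put/get variants are accepted, LSA and EA must be equally aligned,
 * the size must be a naturally aligned 1/2/4/8 byte or multiple-of-16
 * transfer of at most 16k, and only tags 0..15 and class 0 are
 * available to userspace.
 */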
static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
{
	pr_debug("queueing DMA %x %llx %x %x %x\n", cmd->lsa,
		 cmd->ea, cmd->size, cmd->tag, cmd->cmd);

	switch (cmd->cmd) {
	case MFC_PUT_CMD:
	case MFC_PUTF_CMD:
	case MFC_PUTB_CMD:
	case MFC_GET_CMD:
	case MFC_GETF_CMD:
	case MFC_GETB_CMD:
		break;
	default:
		pr_debug("invalid DMA opcode %x\n", cmd->cmd);
		return -EIO;
	}

	if ((cmd->lsa & 0xf) != (cmd->ea & 0xf)) {
		pr_debug("invalid DMA alignment, ea %llx lsa %x\n",
				cmd->ea, cmd->lsa);
		return -EIO;
	}

	switch (cmd->size & 0xf) {
	case 1:
		break;
	case 2:
		if (cmd->lsa & 1)
			goto error;
		break;
	case 4:
		if (cmd->lsa & 3)
			goto error;
		break;
	case 8:
		if (cmd->lsa & 7)
			goto error;
		break;
	case 0:
		if (cmd->lsa & 15)
			goto error;
		break;
	error:
	default:
		pr_debug("invalid DMA alignment %x for size %x\n",
			cmd->lsa & 0xf, cmd->size);
		return -EIO;
	}

	if (cmd->size > 16 * 1024) {
		pr_debug("invalid DMA size %x\n", cmd->size);
		return -EIO;
	}

	if (cmd->tag & 0xfff0) {
		/* we reserve the higher tag numbers for kernel use */
		pr_debug("invalid DMA tag\n");
		return -EIO;
	}

	if (cmd->class) {
		/* not supported in this version */
		pr_debug("invalid DMA class\n");
		return -EIO;
	}

	return 0;
}

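/*
 * Helper for the spufs_wait() loop in spufs_mfc_write(): returns 1
 * once the command has been submitted (or failed for a reason other
 * than a full queue), 0 to keep waiting; the submission status is
 * passed back through *error.
 */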
static int spu_send_mfc_command(struct spu_context *ctx,
				struct mfc_dma_command cmd,
				int *error)
{
	*error = ctx->ops->send_mfc_command(ctx, &cmd);
	if (*error == -EAGAIN) {
		/* wait for any tag group to complete
		   so we have space for the new command */
		ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
		/* try again, because the queue might be
		   empty again */
		*error = ctx->ops->send_mfc_command(ctx, &cmd);
		if (*error == -EAGAIN)
			return 0;
	}
	return 1;
}

static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct mfc_dma_command cmd;
	int ret = -EINVAL;

	if (size != sizeof cmd)
		goto out;

	ret = -EFAULT;
	if (copy_from_user(&cmd, buffer, sizeof cmd))
		goto out;

	ret = spufs_check_valid_dma(&cmd);
	if (ret)
		goto out;

	ret = spu_acquire(ctx);
	if (ret)
		goto out;

	ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
	if (ret)
		goto out;

	if (file->f_flags & O_NONBLOCK) {
		ret = ctx->ops->send_mfc_command(ctx, &cmd);
	} else {
		int status;
		ret = spufs_wait(ctx->mfc_wq,
				 spu_send_mfc_command(ctx, cmd, &status));
		if (ret)
			goto out;
		if (status)
			ret = status;
	}

	if (ret)
		goto out_unlock;

	ctx->tagwait |= 1 << cmd.tag;
	ret = size;

out_unlock:
	spu_release(ctx);
out:
	return ret;
}

static __poll_t spufs_mfc_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	u32 free_elements, tagstatus;
	__poll_t mask;

	poll_wait(file, &ctx->mfc_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
	free_elements = ctx->ops->get_mfc_free_elements(ctx);
	tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
	spu_release(ctx);

	mask = 0;
	if (free_elements & 0xffff)
		mask |= EPOLLOUT | EPOLLWRNORM;
	if (tagstatus & ctx->tagwait)
		mask |= EPOLLIN | EPOLLRDNORM;

	pr_debug("%s: free %d tagstatus %d tagwait %d\n", __func__,
		free_elements, tagstatus, ctx->tagwait);

	return mask;
}

static int spufs_mfc_flush(struct file *file, fl_owner_t id)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	spu_release(ctx);

	return 0;
}

static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);
	int err = file_write_and_wait_range(file, start, end);
	if (!err) {
		inode_lock(inode);
		err = spufs_mfc_flush(file, NULL);
		inode_unlock(inode);
	}
	return err;
}

static const struct file_operations spufs_mfc_fops = {
	.open	 = spufs_mfc_open,
	.release = spufs_mfc_release,
	.read	 = spufs_mfc_read,
	.write	 = spufs_mfc_write,
	.poll	 = spufs_mfc_poll,
	.flush	 = spufs_mfc_flush,
	.fsync	 = spufs_mfc_fsync,
	.mmap	 = spufs_mfc_mmap,
};
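
/*
 * Illustrative userspace use of the mfc file (a sketch, not a complete
 * program; the field layout follows struct mfc_dma_command as used in
 * spufs_check_valid_dma() above, and buf must be 16-byte aligned to
 * match lsa):
 *
 *	struct mfc_dma_command cmd = {
 *		.lsa  = 0x0,			// local store address
 *		.ea   = (uint64_t)(unsigned long)buf, // effective address
 *		.size = 16384,			// at most 16k per command
 *		.tag  = 1,			// tags 0..15 only
 *		.cmd  = MFC_GET_CMD,
 *	};
 *	write(mfc_fd, &cmd, sizeof(cmd));	// queue the transfer
 *	read(mfc_fd, &tagstatus, 4);		// wait for tag completion
 */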
1723 | |
1724 | static int spufs_npc_set(void *data, u64 val) |
1725 | { |
1726 | struct spu_context *ctx = data; |
1727 | int ret; |
1728 | |
1729 | ret = spu_acquire(ctx); |
1730 | if (ret) |
1731 | return ret; |
1732 | ctx->ops->npc_write(ctx, val); |
1733 | spu_release(ctx); |
1734 | |
1735 | return 0; |
1736 | } |
1737 | |
1738 | static u64 spufs_npc_get(struct spu_context *ctx) |
1739 | { |
1740 | return ctx->ops->npc_read(ctx); |
1741 | } |
1742 | DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set, |
1743 | "0x%llx\n" , SPU_ATTR_ACQUIRE); |
1744 | |
1745 | static int spufs_decr_set(void *data, u64 val) |
1746 | { |
1747 | struct spu_context *ctx = data; |
1748 | struct spu_lscsa *lscsa = ctx->csa.lscsa; |
1749 | int ret; |
1750 | |
1751 | ret = spu_acquire_saved(ctx); |
1752 | if (ret) |
1753 | return ret; |
1754 | lscsa->decr.slot[0] = (u32) val; |
1755 | spu_release_saved(ctx); |
1756 | |
1757 | return 0; |
1758 | } |
1759 | |
1760 | static u64 spufs_decr_get(struct spu_context *ctx) |
1761 | { |
1762 | struct spu_lscsa *lscsa = ctx->csa.lscsa; |
1763 | return lscsa->decr.slot[0]; |
1764 | } |
1765 | DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set, |
1766 | "0x%llx\n" , SPU_ATTR_ACQUIRE_SAVED); |
1767 | |
1768 | static int spufs_decr_status_set(void *data, u64 val) |
1769 | { |
1770 | struct spu_context *ctx = data; |
1771 | int ret; |
1772 | |
1773 | ret = spu_acquire_saved(ctx); |
1774 | if (ret) |
1775 | return ret; |
1776 | if (val) |
1777 | ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING; |
1778 | else |
1779 | ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING; |
1780 | spu_release_saved(ctx); |
1781 | |
1782 | return 0; |
1783 | } |
1784 | |
1785 | static u64 spufs_decr_status_get(struct spu_context *ctx) |
1786 | { |
1787 | if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING) |
1788 | return SPU_DECR_STATUS_RUNNING; |
1789 | else |
1790 | return 0; |
1791 | } |
1792 | DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get, |
1793 | spufs_decr_status_set, "0x%llx\n" , |
1794 | SPU_ATTR_ACQUIRE_SAVED); |
1795 | |
1796 | static int spufs_event_mask_set(void *data, u64 val) |
1797 | { |
1798 | struct spu_context *ctx = data; |
1799 | struct spu_lscsa *lscsa = ctx->csa.lscsa; |
1800 | int ret; |
1801 | |
1802 | ret = spu_acquire_saved(ctx); |
1803 | if (ret) |
1804 | return ret; |
1805 | lscsa->event_mask.slot[0] = (u32) val; |
1806 | spu_release_saved(ctx); |
1807 | |
1808 | return 0; |
1809 | } |
1810 | |
1811 | static u64 spufs_event_mask_get(struct spu_context *ctx) |
1812 | { |
1813 | struct spu_lscsa *lscsa = ctx->csa.lscsa; |
1814 | return lscsa->event_mask.slot[0]; |
1815 | } |
1816 | |
1817 | DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get, |
1818 | spufs_event_mask_set, "0x%llx\n" , |
1819 | SPU_ATTR_ACQUIRE_SAVED); |
1820 | |
1821 | static u64 spufs_event_status_get(struct spu_context *ctx) |
1822 | { |
1823 | struct spu_state *state = &ctx->csa; |
1824 | u64 stat; |
1825 | stat = state->spu_chnlcnt_RW[0]; |
1826 | if (stat) |
1827 | return state->spu_chnldata_RW[0]; |
1828 | return 0; |
1829 | } |
1830 | DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get, |
1831 | NULL, "0x%llx\n" , SPU_ATTR_ACQUIRE_SAVED) |
1832 | |
1833 | static int spufs_srr0_set(void *data, u64 val) |
1834 | { |
1835 | struct spu_context *ctx = data; |
1836 | struct spu_lscsa *lscsa = ctx->csa.lscsa; |
1837 | int ret; |
1838 | |
1839 | ret = spu_acquire_saved(ctx); |
1840 | if (ret) |
1841 | return ret; |
1842 | lscsa->srr0.slot[0] = (u32) val; |
1843 | spu_release_saved(ctx); |
1844 | |
1845 | return 0; |
1846 | } |
1847 | |
1848 | static u64 spufs_srr0_get(struct spu_context *ctx) |
1849 | { |
1850 | struct spu_lscsa *lscsa = ctx->csa.lscsa; |
1851 | return lscsa->srr0.slot[0]; |
1852 | } |
1853 | DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set, |
1854 | "0x%llx\n" , SPU_ATTR_ACQUIRE_SAVED) |
1855 | |
1856 | static u64 spufs_id_get(struct spu_context *ctx) |
1857 | { |
1858 | u64 num; |
1859 | |
1860 | if (ctx->state == SPU_STATE_RUNNABLE) |
1861 | num = ctx->spu->number; |
1862 | else |
1863 | num = (unsigned int)-1; |
1864 | |
1865 | return num; |
1866 | } |
1867 | DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n" , |
1868 | SPU_ATTR_ACQUIRE) |
1869 | |
1870 | static u64 spufs_object_id_get(struct spu_context *ctx) |
1871 | { |
1872 | /* FIXME: Should there really be no locking here? */ |
1873 | return ctx->object_id; |
1874 | } |
1875 | |
1876 | static int spufs_object_id_set(void *data, u64 id) |
1877 | { |
1878 | struct spu_context *ctx = data; |
1879 | ctx->object_id = id; |
1880 | |
1881 | return 0; |
1882 | } |
1883 | |
1884 | DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get, |
		       spufs_object_id_set, "0x%llx\n", SPU_ATTR_NOACQUIRE);
1886 | |
1887 | static u64 spufs_lslr_get(struct spu_context *ctx) |
1888 | { |
1889 | return ctx->csa.priv2.spu_lslr_RW; |
1890 | } |
DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n",
1892 | SPU_ATTR_ACQUIRE_SAVED); |
1893 | |
1894 | static int spufs_info_open(struct inode *inode, struct file *file) |
1895 | { |
1896 | struct spufs_inode_info *i = SPUFS_I(inode); |
1897 | struct spu_context *ctx = i->i_ctx; |
1898 | file->private_data = ctx; |
1899 | return 0; |
1900 | } |
1901 | |
1902 | static int spufs_caps_show(struct seq_file *s, void *private) |
1903 | { |
1904 | struct spu_context *ctx = s->private; |
1905 | |
1906 | if (!(ctx->flags & SPU_CREATE_NOSCHED)) |
1907 | seq_puts(m: s, s: "sched\n" ); |
1908 | if (!(ctx->flags & SPU_CREATE_ISOLATE)) |
1909 | seq_puts(m: s, s: "step\n" ); |
1910 | return 0; |
1911 | } |
1912 | |
1913 | static int spufs_caps_open(struct inode *inode, struct file *file) |
1914 | { |
1915 | return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx); |
1916 | } |
1917 | |
1918 | static const struct file_operations spufs_caps_fops = { |
1919 | .open = spufs_caps_open, |
1920 | .read = seq_read, |
1921 | .llseek = seq_lseek, |
1922 | .release = single_release, |
1923 | }; |
1924 | |
1925 | static ssize_t spufs_mbox_info_dump(struct spu_context *ctx, |
1926 | struct coredump_params *cprm) |
1927 | { |
1928 | if (!(ctx->csa.prob.mb_stat_R & 0x0000ff)) |
1929 | return 0; |
	return spufs_dump_emit(cprm, &ctx->csa.prob.pu_mb_R,
			       sizeof(ctx->csa.prob.pu_mb_R));
1932 | } |
1933 | |
1934 | static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf, |
1935 | size_t len, loff_t *pos) |
1936 | { |
1937 | struct spu_context *ctx = file->private_data; |
1938 | u32 stat, data; |
1939 | int ret; |
1940 | |
1941 | ret = spu_acquire_saved(ctx); |
1942 | if (ret) |
1943 | return ret; |
	spin_lock(&ctx->csa.register_lock);
	stat = ctx->csa.prob.mb_stat_R;
	data = ctx->csa.prob.pu_mb_R;
	spin_unlock(&ctx->csa.register_lock);
1948 | spu_release_saved(ctx); |
1949 | |
1950 | /* EOF if there's no entry in the mbox */ |
1951 | if (!(stat & 0x0000ff)) |
1952 | return 0; |
1953 | |
	return simple_read_from_buffer(buf, len, pos, &data, sizeof(data));
1955 | } |
1956 | |
1957 | static const struct file_operations spufs_mbox_info_fops = { |
1958 | .open = spufs_info_open, |
1959 | .read = spufs_mbox_info_read, |
1960 | .llseek = generic_file_llseek, |
1961 | }; |
1962 | |
1963 | static ssize_t spufs_ibox_info_dump(struct spu_context *ctx, |
1964 | struct coredump_params *cprm) |
1965 | { |
1966 | if (!(ctx->csa.prob.mb_stat_R & 0xff0000)) |
1967 | return 0; |
	return spufs_dump_emit(cprm, &ctx->csa.priv2.puint_mb_R,
			       sizeof(ctx->csa.priv2.puint_mb_R));
1970 | } |
1971 | |
1972 | static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf, |
1973 | size_t len, loff_t *pos) |
1974 | { |
1975 | struct spu_context *ctx = file->private_data; |
1976 | u32 stat, data; |
1977 | int ret; |
1978 | |
1979 | ret = spu_acquire_saved(ctx); |
1980 | if (ret) |
1981 | return ret; |
	spin_lock(&ctx->csa.register_lock);
	stat = ctx->csa.prob.mb_stat_R;
	data = ctx->csa.priv2.puint_mb_R;
	spin_unlock(&ctx->csa.register_lock);
1986 | spu_release_saved(ctx); |
1987 | |
1988 | /* EOF if there's no entry in the ibox */ |
1989 | if (!(stat & 0xff0000)) |
1990 | return 0; |
1991 | |
	return simple_read_from_buffer(buf, len, pos, &data, sizeof(data));
1993 | } |
1994 | |
1995 | static const struct file_operations spufs_ibox_info_fops = { |
1996 | .open = spufs_info_open, |
1997 | .read = spufs_ibox_info_read, |
1998 | .llseek = generic_file_llseek, |
1999 | }; |
2000 | |
2001 | static size_t spufs_wbox_info_cnt(struct spu_context *ctx) |
2002 | { |
2003 | return (4 - ((ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8)) * sizeof(u32); |
2004 | } |
2005 | |
2006 | static ssize_t spufs_wbox_info_dump(struct spu_context *ctx, |
2007 | struct coredump_params *cprm) |
2008 | { |
	return spufs_dump_emit(cprm, &ctx->csa.spu_mailbox_data,
			       spufs_wbox_info_cnt(ctx));
2011 | } |
2012 | |
2013 | static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf, |
2014 | size_t len, loff_t *pos) |
2015 | { |
2016 | struct spu_context *ctx = file->private_data; |
2017 | u32 data[ARRAY_SIZE(ctx->csa.spu_mailbox_data)]; |
2018 | int ret, count; |
2019 | |
2020 | ret = spu_acquire_saved(ctx); |
2021 | if (ret) |
2022 | return ret; |
	spin_lock(&ctx->csa.register_lock);
	count = spufs_wbox_info_cnt(ctx);
	memcpy(&data, &ctx->csa.spu_mailbox_data, sizeof(data));
	spin_unlock(&ctx->csa.register_lock);
2027 | spu_release_saved(ctx); |
2028 | |
	return simple_read_from_buffer(buf, len, pos, &data,
				count * sizeof(u32));
2031 | } |
2032 | |
2033 | static const struct file_operations spufs_wbox_info_fops = { |
2034 | .open = spufs_info_open, |
2035 | .read = spufs_wbox_info_read, |
2036 | .llseek = generic_file_llseek, |
2037 | }; |
2038 | |
2039 | static void spufs_get_dma_info(struct spu_context *ctx, |
2040 | struct spu_dma_info *info) |
2041 | { |
2042 | int i; |
2043 | |
2044 | info->dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW; |
2045 | info->dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0]; |
2046 | info->dma_info_status = ctx->csa.spu_chnldata_RW[24]; |
2047 | info->dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25]; |
2048 | info->dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27]; |
2049 | for (i = 0; i < 16; i++) { |
2050 | struct mfc_cq_sr *qp = &info->dma_info_command_data[i]; |
2051 | struct mfc_cq_sr *spuqp = &ctx->csa.priv2.spuq[i]; |
2052 | |
2053 | qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW; |
2054 | qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW; |
2055 | qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW; |
2056 | qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW; |
2057 | } |
2058 | } |
2059 | |
2060 | static ssize_t spufs_dma_info_dump(struct spu_context *ctx, |
2061 | struct coredump_params *cprm) |
2062 | { |
2063 | struct spu_dma_info info; |
2064 | |
	spufs_get_dma_info(ctx, &info);
	return spufs_dump_emit(cprm, &info, sizeof(info));
2067 | } |
2068 | |
2069 | static ssize_t spufs_dma_info_read(struct file *file, char __user *buf, |
2070 | size_t len, loff_t *pos) |
2071 | { |
2072 | struct spu_context *ctx = file->private_data; |
2073 | struct spu_dma_info info; |
2074 | int ret; |
2075 | |
2076 | ret = spu_acquire_saved(ctx); |
2077 | if (ret) |
2078 | return ret; |
	spin_lock(&ctx->csa.register_lock);
	spufs_get_dma_info(ctx, &info);
	spin_unlock(&ctx->csa.register_lock);
2082 | spu_release_saved(ctx); |
2083 | |
	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof(info));
2086 | } |
2087 | |
2088 | static const struct file_operations spufs_dma_info_fops = { |
2089 | .open = spufs_info_open, |
2090 | .read = spufs_dma_info_read, |
2091 | }; |
2092 | |
2093 | static void spufs_get_proxydma_info(struct spu_context *ctx, |
2094 | struct spu_proxydma_info *info) |
2095 | { |
2096 | int i; |
2097 | |
2098 | info->proxydma_info_type = ctx->csa.prob.dma_querytype_RW; |
2099 | info->proxydma_info_mask = ctx->csa.prob.dma_querymask_RW; |
2100 | info->proxydma_info_status = ctx->csa.prob.dma_tagstatus_R; |
2101 | |
2102 | for (i = 0; i < 8; i++) { |
2103 | struct mfc_cq_sr *qp = &info->proxydma_info_command_data[i]; |
2104 | struct mfc_cq_sr *puqp = &ctx->csa.priv2.puq[i]; |
2105 | |
2106 | qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW; |
2107 | qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW; |
2108 | qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW; |
2109 | qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW; |
2110 | } |
2111 | } |
2112 | |
2113 | static ssize_t spufs_proxydma_info_dump(struct spu_context *ctx, |
2114 | struct coredump_params *cprm) |
2115 | { |
2116 | struct spu_proxydma_info info; |
2117 | |
	spufs_get_proxydma_info(ctx, &info);
	return spufs_dump_emit(cprm, &info, sizeof(info));
2120 | } |
2121 | |
2122 | static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf, |
2123 | size_t len, loff_t *pos) |
2124 | { |
2125 | struct spu_context *ctx = file->private_data; |
2126 | struct spu_proxydma_info info; |
2127 | int ret; |
2128 | |
2129 | if (len < sizeof(info)) |
2130 | return -EINVAL; |
2131 | |
2132 | ret = spu_acquire_saved(ctx); |
2133 | if (ret) |
2134 | return ret; |
	spin_lock(&ctx->csa.register_lock);
	spufs_get_proxydma_info(ctx, &info);
	spin_unlock(&ctx->csa.register_lock);
2138 | spu_release_saved(ctx); |
2139 | |
	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof(info));
2142 | } |
2143 | |
2144 | static const struct file_operations spufs_proxydma_info_fops = { |
2145 | .open = spufs_info_open, |
2146 | .read = spufs_proxydma_info_read, |
2147 | }; |
2148 | |
2149 | static int spufs_show_tid(struct seq_file *s, void *private) |
2150 | { |
2151 | struct spu_context *ctx = s->private; |
2152 | |
2153 | seq_printf(m: s, fmt: "%d\n" , ctx->tid); |
2154 | return 0; |
2155 | } |
2156 | |
2157 | static int spufs_tid_open(struct inode *inode, struct file *file) |
2158 | { |
2159 | return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx); |
2160 | } |
2161 | |
2162 | static const struct file_operations spufs_tid_fops = { |
2163 | .open = spufs_tid_open, |
2164 | .read = seq_read, |
2165 | .llseek = seq_lseek, |
2166 | .release = single_release, |
2167 | }; |
2168 | |
2169 | static const char *ctx_state_names[] = { |
2170 | "user" , "system" , "iowait" , "loaded" |
2171 | }; |
2172 | |
2173 | static unsigned long long spufs_acct_time(struct spu_context *ctx, |
2174 | enum spu_utilization_state state) |
2175 | { |
2176 | unsigned long long time = ctx->stats.times[state]; |
2177 | |
2178 | /* |
2179 | * In general, utilization statistics are updated by the controlling |
2180 | * thread as the spu context moves through various well defined |
2181 | * state transitions, but if the context is lazily loaded its |
2182 | * utilization statistics are not updated as the controlling thread |
2183 | * is not tightly coupled with the execution of the spu context. We |
2184 | * calculate and apply the time delta from the last recorded state |
2185 | * of the spu context. |
2186 | */ |
2187 | if (ctx->spu && ctx->stats.util_state == state) { |
2188 | time += ktime_get_ns() - ctx->stats.tstamp; |
2189 | } |
2190 | |
2191 | return time / NSEC_PER_MSEC; |
2192 | } |
2193 | |
2194 | static unsigned long long spufs_slb_flts(struct spu_context *ctx) |
2195 | { |
2196 | unsigned long long slb_flts = ctx->stats.slb_flt; |
2197 | |
2198 | if (ctx->state == SPU_STATE_RUNNABLE) { |
2199 | slb_flts += (ctx->spu->stats.slb_flt - |
2200 | ctx->stats.slb_flt_base); |
2201 | } |
2202 | |
2203 | return slb_flts; |
2204 | } |
2205 | |
2206 | static unsigned long long spufs_class2_intrs(struct spu_context *ctx) |
2207 | { |
2208 | unsigned long long class2_intrs = ctx->stats.class2_intr; |
2209 | |
2210 | if (ctx->state == SPU_STATE_RUNNABLE) { |
2211 | class2_intrs += (ctx->spu->stats.class2_intr - |
2212 | ctx->stats.class2_intr_base); |
2213 | } |
2214 | |
2215 | return class2_intrs; |
2216 | } |
2217 | |
2218 | |
2219 | static int spufs_show_stat(struct seq_file *s, void *private) |
2220 | { |
2221 | struct spu_context *ctx = s->private; |
2222 | int ret; |
2223 | |
2224 | ret = spu_acquire(ctx); |
2225 | if (ret) |
2226 | return ret; |
2227 | |
2228 | seq_printf(m: s, fmt: "%s %llu %llu %llu %llu " |
2229 | "%llu %llu %llu %llu %llu %llu %llu %llu\n" , |
2230 | ctx_state_names[ctx->stats.util_state], |
2231 | spufs_acct_time(ctx, state: SPU_UTIL_USER), |
2232 | spufs_acct_time(ctx, state: SPU_UTIL_SYSTEM), |
2233 | spufs_acct_time(ctx, state: SPU_UTIL_IOWAIT), |
2234 | spufs_acct_time(ctx, state: SPU_UTIL_IDLE_LOADED), |
2235 | ctx->stats.vol_ctx_switch, |
2236 | ctx->stats.invol_ctx_switch, |
2237 | spufs_slb_flts(ctx), |
2238 | ctx->stats.hash_flt, |
2239 | ctx->stats.min_flt, |
2240 | ctx->stats.maj_flt, |
2241 | spufs_class2_intrs(ctx), |
2242 | ctx->stats.libassist); |
2243 | spu_release(ctx); |
2244 | return 0; |
2245 | } |
2246 | |
2247 | static int spufs_stat_open(struct inode *inode, struct file *file) |
2248 | { |
2249 | return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx); |
2250 | } |
2251 | |
2252 | static const struct file_operations spufs_stat_fops = { |
2253 | .open = spufs_stat_open, |
2254 | .read = seq_read, |
2255 | .llseek = seq_lseek, |
2256 | .release = single_release, |
2257 | }; |
2258 | |
2259 | static inline int spufs_switch_log_used(struct spu_context *ctx) |
2260 | { |
2261 | return (ctx->switch_log->head - ctx->switch_log->tail) % |
2262 | SWITCH_LOG_BUFSIZE; |
2263 | } |
2264 | |
2265 | static inline int spufs_switch_log_avail(struct spu_context *ctx) |
2266 | { |
2267 | return SWITCH_LOG_BUFSIZE - spufs_switch_log_used(ctx); |
2268 | } |
2269 | |
2270 | static int spufs_switch_log_open(struct inode *inode, struct file *file) |
2271 | { |
2272 | struct spu_context *ctx = SPUFS_I(inode)->i_ctx; |
2273 | int rc; |
2274 | |
2275 | rc = spu_acquire(ctx); |
2276 | if (rc) |
2277 | return rc; |
2278 | |
2279 | if (ctx->switch_log) { |
2280 | rc = -EBUSY; |
2281 | goto out; |
2282 | } |
2283 | |
2284 | ctx->switch_log = kmalloc(struct_size(ctx->switch_log, log, |
2285 | SWITCH_LOG_BUFSIZE), GFP_KERNEL); |
2286 | |
2287 | if (!ctx->switch_log) { |
2288 | rc = -ENOMEM; |
2289 | goto out; |
2290 | } |
2291 | |
2292 | ctx->switch_log->head = ctx->switch_log->tail = 0; |
2293 | init_waitqueue_head(&ctx->switch_log->wait); |
2294 | rc = 0; |
2295 | |
2296 | out: |
2297 | spu_release(ctx); |
2298 | return rc; |
2299 | } |
2300 | |
2301 | static int spufs_switch_log_release(struct inode *inode, struct file *file) |
2302 | { |
2303 | struct spu_context *ctx = SPUFS_I(inode)->i_ctx; |
2304 | int rc; |
2305 | |
2306 | rc = spu_acquire(ctx); |
2307 | if (rc) |
2308 | return rc; |
2309 | |
	kfree(ctx->switch_log);
2311 | ctx->switch_log = NULL; |
2312 | spu_release(ctx); |
2313 | |
2314 | return 0; |
2315 | } |
2316 | |
2317 | static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n) |
2318 | { |
2319 | struct switch_log_entry *p; |
2320 | |
2321 | p = ctx->switch_log->log + ctx->switch_log->tail % SWITCH_LOG_BUFSIZE; |
2322 | |
	return snprintf(tbuf, n, "%llu.%09u %d %u %u %llu\n",
2324 | (unsigned long long) p->tstamp.tv_sec, |
2325 | (unsigned int) p->tstamp.tv_nsec, |
2326 | p->spu_id, |
2327 | (unsigned int) p->type, |
2328 | (unsigned int) p->val, |
2329 | (unsigned long long) p->timebase); |
2330 | } |
2331 | |
2332 | static ssize_t spufs_switch_log_read(struct file *file, char __user *buf, |
2333 | size_t len, loff_t *ppos) |
2334 | { |
	struct inode *inode = file_inode(file);
2336 | struct spu_context *ctx = SPUFS_I(inode)->i_ctx; |
2337 | int error = 0, cnt = 0; |
2338 | |
2339 | if (!buf) |
2340 | return -EINVAL; |
2341 | |
2342 | error = spu_acquire(ctx); |
2343 | if (error) |
2344 | return error; |
2345 | |
2346 | while (cnt < len) { |
2347 | char tbuf[128]; |
2348 | int width; |
2349 | |
2350 | if (spufs_switch_log_used(ctx) == 0) { |
2351 | if (cnt > 0) { |
2352 | /* If there's data ready to go, we can |
2353 | * just return straight away */ |
2354 | break; |
2355 | |
2356 | } else if (file->f_flags & O_NONBLOCK) { |
2357 | error = -EAGAIN; |
2358 | break; |
2359 | |
2360 | } else { |
2361 | /* spufs_wait will drop the mutex and |
2362 | * re-acquire, but since we're in read(), the |
2363 | * file cannot be _released (and so |
2364 | * ctx->switch_log is stable). |
2365 | */ |
2366 | error = spufs_wait(ctx->switch_log->wait, |
2367 | spufs_switch_log_used(ctx) > 0); |
2368 | |
2369 | /* On error, spufs_wait returns without the |
2370 | * state mutex held */ |
2371 | if (error) |
2372 | return error; |
2373 | |
2374 | /* We may have had entries read from underneath |
2375 | * us while we dropped the mutex in spufs_wait, |
2376 | * so re-check */ |
2377 | if (spufs_switch_log_used(ctx) == 0) |
2378 | continue; |
2379 | } |
2380 | } |
2381 | |
		width = switch_log_sprint(ctx, tbuf, sizeof(tbuf));
		if (width < len - cnt)
			ctx->switch_log->tail =
				(ctx->switch_log->tail + 1) %
				 SWITCH_LOG_BUFSIZE;
		else
			/* If the record is greater than space available return
			 * partial buffer (so far) */
			break;

		if (copy_to_user(buf + cnt, tbuf, width)) {
			/* copy_to_user() returns the uncopied byte count,
			 * not an errno, so map any failure to -EFAULT */
			error = -EFAULT;
			break;
		}
2395 | cnt += width; |
2396 | } |
2397 | |
2398 | spu_release(ctx); |
2399 | |
2400 | return cnt == 0 ? error : cnt; |
2401 | } |
2402 | |
2403 | static __poll_t spufs_switch_log_poll(struct file *file, poll_table *wait) |
2404 | { |
	struct inode *inode = file_inode(file);
2406 | struct spu_context *ctx = SPUFS_I(inode)->i_ctx; |
2407 | __poll_t mask = 0; |
2408 | int rc; |
2409 | |
	poll_wait(file, &ctx->switch_log->wait, wait);
2411 | |
2412 | rc = spu_acquire(ctx); |
2413 | if (rc) |
2414 | return rc; |
2415 | |
2416 | if (spufs_switch_log_used(ctx) > 0) |
2417 | mask |= EPOLLIN; |
2418 | |
2419 | spu_release(ctx); |
2420 | |
2421 | return mask; |
2422 | } |
2423 | |
2424 | static const struct file_operations spufs_switch_log_fops = { |
2425 | .open = spufs_switch_log_open, |
2426 | .read = spufs_switch_log_read, |
2427 | .poll = spufs_switch_log_poll, |
2428 | .release = spufs_switch_log_release, |
2429 | }; |
2430 | |
2431 | /** |
2432 | * Log a context switch event to a switch log reader. |
2433 | * |
2434 | * Must be called with ctx->state_mutex held. |
2435 | */ |
2436 | void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx, |
2437 | u32 type, u32 val) |
2438 | { |
2439 | if (!ctx->switch_log) |
2440 | return; |
2441 | |
2442 | if (spufs_switch_log_avail(ctx) > 1) { |
2443 | struct switch_log_entry *p; |
2444 | |
2445 | p = ctx->switch_log->log + ctx->switch_log->head; |
		ktime_get_ts64(&p->tstamp);
2447 | p->timebase = get_tb(); |
2448 | p->spu_id = spu ? spu->number : -1; |
2449 | p->type = type; |
2450 | p->val = val; |
2451 | |
2452 | ctx->switch_log->head = |
2453 | (ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE; |
2454 | } |
2455 | |
2456 | wake_up(&ctx->switch_log->wait); |
2457 | } |
2458 | |
2459 | static int spufs_show_ctx(struct seq_file *s, void *private) |
2460 | { |
2461 | struct spu_context *ctx = s->private; |
2462 | u64 mfc_control_RW; |
2463 | |
2464 | mutex_lock(&ctx->state_mutex); |
2465 | if (ctx->spu) { |
2466 | struct spu *spu = ctx->spu; |
2467 | struct spu_priv2 __iomem *priv2 = spu->priv2; |
2468 | |
		spin_lock_irq(&spu->register_lock);
		mfc_control_RW = in_be64(&priv2->mfc_control_RW);
		spin_unlock_irq(&spu->register_lock);
2472 | } else { |
2473 | struct spu_state *csa = &ctx->csa; |
2474 | |
2475 | mfc_control_RW = csa->priv2.mfc_control_RW; |
2476 | } |
2477 | |
2478 | seq_printf(m: s, fmt: "%c flgs(%lx) sflgs(%lx) pri(%d) ts(%d) spu(%02d)" |
2479 | " %c %llx %llx %llx %llx %x %x\n" , |
2480 | ctx->state == SPU_STATE_SAVED ? 'S' : 'R', |
2481 | ctx->flags, |
2482 | ctx->sched_flags, |
2483 | ctx->prio, |
2484 | ctx->time_slice, |
2485 | ctx->spu ? ctx->spu->number : -1, |
		!list_empty(&ctx->rq) ? 'q' : ' ',
2487 | ctx->csa.class_0_pending, |
2488 | ctx->csa.class_0_dar, |
2489 | ctx->csa.class_1_dsisr, |
2490 | mfc_control_RW, |
2491 | ctx->ops->runcntl_read(ctx), |
2492 | ctx->ops->status_read(ctx)); |
2493 | |
	mutex_unlock(&ctx->state_mutex);
2495 | |
2496 | return 0; |
2497 | } |
2498 | |
2499 | static int spufs_ctx_open(struct inode *inode, struct file *file) |
2500 | { |
2501 | return single_open(file, spufs_show_ctx, SPUFS_I(inode)->i_ctx); |
2502 | } |
2503 | |
2504 | static const struct file_operations spufs_ctx_fops = { |
2505 | .open = spufs_ctx_open, |
2506 | .read = seq_read, |
2507 | .llseek = seq_lseek, |
2508 | .release = single_release, |
2509 | }; |
2510 | |
2511 | const struct spufs_tree_descr spufs_dir_contents[] = { |
2512 | { "capabilities" , &spufs_caps_fops, 0444, }, |
2513 | { "mem" , &spufs_mem_fops, 0666, LS_SIZE, }, |
2514 | { "regs" , &spufs_regs_fops, 0666, sizeof(struct spu_reg128[128]), }, |
2515 | { "mbox" , &spufs_mbox_fops, 0444, }, |
2516 | { "ibox" , &spufs_ibox_fops, 0444, }, |
2517 | { "wbox" , &spufs_wbox_fops, 0222, }, |
2518 | { "mbox_stat" , &spufs_mbox_stat_fops, 0444, sizeof(u32), }, |
2519 | { "ibox_stat" , &spufs_ibox_stat_fops, 0444, sizeof(u32), }, |
2520 | { "wbox_stat" , &spufs_wbox_stat_fops, 0444, sizeof(u32), }, |
2521 | { "signal1" , &spufs_signal1_fops, 0666, }, |
2522 | { "signal2" , &spufs_signal2_fops, 0666, }, |
2523 | { "signal1_type" , &spufs_signal1_type, 0666, }, |
2524 | { "signal2_type" , &spufs_signal2_type, 0666, }, |
2525 | { "cntl" , &spufs_cntl_fops, 0666, }, |
2526 | { "fpcr" , &spufs_fpcr_fops, 0666, sizeof(struct spu_reg128), }, |
2527 | { "lslr" , &spufs_lslr_ops, 0444, }, |
2528 | { "mfc" , &spufs_mfc_fops, 0666, }, |
2529 | { "mss" , &spufs_mss_fops, 0666, }, |
2530 | { "npc" , &spufs_npc_ops, 0666, }, |
2531 | { "srr0" , &spufs_srr0_ops, 0666, }, |
2532 | { "decr" , &spufs_decr_ops, 0666, }, |
2533 | { "decr_status" , &spufs_decr_status_ops, 0666, }, |
2534 | { "event_mask" , &spufs_event_mask_ops, 0666, }, |
2535 | { "event_status" , &spufs_event_status_ops, 0444, }, |
2536 | { "psmap" , &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, }, |
2537 | { "phys-id" , &spufs_id_ops, 0666, }, |
2538 | { "object-id" , &spufs_object_id_ops, 0666, }, |
2539 | { "mbox_info" , &spufs_mbox_info_fops, 0444, sizeof(u32), }, |
2540 | { "ibox_info" , &spufs_ibox_info_fops, 0444, sizeof(u32), }, |
2541 | { "wbox_info" , &spufs_wbox_info_fops, 0444, sizeof(u32), }, |
2542 | { "dma_info" , &spufs_dma_info_fops, 0444, |
2543 | sizeof(struct spu_dma_info), }, |
2544 | { "proxydma_info" , &spufs_proxydma_info_fops, 0444, |
2545 | sizeof(struct spu_proxydma_info)}, |
2546 | { "tid" , &spufs_tid_fops, 0444, }, |
2547 | { "stat" , &spufs_stat_fops, 0444, }, |
2548 | { "switch_log" , &spufs_switch_log_fops, 0444 }, |
2549 | {}, |
2550 | }; |
2551 | |
2552 | const struct spufs_tree_descr spufs_dir_nosched_contents[] = { |
2553 | { "capabilities" , &spufs_caps_fops, 0444, }, |
2554 | { "mem" , &spufs_mem_fops, 0666, LS_SIZE, }, |
2555 | { "mbox" , &spufs_mbox_fops, 0444, }, |
2556 | { "ibox" , &spufs_ibox_fops, 0444, }, |
2557 | { "wbox" , &spufs_wbox_fops, 0222, }, |
2558 | { "mbox_stat" , &spufs_mbox_stat_fops, 0444, sizeof(u32), }, |
2559 | { "ibox_stat" , &spufs_ibox_stat_fops, 0444, sizeof(u32), }, |
2560 | { "wbox_stat" , &spufs_wbox_stat_fops, 0444, sizeof(u32), }, |
2561 | { "signal1" , &spufs_signal1_nosched_fops, 0222, }, |
2562 | { "signal2" , &spufs_signal2_nosched_fops, 0222, }, |
2563 | { "signal1_type" , &spufs_signal1_type, 0666, }, |
2564 | { "signal2_type" , &spufs_signal2_type, 0666, }, |
2565 | { "mss" , &spufs_mss_fops, 0666, }, |
2566 | { "mfc" , &spufs_mfc_fops, 0666, }, |
2567 | { "cntl" , &spufs_cntl_fops, 0666, }, |
2568 | { "npc" , &spufs_npc_ops, 0666, }, |
2569 | { "psmap" , &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, }, |
2570 | { "phys-id" , &spufs_id_ops, 0666, }, |
2571 | { "object-id" , &spufs_object_id_ops, 0666, }, |
2572 | { "tid" , &spufs_tid_fops, 0444, }, |
2573 | { "stat" , &spufs_stat_fops, 0444, }, |
2574 | {}, |
2575 | }; |
2576 | |
2577 | const struct spufs_tree_descr spufs_dir_debug_contents[] = { |
2578 | { ".ctx" , &spufs_ctx_fops, 0444, }, |
2579 | {}, |
2580 | }; |
2581 | |
2582 | const struct spufs_coredump_reader spufs_coredump_read[] = { |
2583 | { "regs" , spufs_regs_dump, NULL, sizeof(struct spu_reg128[128])}, |
2584 | { "fpcr" , spufs_fpcr_dump, NULL, sizeof(struct spu_reg128) }, |
2585 | { "lslr" , NULL, spufs_lslr_get, 19 }, |
2586 | { "decr" , NULL, spufs_decr_get, 19 }, |
2587 | { "decr_status" , NULL, spufs_decr_status_get, 19 }, |
2588 | { "mem" , spufs_mem_dump, NULL, LS_SIZE, }, |
2589 | { "signal1" , spufs_signal1_dump, NULL, sizeof(u32) }, |
2590 | { "signal1_type" , NULL, spufs_signal1_type_get, 19 }, |
2591 | { "signal2" , spufs_signal2_dump, NULL, sizeof(u32) }, |
2592 | { "signal2_type" , NULL, spufs_signal2_type_get, 19 }, |
2593 | { "event_mask" , NULL, spufs_event_mask_get, 19 }, |
2594 | { "event_status" , NULL, spufs_event_status_get, 19 }, |
2595 | { "mbox_info" , spufs_mbox_info_dump, NULL, sizeof(u32) }, |
2596 | { "ibox_info" , spufs_ibox_info_dump, NULL, sizeof(u32) }, |
2597 | { "wbox_info" , spufs_wbox_info_dump, NULL, 4 * sizeof(u32)}, |
2598 | { "dma_info" , spufs_dma_info_dump, NULL, sizeof(struct spu_dma_info)}, |
2599 | { "proxydma_info" , spufs_proxydma_info_dump, |
2600 | NULL, sizeof(struct spu_proxydma_info)}, |
2601 | { "object-id" , NULL, spufs_object_id_get, 19 }, |
2602 | { "npc" , NULL, spufs_npc_get, 19 }, |
2603 | { NULL }, |
2604 | }; |
2605 | |