// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 */
#include <linux/sched/signal.h>
#include <linux/mm.h>

#include <asm/spu.h>
#include <asm/spu_csa.h>

#include "spufs.h"

/**
 * Handle an SPE event, depending on the context's SPU_CREATE_EVENTS_ENABLED
 * flag.
 *
 * If the context was created with events enabled, we just record the event
 * for spu_run() to return. Otherwise, send an appropriate signal to the
 * process.
 */
static void spufs_handle_event(struct spu_context *ctx,
				unsigned long ea, int type)
{
	if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) {
		ctx->event_return |= type;
		wake_up_all(&ctx->stop_wq);
		return;
	}

	switch (type) {
	case SPE_EVENT_INVALID_DMA:
		force_sig_fault(SIGBUS, BUS_OBJERR, NULL);
		break;
	case SPE_EVENT_SPE_DATA_STORAGE:
		/* restart the MFC DMA queue before signalling the fault */
		ctx->ops->restart_dma(ctx);
		force_sig_fault(SIGSEGV, SEGV_ACCERR, (void __user *)ea);
		break;
	case SPE_EVENT_DMA_ALIGNMENT:
		/* DAR isn't set for an alignment fault :( */
		force_sig_fault(SIGBUS, BUS_ADRALN, NULL);
		break;
	case SPE_EVENT_SPE_ERROR:
		/* SPU instructions are 4 bytes, so NPC - 4 is the faulting one */
		force_sig_fault(SIGILL, ILL_ILLOPC,
				(void __user *)(unsigned long)
				ctx->ops->npc_read(ctx) - 4);
		break;
	}
}
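
/*
 * For reference, a minimal sketch of how user space consumes these events
 * when the context was created with SPU_CREATE_EVENTS_ENABLED (illustrative
 * only; assumes direct use of the spu_create/spu_run syscalls, with error
 * handling omitted and recover_from_alignment_error() standing in for
 * application code):
 *
 *	__u32 npc = 0, event = 0;
 *	int fd = spu_create("/spu/myctx", SPU_CREATE_EVENTS_ENABLED, 0700);
 *
 *	spu_run(fd, &npc, &event);
 *	if (event & SPE_EVENT_DMA_ALIGNMENT)
 *		recover_from_alignment_error();
 *
 * Without the flag, the same conditions arrive as SIGBUS, SIGSEGV or
 * SIGILL instead, as implemented above.
 */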

int spufs_handle_class0(struct spu_context *ctx)
{
	unsigned long stat = ctx->csa.class_0_pending & CLASS0_INTR_MASK;

	if (likely(!stat))
		return 0;

	if (stat & CLASS0_DMA_ALIGNMENT_INTR)
		spufs_handle_event(ctx, ctx->csa.class_0_dar,
			SPE_EVENT_DMA_ALIGNMENT);

	if (stat & CLASS0_INVALID_DMA_COMMAND_INTR)
		spufs_handle_event(ctx, ctx->csa.class_0_dar,
			SPE_EVENT_INVALID_DMA);

	if (stat & CLASS0_SPU_ERROR_INTR)
		spufs_handle_event(ctx, ctx->csa.class_0_dar,
			SPE_EVENT_SPE_ERROR);

	ctx->csa.class_0_pending = 0;

	return -EIO;
}
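
/*
 * Note on the -EIO above: as best as can be told from run.c, it is consumed
 * by the spu_run() loop, which stops running the context; with
 * SPU_CREATE_EVENTS_ENABLED the bits accumulated in ctx->event_return are
 * then reported back to user space through spu_run()'s event argument.
 */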

/*
 * Bottom half handler for page faults: we can't do this from
 * interrupt context, since we might need to sleep.
 * We also need to give up the mutex so we can get scheduled
 * out while waiting for the backing store.
 *
 * TODO: try calling hash_page from the interrupt handler first
 * in order to speed up the easy case.
 */
int spufs_handle_class1(struct spu_context *ctx)
{
	u64 ea, dsisr, access;
	unsigned long flags;
	vm_fault_t flt = 0;
	int ret;

	/*
	 * dar and dsisr get passed from the registers
	 * to the spu_context, to this function, but not
	 * back to the spu if it gets scheduled again.
	 *
	 * If we don't handle the fault for a saved context
	 * in time, we can still expect to get the same fault
	 * again immediately after the context restore.
	 */
	ea = ctx->csa.class_1_dar;
	dsisr = ctx->csa.class_1_dsisr;

	/* only translation misses and protection faults are handled here */
	if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)))
		return 0;

	spuctx_switch_state(ctx, SPU_UTIL_IOWAIT);

	pr_debug("ctx %p: ea %016llx, dsisr %016llx state %d\n", ctx, ea,
		dsisr, ctx->state);

	ctx->stats.hash_flt++;
	if (ctx->state == SPU_STATE_RUNNABLE)
		ctx->spu->stats.hash_flt++;

	/* we must not hold the lock when entering copro_handle_mm_fault */
	spu_release(ctx);

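	/*
	 * Build the access mask for the fault: an MFC "put" DMA writes to
	 * the effective address, so it needs write permission on top of
	 * read. The 0x300 passed to hash_page() below is the data storage
	 * interrupt vector, so the fault is handled like an ordinary CPU
	 * data storage fault.
	 */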
	access = (_PAGE_PRESENT | _PAGE_READ);
	access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_WRITE : 0UL;
	local_irq_save(flags);
	ret = hash_page(ea, access, 0x300, dsisr);
	local_irq_restore(flags);

	/* hashing failed, so try the actual fault handler */
	if (ret)
		ret = copro_handle_mm_fault(current->mm, ea, dsisr, &flt);

	/*
	 * This is nasty: we need the state_mutex for all the bookkeeping even
	 * if the syscall was interrupted by a signal. ewww.
	 */
	mutex_lock(&ctx->state_mutex);

	/*
	 * Clear dsisr under ctxt lock after handling the fault, so that
	 * time slicing will not preempt the context while the page fault
	 * handler is running. Context switch code removes mappings.
	 */
	ctx->csa.class_1_dar = ctx->csa.class_1_dsisr = 0;

	/*
	 * If we handled the fault successfully and are in runnable
	 * state, restart the DMA.
	 * In case of unhandled error report the problem to user space.
	 */
	if (!ret) {
		if (flt & VM_FAULT_MAJOR)
			ctx->stats.maj_flt++;
		else
			ctx->stats.min_flt++;
		if (ctx->state == SPU_STATE_RUNNABLE) {
			if (flt & VM_FAULT_MAJOR)
				ctx->spu->stats.maj_flt++;
			else
				ctx->spu->stats.min_flt++;
		}

		if (ctx->spu)
			ctx->ops->restart_dma(ctx);
	} else
		spufs_handle_event(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE);

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
	return ret;
}
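
/*
 * A rough sketch of how this bottom half is reached, assuming the usual
 * spufs flow (see run.c and the class 1 interrupt code in spu_base.c):
 * the interrupt handler saves DAR/DSISR, the stop callback copies them
 * into csa.class_1_dar/class_1_dsisr, and the spu_run() loop then calls
 * spufs_handle_class1() from process context, where sleeping in
 * copro_handle_mm_fault() is allowed.
 */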