// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014 IBM Corp.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <linux/sched.h>
#include <linux/pid.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/sched/mm.h>
#include <linux/mmu_context.h>
#include <asm/cputable.h>
#include <asm/current.h>
#include <asm/copro.h>

#include "cxl.h"

/*
 * Allocates space for a CXL context.
 */
struct cxl_context *cxl_context_alloc(void)
{
	return kzalloc(sizeof(struct cxl_context), GFP_KERNEL);
}

/*
 * Initialises a CXL context.
 */
int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
{
	int i;

	ctx->afu = afu;
	ctx->master = master;
	ctx->pid = NULL; /* Set in start work ioctl */
	mutex_init(&ctx->mapping_lock);
	ctx->mapping = NULL;
	ctx->tidr = 0;
	ctx->assign_tidr = false;

	if (cxl_is_power8()) {
		spin_lock_init(&ctx->sste_lock);

		/*
		 * Allocate the segment table before we put it in the IDR so that we
		 * can always access it when dereferenced from IDR. For the same
		 * reason, the segment table is only destroyed after the context is
		 * removed from the IDR. Access to this in the IOCTL is protected by
		 * Linux filesystem semantics (can't IOCTL until open is complete).
		 */
		i = cxl_alloc_sst(ctx);
		if (i)
			return i;
	}

	INIT_WORK(&ctx->fault_work, cxl_handle_fault);

	init_waitqueue_head(&ctx->wq);
	spin_lock_init(&ctx->lock);

	ctx->irq_bitmap = NULL;
	ctx->pending_irq = false;
	ctx->pending_fault = false;
	ctx->pending_afu_err = false;

	INIT_LIST_HEAD(&ctx->irq_names);

	/*
	 * When we have to destroy all contexts in cxl_context_detach_all() we
	 * end up with afu_release_irqs() called from inside an
	 * idr_for_each_entry(). Hence we need to make sure that anything
	 * dereferenced from this IDR is ok before we allocate the IDR here.
	 * This clears out the IRQ ranges to ensure this.
	 */
	for (i = 0; i < CXL_IRQ_RANGES; i++)
		ctx->irqs.range[i] = 0;

	mutex_init(&ctx->status_mutex);

	ctx->status = OPENED;

	/*
	 * Allocating IDR! We better make sure everything's setup that
	 * dereferences from it.
	 */
	mutex_lock(&afu->contexts_lock);
	idr_preload(GFP_KERNEL);
	i = idr_alloc(&ctx->afu->contexts_idr, ctx, 0,
		      ctx->afu->num_procs, GFP_NOWAIT);
	idr_preload_end();
	mutex_unlock(&afu->contexts_lock);
	if (i < 0)
		return i;

	ctx->pe = i;
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		ctx->elem = &ctx->afu->native->spa[i];
		ctx->external_pe = ctx->pe;
	} else {
		ctx->external_pe = -1; /* assigned when attaching */
	}
	ctx->pe_inserted = false;

	/*
	 * Take a ref on the afu so that it stays alive at least until
	 * this context is reclaimed inside reclaim_ctx.
	 */
	cxl_afu_get(afu);
	return 0;
}

void cxl_context_set_mapping(struct cxl_context *ctx,
			     struct address_space *mapping)
{
	mutex_lock(&ctx->mapping_lock);
	ctx->mapping = mapping;
	mutex_unlock(&ctx->mapping_lock);
}
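
/*
 * Typical lifecycle (a sketch, based on how the cxl file operations use
 * these helpers; the exact call sites live in the char-device open path
 * and may differ between kernel versions):
 *
 *	ctx = cxl_context_alloc();
 *	if (!ctx)
 *		return -ENOMEM;
 *	rc = cxl_context_init(ctx, afu, master);
 *	if (rc)
 *		return rc;
 *	cxl_context_set_mapping(ctx, inode->i_mapping);
 *
 * The mapping is recorded so that cxl_context_detach_all() can tear down
 * any problem state mappings when the context is force-detached.
 */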

static vm_fault_t cxl_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct cxl_context *ctx = vma->vm_file->private_data;
	u64 area, offset;
	vm_fault_t ret;

	offset = vmf->pgoff << PAGE_SHIFT;

	pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n",
		 __func__, ctx->pe, vmf->address, offset);
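
	/*
	 * In dedicated process mode the whole problem state area of the AFU
	 * is mapped, bounded by the adapter's problem state size; otherwise
	 * only this context's per-process problem state area is visible.
	 */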
	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
		area = ctx->afu->psn_phys;
		if (offset >= ctx->afu->adapter->ps_size)
			return VM_FAULT_SIGBUS;
	} else {
		area = ctx->psn_phys;
		if (offset >= ctx->psn_size)
			return VM_FAULT_SIGBUS;
	}

	mutex_lock(&ctx->status_mutex);

	if (ctx->status != STARTED) {
		mutex_unlock(&ctx->status_mutex);
		pr_devel("%s: Context not started, failing problem state access\n", __func__);
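		/*
		 * When the context is not started and mmio_err_ff is set,
		 * back the mapping with a shared page of 0xff bytes so that
		 * userspace reads return all-ones instead of taking a SIGBUS.
		 */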
		if (ctx->mmio_err_ff) {
			if (!ctx->ff_page) {
				ctx->ff_page = alloc_page(GFP_USER);
				if (!ctx->ff_page)
					return VM_FAULT_OOM;
				memset(page_address(ctx->ff_page), 0xff, PAGE_SIZE);
			}
			get_page(ctx->ff_page);
			vmf->page = ctx->ff_page;
			vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
			return 0;
		}
		return VM_FAULT_SIGBUS;
	}

	ret = vmf_insert_pfn(vma, vmf->address, (area + offset) >> PAGE_SHIFT);

	mutex_unlock(&ctx->status_mutex);

	return ret;
}

static const struct vm_operations_struct cxl_mmap_vmops = {
	.fault = cxl_mmap_fault,
};

/*
 * Map a per-context mmio space into the given vma.
 */
int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
{
	u64 start = vma->vm_pgoff << PAGE_SHIFT;
	u64 len = vma->vm_end - vma->vm_start;

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
		if (start + len > ctx->afu->adapter->ps_size)
			return -EINVAL;

		if (cxl_is_power9()) {
			/*
			 * Make sure there is a valid problem state
			 * area space for this AFU.
			 */
			if (ctx->master && !ctx->afu->psa) {
				pr_devel("AFU doesn't support mmio space\n");
				return -EINVAL;
			}

			/* Can't mmap until the AFU is enabled */
			if (!ctx->afu->enabled)
				return -EBUSY;
		}
	} else {
		if (start + len > ctx->psn_size)
			return -EINVAL;

		/* Make sure there is a valid per process space for this AFU */
		if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
			pr_devel("AFU doesn't support mmio space\n");
			return -EINVAL;
		}

		/* Can't mmap until the AFU is enabled */
		if (!ctx->afu->enabled)
			return -EBUSY;
	}

	pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
		 ctx->psn_phys, ctx->pe, ctx->master);

	vm_flags_set(vma, VM_IO | VM_PFNMAP);
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &cxl_mmap_vmops;
	return 0;
}
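
/*
 * A sketch of how this is typically driven (the cxl char device and the
 * in-kernel API both funnel mmap requests here; names below follow the
 * file-ops path and may differ between kernel versions):
 *
 *	static int afu_mmap(struct file *file, struct vm_area_struct *vm)
 *	{
 *		struct cxl_context *ctx = file->private_data;
 *
 *		// AFU contexts can only be mapped once started
 *		if (ctx->status != STARTED)
 *			return -EIO;
 *
 *		return cxl_context_iomap(ctx, vm);
 *	}
 */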

/*
 * Detach a context from the hardware. This disables interrupts and doesn't
 * return until all outstanding interrupts for this context have completed. The
 * hardware should no longer access *ctx after this has returned.
 */
int __detach_context(struct cxl_context *ctx)
{
	enum cxl_context_status status;

	mutex_lock(&ctx->status_mutex);
	status = ctx->status;
	ctx->status = CLOSED;
	mutex_unlock(&ctx->status_mutex);
	if (status != STARTED)
		return -EBUSY;

	/* Only warn if we detached while the link was OK.
	 * If detach fails when hw is down, we don't care.
	 */
	WARN_ON(cxl_ops->detach_process(ctx) &&
		cxl_ops->link_ok(ctx->afu->adapter, ctx->afu));
	flush_work(&ctx->fault_work); /* Only needed for dedicated process */

	/*
	 * Wait until no further interrupts are presented by the PSL
	 * for this context.
	 */
	if (cxl_ops->irq_wait)
		cxl_ops->irq_wait(ctx);

	/* release the reference to the group leader and mm handling pid */
	put_pid(ctx->pid);

	cxl_ctx_put();

	/* Decrease the attached context count on the adapter */
	cxl_adapter_context_put(ctx->afu->adapter);

	/* Decrease the mm count on the context */
	cxl_context_mm_count_put(ctx);
	if (ctx->mm)
		mm_context_remove_copro(ctx->mm);
	ctx->mm = NULL;

	return 0;
}

/*
 * Detach the given context from the AFU. This doesn't actually
 * free the context but it should stop the context running in hardware
 * (i.e. prevent this context from generating any further interrupts
 * so that it can be freed).
 */
void cxl_context_detach(struct cxl_context *ctx)
{
	int rc;

	rc = __detach_context(ctx);
	if (rc)
		return;

	afu_release_irqs(ctx, ctx);
	wake_up_all(&ctx->wq);
}

/*
 * Detach all contexts on the given AFU.
 */
void cxl_context_detach_all(struct cxl_afu *afu)
{
	struct cxl_context *ctx;
	int tmp;

	mutex_lock(&afu->contexts_lock);
	idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
		/*
		 * Anything done in here needs to be set up before the IDR is
		 * created and torn down after the IDR is removed.
		 */
		cxl_context_detach(ctx);

		/*
		 * We are force detaching - remove any active PSA mappings so
		 * userspace cannot interfere with the card if it comes back.
		 * Easiest way to exercise this is to unbind and rebind the
		 * driver via sysfs while it is in use.
		 */
		mutex_lock(&ctx->mapping_lock);
		if (ctx->mapping)
			unmap_mapping_range(ctx->mapping, 0, 0, 1);
		mutex_unlock(&ctx->mapping_lock);
	}
	mutex_unlock(&afu->contexts_lock);
}

static void reclaim_ctx(struct rcu_head *rcu)
{
	struct cxl_context *ctx = container_of(rcu, struct cxl_context, rcu);

	if (cxl_is_power8())
		free_page((u64)ctx->sstp);
	if (ctx->ff_page)
		__free_page(ctx->ff_page);
	ctx->sstp = NULL;

	bitmap_free(ctx->irq_bitmap);

	/* Drop ref to the afu device taken during cxl_context_init */
	cxl_afu_put(ctx->afu);

	kfree(ctx);
}

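/*
 * Final teardown. Callers are expected to have detached the context first;
 * the actual free is deferred through RCU so that lookups done under
 * rcu_read_lock() (e.g. from the interrupt paths) which raced with the IDR
 * removal below can still dereference the context safely.
 */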
void cxl_context_free(struct cxl_context *ctx)
{
	if (ctx->kernelapi && ctx->mapping)
		cxl_release_mapping(ctx);
	mutex_lock(&ctx->afu->contexts_lock);
	idr_remove(&ctx->afu->contexts_idr, ctx->pe);
	mutex_unlock(&ctx->afu->contexts_lock);
	call_rcu(&ctx->rcu, reclaim_ctx);
}

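/*
 * These pin the mm_struct itself (mmgrab()/mmdrop()), not the address space:
 * the owning task may still exit and tear down its mappings, but the
 * structure stays valid until the context drops its count.
 */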
void cxl_context_mm_count_get(struct cxl_context *ctx)
{
	if (ctx->mm)
		mmgrab(ctx->mm);
}

void cxl_context_mm_count_put(struct cxl_context *ctx)
{
	if (ctx->mm)
		mmdrop(ctx->mm);
}