// SPDX-License-Identifier: GPL-2.0
/* arch/sparc64/mm/tsb.c
 *
 * Copyright (C) 2006, 2008 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/preempt.h>
#include <linux/slab.h>
#include <linux/mm_types.h>
#include <linux/pgtable.h>

#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/tsb.h>
#include <asm/tlb.h>
#include <asm/oplib.h>

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

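/* A TSB entry is indexed by hashing the virtual address: shift off the
 * page-size bits, then mask by the number of entries (always a power of
 * two).  The tag stored in an entry is simply vaddr >> 22.
 */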
static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift, unsigned long nentries)
{
	vaddr >>= hash_shift;
	return vaddr & (nentries - 1);
}

static inline int tag_compare(unsigned long tag, unsigned long vaddr)
{
	return (tag == (vaddr >> 22));
}

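/* Walk every entry of the kernel TSB and invalidate those whose tagged
 * virtual address falls inside [start, end).  Used when the range is so
 * large that a full scan is cheaper than per-page probing.
 */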
static void flush_tsb_kernel_range_scan(unsigned long start, unsigned long end)
{
	unsigned long idx;

	for (idx = 0; idx < KERNEL_TSB_NENTRIES; idx++) {
		struct tsb *ent = &swapper_tsb[idx];
		unsigned long match = idx << 13;

		match |= (ent->tag << 22);
		if (match >= start && match < end)
			ent->tag = (1UL << TSB_TAG_INVALID_BIT);
	}
}

/* TSB flushes need only occur on the processor initiating the address
 * space modification, not on each cpu the address space has run on.
 * Only the TLB flush needs that treatment.
 */

void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long v;

	if ((end - start) >> PAGE_SHIFT >= 2 * KERNEL_TSB_NENTRIES)
		return flush_tsb_kernel_range_scan(start, end);

	for (v = start; v < end; v += PAGE_SIZE) {
		unsigned long hash = tsb_hash(v, PAGE_SHIFT,
					      KERNEL_TSB_NENTRIES);
		struct tsb *ent = &swapper_tsb[hash];

		if (tag_compare(ent->tag, v))
			ent->tag = (1UL << TSB_TAG_INVALID_BIT);
	}
}

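/* Flush the single TSB entry that could map @v: compute the entry's
 * address from the hash and hand it, together with the expected tag,
 * to tsb_flush().
 */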
static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v,
				  unsigned long hash_shift,
				  unsigned long nentries)
{
	unsigned long tag, ent, hash;

	v &= ~0x1UL;
	hash = tsb_hash(v, hash_shift, nentries);
	ent = tsb + (hash * sizeof(struct tsb));
	tag = (v >> 22UL);

	tsb_flush(ent, tag);
}

static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
			    unsigned long tsb, unsigned long nentries)
{
	unsigned long i;

	for (i = 0; i < tb->tlb_nr; i++)
		__flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries);
}

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
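/* A huge page covers (1 << (hugepage_shift - hash_shift)) base-sized
 * pages, so probe and flush the TSB entry for each of those addresses.
 */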
static void __flush_huge_tsb_one_entry(unsigned long tsb, unsigned long v,
				       unsigned long hash_shift,
				       unsigned long nentries,
				       unsigned int hugepage_shift)
{
	unsigned int hpage_entries;
	unsigned int i;

	hpage_entries = 1 << (hugepage_shift - hash_shift);
	for (i = 0; i < hpage_entries; i++)
		__flush_tsb_one_entry(tsb, v + (i << hash_shift), hash_shift,
				      nentries);
}

static void __flush_huge_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
				 unsigned long tsb, unsigned long nentries,
				 unsigned int hugepage_shift)
{
	unsigned long i;

	for (i = 0; i < tb->tlb_nr; i++)
		__flush_huge_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift,
					   nentries, hugepage_shift);
}
#endif

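/* Flush the TSB entries for every address collected in the TLB batch,
 * selecting the base or huge-page TSB from tb->hugepage_shift.  On
 * cheetah_plus and hypervisor the TSB base is passed as a physical
 * address.
 */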
void flush_tsb_user(struct tlb_batch *tb)
{
	struct mm_struct *mm = tb->mm;
	unsigned long nentries, base, flags;

	spin_lock_irqsave(&mm->context.lock, flags);

	if (tb->hugepage_shift < REAL_HPAGE_SHIFT) {
		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
			base = __pa(base);
		if (tb->hugepage_shift == PAGE_SHIFT)
			__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
#if defined(CONFIG_HUGETLB_PAGE)
		else
			__flush_huge_tsb_one(tb, PAGE_SHIFT, base, nentries,
					     tb->hugepage_shift);
#endif
	}
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
			base = __pa(base);
		__flush_huge_tsb_one(tb, REAL_HPAGE_SHIFT, base, nentries,
				     tb->hugepage_shift);
	}
#endif
	spin_unlock_irqrestore(&mm->context.lock, flags);
}

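/* Single-address variant of flush_tsb_user(): flush the TSB entry (or
 * entries, for a huge page) covering @vaddr in the appropriate TSB.
 */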
void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,
			 unsigned int hugepage_shift)
{
	unsigned long nentries, base, flags;

	spin_lock_irqsave(&mm->context.lock, flags);

	if (hugepage_shift < REAL_HPAGE_SHIFT) {
		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
			base = __pa(base);
		if (hugepage_shift == PAGE_SHIFT)
			__flush_tsb_one_entry(base, vaddr, PAGE_SHIFT,
					      nentries);
#if defined(CONFIG_HUGETLB_PAGE)
		else
			__flush_huge_tsb_one_entry(base, vaddr, PAGE_SHIFT,
						   nentries, hugepage_shift);
#endif
	}
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
			base = __pa(base);
		__flush_huge_tsb_one_entry(base, vaddr, REAL_HPAGE_SHIFT,
					   nentries, hugepage_shift);
	}
#endif
	spin_unlock_irqrestore(&mm->context.lock, flags);
}

#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_8K
#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_8K

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_4MB
#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_4MB
#endif

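/* Fill in the tsb_config for @tsb_idx: record the entry count and build
 * the TSB register value, plus, when physical TSB addressing is not
 * available, the virtual mapping address and the locked TTE used to map
 * the TSB.  On hypervisor platforms the hv_tsb_descr is also prepared.
 */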
static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsigned long tsb_bytes)
{
	unsigned long tsb_reg, base, tsb_paddr;
	unsigned long page_sz, tte;

	mm->context.tsb_block[tsb_idx].tsb_nentries =
		tsb_bytes / sizeof(struct tsb);

	switch (tsb_idx) {
	case MM_TSB_BASE:
		base = TSBMAP_8K_BASE;
		break;
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	case MM_TSB_HUGE:
		base = TSBMAP_4M_BASE;
		break;
#endif
	default:
		BUG();
	}

	tte = pgprot_val(PAGE_KERNEL_LOCKED);
	tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));

	/* Use the smallest page size that can map the whole TSB
	 * in one TLB entry.
	 */
	switch (tsb_bytes) {
	case 8192 << 0:
		tsb_reg = 0x0UL;
#ifdef DCACHE_ALIASING_POSSIBLE
		base += (tsb_paddr & 8192);
#endif
		page_sz = 8192;
		break;

	case 8192 << 1:
		tsb_reg = 0x1UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 2:
		tsb_reg = 0x2UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 3:
		tsb_reg = 0x3UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 4:
		tsb_reg = 0x4UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 5:
		tsb_reg = 0x5UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 6:
		tsb_reg = 0x6UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 7:
		tsb_reg = 0x7UL;
		page_sz = 4 * 1024 * 1024;
		break;

	default:
		printk(KERN_ERR "TSB[%s:%d]: Impossible TSB size %lu, killing process.\n",
		       current->comm, current->pid, tsb_bytes);
		BUG();
	}
	tte |= pte_sz_bits(page_sz);

	if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
		/* Physical mapping, no locked TLB entry for TSB. */
		tsb_reg |= tsb_paddr;

		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = 0;
		mm->context.tsb_block[tsb_idx].tsb_map_pte = 0;
	} else {
		tsb_reg |= base;
		tsb_reg |= (tsb_paddr & (page_sz - 1UL));
		tte |= (tsb_paddr & ~(page_sz - 1UL));

		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = base;
		mm->context.tsb_block[tsb_idx].tsb_map_pte = tte;
	}

	/* Setup the Hypervisor TSB descriptor. */
	if (tlb_type == hypervisor) {
		struct hv_tsb_descr *hp = &mm->context.tsb_descr[tsb_idx];

		switch (tsb_idx) {
		case MM_TSB_BASE:
			hp->pgsz_idx = HV_PGSZ_IDX_BASE;
			break;
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
		case MM_TSB_HUGE:
			hp->pgsz_idx = HV_PGSZ_IDX_HUGE;
			break;
#endif
		default:
			BUG();
		}
		hp->assoc = 1;
		hp->num_ttes = tsb_bytes / 16;
		hp->ctx_idx = 0;
		switch (tsb_idx) {
		case MM_TSB_BASE:
			hp->pgsz_mask = HV_PGSZ_MASK_BASE;
			break;
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
		case MM_TSB_HUGE:
			hp->pgsz_mask = HV_PGSZ_MASK_HUGE;
			break;
#endif
		default:
			BUG();
		}
		hp->tsb_base = tsb_paddr;
		hp->resv = 0;
	}
}

struct kmem_cache *pgtable_cache __read_mostly;

static struct kmem_cache *tsb_caches[8] __read_mostly;

static const char *tsb_cache_names[8] = {
	"tsb_8KB",
	"tsb_16KB",
	"tsb_32KB",
	"tsb_64KB",
	"tsb_128KB",
	"tsb_256KB",
	"tsb_512KB",
	"tsb_1MB",
};

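/* Create the page-table slab cache and one TSB slab cache per supported
 * TSB size (8KB through 1MB).  Each TSB cache is aligned to its size so
 * the allocations satisfy the alignment BUG_ON() in setup_tsb_params().
 */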
void __init pgtable_cache_init(void)
{
	unsigned long i;

	pgtable_cache = kmem_cache_create("pgtable_cache",
					  PAGE_SIZE, PAGE_SIZE,
					  0,
					  _clear_page);
	if (!pgtable_cache) {
		prom_printf("pgtable_cache_init(): Could not create!\n");
		prom_halt();
	}

	for (i = 0; i < ARRAY_SIZE(tsb_cache_names); i++) {
		unsigned long size = 8192 << i;
		const char *name = tsb_cache_names[i];

		tsb_caches[i] = kmem_cache_create(name,
						  size, size,
						  0, NULL);
		if (!tsb_caches[i]) {
			prom_printf("Could not create %s cache\n", name);
			prom_halt();
		}
	}
}

int sysctl_tsb_ratio = -2;

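/* Convert a TSB size in bytes to the RSS threshold at which that TSB is
 * considered too small.  sysctl_tsb_ratio adjusts the threshold relative
 * to the raw entry count: with the default of -2, an 8KB TSB (512
 * 16-byte entries) gets a limit of 512 - (512 >> 2) = 384, i.e. 3/4 of
 * its capacity.
 */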
static unsigned long tsb_size_to_rss_limit(unsigned long new_size)
{
	unsigned long num_ents = (new_size / sizeof(struct tsb));

	if (sysctl_tsb_ratio < 0)
		return num_ents - (num_ents >> -sysctl_tsb_ratio);
	else
		return num_ents + (num_ents >> sysctl_tsb_ratio);
}

/* When the RSS of an address space exceeds tsb_rss_limit for a TSB,
 * do_sparc64_fault() invokes this routine to try and grow it.
 *
 * When we reach the maximum TSB size supported, we stick ~0UL into
 * tsb_rss_limit for that TSB so the grow checks in do_sparc64_fault()
 * will not trigger any longer.
 *
 * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
 * of two.  The TSB must be aligned to its size, so f.e. a 512K TSB
 * must be 512K aligned.  It also must be physically contiguous, so we
 * cannot use vmalloc().
 *
 * The idea here is to grow the TSB when the RSS of the process approaches
 * the number of entries that the current TSB can hold at once.  Currently,
 * we trigger when the RSS hits 3/4 of the TSB capacity.
 */
void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
{
	unsigned long max_tsb_size = 1 * 1024 * 1024;
	unsigned long new_size, old_size, flags;
	struct tsb *old_tsb, *new_tsb;
	unsigned long new_cache_index, old_cache_index;
	unsigned long new_rss_limit;
	gfp_t gfp_flags;

	if (max_tsb_size > PAGE_SIZE << MAX_PAGE_ORDER)
		max_tsb_size = PAGE_SIZE << MAX_PAGE_ORDER;

	new_cache_index = 0;
	for (new_size = 8192; new_size < max_tsb_size; new_size <<= 1UL) {
		new_rss_limit = tsb_size_to_rss_limit(new_size);
		if (new_rss_limit > rss)
			break;
		new_cache_index++;
	}

	if (new_size == max_tsb_size)
		new_rss_limit = ~0UL;

retry_tsb_alloc:
	gfp_flags = GFP_KERNEL;
	if (new_size > (PAGE_SIZE * 2))
		gfp_flags |= __GFP_NOWARN | __GFP_NORETRY;

	new_tsb = kmem_cache_alloc_node(tsb_caches[new_cache_index],
					gfp_flags, numa_node_id());
	if (unlikely(!new_tsb)) {
		/* Not being able to fork due to a high-order TSB
		 * allocation failure is very bad behavior.  Just back
		 * down to a 0-order allocation and force no TSB
		 * growing for this address space.
		 */
		if (mm->context.tsb_block[tsb_index].tsb == NULL &&
		    new_cache_index > 0) {
			new_cache_index = 0;
			new_size = 8192;
			new_rss_limit = ~0UL;
			goto retry_tsb_alloc;
		}

		/* If we failed on a TSB grow, we are under serious
		 * memory pressure so don't try to grow any more.
		 */
		if (mm->context.tsb_block[tsb_index].tsb != NULL)
			mm->context.tsb_block[tsb_index].tsb_rss_limit = ~0UL;
		return;
	}

	/* Mark all tags as invalid.  */
	tsb_init(new_tsb, new_size);

	/* Ok, we are about to commit the changes.  If we are
	 * growing an existing TSB the locking is very tricky,
	 * so WATCH OUT!
	 *
	 * We have to hold mm->context.lock while committing to the
	 * new TSB, this synchronizes us with processors in
	 * flush_tsb_user() and switch_mm() for this address space.
	 *
	 * But even with that lock held, processors run asynchronously
	 * accessing the old TSB via TLB miss handling.  This is OK
	 * because those actions are just propagating state from the
	 * Linux page tables into the TSB, page table mappings are not
	 * being changed.  If a real fault occurs, the processor will
	 * synchronize with us when it hits flush_tsb_user(), this is
	 * also true for the case where vmscan is modifying the page
	 * tables.  The only thing we need to be careful with is to
	 * skip any locked TSB entries during copy_tsb().
	 *
	 * When we finish committing to the new TSB, we have to drop
	 * the lock and ask all other cpus running this address space
	 * to run tsb_context_switch() to see the new TSB table.
	 */
	spin_lock_irqsave(&mm->context.lock, flags);

	old_tsb = mm->context.tsb_block[tsb_index].tsb;
	old_cache_index =
		(mm->context.tsb_block[tsb_index].tsb_reg_val & 0x7UL);
	old_size = (mm->context.tsb_block[tsb_index].tsb_nentries *
		    sizeof(struct tsb));

	/* Handle multiple threads trying to grow the TSB at the same time.
	 * One will get in here first, and bump the size and the RSS limit.
	 * The others will get in here next and hit this check.
	 */
	if (unlikely(old_tsb &&
		     (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) {
		spin_unlock_irqrestore(&mm->context.lock, flags);

		kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
		return;
	}

	mm->context.tsb_block[tsb_index].tsb_rss_limit = new_rss_limit;

	if (old_tsb) {
		extern void copy_tsb(unsigned long old_tsb_base,
				     unsigned long old_tsb_size,
				     unsigned long new_tsb_base,
				     unsigned long new_tsb_size,
				     unsigned long page_size_shift);
		unsigned long old_tsb_base = (unsigned long) old_tsb;
		unsigned long new_tsb_base = (unsigned long) new_tsb;

		if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
			old_tsb_base = __pa(old_tsb_base);
			new_tsb_base = __pa(new_tsb_base);
		}
		copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size,
			 tsb_index == MM_TSB_BASE ?
			 PAGE_SHIFT : REAL_HPAGE_SHIFT);
	}

	mm->context.tsb_block[tsb_index].tsb = new_tsb;
	setup_tsb_params(mm, tsb_index, new_size);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	/* If old_tsb is NULL, we're being invoked for the first time
	 * from init_new_context().
	 */
	if (old_tsb) {
		/* Reload it on the local cpu.  */
		tsb_context_switch(mm);

		/* Now force other processors to do the same.  */
		preempt_disable();
		smp_tsb_sync(mm);
		preempt_enable();

		/* Now it is safe to free the old tsb.  */
		kmem_cache_free(tsb_caches[old_cache_index], old_tsb);
	}
}

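/* Initialize the per-mm MMU context state for a new address space:
 * reset the TSB blocks and counters, then size the initial base TSB
 * from the current RSS.  On fork the huge-page TSB is sized from the
 * saved hugetlb/THP PTE counts.
 */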
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	unsigned long mm_rss = get_mm_rss(mm);
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	unsigned long saved_hugetlb_pte_count;
	unsigned long saved_thp_pte_count;
#endif
	unsigned int i;

	spin_lock_init(&mm->context.lock);

	mm->context.sparc64_ctx_val = 0UL;

	mm->context.tag_store = NULL;
	spin_lock_init(&mm->context.tag_lock);

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	/* We reset them to zero because the fork() page copying
	 * will re-increment the counters as the parent PTEs are
	 * copied into the child address space.
	 */
	saved_hugetlb_pte_count = mm->context.hugetlb_pte_count;
	saved_thp_pte_count = mm->context.thp_pte_count;
	mm->context.hugetlb_pte_count = 0;
	mm->context.thp_pte_count = 0;

	mm_rss -= saved_thp_pte_count * (HPAGE_SIZE / PAGE_SIZE);
#endif

	/* copy_mm() copies over the parent's mm_struct before calling
	 * us, so we need to zero out the TSB pointer or else tsb_grow()
	 * will be confused and think there is an older TSB to free up.
	 */
	for (i = 0; i < MM_NUM_TSBS; i++)
		mm->context.tsb_block[i].tsb = NULL;

	/* If this is fork, inherit the parent's TSB size.  We would
	 * grow it to that size on the first page fault anyways.
	 */
	tsb_grow(mm, MM_TSB_BASE, mm_rss);

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	if (unlikely(saved_hugetlb_pte_count + saved_thp_pte_count))
		tsb_grow(mm, MM_TSB_HUGE,
			 (saved_hugetlb_pte_count + saved_thp_pte_count) *
			 REAL_HPAGE_PER_HPAGE);
#endif

	if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
		return -ENOMEM;

	return 0;
}

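/* Free one TSB back to its slab cache; the cache index is recovered
 * from the low three bits of tsb_reg_val, which encode the TSB size.
 */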
static void tsb_destroy_one(struct tsb_config *tp)
{
	unsigned long cache_index;

	if (!tp->tsb)
		return;
	cache_index = tp->tsb_reg_val & 0x7UL;
	kmem_cache_free(tsb_caches[cache_index], tp->tsb);
	tp->tsb = NULL;
	tp->tsb_reg_val = 0UL;
}

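/* Tear down the MMU context: free all TSBs, release the context ID back
 * to the allocation bitmap, and free any ADI tag storage.
 */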
void destroy_context(struct mm_struct *mm)
{
	unsigned long flags, i;

	for (i = 0; i < MM_NUM_TSBS; i++)
		tsb_destroy_one(&mm->context.tsb_block[i]);

	spin_lock_irqsave(&ctx_alloc_lock, flags);

	if (CTX_VALID(mm->context)) {
		unsigned long nr = CTX_NRBITS(mm->context);
		mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
	}

	spin_unlock_irqrestore(&ctx_alloc_lock, flags);

	/* If ADI tag storage was allocated for this task, free it */
	if (mm->context.tag_store) {
		tag_storage_desc_t *tag_desc;
		unsigned long max_desc;
		unsigned char *tags;

		tag_desc = mm->context.tag_store;
		max_desc = PAGE_SIZE/sizeof(tag_storage_desc_t);
		for (i = 0; i < max_desc; i++) {
			tags = tag_desc->tags;
			tag_desc->tags = NULL;
			kfree(tags);
			tag_desc++;
		}
		kfree(mm->context.tag_store);
		mm->context.tag_store = NULL;
	}
}