1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | #ifndef _LINUX_SCHED_TASK_STACK_H |
3 | #define _LINUX_SCHED_TASK_STACK_H |
4 | |
5 | /* |
6 | * task->stack (kernel stack) handling interfaces: |
7 | */ |
8 | |
9 | #include <linux/sched.h> |
10 | #include <linux/magic.h> |
11 | #include <linux/refcount.h> |
12 | |
13 | #ifdef CONFIG_THREAD_INFO_IN_TASK |
14 | |
15 | /* |
16 | * When accessing the stack of a non-current task that might exit, use |
17 | * try_get_task_stack() instead. task_stack_page will return a pointer |
18 | * that could get freed out from under you. |
19 | */ |
20 | static __always_inline void *task_stack_page(const struct task_struct *task) |
21 | { |
22 | return task->stack; |
23 | } |
24 | |
/* thread_info is embedded in task_struct here, so there is nothing to copy. */
#define setup_thread_stack(new,old) do { } while(0)
26 | |
/*
 * end_of_stack - address of the last usable long (the stack-end canary
 * slot) of @task's kernel stack.
 */
static __always_inline unsigned long *end_of_stack(const struct task_struct *task)
{
#ifdef CONFIG_STACK_GROWSUP
	/* Stack grows up: the canary is the topmost long of the area. */
	return (unsigned long *)((unsigned long)task->stack + THREAD_SIZE) - 1;
#else
	/* Stack grows down: the canary is the very first long of the area. */
	return task->stack;
#endif
}
35 | |
36 | #elif !defined(__HAVE_THREAD_FUNCTIONS) |
37 | |
/* task->stack still holds the stack base in this configuration. */
#define task_stack_page(task) ((void *)(task)->stack)
39 | |
40 | static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org) |
41 | { |
42 | *task_thread_info(p) = *task_thread_info(org); |
43 | task_thread_info(p)->task = p; |
44 | } |
45 | |
46 | /* |
47 | * Return the address of the last usable long on the stack. |
48 | * |
49 | * When the stack grows down, this is just above the thread |
50 | * info struct. Going any lower will corrupt the threadinfo. |
51 | * |
52 | * When the stack grows up, this is the highest address. |
53 | * Beyond that position, we corrupt data on the next page. |
54 | */ |
static inline unsigned long *end_of_stack(struct task_struct *p)
{
#ifdef CONFIG_STACK_GROWSUP
	/* Last long below the top of the THREAD_SIZE stack area. */
	return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
#else
	/* First long just above the thread_info struct at the stack's base. */
	return (unsigned long *)(task_thread_info(p) + 1);
#endif
}
63 | |
64 | #endif |
65 | |
66 | #ifdef CONFIG_THREAD_INFO_IN_TASK |
67 | static inline void *try_get_task_stack(struct task_struct *tsk) |
68 | { |
69 | return refcount_inc_not_zero(r: &tsk->stack_refcount) ? |
70 | task_stack_page(task: tsk) : NULL; |
71 | } |
72 | |
73 | extern void put_task_stack(struct task_struct *tsk); |
74 | #else |
static inline void *try_get_task_stack(struct task_struct *tsk)
{
	/*
	 * Without CONFIG_THREAD_INFO_IN_TASK the stack lives for as long
	 * as the task_struct itself, so no refcounting is needed.
	 */
	return task_stack_page(tsk);
}

/* Pairs with try_get_task_stack(); a no-op in this configuration. */
static inline void put_task_stack(struct task_struct *tsk) {}
81 | #endif |
82 | |
83 | void exit_task_stack_account(struct task_struct *tsk); |
84 | |
/*
 * True if the canary word at the deepest usable stack slot has been
 * overwritten, i.e. the stack overflowed at some point.
 */
#define task_stack_end_corrupted(task) \
	(*(end_of_stack(task)) != STACK_END_MAGIC)
87 | |
88 | static inline int object_is_on_stack(const void *obj) |
89 | { |
90 | void *stack = task_stack_page(current); |
91 | |
92 | return (obj >= stack) && (obj < (stack + THREAD_SIZE)); |
93 | } |
94 | |
95 | extern void thread_stack_cache_init(void); |
96 | |
97 | #ifdef CONFIG_DEBUG_STACK_USAGE |
/*
 * stack_not_used - bytes of @p's kernel stack that were never written.
 *
 * Scans from the stack-end canary toward the in-use end until the first
 * non-zero long, and returns the distance in bytes.  Debug-only
 * (CONFIG_DEBUG_STACK_USAGE) and inherently racy against a running task.
 *
 * Fix: the original text contained leaked IDE parameter-name hints
 * ("task:") inside the end_of_stack() calls, which is not valid C.
 */
static inline unsigned long stack_not_used(struct task_struct *p)
{
	unsigned long *n = end_of_stack(p);

	do { 	/* Skip over canary */
# ifdef CONFIG_STACK_GROWSUP
		n--;
# else
		n++;
# endif
	} while (!*n);

# ifdef CONFIG_STACK_GROWSUP
	return (unsigned long)end_of_stack(p) - (unsigned long)n;
# else
	return (unsigned long)n - (unsigned long)end_of_stack(p);
# endif
}
116 | #endif |
117 | extern void set_task_stack_end_magic(struct task_struct *tsk); |
118 | |
119 | #ifndef __HAVE_ARCH_KSTACK_END |
120 | static inline int kstack_end(void *addr) |
121 | { |
122 | /* Reliable end of stack detection: |
123 | * Some APM bios versions misalign the stack |
124 | */ |
125 | return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*))); |
126 | } |
127 | #endif |
128 | |
129 | #endif /* _LINUX_SCHED_TASK_STACK_H */ |
130 | |