/* Completion of TCB initialization after TLS_INIT_TP.  NPTL version.
   Copyright (C) 2020-2024 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
18 | |
19 | #include <kernel-features.h> |
20 | #include <ldsodefs.h> |
21 | #include <list.h> |
22 | #include <pthreadP.h> |
23 | #include <tls.h> |
24 | #include <rseq-internal.h> |
25 | #include <thread_pointer.h> |
26 | |
27 | #define TUNABLE_NAMESPACE pthread |
28 | #include <dl-tunables.h> |
29 | |
#ifndef __ASSUME_SET_ROBUST_LIST
/* True once the set_robust_list system call has succeeded for the
   initial thread (set in __tls_init_tp below); remains false on
   kernels that lack the call.  */
bool __nptl_set_robust_list_avail;
rtld_hidden_data_def (__nptl_set_robust_list_avail)
#endif

/* Set externally (by the debugger) before the initial thread is
   visible on GL (dl_stack_user); __tls_init_tp copies it into the
   TCB's report_events field.  */
bool __nptl_initial_report_events;
rtld_hidden_def (__nptl_initial_report_events)
37 | |
#ifdef SHARED
/* Dummy implementation.  See __rtld_mutex_init.  Installed as the
   initial ___rtld_mutex_lock/___rtld_mutex_unlock callback; it takes
   no lock and always reports success.  */
static int
rtld_mutex_dummy (pthread_mutex_t *lock)
{
  return 0;
}
#endif
46 | |
/* ABI-exported rseq state.  __rseq_size and __rseq_offset live in
   .data.relro; __tls_init_tp stores to them through writable aliases
   before the segment is write-protected.  */
const unsigned int __rseq_flags;
const unsigned int __rseq_size attribute_relro;
const ptrdiff_t __rseq_offset attribute_relro;
50 | |
51 | void |
52 | __tls_pre_init_tp (void) |
53 | { |
54 | /* The list data structures are not consistent until |
55 | initialized. */ |
56 | INIT_LIST_HEAD (&GL (dl_stack_used)); |
57 | INIT_LIST_HEAD (&GL (dl_stack_user)); |
58 | INIT_LIST_HEAD (&GL (dl_stack_cache)); |
59 | |
60 | #ifdef SHARED |
61 | ___rtld_mutex_lock = rtld_mutex_dummy; |
62 | ___rtld_mutex_unlock = rtld_mutex_dummy; |
63 | #endif |
64 | } |
65 | |
66 | void |
67 | __tls_init_tp (void) |
68 | { |
69 | struct pthread *pd = THREAD_SELF; |
70 | |
71 | /* Set up thread stack list management. */ |
72 | list_add (newp: &pd->list, head: &GL (dl_stack_user)); |
73 | |
74 | /* Early initialization of the TCB. */ |
75 | pd->tid = INTERNAL_SYSCALL_CALL (set_tid_address, &pd->tid); |
76 | THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]); |
77 | THREAD_SETMEM (pd, user_stack, true); |
78 | |
79 | /* Before initializing GL (dl_stack_user), the debugger could not |
80 | find us and had to set __nptl_initial_report_events. Propagate |
81 | its setting. */ |
82 | THREAD_SETMEM (pd, report_events, __nptl_initial_report_events); |
83 | |
84 | /* Initialize the robust mutex data. */ |
85 | { |
86 | #if __PTHREAD_MUTEX_HAVE_PREV |
87 | pd->robust_prev = &pd->robust_head; |
88 | #endif |
89 | pd->robust_head.list = &pd->robust_head; |
90 | pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock) |
91 | - offsetof (pthread_mutex_t, |
92 | __data.__list.__next)); |
93 | int res = INTERNAL_SYSCALL_CALL (set_robust_list, &pd->robust_head, |
94 | sizeof (struct robust_list_head)); |
95 | if (!INTERNAL_SYSCALL_ERROR_P (res)) |
96 | { |
97 | #ifndef __ASSUME_SET_ROBUST_LIST |
98 | __nptl_set_robust_list_avail = true; |
99 | #endif |
100 | } |
101 | } |
102 | |
103 | { |
104 | bool do_rseq = true; |
105 | do_rseq = TUNABLE_GET (rseq, int, NULL); |
106 | if (rseq_register_current_thread (self: pd, do_rseq)) |
107 | { |
108 | /* We need a writable view of the variables. They are in |
109 | .data.relro and are not yet write-protected. */ |
110 | extern unsigned int size __asm__ ("__rseq_size" ); |
111 | size = sizeof (pd->rseq_area); |
112 | } |
113 | |
114 | #ifdef RSEQ_SIG |
115 | /* This should be a compile-time constant, but the current |
116 | infrastructure makes it difficult to determine its value. Not |
117 | all targets support __thread_pointer, so set __rseq_offset only |
118 | if the rseq registration may have happened because RSEQ_SIG is |
119 | defined. */ |
120 | extern ptrdiff_t offset __asm__ ("__rseq_offset" ); |
121 | offset = (char *) &pd->rseq_area - (char *) __thread_pointer (); |
122 | #endif |
123 | } |
124 | |
125 | /* Set initial thread's stack block from 0 up to __libc_stack_end. |
126 | It will be bigger than it actually is, but for unwind.c/pt-longjmp.c |
127 | purposes this is good enough. */ |
128 | THREAD_SETMEM (pd, stackblock_size, (size_t) __libc_stack_end); |
129 | } |
130 | |