// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992 Darren Senn
 */

/* These are all the functions necessary to implement itimers */

#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/syscalls.h>
#include <linux/time.h>
#include <linux/sched/signal.h>
#include <linux/sched/cputime.h>
#include <linux/posix-timers.h>
#include <linux/hrtimer.h>
#include <trace/events/timer.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

/**
 * itimer_get_remtime - get remaining time for the timer
 *
 * @timer: the timer to read
 *
 * Returns the delta between the expiry time and now, which can be
 * less than zero or 1usec for a pending expired timer
 */
static struct timespec64 itimer_get_remtime(struct hrtimer *timer)
{
	ktime_t rem = __hrtimer_get_remaining(timer, true);

	/*
	 * Racy but safe: if the itimer expires after the above
	 * __hrtimer_get_remaining() call but before this condition
	 * then we return 0 - which is correct.
	 */
	if (hrtimer_active(timer)) {
		if (rem <= 0)
			rem = NSEC_PER_USEC;
	} else
		rem = 0;

	return ktime_to_timespec64(rem);
}

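/*
 * Read the current state of an ITIMER_VIRTUAL or ITIMER_PROF timer.
 * Samples the thread group CPU time under sighand->siglock and converts
 * the remaining expiry time and the increment to timespec64; a timer
 * that is about to fire reports TICK_NSEC instead of zero, mirroring
 * the "pending but expired" convention of itimer_get_remtime() above.
 */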
static void get_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
			   struct itimerspec64 *const value)
{
	u64 val, interval;
	struct cpu_itimer *it = &tsk->signal->it[clock_id];

	spin_lock_irq(&tsk->sighand->siglock);

	val = it->expires;
	interval = it->incr;
	if (val) {
		u64 t, samples[CPUCLOCK_MAX];

		thread_group_sample_cputime(tsk, samples);
		t = samples[clock_id];

		if (val < t)
			/* about to fire */
			val = TICK_NSEC;
		else
			val -= t;
	}

	spin_unlock_irq(&tsk->sighand->siglock);

	value->it_value = ns_to_timespec64(val);
	value->it_interval = ns_to_timespec64(interval);
}

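/*
 * Read one of the three interval timers of the current task into @value.
 * ITIMER_REAL is backed by the signal_struct hrtimer, while
 * ITIMER_VIRTUAL and ITIMER_PROF are based on the process CPU clocks.
 */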
static int do_getitimer(int which, struct itimerspec64 *value)
{
	struct task_struct *tsk = current;

	switch (which) {
	case ITIMER_REAL:
		spin_lock_irq(&tsk->sighand->siglock);
		value->it_value = itimer_get_remtime(&tsk->signal->real_timer);
		value->it_interval =
			ktime_to_timespec64(tsk->signal->it_real_incr);
		spin_unlock_irq(&tsk->sighand->siglock);
		break;
	case ITIMER_VIRTUAL:
		get_cpu_itimer(tsk, CPUCLOCK_VIRT, value);
		break;
	case ITIMER_PROF:
		get_cpu_itimer(tsk, CPUCLOCK_PROF, value);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

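/*
 * Copy an itimerspec64 to userspace in the legacy itimerval layout,
 * truncating nanoseconds to microseconds.
 */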
static int put_itimerval(struct __kernel_old_itimerval __user *o,
			 const struct itimerspec64 *i)
{
	struct __kernel_old_itimerval v;

	v.it_interval.tv_sec = i->it_interval.tv_sec;
	v.it_interval.tv_usec = i->it_interval.tv_nsec / NSEC_PER_USEC;
	v.it_value.tv_sec = i->it_value.tv_sec;
	v.it_value.tv_usec = i->it_value.tv_nsec / NSEC_PER_USEC;
	return copy_to_user(o, &v, sizeof(struct __kernel_old_itimerval)) ? -EFAULT : 0;
}


SYSCALL_DEFINE2(getitimer, int, which, struct __kernel_old_itimerval __user *, value)
{
	struct itimerspec64 get_buffer;
	int error = do_getitimer(which, &get_buffer);

	if (!error && put_itimerval(value, &get_buffer))
		error = -EFAULT;
	return error;
}
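
/*
 * Userspace view of the syscall above (illustrative sketch only, not part
 * of the kernel build): reading the remaining time of ITIMER_REAL through
 * the libc wrapper.
 *
 *	#include <stdio.h>
 *	#include <sys/time.h>
 *
 *	int main(void)
 *	{
 *		struct itimerval it;
 *
 *		if (getitimer(ITIMER_REAL, &it) == 0)
 *			printf("remaining: %ld.%06ld s\n",
 *			       (long)it.it_value.tv_sec,
 *			       (long)it.it_value.tv_usec);
 *		return 0;
 *	}
 */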

#if defined(CONFIG_COMPAT) || defined(CONFIG_ALPHA)
struct old_itimerval32 {
	struct old_timeval32 it_interval;
	struct old_timeval32 it_value;
};

static int put_old_itimerval32(struct old_itimerval32 __user *o,
			       const struct itimerspec64 *i)
{
	struct old_itimerval32 v32;

	v32.it_interval.tv_sec = i->it_interval.tv_sec;
	v32.it_interval.tv_usec = i->it_interval.tv_nsec / NSEC_PER_USEC;
	v32.it_value.tv_sec = i->it_value.tv_sec;
	v32.it_value.tv_usec = i->it_value.tv_nsec / NSEC_PER_USEC;
	return copy_to_user(o, &v32, sizeof(struct old_itimerval32)) ? -EFAULT : 0;
}

COMPAT_SYSCALL_DEFINE2(getitimer, int, which,
		       struct old_itimerval32 __user *, value)
{
	struct itimerspec64 get_buffer;
	int error = do_getitimer(which, &get_buffer);

	if (!error && put_old_itimerval32(value, &get_buffer))
		error = -EFAULT;
	return error;
}
#endif

/*
 * Invoked from dequeue_signal() when SIGALRM is delivered.
 *
 * Restart the ITIMER_REAL timer if it is armed as a periodic timer. Doing
 * this in the signal delivery path instead of self rearming prevents a DoS
 * with small increments in the high resolution timer case and reduces timer
 * noise in general.
 */
void posixtimer_rearm_itimer(struct task_struct *tsk)
{
	struct hrtimer *tmr = &tsk->signal->real_timer;

	if (!hrtimer_is_queued(tmr) && tsk->signal->it_real_incr != 0) {
		hrtimer_forward(tmr, tmr->base->get_time(),
				tsk->signal->it_real_incr);
		hrtimer_restart(tmr);
	}
}

/*
 * Interval timers are restarted in the signal delivery path. See
 * posixtimer_rearm_itimer().
 */
enum hrtimer_restart it_real_fn(struct hrtimer *timer)
{
	struct signal_struct *sig =
		container_of(timer, struct signal_struct, real_timer);
	struct pid *leader_pid = sig->pids[PIDTYPE_TGID];

	trace_itimer_expire(ITIMER_REAL, leader_pid, 0);
	kill_pid_info(SIGALRM, SEND_SIG_PRIV, leader_pid);

	return HRTIMER_NORESTART;
}

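/*
 * Arm or disarm an ITIMER_VIRTUAL or ITIMER_PROF timer. The new expiry
 * and increment are stored in the shared signal_struct under
 * sighand->siglock; a non-zero expiry is padded by TICK_NSEC, presumably
 * so the timer cannot fire before the requested time has fully elapsed.
 * The previous state is optionally returned through @ovalue.
 */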
static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
			   const struct itimerspec64 *const value,
			   struct itimerspec64 *const ovalue)
{
	u64 oval, nval, ointerval, ninterval;
	struct cpu_itimer *it = &tsk->signal->it[clock_id];

	nval = timespec64_to_ns(&value->it_value);
	ninterval = timespec64_to_ns(&value->it_interval);

	spin_lock_irq(&tsk->sighand->siglock);

	oval = it->expires;
	ointerval = it->incr;
	if (oval || nval) {
		if (nval > 0)
			nval += TICK_NSEC;
		set_process_cpu_timer(tsk, clock_id, &nval, &oval);
	}
	it->expires = nval;
	it->incr = ninterval;
	trace_itimer_state(clock_id == CPUCLOCK_VIRT ?
			   ITIMER_VIRTUAL : ITIMER_PROF, value, nval);

	spin_unlock_irq(&tsk->sighand->siglock);

	if (ovalue) {
		ovalue->it_value = ns_to_timespec64(oval);
		ovalue->it_interval = ns_to_timespec64(ointerval);
	}
}

/*
 * Returns true if the timeval is in canonical form
 */
#define timeval_valid(t) \
	(((t)->tv_sec >= 0) && (((unsigned long) (t)->tv_usec) < USEC_PER_SEC))

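/*
 * Arm or disarm one of the three interval timers. For ITIMER_REAL the
 * hrtimer is cancelled first, waiting for a concurrently running expiry
 * handler if necessary, before the new expiry and increment are
 * installed; the CPU clock based timers are handled by set_cpu_itimer().
 * The previous state is returned through @ovalue when requested.
 */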
static int do_setitimer(int which, struct itimerspec64 *value,
			struct itimerspec64 *ovalue)
{
	struct task_struct *tsk = current;
	struct hrtimer *timer;
	ktime_t expires;

	switch (which) {
	case ITIMER_REAL:
again:
		spin_lock_irq(&tsk->sighand->siglock);
		timer = &tsk->signal->real_timer;
		if (ovalue) {
			ovalue->it_value = itimer_get_remtime(timer);
			ovalue->it_interval
				= ktime_to_timespec64(tsk->signal->it_real_incr);
		}
		/* We are sharing ->siglock with it_real_fn() */
		if (hrtimer_try_to_cancel(timer) < 0) {
			spin_unlock_irq(&tsk->sighand->siglock);
			hrtimer_cancel_wait_running(timer);
			goto again;
		}
		expires = timespec64_to_ktime(value->it_value);
		if (expires != 0) {
			tsk->signal->it_real_incr =
				timespec64_to_ktime(value->it_interval);
			hrtimer_start(timer, expires, HRTIMER_MODE_REL);
		} else
			tsk->signal->it_real_incr = 0;

		trace_itimer_state(ITIMER_REAL, value, 0);
		spin_unlock_irq(&tsk->sighand->siglock);
		break;
	case ITIMER_VIRTUAL:
		set_cpu_itimer(tsk, CPUCLOCK_VIRT, value, ovalue);
		break;
	case ITIMER_PROF:
		set_cpu_itimer(tsk, CPUCLOCK_PROF, value, ovalue);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

#ifdef CONFIG_SECURITY_SELINUX
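/*
 * Disarm all three interval timers of the current task. Built only when
 * SELinux is enabled, which appears to be the sole user of this helper.
 */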
void clear_itimer(void)
{
	struct itimerspec64 v = {};
	int i;

	for (i = 0; i < 3; i++)
		do_setitimer(i, &v, NULL);
}
#endif

#ifdef __ARCH_WANT_SYS_ALARM

/**
 * alarm_setitimer - set alarm in seconds
 *
 * @seconds: number of seconds until alarm
 *	     0 disables the alarm
 *
 * Returns the remaining time in seconds of a pending timer or 0 when
 * the timer is not active.
 *
 * On 32 bit machines the seconds value is limited to INT_MAX to avoid
 * negative timeval settings which would cause immediate expiry.
 */
static unsigned int alarm_setitimer(unsigned int seconds)
{
	struct itimerspec64 it_new, it_old;

#if BITS_PER_LONG < 64
	if (seconds > INT_MAX)
		seconds = INT_MAX;
#endif
	it_new.it_value.tv_sec = seconds;
	it_new.it_value.tv_nsec = 0;
	it_new.it_interval.tv_sec = it_new.it_interval.tv_nsec = 0;

	do_setitimer(ITIMER_REAL, &it_new, &it_old);

	/*
	 * We can't return 0 if we have an alarm pending ... And we'd
	 * better return too much than too little anyway
	 */
	if ((!it_old.it_value.tv_sec && it_old.it_value.tv_nsec) ||
	    it_old.it_value.tv_nsec >= (NSEC_PER_SEC / 2))
		it_old.it_value.tv_sec++;

	return it_old.it_value.tv_sec;
}

/*
 * For backwards compatibility? This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
SYSCALL_DEFINE1(alarm, unsigned int, seconds)
{
	return alarm_setitimer(seconds);
}

#endif

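/*
 * Copy a legacy itimerval from userspace and convert it to itimerspec64,
 * rejecting values which are not in canonical form.
 */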
static int get_itimerval(struct itimerspec64 *o, const struct __kernel_old_itimerval __user *i)
{
	struct __kernel_old_itimerval v;

	if (copy_from_user(&v, i, sizeof(struct __kernel_old_itimerval)))
		return -EFAULT;

	/* Validate the timevals in value. */
	if (!timeval_valid(&v.it_value) ||
	    !timeval_valid(&v.it_interval))
		return -EINVAL;

	o->it_interval.tv_sec = v.it_interval.tv_sec;
	o->it_interval.tv_nsec = v.it_interval.tv_usec * NSEC_PER_USEC;
	o->it_value.tv_sec = v.it_value.tv_sec;
	o->it_value.tv_nsec = v.it_value.tv_usec * NSEC_PER_USEC;
	return 0;
}

SYSCALL_DEFINE3(setitimer, int, which, struct __kernel_old_itimerval __user *, value,
		struct __kernel_old_itimerval __user *, ovalue)
{
	struct itimerspec64 set_buffer, get_buffer;
	int error;

	if (value) {
		error = get_itimerval(&set_buffer, value);
		if (error)
			return error;
	} else {
		memset(&set_buffer, 0, sizeof(set_buffer));
		printk_once(KERN_WARNING "%s calls setitimer() with new_value NULL pointer."
			    " Misfeature support will be removed\n",
			    current->comm);
	}

	error = do_setitimer(which, &set_buffer, ovalue ? &get_buffer : NULL);
	if (error || !ovalue)
		return error;

	if (put_itimerval(ovalue, &get_buffer))
		return -EFAULT;
	return 0;
}
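
/*
 * Userspace view of the syscall above (illustrative sketch only, not part
 * of the kernel build): arming a periodic 500ms SIGALRM through the libc
 * wrapper.
 *
 *	#include <signal.h>
 *	#include <sys/time.h>
 *	#include <unistd.h>
 *
 *	static void on_alarm(int sig) { (void)sig; }
 *
 *	int main(void)
 *	{
 *		struct itimerval it = {
 *			.it_interval = { .tv_sec = 0, .tv_usec = 500000 },
 *			.it_value    = { .tv_sec = 0, .tv_usec = 500000 },
 *		};
 *
 *		signal(SIGALRM, on_alarm);
 *		setitimer(ITIMER_REAL, &it, NULL);
 *		for (;;)
 *			pause();
 *	}
 */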

#if defined(CONFIG_COMPAT) || defined(CONFIG_ALPHA)
static int get_old_itimerval32(struct itimerspec64 *o, const struct old_itimerval32 __user *i)
{
	struct old_itimerval32 v32;

	if (copy_from_user(&v32, i, sizeof(struct old_itimerval32)))
		return -EFAULT;

	/* Validate the timevals in value. */
	if (!timeval_valid(&v32.it_value) ||
	    !timeval_valid(&v32.it_interval))
		return -EINVAL;

	o->it_interval.tv_sec = v32.it_interval.tv_sec;
	o->it_interval.tv_nsec = v32.it_interval.tv_usec * NSEC_PER_USEC;
	o->it_value.tv_sec = v32.it_value.tv_sec;
	o->it_value.tv_nsec = v32.it_value.tv_usec * NSEC_PER_USEC;
	return 0;
}

COMPAT_SYSCALL_DEFINE3(setitimer, int, which,
		       struct old_itimerval32 __user *, value,
		       struct old_itimerval32 __user *, ovalue)
{
	struct itimerspec64 set_buffer, get_buffer;
	int error;

	if (value) {
		error = get_old_itimerval32(&set_buffer, value);
		if (error)
			return error;
	} else {
		memset(&set_buffer, 0, sizeof(set_buffer));
		printk_once(KERN_WARNING "%s calls setitimer() with new_value NULL pointer."
			    " Misfeature support will be removed\n",
			    current->comm);
	}

	error = do_setitimer(which, &set_buffer, ovalue ? &get_buffer : NULL);
	if (error || !ovalue)
		return error;
	if (put_old_itimerval32(ovalue, &get_buffer))
		return -EFAULT;
	return 0;
}
#endif