/*
 * Copyright © 2008 Kristian Høgsberg
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <assert.h>
#include <stddef.h>
#include <stdio.h>
#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include <fcntl.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/epoll.h>
#include <sys/signalfd.h>
#include <sys/timerfd.h>
#include <unistd.h>
#include "wayland-util.h"
#include "wayland-private.h"
#include "wayland-server-core.h"
#include "wayland-os.h"

/** \cond INTERNAL */

#define TIMER_REMOVED -2

struct wl_event_loop;
struct wl_event_source_interface;
struct wl_event_source_timer;

struct wl_event_source {
	struct wl_event_source_interface *interface;
	struct wl_event_loop *loop;
	struct wl_list link;
	void *data;
	int fd;
};

struct wl_timer_heap {
	struct wl_event_source base;
	/* pointers to the user-visible event sources */
	struct wl_event_source_timer **data;
	int space, active, count;
};

struct wl_event_loop {
	int epoll_fd;
	struct wl_list check_list;
	struct wl_list idle_list;
	struct wl_list destroy_list;

	struct wl_signal destroy_signal;

	struct wl_timer_heap timers;
};

struct wl_event_source_interface {
	int (*dispatch)(struct wl_event_source *source,
			struct epoll_event *ep);
};

struct wl_event_source_fd {
	struct wl_event_source base;
	wl_event_loop_fd_func_t func;
	int fd;
};

/** \endcond */

static int
wl_event_source_fd_dispatch(struct wl_event_source *source,
			    struct epoll_event *ep)
{
	struct wl_event_source_fd *fd_source =
		(struct wl_event_source_fd *) source;
	uint32_t mask;

	mask = 0;
	if (ep->events & EPOLLIN)
		mask |= WL_EVENT_READABLE;
	if (ep->events & EPOLLOUT)
		mask |= WL_EVENT_WRITABLE;
	if (ep->events & EPOLLHUP)
		mask |= WL_EVENT_HANGUP;
	if (ep->events & EPOLLERR)
		mask |= WL_EVENT_ERROR;

	return fd_source->func(fd_source->fd, mask, source->data);
}

struct wl_event_source_interface fd_source_interface = {
	wl_event_source_fd_dispatch,
};

static struct wl_event_source *
add_source(struct wl_event_loop *loop,
	   struct wl_event_source *source, uint32_t mask, void *data)
{
	struct epoll_event ep;

	if (source->fd < 0) {
		free(source);
		return NULL;
	}

	source->loop = loop;
	source->data = data;
	wl_list_init(&source->link);

	memset(&ep, 0, sizeof ep);
	if (mask & WL_EVENT_READABLE)
		ep.events |= EPOLLIN;
	if (mask & WL_EVENT_WRITABLE)
		ep.events |= EPOLLOUT;
	ep.data.ptr = source;

	if (epoll_ctl(loop->epoll_fd, EPOLL_CTL_ADD, source->fd, &ep) < 0) {
		close(source->fd);
		free(source);
		return NULL;
	}

	return source;
}

/** Create a file descriptor event source
 *
 * \param loop The event loop that will process the new source.
 * \param fd The file descriptor to watch.
 * \param mask A bitwise-or of which events to watch for: \c WL_EVENT_READABLE,
 * \c WL_EVENT_WRITABLE.
 * \param func The file descriptor dispatch function.
 * \param data User data.
 * \return A new file descriptor event source.
 *
 * The given file descriptor is initially watched for the events given in
 * \c mask. This can be changed as needed with wl_event_source_fd_update().
 *
 * If the program may read the file descriptor and leave the data in a buffer
 * without actually processing it, it may be necessary to register the file
 * descriptor source to be re-checked, see wl_event_source_check(). Re-checking
 * ensures that the dispatch function gets called even if the file descriptor
 * is no longer readable or writable. This is especially useful with IPC
 * libraries that automatically buffer incoming data, possibly as a
 * side-effect of other operations.
 *
 * \sa wl_event_loop_fd_func_t
 * \memberof wl_event_source
 */
WL_EXPORT struct wl_event_source *
wl_event_loop_add_fd(struct wl_event_loop *loop,
		     int fd, uint32_t mask,
		     wl_event_loop_fd_func_t func,
		     void *data)
{
	struct wl_event_source_fd *source;

	source = malloc(sizeof *source);
	if (source == NULL)
		return NULL;

	source->base.interface = &fd_source_interface;
	source->base.fd = wl_os_dupfd_cloexec(fd, 0);
	source->func = func;
	source->fd = fd;

	return add_source(loop, &source->base, mask, data);
}
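
/* Usage sketch: watching a connected socket for readability with this API.
 * This is a minimal illustration only; the callback name `handle_socket_data`
 * and the `sock_fd` variable are not part of this file.
 *
 *	static int
 *	handle_socket_data(int fd, uint32_t mask, void *data)
 *	{
 *		char buf[4096];
 *		ssize_t len;
 *
 *		if (mask & (WL_EVENT_HANGUP | WL_EVENT_ERROR))
 *			return 0;
 *
 *		len = read(fd, buf, sizeof buf);
 *		return len > 0 ? 1 : 0;
 *	}
 *
 *	struct wl_event_source *source =
 *		wl_event_loop_add_fd(loop, sock_fd, WL_EVENT_READABLE,
 *				     handle_socket_data, NULL);
 */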

/** Update a file descriptor source's event mask
 *
 * \param source The file descriptor event source to update.
 * \param mask The new mask, a bitwise-or of: \c WL_EVENT_READABLE,
 * \c WL_EVENT_WRITABLE.
 * \return 0 on success, -1 on failure.
 *
 * This changes which events, readable and/or writable, cause the dispatch
 * callback to be called.
 *
 * File descriptors are usually writable to begin with, so they do not need to
 * be polled for writable until a write actually fails. When a write fails,
 * the event mask can be changed to poll for readable and writable, delivering
 * a dispatch callback when it is possible to write more. Once all data has
 * been written, the mask can be changed to poll only for readable to avoid
 * busy-looping on dispatch.
 *
 * \sa wl_event_loop_add_fd()
 * \memberof wl_event_source
 */
WL_EXPORT int
wl_event_source_fd_update(struct wl_event_source *source, uint32_t mask)
{
	struct wl_event_loop *loop = source->loop;
	struct epoll_event ep;

	memset(&ep, 0, sizeof ep);
	if (mask & WL_EVENT_READABLE)
		ep.events |= EPOLLIN;
	if (mask & WL_EVENT_WRITABLE)
		ep.events |= EPOLLOUT;
	ep.data.ptr = source;

	return epoll_ctl(loop->epoll_fd, EPOLL_CTL_MOD, source->fd, &ep);
}
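
/* Usage sketch of the pattern described above: poll for writable only while
 * a write is pending. The `source` variable is illustrative only.
 *
 *	// A write failed with EAGAIN; ask to be woken when writing is
 *	// possible again.
 *	wl_event_source_fd_update(source,
 *				  WL_EVENT_READABLE | WL_EVENT_WRITABLE);
 *
 *	// Later, once all pending data has been flushed, go back to
 *	// readable-only to avoid busy-looping on an always-writable fd.
 *	wl_event_source_fd_update(source, WL_EVENT_READABLE);
 */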

/** \cond INTERNAL */

struct wl_event_source_timer {
	struct wl_event_source base;
	wl_event_loop_timer_func_t func;
	struct wl_event_source_timer *next_due;
	struct timespec deadline;
	int heap_idx;
};

static int
noop_dispatch(struct wl_event_source *source,
	      struct epoll_event *ep) {
	return 0;
}

struct wl_event_source_interface timer_heap_source_interface = {
	noop_dispatch,
};

static bool
time_lt(struct timespec ta, struct timespec tb)
{
	if (ta.tv_sec != tb.tv_sec) {
		return ta.tv_sec < tb.tv_sec;
	}
	return ta.tv_nsec < tb.tv_nsec;
}

static int
set_timer(int timerfd, struct timespec deadline) {
	struct itimerspec its;

	its.it_interval.tv_sec = 0;
	its.it_interval.tv_nsec = 0;
	its.it_value = deadline;
	return timerfd_settime(timerfd, TFD_TIMER_ABSTIME, &its, NULL);
}

static int
clear_timer(int timerfd)
{
	struct itimerspec its;

	its.it_interval.tv_sec = 0;
	its.it_interval.tv_nsec = 0;
	its.it_value.tv_sec = 0;
	its.it_value.tv_nsec = 0;
	return timerfd_settime(timerfd, 0, &its, NULL);
}

static void
wl_timer_heap_init(struct wl_timer_heap *timers, struct wl_event_loop *loop)
{
	timers->base.fd = -1;
	timers->base.data = NULL;
	wl_list_init(&timers->base.link);
	timers->base.interface = &timer_heap_source_interface;
	timers->base.loop = loop;

	loop->timers.data = NULL;
	loop->timers.active = 0;
	loop->timers.space = 0;
	loop->timers.count = 0;
}

static void
wl_timer_heap_release(struct wl_timer_heap *timers)
{
	if (timers->base.fd != -1) {
		close(timers->base.fd);
	}
	free(timers->data);
}

static int
wl_timer_heap_ensure_timerfd(struct wl_timer_heap *timers)
{
	struct epoll_event ep;
	int timer_fd;

	if (timers->base.fd != -1)
		return 0;

	memset(&ep, 0, sizeof ep);
	ep.events = EPOLLIN;
	ep.data.ptr = timers;

	timer_fd = timerfd_create(CLOCK_MONOTONIC,
				  TFD_CLOEXEC | TFD_NONBLOCK);
	if (timer_fd < 0)
		return -1;

	if (epoll_ctl(timers->base.loop->epoll_fd,
		      EPOLL_CTL_ADD, timer_fd, &ep) < 0) {
		close(timer_fd);
		return -1;
	}

	timers->base.fd = timer_fd;
	return 0;
}

static int
wl_timer_heap_reserve(struct wl_timer_heap *timers)
{
	struct wl_event_source_timer **n;
	int new_space;

	if (timers->count + 1 > timers->space) {
		new_space = timers->space >= 8 ? timers->space * 2 : 8;
		n = realloc(timers->data, (size_t)new_space * sizeof(*n));
		if (!n) {
			wl_log("Allocation failure when expanding timer list\n");
			return -1;
		}
		timers->data = n;
		timers->space = new_space;
	}

	timers->count++;
	return 0;
}

static void
wl_timer_heap_unreserve(struct wl_timer_heap *timers)
{
	struct wl_event_source_timer **n;

	timers->count--;

	if (timers->space >= 16 && timers->space >= 4 * timers->count) {
		n = realloc(timers->data,
			    (size_t)timers->space / 2 * sizeof(*n));
		if (!n) {
			wl_log("Reallocation failure when shrinking timer list\n");
			return;
		}
		timers->data = n;
		timers->space = timers->space / 2;
	}
}

static int
heap_set(struct wl_event_source_timer **data,
	 struct wl_event_source_timer *a,
	 int idx)
{
	int tmp;

	tmp = a->heap_idx;
	a->heap_idx = idx;
	data[a->heap_idx] = a;

	return tmp;
}

static void
heap_sift_down(struct wl_event_source_timer **data,
	       int num_active,
	       struct wl_event_source_timer *source)
{
	struct wl_event_source_timer *child, *other_child;
	int cursor_idx;
	struct timespec key;

	cursor_idx = source->heap_idx;
	key = source->deadline;
	while (1) {
		int lchild_idx = cursor_idx * 2 + 1;

		if (lchild_idx >= num_active) {
			break;
		}

		child = data[lchild_idx];
		if (lchild_idx + 1 < num_active) {
			other_child = data[lchild_idx + 1];
			if (time_lt(other_child->deadline, child->deadline))
				child = other_child;
		}

		if (time_lt(child->deadline, key))
			cursor_idx = heap_set(data, child, cursor_idx);
		else
			break;
	}

	heap_set(data, source, cursor_idx);
}

static void
heap_sift_up(struct wl_event_source_timer **data,
	     struct wl_event_source_timer *source)
{
	int cursor_idx;
	struct timespec key;

	cursor_idx = source->heap_idx;
	key = source->deadline;
	while (cursor_idx > 0) {
		struct wl_event_source_timer *parent =
			data[(cursor_idx - 1) / 2];

		if (time_lt(key, parent->deadline))
			cursor_idx = heap_set(data, parent, cursor_idx);
		else
			break;
	}
	heap_set(data, source, cursor_idx);
}

/* requires timer be armed */
static void
wl_timer_heap_disarm(struct wl_timer_heap *timers,
		     struct wl_event_source_timer *source)
{
	struct wl_event_source_timer *last_end_evt;
	int old_source_idx;

	assert(source->heap_idx >= 0);

	old_source_idx = source->heap_idx;
	source->heap_idx = -1;
	source->deadline.tv_sec = 0;
	source->deadline.tv_nsec = 0;

	last_end_evt = timers->data[timers->active - 1];
	timers->data[timers->active - 1] = NULL;
	timers->active--;

	if (old_source_idx == timers->active)
		return;

	timers->data[old_source_idx] = last_end_evt;
	last_end_evt->heap_idx = old_source_idx;

	/* Move the displaced (active) element to its proper place.
	 * Only one of sift-down and sift-up will have any effect */
	heap_sift_down(timers->data, timers->active, last_end_evt);
	heap_sift_up(timers->data, last_end_evt);
}

/* requires timer be disarmed */
static void
wl_timer_heap_arm(struct wl_timer_heap *timers,
		  struct wl_event_source_timer *source,
		  struct timespec deadline)
{
	assert(source->heap_idx == -1);

	source->deadline = deadline;
	timers->data[timers->active] = source;
	source->heap_idx = timers->active;
	timers->active++;
	heap_sift_up(timers->data, source);
}

static int
wl_timer_heap_dispatch(struct wl_timer_heap *timers)
{
	struct timespec now;
	struct wl_event_source_timer *root;
	struct wl_event_source_timer *list_cursor = NULL, *list_tail = NULL;

	clock_gettime(CLOCK_MONOTONIC, &now);

	while (timers->active > 0) {
		root = timers->data[0];
		if (time_lt(now, root->deadline))
			break;

		wl_timer_heap_disarm(timers, root);

		if (list_cursor == NULL)
			list_cursor = root;
		else
			list_tail->next_due = root;
		list_tail = root;
	}
	if (list_tail)
		list_tail->next_due = NULL;

	if (timers->active > 0) {
		if (set_timer(timers->base.fd, timers->data[0]->deadline) < 0)
			return -1;
	} else {
		if (clear_timer(timers->base.fd) < 0)
			return -1;
	}

	/* Execute precisely the functions for events before `now`, in order.
	 * Because wl_event_loop_dispatch ignores return codes, do the same
	 * here as well */
	for (; list_cursor; list_cursor = list_cursor->next_due) {
		if (list_cursor->base.fd != TIMER_REMOVED)
			list_cursor->func(list_cursor->base.data);
	}

	return 0;
}

static int
wl_event_source_timer_dispatch(struct wl_event_source *source,
			       struct epoll_event *ep)
{
	struct wl_event_source_timer *timer;

	timer = wl_container_of(source, timer, base);
	return timer->func(timer->base.data);
}

struct wl_event_source_interface timer_source_interface = {
	wl_event_source_timer_dispatch,
};

/** \endcond */

/** Create a timer event source
 *
 * \param loop The event loop that will process the new source.
 * \param func The timer dispatch function.
 * \param data User data.
 * \return A new timer event source.
 *
 * The timer is initially disarmed. It needs to be armed with a call to
 * wl_event_source_timer_update() before it can trigger a dispatch call.
 *
 * \sa wl_event_loop_timer_func_t
 * \memberof wl_event_source
 */
WL_EXPORT struct wl_event_source *
wl_event_loop_add_timer(struct wl_event_loop *loop,
			wl_event_loop_timer_func_t func,
			void *data)
{
	struct wl_event_source_timer *source;

	if (wl_timer_heap_ensure_timerfd(&loop->timers) < 0)
		return NULL;

	source = malloc(sizeof *source);
	if (source == NULL)
		return NULL;

	source->base.interface = &timer_source_interface;
	source->base.fd = -1;
	source->func = func;
	source->base.loop = loop;
	source->base.data = data;
	wl_list_init(&source->base.link);
	source->next_due = NULL;
	source->deadline.tv_sec = 0;
	source->deadline.tv_nsec = 0;
	source->heap_idx = -1;

	if (wl_timer_heap_reserve(&loop->timers) < 0) {
		free(source);
		return NULL;
	}

	return &source->base;
}

/** Arm or disarm a timer
 *
 * \param source The timer event source to modify.
 * \param ms_delay The timeout in milliseconds.
 * \return 0 on success, -1 on failure.
 *
 * If the timeout is zero, the timer is disarmed.
 *
 * If the timeout is non-zero, the timer is set to expire after the given
 * timeout in milliseconds. When the timer expires, the dispatch function
 * set with wl_event_loop_add_timer() is called once from
 * wl_event_loop_dispatch(). If another dispatch is desired after another
 * expiry, wl_event_source_timer_update() needs to be called again.
 *
 * \memberof wl_event_source
 */
WL_EXPORT int
wl_event_source_timer_update(struct wl_event_source *source, int ms_delay)
{
	struct wl_event_source_timer *tsource =
		wl_container_of(source, tsource, base);
	struct wl_timer_heap *timers = &tsource->base.loop->timers;

	if (ms_delay > 0) {
		struct timespec deadline;

		clock_gettime(CLOCK_MONOTONIC, &deadline);

		deadline.tv_nsec += (ms_delay % 1000) * 1000000L;
		deadline.tv_sec += ms_delay / 1000;
		if (deadline.tv_nsec >= 1000000000L) {
			deadline.tv_nsec -= 1000000000L;
			deadline.tv_sec += 1;
		}

		if (tsource->heap_idx == -1) {
			wl_timer_heap_arm(timers, tsource, deadline);
		} else if (time_lt(deadline, tsource->deadline)) {
			tsource->deadline = deadline;
			heap_sift_up(timers->data, tsource);
		} else {
			tsource->deadline = deadline;
			heap_sift_down(timers->data, timers->active, tsource);
		}

		if (tsource->heap_idx == 0) {
			/* Only update the timerfd if the new deadline is
			 * the earliest */
			if (set_timer(timers->base.fd, deadline) < 0)
				return -1;
		}
	} else {
		if (tsource->heap_idx == -1)
			return 0;
		wl_timer_heap_disarm(timers, tsource);

		if (timers->active == 0) {
			/* Only update the timerfd if this was the last
			 * active timer */
			if (clear_timer(timers->base.fd) < 0)
				return -1;
		}
	}

	return 0;
}
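
/* Usage sketch: a timer fires only once per update, so a repeating timer
 * re-arms itself from its own dispatch function. The names `tick_source`
 * and `tick_handler` are illustrative only.
 *
 *	static struct wl_event_source *tick_source;
 *
 *	static int
 *	tick_handler(void *data)
 *	{
 *		// ... periodic work ...
 *
 *		// Re-arm for another 100 ms; without this the timer stays
 *		// disarmed after firing.
 *		wl_event_source_timer_update(tick_source, 100);
 *		return 0;
 *	}
 *
 *	// during setup:
 *	tick_source = wl_event_loop_add_timer(loop, tick_handler, NULL);
 *	wl_event_source_timer_update(tick_source, 100);
 */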

/** \cond INTERNAL */

struct wl_event_source_signal {
	struct wl_event_source base;
	int signal_number;
	wl_event_loop_signal_func_t func;
};

/** \endcond */

static int
wl_event_source_signal_dispatch(struct wl_event_source *source,
				struct epoll_event *ep)
{
	struct wl_event_source_signal *signal_source =
		(struct wl_event_source_signal *) source;
	struct signalfd_siginfo signal_info;
	int len;

	len = read(source->fd, &signal_info, sizeof signal_info);
	if (!(len == -1 && errno == EAGAIN) && len != sizeof signal_info)
		/* Is there anything we can do here? Will this ever happen? */
		wl_log("signalfd read error: %s\n", strerror(errno));

	return signal_source->func(signal_source->signal_number,
				   signal_source->base.data);
}

struct wl_event_source_interface signal_source_interface = {
	wl_event_source_signal_dispatch,
};

/** Create a POSIX signal event source
 *
 * \param loop The event loop that will process the new source.
 * \param signal_number Number of the signal to watch for.
 * \param func The signal dispatch function.
 * \param data User data.
 * \return A new signal event source.
 *
 * This function blocks the normal delivery of the given signal in the calling
 * thread, and creates a "watch" for it. Signal delivery no longer happens
 * asynchronously, but by wl_event_loop_dispatch() calling the dispatch
 * callback function \c func.
 *
 * It is the caller's responsibility to ensure that all other threads have
 * also blocked the signal.
 *
 * \sa wl_event_loop_signal_func_t
 * \memberof wl_event_source
 */
WL_EXPORT struct wl_event_source *
wl_event_loop_add_signal(struct wl_event_loop *loop,
			 int signal_number,
			 wl_event_loop_signal_func_t func,
			 void *data)
{
	struct wl_event_source_signal *source;
	sigset_t mask;

	source = malloc(sizeof *source);
	if (source == NULL)
		return NULL;

	source->base.interface = &signal_source_interface;
	source->signal_number = signal_number;

	sigemptyset(&mask);
	sigaddset(&mask, signal_number);
	source->base.fd = signalfd(-1, &mask, SFD_CLOEXEC | SFD_NONBLOCK);
	sigprocmask(SIG_BLOCK, &mask, NULL);

	source->func = func;

	return add_source(loop, &source->base, WL_EVENT_READABLE, data);
}
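
/* Usage sketch: turning SIGTERM into an orderly shutdown of a dispatch loop.
 * The `handle_sigterm` callback and the `running` flag are illustrative only.
 *
 *	static int
 *	handle_sigterm(int signal_number, void *data)
 *	{
 *		bool *running = data;
 *
 *		*running = false;
 *		return 0;
 *	}
 *
 *	struct wl_event_source *sig_source =
 *		wl_event_loop_add_signal(loop, SIGTERM, handle_sigterm,
 *					 &running);
 */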

/** \cond INTERNAL */

struct wl_event_source_idle {
	struct wl_event_source base;
	wl_event_loop_idle_func_t func;
};

/** \endcond */

struct wl_event_source_interface idle_source_interface = {
	NULL,
};

/** Create an idle task
 *
 * \param loop The event loop that will process the new task.
 * \param func The idle task dispatch function.
 * \param data User data.
 * \return A new idle task (an event source).
 *
 * Idle tasks are dispatched before wl_event_loop_dispatch() goes to sleep.
 * See wl_event_loop_dispatch() for more details.
 *
 * Idle tasks fire once, and are automatically destroyed right after the
 * callback function has been called.
 *
 * An idle task can be cancelled before the callback has been called by
 * wl_event_source_remove(). Calling wl_event_source_remove() after or from
 * within the callback results in undefined behaviour.
 *
 * \sa wl_event_loop_idle_func_t
 * \memberof wl_event_source
 */
WL_EXPORT struct wl_event_source *
wl_event_loop_add_idle(struct wl_event_loop *loop,
		       wl_event_loop_idle_func_t func,
		       void *data)
{
	struct wl_event_source_idle *source;

	source = malloc(sizeof *source);
	if (source == NULL)
		return NULL;

	source->base.interface = &idle_source_interface;
	source->base.loop = loop;
	source->base.fd = -1;

	source->func = func;
	source->base.data = data;

	wl_list_insert(loop->idle_list.prev, &source->base.link);

	return &source->base;
}
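
/* Usage sketch: deferring work until just before the loop goes back to
 * sleep, e.g. coalescing several damage updates into one repaint. The
 * `struct output`, `do_repaint` and `schedule_repaint` names are
 * illustrative only.
 *
 *	static void
 *	do_repaint(void *data)
 *	{
 *		struct output *output = data;
 *
 *		// runs once, right before wl_event_loop_dispatch() sleeps
 *		output->repaint_scheduled = false;
 *		// ... repaint ...
 *	}
 *
 *	static void
 *	schedule_repaint(struct wl_event_loop *loop, struct output *output)
 *	{
 *		if (output->repaint_scheduled)
 *			return;
 *		output->repaint_scheduled = true;
 *		wl_event_loop_add_idle(loop, do_repaint, output);
 *	}
 */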

/** Mark event source to be re-checked
 *
 * \param source The event source to be re-checked.
 *
 * This function permanently marks the event source to be re-checked after
 * the normal dispatch of sources in wl_event_loop_dispatch(). Re-checking
 * will keep iterating over all such event sources until the dispatch
 * function for them all returns zero.
 *
 * Re-checking is used on sources that may become ready to dispatch as a
 * side-effect of dispatching themselves or other event sources, including idle
 * sources. Re-checking ensures all the incoming events have been fully drained
 * before wl_event_loop_dispatch() returns.
 *
 * \memberof wl_event_source
 */
WL_EXPORT void
wl_event_source_check(struct wl_event_source *source)
{
	wl_list_insert(source->loop->check_list.prev, &source->link);
}
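
/* Usage sketch: a connection whose library buffers incoming messages is
 * marked for re-checking, so buffered data is drained even when the fd is no
 * longer readable; the callback returns non-zero while work remains. The
 * `struct connection` helpers are illustrative only.
 *
 *	static int
 *	handle_connection(int fd, uint32_t mask, void *data)
 *	{
 *		struct connection *conn = data;
 *
 *		if (mask & WL_EVENT_READABLE)
 *			connection_read(conn);
 *
 *		// a non-zero return makes the re-check loop call us again
 *		return connection_dispatch_buffered(conn);
 *	}
 *
 *	source = wl_event_loop_add_fd(loop, fd, WL_EVENT_READABLE,
 *				      handle_connection, conn);
 *	wl_event_source_check(source);
 */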

/** Remove an event source from its event loop
 *
 * \param source The event source to be removed.
 * \return Zero.
 *
 * The event source is removed from the event loop it was created for,
 * and is effectively destroyed. This invalidates \c source.
 * The dispatch function of the source will no longer be called through this
 * source.
 *
 * \memberof wl_event_source
 */
WL_EXPORT int
wl_event_source_remove(struct wl_event_source *source)
{
	struct wl_event_loop *loop = source->loop;

	/* We need to explicitly remove the fd, since closing the fd
	 * isn't enough in case we've dup'ed the fd. */
	if (source->fd >= 0) {
		epoll_ctl(loop->epoll_fd, EPOLL_CTL_DEL, source->fd, NULL);
		close(source->fd);
		source->fd = -1;
	}

	if (source->interface == &timer_source_interface &&
	    source->fd != TIMER_REMOVED) {
		/* Disarm the timer (and the loop's timerfd, if necessary),
		 * before removing its space in the loop timer heap */
		wl_event_source_timer_update(source, 0);
		wl_timer_heap_unreserve(&loop->timers);
		/* Set the fd field to TIMER_REMOVED to indicate that the
		 * timer should NOT be dispatched in `wl_event_loop_dispatch` */
		source->fd = TIMER_REMOVED;
	}

	wl_list_remove(&source->link);
	wl_list_insert(&loop->destroy_list, &source->link);

	return 0;
}
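
/* Usage sketch: removing an fd source from within its own callback when the
 * peer hangs up. Destruction is deferred to the loop's destroy list, so this
 * is safe even though the source is currently being dispatched. The
 * `struct client_state` names are illustrative only.
 *
 *	static int
 *	handle_client_fd(int fd, uint32_t mask, void *data)
 *	{
 *		struct client_state *client = data;
 *
 *		if (mask & (WL_EVENT_HANGUP | WL_EVENT_ERROR)) {
 *			wl_event_source_remove(client->source);
 *			client->source = NULL;
 *			return 0;
 *		}
 *		// ... handle readable/writable ...
 *		return 0;
 *	}
 */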

static void
wl_event_loop_process_destroy_list(struct wl_event_loop *loop)
{
	struct wl_event_source *source, *next;

	wl_list_for_each_safe(source, next, &loop->destroy_list, link)
		free(source);

	wl_list_init(&loop->destroy_list);
}

/** Create a new event loop context
 *
 * \return A new event loop context object.
 *
 * This creates a new event loop context. Initially this context is empty.
 * Event sources need to be explicitly added to it.
 *
 * Normally the event loop is run by calling wl_event_loop_dispatch() in
 * a loop until the program terminates. Alternatively, an event loop can be
 * embedded in another event loop by its file descriptor, see
 * wl_event_loop_get_fd().
 *
 * \memberof wl_event_loop
 */
WL_EXPORT struct wl_event_loop *
wl_event_loop_create(void)
{
	struct wl_event_loop *loop;

	loop = malloc(sizeof *loop);
	if (loop == NULL)
		return NULL;

	loop->epoll_fd = wl_os_epoll_create_cloexec();
	if (loop->epoll_fd < 0) {
		free(loop);
		return NULL;
	}
	wl_list_init(&loop->check_list);
	wl_list_init(&loop->idle_list);
	wl_list_init(&loop->destroy_list);

	wl_signal_init(&loop->destroy_signal);

	wl_timer_heap_init(&loop->timers, loop);

	return loop;
}

/** Destroy an event loop context
 *
 * \param loop The event loop to be destroyed.
 *
 * This emits the event loop destroy signal, closes the event loop file
 * descriptor, and frees \c loop.
 *
 * If the event loop has existing sources, those cannot be safely removed
 * afterwards. Therefore one must call wl_event_source_remove() on all
 * event sources before destroying the event loop context.
 *
 * \memberof wl_event_loop
 */
WL_EXPORT void
wl_event_loop_destroy(struct wl_event_loop *loop)
{
	wl_signal_emit(&loop->destroy_signal, loop);

	wl_event_loop_process_destroy_list(loop);
	wl_timer_heap_release(&loop->timers);
	close(loop->epoll_fd);
	free(loop);
}
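
/* Usage sketch: the typical standalone lifecycle of an event loop. The
 * `running` flag and the sources being added and removed are illustrative
 * only.
 *
 *	struct wl_event_loop *loop = wl_event_loop_create();
 *	bool running = true;
 *
 *	// ... add fd, timer and signal sources here ...
 *
 *	while (running)
 *		wl_event_loop_dispatch(loop, -1);
 *
 *	// remove all sources before destroying the loop
 *	wl_event_source_remove(sig_source);
 *	wl_event_loop_destroy(loop);
 */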

static bool
post_dispatch_check(struct wl_event_loop *loop)
{
	struct epoll_event ep;
	struct wl_event_source *source, *next;
	bool needs_recheck = false;

	ep.events = 0;
	wl_list_for_each_safe(source, next, &loop->check_list, link) {
		int dispatch_result;

		dispatch_result = source->interface->dispatch(source, &ep);
		if (dispatch_result < 0) {
			wl_log("Source dispatch function returned negative value!\n");
			wl_log("This would previously accidentally suppress a follow-up dispatch\n");
		}
		needs_recheck |= dispatch_result != 0;
	}

	return needs_recheck;
}

/** Dispatch the idle sources
 *
 * \param loop The event loop whose idle sources are dispatched.
 *
 * \sa wl_event_loop_add_idle()
 * \memberof wl_event_loop
 */
WL_EXPORT void
wl_event_loop_dispatch_idle(struct wl_event_loop *loop)
{
	struct wl_event_source_idle *source;

	while (!wl_list_empty(&loop->idle_list)) {
		source = wl_container_of(loop->idle_list.next,
					 source, base.link);
		source->func(source->base.data);
		wl_event_source_remove(&source->base);
	}
}

/** Wait for events and dispatch them
 *
 * \param loop The event loop whose sources to wait for.
 * \param timeout The polling timeout in milliseconds.
 * \return 0 for success, -1 for polling (or timer update) error.
 *
 * All the associated event sources are polled. This function blocks until
 * any event source delivers an event (idle sources excluded), or the timeout
 * expires. A timeout of -1 disables the timeout, causing the function to block
 * indefinitely. A timeout of zero causes the poll to always return immediately.
 *
 * All idle sources are dispatched before blocking. An idle source is destroyed
 * when it is dispatched. After blocking, all other ready sources are
 * dispatched. Then, idle sources are dispatched again, in case the dispatched
 * events created idle sources. Finally, all sources marked with
 * wl_event_source_check() are dispatched in a loop until their dispatch
 * functions all return zero.
 *
 * \memberof wl_event_loop
 */
WL_EXPORT int
wl_event_loop_dispatch(struct wl_event_loop *loop, int timeout)
{
	struct epoll_event ep[32];
	struct wl_event_source *source;
	int i, count;
	bool has_timers = false;

	wl_event_loop_dispatch_idle(loop);

	count = epoll_wait(loop->epoll_fd, ep, ARRAY_LENGTH(ep), timeout);
	if (count < 0)
		return -1;

	for (i = 0; i < count; i++) {
		source = ep[i].data.ptr;
		if (source == &loop->timers.base)
			has_timers = true;
	}

	if (has_timers) {
		/* Dispatch timer sources before non-timer sources, so that
		 * the non-timer sources can not cancel (by calling
		 * `wl_event_source_timer_update`) the dispatching of the timers
		 * (Note that timer sources also can't cancel pending non-timer
		 * sources, since epoll_wait has already been called) */
		if (wl_timer_heap_dispatch(&loop->timers) < 0)
			return -1;
	}

	for (i = 0; i < count; i++) {
		source = ep[i].data.ptr;
		if (source->fd != -1)
			source->interface->dispatch(source, &ep[i]);
	}

	wl_event_loop_process_destroy_list(loop);

	wl_event_loop_dispatch_idle(loop);

	while (post_dispatch_check(loop));

	return 0;
}

/** Get the event loop file descriptor
 *
 * \param loop The event loop context.
 * \return The aggregate file descriptor.
 *
 * This function returns the aggregate file descriptor that represents all
 * the event sources (idle sources excluded) associated with the given event
 * loop context. When any event source makes an event available, it will be
 * reflected in the aggregate file descriptor.
 *
 * When the aggregate file descriptor delivers an event, one can call
 * wl_event_loop_dispatch() on the event loop context to dispatch all the
 * available events.
 *
 * \memberof wl_event_loop
 */
WL_EXPORT int
wl_event_loop_get_fd(struct wl_event_loop *loop)
{
	return loop->epoll_fd;
}
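
/* Usage sketch: embedding this loop in an external poll loop via its
 * aggregate fd, as described above. The `external_poll` call stands in for
 * whatever mechanism the outer loop uses and is illustrative only.
 *
 *	int loop_fd = wl_event_loop_get_fd(loop);
 *
 *	for (;;) {
 *		// wait until loop_fd becomes readable (POLLIN), e.g. with
 *		// poll(), epoll or another event library
 *		external_poll(loop_fd);
 *
 *		// dispatch whatever is pending without blocking again
 *		wl_event_loop_dispatch(loop, 0);
 *	}
 */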

/** Register a destroy listener for an event loop context
 *
 * \param loop The event loop context whose destruction to listen for.
 * \param listener The listener with the callback to be called.
 *
 * \sa wl_listener
 * \memberof wl_event_loop
 */
WL_EXPORT void
wl_event_loop_add_destroy_listener(struct wl_event_loop *loop,
				   struct wl_listener *listener)
{
	wl_signal_add(&loop->destroy_signal, listener);
}

/** Get the listener struct for the specified callback
 *
 * \param loop The event loop context to inspect.
 * \param notify The destroy callback to find.
 * \return The wl_listener registered to the event loop context with
 * the given callback pointer.
 *
 * \memberof wl_event_loop
 */
WL_EXPORT struct wl_listener *
wl_event_loop_get_destroy_listener(struct wl_event_loop *loop,
				   wl_notify_func_t notify)
{
	return wl_signal_get(&loop->destroy_signal, notify);
}
