/*
   Copyright (C) 2005 John McCutchan
   Copyright © 2015 Canonical Limited

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with this library; if not, see <http://www.gnu.org/licenses/>.

   Authors:
     Ryan Lortie <desrt@desrt.ca>
     John McCutchan <john@johnmccutchan.com>
*/

#include "config.h"

#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <glib.h>
#include "inotify-kernel.h"
#include <sys/inotify.h>
#ifdef HAVE_SYS_FILIO_H
#include <sys/filio.h>
#endif
#include <glib/glib-unix.h>

#include "glib-private.h"

/* From inotify(7) */
#define MAX_EVENT_SIZE (sizeof(struct inotify_event) + NAME_MAX + 1)

/* Amount of time to sleep on receipt of uninteresting events */
#define BOREDOM_SLEEP_TIME (100 * G_TIME_SPAN_MILLISECOND)

/* Define limits on the maximum amount of time and maximum number of
 * interceding events between FROM/TO that can be merged.
 */
#define MOVE_PAIR_DELAY (10 * G_TIME_SPAN_MILLISECOND)
#define MOVE_PAIR_DISTANCE (100)

/* We use the lock from inotify-helper.c
 *
 * We only have to take it on our read callback.
 *
 * The rest of locking is taken care of in inotify-helper.c
 */
G_LOCK_EXTERN (inotify_lock);

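/* Copy a raw kernel event into a heap-allocated ik_event_t, stamping it
 * with the time at which it was read so that unpaired moves can later
 * expire.
 */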
static ik_event_t *
ik_event_new (struct inotify_event *kevent,
              gint64                now)
{
  ik_event_t *event = g_new0 (ik_event_t, 1);

  event->wd = kevent->wd;
  event->mask = kevent->mask;
  event->cookie = kevent->cookie;
  event->len = kevent->len;
  event->timestamp = now;
  if (event->len)
    event->name = g_strdup (kevent->name);
  else
    event->name = NULL;

  return event;
}

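/* Free an event; if it has been paired with a matching move event, the
 * partner is freed along with it.
 */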
void
_ik_event_free (ik_event_t *event)
{
  if (event->pair)
    {
      event->pair->pair = NULL;
      _ik_event_free (event->pair);
    }

  g_free (event->name);
  g_free (event);
}

typedef struct
{
  GSource     source;

  GQueue      queue;
  gpointer    fd_tag;
  gint        fd;

  GHashTable *unmatched_moves;
  gboolean    is_bored;
} InotifyKernelSource;

static InotifyKernelSource *inotify_source;

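/* Returns the time at which the head of the queue becomes dispatchable:
 * -1 if the queue is empty, 0 if it is ready now, or the expiry time of
 * an IN_MOVED_FROM that is still waiting for its IN_MOVED_TO partner.
 */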
static gint64
ik_source_get_dispatch_time (InotifyKernelSource *iks)
{
  ik_event_t *head;

  head = g_queue_peek_head (&iks->queue);

  /* nothing in the queue: not ready */
  if (!head)
    return -1;

  /* if it's not an unpaired move, it is ready now */
  if (~head->mask & IN_MOVED_FROM || head->pair)
    return 0;

  /* if the queue is too long then it's ready now */
  if (iks->queue.length > MOVE_PAIR_DISTANCE)
    return 0;

  /* otherwise, it's ready after the delay */
  return head->timestamp + MOVE_PAIR_DELAY;
}

static gboolean
ik_source_can_dispatch_now (InotifyKernelSource *iks,
                            gint64               now)
{
  gint64 dispatch_time;

  dispatch_time = ik_source_get_dispatch_time (iks);

  return 0 <= dispatch_time && dispatch_time <= now;
}

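/* Read as many events as fit into the supplied buffer, retrying on EINTR
 * and returning 0 if the non-blocking fd currently has nothing to offer.
 */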
static gsize
ik_source_read_some_events (InotifyKernelSource *iks,
                            gchar               *buffer,
                            gsize                buffer_len)
{
  gssize result;
  int errsv;

again:
  result = read (iks->fd, buffer, buffer_len);
  errsv = errno;

  if (result < 0)
    {
      if (errsv == EINTR)
        goto again;

      if (errsv == EAGAIN)
        return 0;

      g_error ("inotify read(): %s", g_strerror (errsv));
    }
  else if (result == 0)
    g_error ("inotify unexpectedly hit eof");

  return result;
}

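/* Read events into the caller-supplied buffer; if FIONREAD reports that
 * more data is pending than that buffer could hold, switch to a larger
 * heap-allocated buffer.  The caller must compare the returned pointer
 * against the buffer it passed in and free it if it differs.
 */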
static gchar *
ik_source_read_all_the_events (InotifyKernelSource *iks,
                               gchar               *buffer,
                               gsize                buffer_len,
                               gsize               *length_out)
{
  gsize n_read;

  n_read = ik_source_read_some_events (iks, buffer, buffer_len);

  /* Check if we might have gotten another event if we had passed in a
   * bigger buffer...
   */
  if (n_read + MAX_EVENT_SIZE > buffer_len)
    {
      gchar *new_buffer;
      guint n_readable;
      gint result;
      int errsv;

      /* figure out how many more bytes there are to read */
      result = ioctl (iks->fd, FIONREAD, &n_readable);
      errsv = errno;
      if (result != 0)
        g_error ("inotify ioctl(FIONREAD): %s", g_strerror (errsv));

      if (n_readable != 0)
        {
          /* there is in fact more data.  allocate a new buffer, copy
           * the existing data, and then append the remaining.
           */
          new_buffer = g_malloc (n_read + n_readable);
          memcpy (new_buffer, buffer, n_read);
          n_read += ik_source_read_some_events (iks, new_buffer + n_read, n_readable);

          buffer = new_buffer;

          /* There may be new events in the buffer that were added after
           * the FIONREAD was performed, but we can't risk getting into
           * a loop.  We'll get them next time.
           */
        }
    }

  *length_out = n_read;

  return buffer;
}

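/* Main dispatch: drain the inotify fd, pair IN_MOVED_FROM/IN_MOVED_TO
 * events by cookie, hand everything that is ready to the user callback,
 * and then pick the next wakeup (fd readiness, an unpaired-move timeout,
 * or the boredom timer).
 */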
static gboolean
ik_source_dispatch (GSource     *source,
                    GSourceFunc  func,
                    gpointer     user_data)
{
  InotifyKernelSource *iks = (InotifyKernelSource *) source;
  gboolean (*user_callback) (ik_event_t *event) = (void *) func;
  gboolean interesting = FALSE;
  gint64 now;

  now = g_source_get_time (source);

  if (iks->is_bored || g_source_query_unix_fd (source, iks->fd_tag))
    {
      gchar stack_buffer[4096];
      gsize buffer_len;
      gchar *buffer;
      gsize offset;

      /* We want to read all of the available events.
       *
       * We need to do it in a finite number of steps so that we don't
       * get caught in a loop of read() with another process
       * continuously adding events each time we drain them.
       *
       * In the normal case we will have only a few events in the queue,
       * so start out by reading into a small stack-allocated buffer.
       * Even though we're on a fresh stack frame, there is no need to
       * pointlessly blow up the size of the worker thread stack with a
       * huge buffer here.
       *
       * If the result is large enough to cause us to suspect that
       * another event may be pending then we allocate a buffer on the
       * heap that can hold all of the events and read (once!) into that
       * buffer.
       */
      buffer = ik_source_read_all_the_events (iks, stack_buffer, sizeof stack_buffer, &buffer_len);

      offset = 0;

      while (offset < buffer_len)
        {
          struct inotify_event *kevent = (struct inotify_event *) (buffer + offset);
          ik_event_t *event;

          event = ik_event_new (kevent, now);

          offset += sizeof (struct inotify_event) + event->len;

          if (event->mask & IN_MOVED_TO)
            {
              ik_event_t *pair;

              pair = g_hash_table_lookup (iks->unmatched_moves, GUINT_TO_POINTER (event->cookie));
              if (pair != NULL)
                {
                  g_assert (!pair->pair);

                  g_hash_table_remove (iks->unmatched_moves, GUINT_TO_POINTER (event->cookie));
                  event->is_second_in_pair = TRUE;
                  event->pair = pair;
                  pair->pair = event;
                  continue;
                }

              interesting = TRUE;
            }

          else if (event->mask & IN_MOVED_FROM)
            {
              gboolean new;

              new = g_hash_table_insert (iks->unmatched_moves, GUINT_TO_POINTER (event->cookie), event);
              if G_UNLIKELY (!new)
                g_warning ("inotify: got IN_MOVED_FROM event with already-pending cookie %#x", event->cookie);

              interesting = TRUE;
            }

          g_queue_push_tail (&iks->queue, event);
        }

      if (buffer_len == 0)
        {
          /* We can end up reading nothing if we arrived here due to a
           * boredom timer but the stream of events stopped meanwhile.
           *
           * In that case, we need to switch back to polling the file
           * descriptor in the usual way.
           */
          g_assert (iks->is_bored);
          interesting = TRUE;
        }

      if (buffer != stack_buffer)
        g_free (buffer);
    }

  while (ik_source_can_dispatch_now (iks, now))
    {
      ik_event_t *event;

      /* callback will free the event */
      event = g_queue_pop_head (&iks->queue);

      if (event->mask & IN_MOVED_FROM && !event->pair)
        g_hash_table_remove (iks->unmatched_moves, GUINT_TO_POINTER (event->cookie));

      G_LOCK (inotify_lock);

      interesting |= (* user_callback) (event);

      G_UNLOCK (inotify_lock);
    }

  /* The queue gets blocked iff we have unmatched moves */
  g_assert ((iks->queue.length > 0) == (g_hash_table_size (iks->unmatched_moves) > 0));

  /* Here's where we decide what will wake us up next.
   *
   * If the last event was interesting then we will wake up on the fd or
   * when the timeout is reached on an unpaired move (if any).
   *
   * If the last event was uninteresting then we will wake up after the
   * shorter of the boredom sleep or any timeout for an unpaired move.
   */
  if (interesting)
    {
      if (iks->is_bored)
        {
          g_source_modify_unix_fd (source, iks->fd_tag, G_IO_IN);
          iks->is_bored = FALSE;
        }

      g_source_set_ready_time (source, ik_source_get_dispatch_time (iks));
    }
  else
    {
      guint64 dispatch_time = ik_source_get_dispatch_time (iks);
      guint64 boredom_time = now + BOREDOM_SLEEP_TIME;

      if (!iks->is_bored)
        {
          g_source_modify_unix_fd (source, iks->fd_tag, 0);
          iks->is_bored = TRUE;
        }

      g_source_set_ready_time (source, MIN (dispatch_time, boredom_time));
    }

  return TRUE;
}

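/* Create the singleton source: open an inotify descriptor (preferring
 * inotify_init1() with IN_CLOEXEC), make it non-blocking, and attach the
 * source to the GLib worker context.
 */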
static InotifyKernelSource *
ik_source_new (gboolean (* callback) (ik_event_t *event))
{
  static GSourceFuncs source_funcs = {
    NULL, NULL,
    ik_source_dispatch,
    NULL, NULL, NULL
  };
  InotifyKernelSource *iks;
  GSource *source;

  source = g_source_new (&source_funcs, sizeof (InotifyKernelSource));
  iks = (InotifyKernelSource *) source;

  g_source_set_name (source, "inotify kernel source");

  iks->unmatched_moves = g_hash_table_new (NULL, NULL);
  iks->fd = inotify_init1 (IN_CLOEXEC);

  if (iks->fd < 0)
    iks->fd = inotify_init ();

  if (iks->fd >= 0)
    {
      GError *error = NULL;

      g_unix_set_fd_nonblocking (iks->fd, TRUE, &error);
      g_assert_no_error (error);

      iks->fd_tag = g_source_add_unix_fd (source, iks->fd, G_IO_IN);
    }

  g_source_set_callback (source, (GSourceFunc) callback, NULL, NULL);

  g_source_attach (source, GLIB_PRIVATE_CALL (g_get_worker_context) ());

  return iks;
}

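/* Initialise the inotify kernel source exactly once and report whether an
 * inotify descriptor could actually be opened.
 *
 * A rough usage sketch (illustrative only; in GLib the real caller is
 * inotify-helper.c, and the callback runs on the worker context):
 *
 *   static gboolean
 *   on_event (ik_event_t *event)
 *   {
 *     ...                          // consume and free the event
 *     return TRUE;                 // TRUE means "interesting"
 *   }
 *
 *   if (_ik_startup (on_event))
 *     {
 *       int err;
 *       gint32 wd = _ik_watch ("/some/dir", IN_ALL_EVENTS, &err);
 *       ...
 *       _ik_ignore ("/some/dir", wd);
 *     }
 */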
gboolean
_ik_startup (gboolean (*cb)(ik_event_t *event))
{
  if (g_once_init_enter (&inotify_source))
    g_once_init_leave (&inotify_source, ik_source_new (cb));

  return inotify_source->fd >= 0;
}

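/* Add a watch on @path for the events in @mask.  Returns the watch
 * descriptor, or -1 on failure with errno stored in @err (if non-NULL).
 */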
gint32
_ik_watch (const char *path,
           guint32     mask,
           int        *err)
{
  gint32 wd = -1;

  g_assert (path != NULL);
  g_assert (inotify_source && inotify_source->fd >= 0);

  wd = inotify_add_watch (inotify_source->fd, path, mask);

  if (wd < 0)
    {
      int e = errno;
      /* FIXME: debug msg failed to add watch */
      if (err)
        *err = e;
      return wd;
    }

  g_assert (wd >= 0);
  return wd;
}

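/* Remove a previously added watch.  Returns 0 on success, -1 if the
 * kernel rejected the request.
 */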
int
_ik_ignore (const char *path,
            gint32      wd)
{
  g_assert (wd >= 0);
  g_assert (inotify_source && inotify_source->fd >= 0);

  if (inotify_rm_watch (inotify_source->fd, wd) < 0)
    {
      /* int e = errno; */
      /* failed to rm watch */
      return -1;
    }

  return 0;
}