/*
   Copyright (C) 2005 John McCutchan

   The Gnome Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   The Gnome Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with the Gnome Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.

   Authors:
     John McCutchan <john@johnmccutchan.com>
*/

#include "config.h"

#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <glib.h>
#include "inotify-kernel.h"
#include <sys/inotify.h>

/* Timings for pairing MOVED_TO / MOVED_FROM events */
#define PROCESS_EVENTS_TIME 1000 /* milliseconds (1 Hz) */
#define DEFAULT_HOLD_UNTIL_TIME 0 /* 0 microseconds */
#define MOVE_HOLD_UNTIL_TIME 0 /* 0 microseconds */
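
/* Note on units: PROCESS_EVENTS_TIME is consumed by g_timeout_add (),
 * which takes milliseconds, while the two HOLD_UNTIL values are fed
 * through g_time_val_add (), which takes microseconds.  Both hold times
 * are currently 0, so events are eligible for delivery as soon as they
 * are read; raising MOVE_HOLD_UNTIL_TIME would widen the window in
 * which a MOVED_FROM can wait for its matching MOVED_TO. */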

static int inotify_instance_fd = -1;
static GQueue *events_to_process = NULL;
static GQueue *event_queue = NULL;
static GHashTable *cookie_hash = NULL;
static GIOChannel *inotify_read_ioc;
static GPollFD ik_poll_fd;
static gboolean ik_poll_fd_enabled = TRUE;
static void (*user_cb)(ik_event_t *event);

static gboolean ik_read_callback (gpointer user_data);
static gboolean ik_process_eq_callback (gpointer user_data);

static guint32 ik_move_matches = 0;
static guint32 ik_move_misses = 0;

static gboolean process_eq_running = FALSE;

/* We use the lock from inotify-helper.c
 *
 * There are two places where we take this lock:
 *
 * 1) in ik_read_callback
 * 2) in ik_process_eq_callback
 *
 * The rest of the locking is taken care of in inotify-helper.c
 */
G_LOCK_EXTERN (inotify_lock);

typedef struct ik_event_internal {
  ik_event_t *event;
  gboolean seen;
  gboolean sent;
  GTimeVal hold_until;
  struct ik_event_internal *pair;
} ik_event_internal_t;

/* In order to perform non-sleeping inotify event chunking we need
 * a custom GSource
 */
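/* (GLib drives a GSource through a fixed lifecycle: prepare () runs
 * before polling, the registered GPollFDs are polled, check () decides
 * whether the source is ready, and dispatch () finally invokes the
 * callback.  We exploit check () below to postpone reading small
 * batches of events without ever blocking the main loop.)
 */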
static gboolean
ik_source_prepare (GSource *source,
                   gint    *timeout)
{
  return FALSE;
}

static gboolean
ik_source_timeout (gpointer data)
{
  GSource *source = (GSource *)data;

  /* Re-activate the PollFD */
  g_source_add_poll (source, &ik_poll_fd);
  g_source_unref (source);
  ik_poll_fd_enabled = TRUE;

  return FALSE;
}
#define MAX_PENDING_COUNT 2
#define PENDING_THRESHOLD(qsize) ((qsize) >> 1)
#define PENDING_MARGINAL_COST(p) ((unsigned int)(1 << (p)))
#define MAX_QUEUED_EVENTS 2048
#define AVERAGE_EVENT_SIZE (sizeof (struct inotify_event) + 16)
#define TIMEOUT_MILLISECONDS 10
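
/* A worked example of the batching heuristic in ik_source_check ():
 * suppose FIONREAD reports roughly 2 events pending -- far below
 * PENDING_THRESHOLD (MAX_QUEUED_EVENTS) == 1024.  On the first pass
 * (pending_count == 0) we defer reading, since the queue grew by at
 * least PENDING_MARGINAL_COST (0) == 1 event.  On the second pass we
 * defer again only if at least 2 further events arrived, and after
 * MAX_PENDING_COUNT passes we read unconditionally.  The effect is to
 * coalesce bursts while never delaying a quiet stream for long. */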

static gboolean
ik_source_check (GSource *source)
{
  static int prev_pending = 0, pending_count = 0;

  /* We already disabled the PollFD or
   * there is nothing to be read from inotify */
  if (!ik_poll_fd_enabled || !(ik_poll_fd.revents & G_IO_IN))
    return FALSE;

  if (pending_count < MAX_PENDING_COUNT)
    {
      unsigned int pending;

      if (ioctl (inotify_instance_fd, FIONREAD, &pending) == -1)
        goto do_read;

      pending /= AVERAGE_EVENT_SIZE;

      /* Don't wait if the number of pending events is too close
       * to the maximum queue size.
       */
      if (pending > PENDING_THRESHOLD (MAX_QUEUED_EVENTS))
        goto do_read;

      /* With each successive iteration, the number of new events
       * required to justify a further wait doubles.
       */
      if (pending - prev_pending < PENDING_MARGINAL_COST (pending_count))
        goto do_read;

      prev_pending = pending;
      pending_count++;

      /* We are going to wait to read the events: */

      /* Remove the PollFD from the source */
      g_source_remove_poll (source, &ik_poll_fd);
      /* To avoid threading issues we need to flag that we've done that */
      ik_poll_fd_enabled = FALSE;
      /* Set a timeout to re-add the PollFD to the source */
      g_source_ref (source);
      g_timeout_add (TIMEOUT_MILLISECONDS, ik_source_timeout, source);

      return FALSE;
    }

do_read:
  /* We are ready to read events from inotify */

  prev_pending = 0;
  pending_count = 0;

  return TRUE;
}

static gboolean
ik_source_dispatch (GSource     *source,
                    GSourceFunc  callback,
                    gpointer     user_data)
{
  if (callback)
    return callback (user_data);
  return TRUE;
}

static GSourceFuncs ik_source_funcs =
{
  ik_source_prepare,
  ik_source_check,
  ik_source_dispatch,
  NULL
};

gboolean
_ik_startup (void (*cb)(ik_event_t *event))
{
  static gboolean initialized = FALSE;
  GSource *source;

  user_cb = cb;
  /* Ignore repeated calls */
  if (initialized)
    return inotify_instance_fd >= 0;

  initialized = TRUE;
  inotify_instance_fd = inotify_init ();

  if (inotify_instance_fd < 0)
    return FALSE;

  inotify_read_ioc = g_io_channel_unix_new (inotify_instance_fd);
  ik_poll_fd.fd = inotify_instance_fd;
  ik_poll_fd.events = G_IO_IN | G_IO_HUP | G_IO_ERR;
  g_io_channel_set_encoding (inotify_read_ioc, NULL, NULL);
  g_io_channel_set_flags (inotify_read_ioc, G_IO_FLAG_NONBLOCK, NULL);

  source = g_source_new (&ik_source_funcs, sizeof (GSource));
  g_source_add_poll (source, &ik_poll_fd);
  g_source_set_callback (source, ik_read_callback, NULL, NULL);
  g_source_attach (source, NULL);
  g_source_unref (source);

  cookie_hash = g_hash_table_new (g_direct_hash, g_direct_equal);
  event_queue = g_queue_new ();
  events_to_process = g_queue_new ();

  return TRUE;
}
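
/* A minimal usage sketch (hypothetical caller; my_event_cb, wd and err
 * are assumed names, not part of this file):
 *
 *   static void
 *   my_event_cb (ik_event_t *event)
 *   {
 *     g_print ("wd %d: %s %s\n", event->wd,
 *              _ik_mask_to_string (event->mask), event->name);
 *     _ik_event_free (event);
 *   }
 *
 *   int err;
 *   gint32 wd;
 *   if (_ik_startup (my_event_cb))
 *     wd = _ik_watch ("/tmp", IN_CREATE | IN_DELETE, &err);
 */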

static ik_event_internal_t *
ik_event_internal_new (ik_event_t *event)
{
  ik_event_internal_t *internal_event = g_new0 (ik_event_internal_t, 1);
  GTimeVal tv;

  g_assert (event);

  g_get_current_time (&tv);
  g_time_val_add (&tv, DEFAULT_HOLD_UNTIL_TIME);
  internal_event->event = event;
  internal_event->hold_until = tv;

  return internal_event;
}

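/* The kernel hands us a packed stream of variable-length records: each
 * struct inotify_event (wd, mask, cookie, len) is immediately followed
 * by len bytes of NUL-padded name (len is 0 for events on the watched
 * directory itself).  ik_event_new () lifts one such record out of the
 * raw buffer into a heap-allocated ik_event_t. */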
static ik_event_t *
ik_event_new (char *buffer)
{
  struct inotify_event *kevent = (struct inotify_event *)buffer;
  ik_event_t *event = g_new0 (ik_event_t, 1);

  g_assert (buffer);

  event->wd = kevent->wd;
  event->mask = kevent->mask;
  event->cookie = kevent->cookie;
  event->len = kevent->len;
  if (event->len)
    event->name = g_strdup (kevent->name);
  else
    event->name = g_strdup ("");

  return event;
}

ik_event_t *
_ik_event_new_dummy (const char *name,
                     gint32      wd,
                     guint32     mask)
{
  ik_event_t *event = g_new0 (ik_event_t, 1);
  event->wd = wd;
  event->mask = mask;
  event->cookie = 0;
  if (name)
    event->name = g_strdup (name);
  else
    event->name = g_strdup ("");

  event->len = strlen (event->name);

  return event;
}

void
_ik_event_free (ik_event_t *event)
{
  if (event->pair)
    _ik_event_free (event->pair);
  g_free (event->name);
  g_free (event);
}

gint32
_ik_watch (const char *path,
           guint32     mask,
           int        *err)
{
  gint32 wd = -1;

  g_assert (path != NULL);
  g_assert (inotify_instance_fd >= 0);

  wd = inotify_add_watch (inotify_instance_fd, path, mask);

  if (wd < 0)
    {
      int e = errno;
      /* FIXME: debug msg failed to add watch */
      if (err)
        *err = e;
      return wd;
    }

  g_assert (wd >= 0);
  return wd;
}
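
/* Note for callers: inotify_add_watch () fails with errno == ENOSPC
 * once the per-user watch limit (/proc/sys/fs/inotify/max_user_watches)
 * is exhausted, which is worth reporting distinctly from a plain
 * ENOENT. */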

int
_ik_ignore (const char *path,
            gint32      wd)
{
  g_assert (wd >= 0);
  g_assert (inotify_instance_fd >= 0);

  if (inotify_rm_watch (inotify_instance_fd, wd) < 0)
    {
      /* int e = errno; */
      /* failed to rm watch */
      return -1;
    }

  return 0;
}

void
_ik_move_stats (guint32 *matches,
                guint32 *misses)
{
  if (matches)
    *matches = ik_move_matches;

  if (misses)
    *misses = ik_move_misses;
}

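/* Note that the switches below match exact single-bit masks (after
 * IN_ISDIR has been stripped), so an event carrying a combination of
 * bits falls through to "UNKNOWN_EVENT". */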
const char *
_ik_mask_to_string (guint32 mask)
{
  gboolean is_dir = mask & IN_ISDIR;
  mask &= ~IN_ISDIR;

  if (is_dir)
    {
      switch (mask)
        {
        case IN_ACCESS:
          return "ACCESS (dir)";
        case IN_MODIFY:
          return "MODIFY (dir)";
        case IN_ATTRIB:
          return "ATTRIB (dir)";
        case IN_CLOSE_WRITE:
          return "CLOSE_WRITE (dir)";
        case IN_CLOSE_NOWRITE:
          return "CLOSE_NOWRITE (dir)";
        case IN_OPEN:
          return "OPEN (dir)";
        case IN_MOVED_FROM:
          return "MOVED_FROM (dir)";
        case IN_MOVED_TO:
          return "MOVED_TO (dir)";
        case IN_DELETE:
          return "DELETE (dir)";
        case IN_CREATE:
          return "CREATE (dir)";
        case IN_DELETE_SELF:
          return "DELETE_SELF (dir)";
        case IN_UNMOUNT:
          return "UNMOUNT (dir)";
        case IN_Q_OVERFLOW:
          return "Q_OVERFLOW (dir)";
        case IN_IGNORED:
          return "IGNORED (dir)";
        default:
          return "UNKNOWN_EVENT (dir)";
        }
    }
  else
    {
      switch (mask)
        {
        case IN_ACCESS:
          return "ACCESS";
        case IN_MODIFY:
          return "MODIFY";
        case IN_ATTRIB:
          return "ATTRIB";
        case IN_CLOSE_WRITE:
          return "CLOSE_WRITE";
        case IN_CLOSE_NOWRITE:
          return "CLOSE_NOWRITE";
        case IN_OPEN:
          return "OPEN";
        case IN_MOVED_FROM:
          return "MOVED_FROM";
        case IN_MOVED_TO:
          return "MOVED_TO";
        case IN_DELETE:
          return "DELETE";
        case IN_CREATE:
          return "CREATE";
        case IN_DELETE_SELF:
          return "DELETE_SELF";
        case IN_UNMOUNT:
          return "UNMOUNT";
        case IN_Q_OVERFLOW:
          return "Q_OVERFLOW";
        case IN_IGNORED:
          return "IGNORED";
        default:
          return "UNKNOWN_EVENT";
        }
    }
}

static void
ik_read_events (gsize  *buffer_size_out,
                gchar **buffer_out)
{
  static gchar *buffer = NULL;
  static gsize buffer_size;

  /* Initialize the buffer on our first call */
  if (buffer == NULL)
    {
      buffer_size = AVERAGE_EVENT_SIZE;
      buffer_size *= MAX_QUEUED_EVENTS;
      buffer = g_malloc (buffer_size);
    }

  *buffer_size_out = 0;
  *buffer_out = NULL;

  memset (buffer, 0, buffer_size);

  if (g_io_channel_read_chars (inotify_read_ioc, (char *)buffer,
                               buffer_size, buffer_size_out,
                               NULL) != G_IO_STATUS_NORMAL)
    {
      /* Error reading: fall through and hand back whatever
       * (possibly nothing) was read */
    }
  *buffer_out = buffer;
}
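
/* Because ik_read_events () hands out a pointer into its static buffer,
 * the result is only valid until the next call and the function is not
 * reentrant.  That is fine here: it is only ever invoked from
 * ik_read_callback (), which holds inotify_lock. */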

static gboolean
ik_read_callback (gpointer user_data)
{
  gchar *buffer;
  gsize buffer_size, buffer_i, events;

  G_LOCK (inotify_lock);
  ik_read_events (&buffer_size, &buffer);

  buffer_i = 0;
  events = 0;
  while (buffer_i < buffer_size)
    {
      struct inotify_event *event;
      gsize event_size;
      event = (struct inotify_event *)&buffer[buffer_i];
      event_size = sizeof (struct inotify_event) + event->len;
      g_queue_push_tail (events_to_process,
                         ik_event_internal_new (ik_event_new (&buffer[buffer_i])));
      buffer_i += event_size;
      events++;
    }

  /* If the event processing callback is off, turn it back on */
  if (!process_eq_running && events)
    {
      process_eq_running = TRUE;
      g_timeout_add (PROCESS_EVENTS_TIME, ik_process_eq_callback, NULL);
    }

  G_UNLOCK (inotify_lock);

  return TRUE;
}

static gboolean
g_timeval_lt (GTimeVal *val1,
              GTimeVal *val2)
{
  if (val1->tv_sec < val2->tv_sec)
    return TRUE;

  if (val1->tv_sec > val2->tv_sec)
    return FALSE;

  /* val1->tv_sec == val2->tv_sec */
  if (val1->tv_usec < val2->tv_usec)
    return TRUE;

  return FALSE;
}

static gboolean
g_timeval_eq (GTimeVal *val1,
              GTimeVal *val2)
{
  return (val1->tv_sec == val2->tv_sec) && (val1->tv_usec == val2->tv_usec);
}

static void
ik_pair_events (ik_event_internal_t *event1,
                ik_event_internal_t *event2)
{
  g_assert (event1 && event2);
  /* We should only be pairing events that have the same cookie */
  g_assert (event1->event->cookie == event2->event->cookie);
  /* We shouldn't pair an event that is already paired */
  g_assert (event1->pair == NULL && event2->pair == NULL);

  /* Pair the internal structures and the ik_event_t structures */
  event1->pair = event2;
  event1->event->pair = event2->event;

  if (g_timeval_lt (&event1->hold_until, &event2->hold_until))
    event1->hold_until = event2->hold_until;

  event2->hold_until = event1->hold_until;
}
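
/* Only event1 (the MOVED_FROM half) gets its ->pair field set, so
 * pairing alone makes it ready for dispatch; aligning both hold_until
 * values to the later deadline keeps the MOVED_TO half from lingering
 * in the queue after its partner has been flushed. */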

static void
ik_event_add_microseconds (ik_event_internal_t *event,
                           glong                usecs)
{
  g_assert (event);
  g_time_val_add (&event->hold_until, usecs);
}

static gboolean
ik_event_ready (ik_event_internal_t *event)
{
  GTimeVal tv;
  g_assert (event);

  g_get_current_time (&tv);

  /* An event is ready if:
   *
   * it has no cookie -- there is nothing to be gained by holding it
   * or, it is already paired -- we don't need to hold it anymore
   * or, we have held it long enough
   */
  return
    event->event->cookie == 0 ||
    event->pair != NULL ||
    g_timeval_lt (&event->hold_until, &tv) ||
    g_timeval_eq (&event->hold_until, &tv);
}

static void
ik_pair_moves (gpointer data,
               gpointer user_data)
{
  ik_event_internal_t *event = (ik_event_internal_t *)data;

  if (event->seen || event->sent)
    return;

  if (event->event->cookie != 0)
    {
      /* When we get a MOVED_FROM event we delay sending it by
       * MOVE_HOLD_UNTIL_TIME microseconds, because a MOVED_TO pair
       * _might_ be coming in the near future */
      if (event->event->mask & IN_MOVED_FROM)
        {
          g_hash_table_insert (cookie_hash, GINT_TO_POINTER (event->event->cookie), event);
          /* Because we don't deliver move events there is no point in waiting for the match right now. */
          ik_event_add_microseconds (event, MOVE_HOLD_UNTIL_TIME);
        }
      else if (event->event->mask & IN_MOVED_TO)
        {
          /* Check whether we are waiting for this MOVED_TO event's cookie
           * to pair it with a MOVED_FROM */
          ik_event_internal_t *match = NULL;
          match = g_hash_table_lookup (cookie_hash, GINT_TO_POINTER (event->event->cookie));
          if (match)
            {
              g_hash_table_remove (cookie_hash, GINT_TO_POINTER (event->event->cookie));
              ik_pair_events (match, event);
            }
        }
    }
  event->seen = TRUE;
}

static void
ik_process_events (void)
{
  g_queue_foreach (events_to_process, ik_pair_moves, NULL);

  while (!g_queue_is_empty (events_to_process))
    {
      ik_event_internal_t *event = g_queue_peek_head (events_to_process);

      /* This must already have been sent as part of a MOVED_FROM/MOVED_TO pair */
      if (event->sent)
        {
          /* Pop event */
          g_queue_pop_head (events_to_process);
          /* Free the internal event structure */
          g_free (event);
          continue;
        }

      /* The event isn't ready yet */
      if (!ik_event_ready (event))
        break;

      /* Pop it */
      event = g_queue_pop_head (events_to_process);

      /* Check if this is a MOVED_FROM that is still sitting in the cookie_hash */
      if (event->event->cookie && event->pair == NULL &&
          g_hash_table_lookup (cookie_hash, GINT_TO_POINTER (event->event->cookie)))
        g_hash_table_remove (cookie_hash, GINT_TO_POINTER (event->event->cookie));

      if (event->pair)
        {
          /* We send out paired MOVED_FROM/MOVED_TO events in the same event buffer */
          /* g_assert (event->event->mask == IN_MOVED_FROM && event->pair->event->mask == IN_MOVED_TO); */
          /* Copy the paired data */
          event->pair->sent = TRUE;
          event->sent = TRUE;
          ik_move_matches++;
        }
      else if (event->event->cookie)
        {
          /* If we couldn't pair a MOVED_FROM and MOVED_TO together, we change
           * the event masks */
          /* Changing MOVED_FROM to DELETE and MOVED_TO to CREATE lets us make
           * the guarantee that you will never see a non-matched MOVE event */

          if (event->event->mask & IN_MOVED_FROM)
            {
              event->event->mask = IN_DELETE | (event->event->mask & IN_ISDIR);
              ik_move_misses++; /* not super accurate: if we aren't watching the destination it still counts as a miss */
            }
          if (event->event->mask & IN_MOVED_TO)
            event->event->mask = IN_CREATE | (event->event->mask & IN_ISDIR);
        }

      /* Push the ik_event_t onto the event queue */
      g_queue_push_tail (event_queue, event->event);
      /* Free the internal event structure */
      g_free (event);
    }
}
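
/* ik_process_events () always stops at the first event that is not yet
 * ready, so delivery order is preserved: a newer event can never
 * overtake an older one that is still being held for pairing. */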

static gboolean
ik_process_eq_callback (gpointer user_data)
{
  gboolean res;

  /* Try to move as many events as possible to the event queue */
  G_LOCK (inotify_lock);
  ik_process_events ();

  while (!g_queue_is_empty (event_queue))
    {
      ik_event_t *event = g_queue_pop_head (event_queue);

      user_cb (event);
    }

  res = TRUE;

  if (g_queue_get_length (events_to_process) == 0)
    {
      /* Returning FALSE removes this timeout source;
       * ik_read_callback () will re-arm it when new events arrive */
      process_eq_running = FALSE;
      res = FALSE;
    }

  G_UNLOCK (inotify_lock);

  return res;
}