Branch data Line data Source code
1 : : /*
2 : : * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
3 : : *
4 : : * This program is free software; you can redistribute it and/or modify
5 : : * it under the terms of the GNU General Public License as published by
6 : : * the Free Software Foundation; either version 2, or (at your option)
7 : : * any later version.
8 : : *
9 : : * This program is distributed in the hope that it will be useful,
10 : : * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 : : * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 : : * GNU General Public License for more details.
13 : : *
14 : : * You should have received a copy of the GNU General Public License
15 : : * along with this program; see the file COPYING. If not, write to
16 : : * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
17 : : */
18 : :
19 : : /*
20 : : * Basic idea behind the notification queue: An fsnotify group (like inotify)
21 : : * sends the userspace notification about events asynchronously some time after
22 : : * the event happened. When inotify gets an event it will need to add that
 * event to the group notify queue. Since a single event might need to be on
 * multiple groups' notification queues we can't add the event directly to each
 * queue and instead add a small "event_holder" to each queue. This event_holder
26 : : * has a pointer back to the original event. Since the majority of events are
27 : : * going to end up on one, and only one, notification queue we embed one
28 : : * event_holder into each event. This means we have a single allocation instead
29 : : * of always needing two. If the embedded event_holder is already in use by
30 : : * another group a new event_holder (from fsnotify_event_holder_cachep) will be
31 : : * allocated and used.
32 : : */
33 : :
34 : : #include <linux/fs.h>
35 : : #include <linux/init.h>
36 : : #include <linux/kernel.h>
37 : : #include <linux/list.h>
38 : : #include <linux/module.h>
39 : : #include <linux/mount.h>
40 : : #include <linux/mutex.h>
41 : : #include <linux/namei.h>
42 : : #include <linux/path.h>
43 : : #include <linux/slab.h>
44 : : #include <linux/spinlock.h>
45 : :
46 : : #include <linux/atomic.h>
47 : :
48 : : #include <linux/fsnotify_backend.h>
49 : : #include "fsnotify.h"
50 : :
51 : : static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
52 : :
/**
 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
 * Called from fsnotify_move, which is inlined into filesystem modules.
 *
 * Returns the post-increment value of a global atomic counter, so each call
 * yields a distinct, monotonically increasing cookie (until u32 wrap-around).
 */
u32 fsnotify_get_cookie(void)
{
	return atomic_inc_return(&fsnotify_sync_cookie);
}
61 : : EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
62 : :
63 : : /* return true if the notify queue is empty, false otherwise */
64 : 0 : bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group)
65 : : {
66 [ - + ]: 140906 : BUG_ON(!mutex_is_locked(&group->notification_mutex));
67 : 281812 : return list_empty(&group->notification_list) ? true : false;
68 : : }
69 : :
70 : 0 : void fsnotify_destroy_event(struct fsnotify_group *group,
71 : : struct fsnotify_event *event)
72 : : {
73 : : /* Overflow events are per-group and we don't want to free them */
74 [ + - ][ + - ]: 26098 : if (!event || event->mask == FS_Q_OVERFLOW)
[ + - ][ + - ]
75 : 0 : return;
76 : :
77 : 26098 : group->ops->free_event(event);
78 : : }
79 : :
/*
 * Add an event to the group notification queue.  The group can later pull this
 * event off the queue to deal with.  The function returns 0 if the event was
 * added to the queue, 1 if the event was merged with some other queued event,
 * 2 if the queue of events has overflowed.
 *
 * @group: the group whose notification_list receives the event
 * @event: the event to queue (replaced by the group's overflow event when
 *         the queue is full)
 * @merge: optional callback that may coalesce @event into an already-queued
 *         event; a non-zero return means the merge happened and @event is
 *         not queued separately
 */
int fsnotify_add_notify_event(struct fsnotify_group *group,
			      struct fsnotify_event *event,
			      int (*merge)(struct list_head *,
					   struct fsnotify_event *))
{
	int ret = 0;
	struct list_head *list = &group->notification_list;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	mutex_lock(&group->notification_mutex);

	if (group->q_len >= group->max_events) {
		ret = 2;
		/*
		 * Queue overflow event only if it isn't already queued.
		 * An empty list head on the overflow event means "not on
		 * any queue" (see fsnotify_remove_notify_event()).
		 */
		if (!list_empty(&group->overflow_event->list)) {
			mutex_unlock(&group->notification_mutex);
			return ret;
		}
		/* Drop the caller's event; queue the singleton instead */
		event = group->overflow_event;
		goto queue;
	}

	if (!list_empty(list) && merge) {
		ret = merge(list, event);
		if (ret) {
			/* Coalesced into an existing event; nothing to add */
			mutex_unlock(&group->notification_mutex);
			return ret;
		}
	}

queue:
	group->q_len++;
	list_add_tail(&event->list, list);
	mutex_unlock(&group->notification_mutex);

	/* Wake readers blocked on the queue and signal async listeners */
	wake_up(&group->notification_waitq);
	kill_fasync(&group->fsn_fa, SIGIO, POLL_IN);
	return ret;
}
126 : :
127 : : /*
128 : : * Remove and return the first event from the notification list. It is the
129 : : * responsibility of the caller to destroy the obtained event
130 : : */
131 : 0 : struct fsnotify_event *fsnotify_remove_notify_event(struct fsnotify_group *group)
132 : : {
133 : : struct fsnotify_event *event;
134 : :
135 [ - + ]: 26019 : BUG_ON(!mutex_is_locked(&group->notification_mutex));
136 : :
137 : : pr_debug("%s: group=%p\n", __func__, group);
138 : :
139 : 26019 : event = list_first_entry(&group->notification_list,
140 : : struct fsnotify_event, list);
141 : : /*
142 : : * We need to init list head for the case of overflow event so that
143 : : * check in fsnotify_add_notify_events() works
144 : : */
145 : 26019 : list_del_init(&event->list);
146 : 26019 : group->q_len--;
147 : :
148 : 26019 : return event;
149 : : }
150 : :
151 : : /*
152 : : * This will not remove the event, that must be done with fsnotify_remove_notify_event()
153 : : */
154 : 0 : struct fsnotify_event *fsnotify_peek_notify_event(struct fsnotify_group *group)
155 : : {
156 [ - + ]: 26017 : BUG_ON(!mutex_is_locked(&group->notification_mutex));
157 : :
158 : 26017 : return list_first_entry(&group->notification_list,
159 : : struct fsnotify_event, list);
160 : : }
161 : :
162 : : /*
163 : : * Called when a group is being torn down to clean up any outstanding
164 : : * event notifications.
165 : : */
166 : 0 : void fsnotify_flush_notify(struct fsnotify_group *group)
167 : : {
168 : : struct fsnotify_event *event;
169 : :
170 : 8 : mutex_lock(&group->notification_mutex);
171 [ + + ]: 10 : while (!fsnotify_notify_queue_is_empty(group)) {
172 : 2 : event = fsnotify_remove_notify_event(group);
173 : : fsnotify_destroy_event(group, event);
174 : : }
175 : 8 : mutex_unlock(&group->notification_mutex);
176 : 8 : }
177 : :
178 : : /*
179 : : * fsnotify_create_event - Allocate a new event which will be sent to each
180 : : * group's handle_event function if the group was interested in this
181 : : * particular event.
182 : : *
183 : : * @inode the inode which is supposed to receive the event (sometimes a
184 : : * parent of the inode to which the event happened.
185 : : * @mask what actually happened.
186 : : * @data pointer to the object which was actually affected
187 : : * @data_type flag indication if the data is a file, path, inode, nothing...
188 : : * @name the filename, if available
189 : : */
190 : 0 : void fsnotify_init_event(struct fsnotify_event *event, struct inode *inode,
191 : : u32 mask)
192 : : {
193 : 26106 : INIT_LIST_HEAD(&event->list);
194 : 26106 : event->inode = inode;
195 : 26106 : event->mask = mask;
196 : 26106 : }
|