Branch data Line data Source code
1 : : #ifndef _LINUX_RCULIST_H
2 : : #define _LINUX_RCULIST_H
3 : :
4 : : #ifdef __KERNEL__
5 : :
6 : : /*
7 : : * RCU-protected list version
8 : : */
9 : : #include <linux/list.h>
10 : : #include <linux/rcupdate.h>
11 : :
12 : : /*
13 : : * Why is there no list_empty_rcu()? Because list_empty() serves this
14 : : * purpose. The list_empty() function fetches the RCU-protected pointer
15 : : * and compares it to the address of the list head, but neither dereferences
16 : : * this pointer itself nor provides this pointer to the caller. Therefore,
17 : : * it is not necessary to use rcu_dereference(), so that list_empty() can
18 : : * be used anywhere you would want to use a list_empty_rcu().
19 : : */
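/*
 * Illustrative sketch, not part of rculist.h (and not instrumented):
 * an emptiness check on an RCU-protected list needs no special
 * primitive, as the comment above explains.  my_list is a
 * hypothetical list head that readers traverse under rcu_read_lock().
 */
static LIST_HEAD(my_list);

static bool my_list_has_entries(void)
{
	return !list_empty(&my_list);	/* no rcu_dereference() needed */
}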
20 : :
21 : : /*
22 : : * INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers
23 : : * @list: list to be initialized
24 : : *
25 : : * You should instead use INIT_LIST_HEAD() for normal initialization and
26 : : * cleanup tasks, when readers have no access to the list being initialized.
27 : : * However, if the list being initialized is visible to readers, you
28 : : * need to keep the compiler from being too mischievous.
29 : : */
30 : : static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
31 : : {
32 : 0 : ACCESS_ONCE(list->next) = list;
33 : 0 : ACCESS_ONCE(list->prev) = list;
34 : : }
35 : :
36 : : /*
37 : : * Return the ->next pointer of a list_head in an RCU-safe
38 : : * way; we must not access it directly.
39 : : */
40 : : #define list_next_rcu(list) (*((struct list_head __rcu **)(&(list)->next)))
41 : :
42 : : /*
43 : : * Insert a new entry between two known consecutive entries.
44 : : *
45 : : * This is only for internal list manipulation where we know
46 : : * the prev/next entries already!
47 : : */
48 : : #ifndef CONFIG_DEBUG_LIST
49 : : static inline void __list_add_rcu(struct list_head *new,
50 : : struct list_head *prev, struct list_head *next)
51 : : {
52 : 1187052 : new->next = next;
53 : 1187052 : new->prev = prev;
54 : 1184801 : rcu_assign_pointer(list_next_rcu(prev), new);
55 : 60261 : next->prev = new;
56 : : }
57 : : #else
58 : : void __list_add_rcu(struct list_head *new,
59 : : struct list_head *prev, struct list_head *next);
60 : : #endif
61 : :
62 : : /**
63 : : * list_add_rcu - add a new entry to rcu-protected list
64 : : * @new: new entry to be added
65 : : * @head: list head to add it after
66 : : *
67 : : * Insert a new entry after the specified head.
68 : : * This is good for implementing stacks.
69 : : *
70 : : * The caller must take whatever precautions are necessary
71 : : * (such as holding appropriate locks) to avoid racing
72 : : * with another list-mutation primitive, such as list_add_rcu()
73 : : * or list_del_rcu(), running on this same list.
74 : : * However, it is perfectly legal to run concurrently with
75 : : * the _rcu list-traversal primitives, such as
76 : : * list_for_each_entry_rcu().
77 : : */
78 : : static inline void list_add_rcu(struct list_head *new, struct list_head *head)
79 : : {
80 : 77263 : __list_add_rcu(new, head, head->next);
81 : : }
82 : :
83 : : /**
84 : : * list_add_tail_rcu - add a new entry to rcu-protected list
85 : : * @new: new entry to be added
86 : : * @head: list head to add it before
87 : : *
88 : : * Insert a new entry before the specified head.
89 : : * This is useful for implementing queues.
90 : : *
91 : : * The caller must take whatever precautions are necessary
92 : : * (such as holding appropriate locks) to avoid racing
93 : : * with another list-mutation primitive, such as list_add_tail_rcu()
94 : : * or list_del_rcu(), running on this same list.
95 : : * However, it is perfectly legal to run concurrently with
96 : : * the _rcu list-traversal primitives, such as
97 : : * list_for_each_entry_rcu().
98 : : */
99 : : static inline void list_add_tail_rcu(struct list_head *new,
100 : : struct list_head *head)
101 : : {
102 : 1109789 : __list_add_rcu(new, head->prev, head);
103 : : }
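/*
 * Illustrative sketch, not part of rculist.h (and not instrumented):
 * a typical update-side caller serializes additions with a lock of
 * its own while readers traverse concurrently.  struct foo, foo_list
 * and foo_lock are hypothetical names reused by later sketches.
 */
struct foo {
	int key;
	struct list_head list;
	struct rcu_head rcu;	/* for deferred freeing, see the list_del_rcu sketch */
};
static LIST_HEAD(foo_list);
static DEFINE_SPINLOCK(foo_lock);

static void foo_enqueue(struct foo *new)
{
	spin_lock(&foo_lock);				/* exclude other updaters */
	list_add_tail_rcu(&new->list, &foo_list);	/* readers need no lock */
	spin_unlock(&foo_lock);
}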
104 : :
105 : : /**
106 : : * list_del_rcu - deletes entry from list without re-initialization
107 : : * @entry: the element to delete from the list.
108 : : *
109 : : * Note: list_empty() on entry does not return true after this;
110 : : * the entry is in an undefined state. It is useful for RCU-based
111 : : * lock-free traversal.
112 : : *
113 : : * In particular, it means that we can not poison the forward
114 : : * pointers that may still be used for walking the list.
115 : : *
116 : : * The caller must take whatever precautions are necessary
117 : : * (such as holding appropriate locks) to avoid racing
118 : : * with another list-mutation primitive, such as list_del_rcu()
119 : : * or list_add_rcu(), running on this same list.
120 : : * However, it is perfectly legal to run concurrently with
121 : : * the _rcu list-traversal primitives, such as
122 : : * list_for_each_entry_rcu().
123 : : *
124 : : * Note that the caller is not permitted to immediately free
125 : : * the newly deleted entry. Instead, either synchronize_rcu()
126 : : * or call_rcu() must be used to defer freeing until an RCU
127 : : * grace period has elapsed.
128 : : */
129 : : static inline void list_del_rcu(struct list_head *entry)
130 : : {
131 : : __list_del_entry(entry);
132 : 1178611 : entry->prev = LIST_POISON2;
133 : : }
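/*
 * Illustrative sketch, not part of rculist.h (and not instrumented):
 * the element removed by list_del_rcu() may still be referenced by
 * readers, so freeing it is deferred past a grace period.  It reuses
 * the hypothetical struct foo and foo_lock from the sketch above.
 */
static void foo_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct foo, rcu));
}

static void foo_remove(struct foo *victim)
{
	spin_lock(&foo_lock);
	list_del_rcu(&victim->list);	/* readers may still see the entry */
	spin_unlock(&foo_lock);
	call_rcu(&victim->rcu, foo_free_rcu);	/* free after a grace period */
}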
134 : :
135 : : /**
136 : : * hlist_del_init_rcu - deletes entry from hash list with re-initialization
137 : : * @n: the element to delete from the hash list.
138 : : *
139 : : * Note: hlist_unhashed() on the node returns true after this. It is
140 : : * useful for RCU-based lock-free read-side traversal if the writer side
141 : : * must know if the list entry is still hashed or already unhashed.
142 : : *
143 : : * In particular, it means that we can not poison the forward pointers
144 : : * that may still be used for walking the hash list and we can only
145 : : * zero the pprev pointer so hlist_unhashed() will return true after
146 : : * this.
147 : : *
148 : : * The caller must take whatever precautions are necessary (such as
149 : : * holding appropriate locks) to avoid racing with another
150 : : * list-mutation primitive, such as hlist_add_head_rcu() or
151 : : * hlist_del_rcu(), running on this same list. However, it is
152 : : * perfectly legal to run concurrently with the _rcu list-traversal
153 : : * primitives, such as hlist_for_each_entry_rcu().
154 : : */
155 : 135 : static inline void hlist_del_init_rcu(struct hlist_node *n)
156 : : {
157 [ + + ][ + - ][ # # ][ + + ][ # # ]: 135 : if (!hlist_unhashed(n)) {
158 : : __hlist_del(n);
159 : 131 : n->pprev = NULL;
160 : : }
161 : : }
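/*
 * Illustrative sketch, not part of rculist.h (and not instrumented):
 * because hlist_del_init_rcu() zeroes ->pprev, the update side can
 * later use hlist_unhashed() to ask whether a node is still on a
 * list.  struct bar, bar_lock and the node/rcu members are
 * hypothetical names reused by the hlist sketches below.
 */
struct bar {
	int key;
	struct hlist_node node;
	struct rcu_head rcu;	/* for deferred freeing, see the hlist_del_rcu sketch */
};
static DEFINE_SPINLOCK(bar_lock);

static void bar_unhash(struct bar *b)
{
	spin_lock(&bar_lock);
	hlist_del_init_rcu(&b->node);	/* safe even if already unhashed */
	spin_unlock(&bar_lock);
}

static bool bar_is_hashed(struct bar *b)
{
	/* Update-side check; meaningful only because ->pprev was zeroed. */
	return !hlist_unhashed(&b->node);
}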
162 : :
163 : : /**
164 : : * list_replace_rcu - replace old entry by new one
165 : : * @old : the element to be replaced
166 : : * @new : the new element to insert
167 : : *
168 : : * The @old entry will be replaced with the @new entry atomically.
169 : : * Note: @old should not be empty.
170 : : */
171 : : static inline void list_replace_rcu(struct list_head *old,
172 : : struct list_head *new)
173 : : {
174 : 0 : new->next = old->next;
175 : 0 : new->prev = old->prev;
176 : 0 : rcu_assign_pointer(list_next_rcu(new->prev), new);
177 : 0 : new->next->prev = new;
178 : 0 : old->prev = LIST_POISON2;
179 : : }
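/*
 * Illustrative sketch, not part of rculist.h (and not instrumented):
 * replacing an element with an updated copy while readers may be
 * traversing the list.  struct foo, foo_lock and foo_free_rcu() are
 * the hypothetical names used in the earlier sketches.
 */
static int foo_update(struct foo *old, int new_key)
{
	struct foo *new = kmalloc(sizeof(*new), GFP_KERNEL);

	if (!new)
		return -ENOMEM;
	new->key = new_key;

	spin_lock(&foo_lock);
	list_replace_rcu(&old->list, &new->list);	/* readers see old or new, never neither */
	spin_unlock(&foo_lock);

	call_rcu(&old->rcu, foo_free_rcu);	/* old copy freed after readers are done */
	return 0;
}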
180 : :
181 : : /**
182 : : * list_splice_init_rcu - splice an RCU-protected list into an existing list.
183 : : * @list: the RCU-protected list to splice
184 : : * @head: the place in the list to splice the first list into
185 : : * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
186 : : *
187 : : * @head can be RCU-read traversed concurrently with this function.
188 : : *
189 : : * Note that this function blocks.
190 : : *
191 : : * Important note: the caller must take whatever action is necessary to
192 : : * prevent any other updates to @head. In principle, it is possible
193 : : * to modify the list as soon as sync() begins execution.
194 : : * If this sort of thing becomes necessary, an alternative version
195 : : * based on call_rcu() could be created. But only if -really-
196 : : * needed -- there is no shortage of RCU API members.
197 : : */
198 : : static inline void list_splice_init_rcu(struct list_head *list,
199 : : struct list_head *head,
200 : : void (*sync)(void))
201 : : {
202 : : struct list_head *first = list->next;
203 : 0 : struct list_head *last = list->prev;
204 : : struct list_head *at = head->next;
205 : :
206 [ # # ]: 0 : if (list_empty(list))
207 : : return;
208 : :
209 : : /*
210 : : * "first" and "last" now track the source list's contents, so the
211 : : * list itself can be reinitialized. RCU readers still have access to
212 : : * it, so we must use INIT_LIST_HEAD_RCU() instead of INIT_LIST_HEAD().
213 : : */
214 : :
215 : : INIT_LIST_HEAD_RCU(list);
216 : :
217 : : /*
218 : : * At this point, the list body still points to the source list.
219 : : * Wait for any readers to finish using the list before splicing
220 : : * the list body into the new list. Any new readers will see
221 : : * an empty list.
222 : : */
223 : :
224 : 0 : sync();
225 : :
226 : : /*
227 : : * Readers are finished with the source list, so perform splice.
228 : : * The order is important if the new list is global and accessible
229 : : * to concurrent RCU readers. Note that RCU readers are not
230 : : * permitted to traverse the prev pointers without excluding
231 : : * this function.
232 : : */
233 : :
234 : 0 : last->next = at;
235 : 0 : rcu_assign_pointer(list_next_rcu(head), first);
236 : 0 : first->prev = head;
237 : 0 : at->prev = last;
238 : : }
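/*
 * Illustrative sketch, not part of rculist.h (and not instrumented):
 * draining a private but RCU-visible pending list into a global list.
 * synchronize_rcu is passed as the sync callback, so this may only be
 * called from a context that is allowed to block.  pending_list,
 * global_list and update_mutex are hypothetical.
 */
static LIST_HEAD(pending_list);
static LIST_HEAD(global_list);
static DEFINE_MUTEX(update_mutex);

static void drain_pending(void)
{
	mutex_lock(&update_mutex);	/* exclude other updaters of both lists */
	list_splice_init_rcu(&pending_list, &global_list, synchronize_rcu);
	mutex_unlock(&update_mutex);
}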
239 : :
240 : : /**
241 : : * list_entry_rcu - get the struct for this entry
242 : : * @ptr: the &struct list_head pointer.
243 : : * @type: the type of the struct this is embedded in.
244 : : * @member: the name of the list_head within the struct.
245 : : *
246 : : * This primitive may safely run concurrently with the _rcu list-mutation
247 : : * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
248 : : */
249 : : #define list_entry_rcu(ptr, type, member) \
250 : : ({typeof (*ptr) __rcu *__ptr = (typeof (*ptr) __rcu __force *)ptr; \
251 : : container_of((typeof(ptr))rcu_dereference_raw(__ptr), type, member); \
252 : : })
253 : :
254 : : /**
255 : : * Where are list_empty_rcu() and list_first_entry_rcu()?
256 : : *
257 : : * Implementing those functions following their counterparts list_empty() and
258 : : * list_first_entry() is not advisable because they lead to subtle race
259 : : * conditions as the following snippet shows:
260 : : *
261 : : * if (!list_empty_rcu(mylist)) {
262 : : * struct foo *bar = list_first_entry_rcu(mylist, struct foo, list_member);
263 : : * do_something(bar);
264 : : * }
265 : : *
266 : : * The list may not be empty when list_empty_rcu checks it, but it may be when
267 : : * list_first_entry_rcu rereads the ->next pointer.
268 : : *
269 : : * Rereading the ->next pointer is not a problem for list_empty() and
270 : : * list_first_entry() because they would be protected by a lock that blocks
271 : : * writers.
272 : : *
273 : : * See list_first_or_null_rcu for an alternative.
274 : : */
275 : :
276 : : /**
277 : : * list_first_or_null_rcu - get the first element from a list
278 : : * @ptr: the list head to take the element from.
279 : : * @type: the type of the struct this is embedded in.
280 : : * @member: the name of the list_head within the struct.
281 : : *
282 : : * Note that if the list is empty, it returns NULL.
283 : : *
284 : : * This primitive may safely run concurrently with the _rcu list-mutation
285 : : * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
286 : : */
287 : : #define list_first_or_null_rcu(ptr, type, member) \
288 : : ({struct list_head *__ptr = (ptr); \
289 : : struct list_head *__next = ACCESS_ONCE(__ptr->next); \
290 : : likely(__ptr != __next) ? \
291 : : list_entry_rcu(__next, type, member) : NULL; \
292 : : })
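/*
 * Illustrative sketch, not part of rculist.h (and not instrumented):
 * the emptiness check and the fetch of the first entry are a single
 * read of ->next, so the race described above cannot occur.  It reuses
 * the hypothetical struct foo and foo_list from earlier sketches; the
 * entry is valid only inside the RCU read-side critical section.
 */
static int foo_peek_first_key(void)
{
	struct foo *first;
	int key = -1;

	rcu_read_lock();
	first = list_first_or_null_rcu(&foo_list, struct foo, list);
	if (first)
		key = first->key;
	rcu_read_unlock();
	return key;
}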
293 : :
294 : : /**
295 : : * list_for_each_entry_rcu - iterate over rcu list of given type
296 : : * @pos: the type * to use as a loop cursor.
297 : : * @head: the head for your list.
298 : : * @member: the name of the list_head within the struct.
299 : : *
300 : : * This list-traversal primitive may safely run concurrently with
301 : : * the _rcu list-mutation primitives such as list_add_rcu()
302 : : * as long as the traversal is guarded by rcu_read_lock().
303 : : */
304 : : #define list_for_each_entry_rcu(pos, head, member) \
305 : : for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \
306 : : &pos->member != (head); \
307 : : pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
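/*
 * Illustrative sketch, not part of rculist.h (and not instrumented):
 * a lockless reader scanning the hypothetical foo_list from the
 * earlier sketches.  The cursor must not be used after
 * rcu_read_unlock() unless a reference is taken first.
 */
static bool foo_key_present(int key)
{
	struct foo *pos;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(pos, &foo_list, list) {
		if (pos->key == key) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}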
308 : :
309 : : /**
310 : : * list_for_each_entry_continue_rcu - continue iteration over list of given type
311 : : * @pos: the type * to use as a loop cursor.
312 : : * @head: the head for your list.
313 : : * @member: the name of the list_head within the struct.
314 : : *
315 : : * Continue to iterate over list of given type, continuing after
316 : : * the current position.
317 : : */
318 : : #define list_for_each_entry_continue_rcu(pos, head, member) \
319 : : for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \
320 : : &pos->member != (head); \
321 : : pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
322 : :
323 : : /**
324 : : * hlist_del_rcu - deletes entry from hash list without re-initialization
325 : : * @n: the element to delete from the hash list.
326 : : *
327 : : * Note: hlist_unhashed() on entry does not return true after this;
328 : : * the entry is in an undefined state. It is useful for RCU-based
329 : : * lock-free traversal.
330 : : *
331 : : * In particular, it means that we can not poison the forward
332 : : * pointers that may still be used for walking the hash list.
333 : : *
334 : : * The caller must take whatever precautions are necessary
335 : : * (such as holding appropriate locks) to avoid racing
336 : : * with another list-mutation primitive, such as hlist_add_head_rcu()
337 : : * or hlist_del_rcu(), running on this same list.
338 : : * However, it is perfectly legal to run concurrently with
339 : : * the _rcu list-traversal primitives, such as
340 : : * hlist_for_each_entry_rcu().
341 : : */
342 : : static inline void hlist_del_rcu(struct hlist_node *n)
343 : : {
344 : : __hlist_del(n);
345 : 4414043 : n->pprev = LIST_POISON2;
346 : : }
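/*
 * Illustrative sketch, not part of rculist.h (and not instrumented):
 * as with list_del_rcu(), the removed node may still be visible to
 * readers, so freeing is deferred; kfree_rcu() uses the rcu_head
 * embedded in the hypothetical struct bar sketched earlier.
 */
static void bar_remove(struct bar *b)
{
	spin_lock(&bar_lock);
	hlist_del_rcu(&b->node);
	spin_unlock(&bar_lock);
	kfree_rcu(b, rcu);	/* freed only after a grace period */
}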
347 : :
348 : : /**
349 : : * hlist_replace_rcu - replace old entry by new one
350 : : * @old : the element to be replaced
351 : : * @new : the new element to insert
352 : : *
353 : : * The @old entry will be replaced with the @new entry atomically.
354 : : */
355 : : static inline void hlist_replace_rcu(struct hlist_node *old,
356 : : struct hlist_node *new)
357 : : {
358 : 0 : struct hlist_node *next = old->next;
359 : :
360 : 0 : new->next = next;
361 : 0 : new->pprev = old->pprev;
362 : 0 : rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new);
363 [ # # ]: 0 : if (next)
364 : 0 : new->next->pprev = &new->next;
365 : 0 : old->pprev = LIST_POISON2;
366 : : }
367 : :
368 : : /*
369 : : * return the first or the next element in an RCU protected hlist
370 : : */
371 : : #define hlist_first_rcu(head) (*((struct hlist_node __rcu **)(&(head)->first)))
372 : : #define hlist_next_rcu(node) (*((struct hlist_node __rcu **)(&(node)->next)))
373 : : #define hlist_pprev_rcu(node) (*((struct hlist_node __rcu **)((node)->pprev)))
374 : :
375 : : /**
376 : : * hlist_add_head_rcu
377 : : * @n: the element to add to the hash list.
378 : : * @h: the list to add to.
379 : : *
380 : : * Description:
381 : : * Adds the specified element to the specified hlist,
382 : : * while permitting racing traversals.
383 : : *
384 : : * The caller must take whatever precautions are necessary
385 : : * (such as holding appropriate locks) to avoid racing
386 : : * with another list-mutation primitive, such as hlist_add_head_rcu()
387 : : * or hlist_del_rcu(), running on this same list.
388 : : * However, it is perfectly legal to run concurrently with
389 : : * the _rcu list-traversal primitives, such as
390 : : * hlist_for_each_entry_rcu(), used to prevent memory-consistency
391 : : * problems on Alpha CPUs. Regardless of the type of CPU, the
392 : : * list-traversal primitive must be guarded by rcu_read_lock().
393 : : */
394 : : static inline void hlist_add_head_rcu(struct hlist_node *n,
395 : : struct hlist_head *h)
396 : : {
397 : 4414125 : struct hlist_node *first = h->first;
398 : :
399 : 4414155 : n->next = first;
400 : 4414155 : n->pprev = &h->first;
401 : 4414155 : rcu_assign_pointer(hlist_first_rcu(h), n);
402 [ + + + + + + # # ]: 4414155 : if (first)
403 : 2229342 : first->pprev = &n->next;
404 : : }
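/*
 * Illustrative sketch, not part of rculist.h (and not instrumented):
 * inserting into one bucket of a simple RCU-protected hash table,
 * reusing the hypothetical struct bar and bar_lock from the earlier
 * sketches.  bar_table and BAR_HASH_SIZE are hypothetical as well.
 */
#define BAR_HASH_SIZE	64
static struct hlist_head bar_table[BAR_HASH_SIZE];

static void bar_insert(struct bar *b)
{
	struct hlist_head *bucket = &bar_table[b->key & (BAR_HASH_SIZE - 1)];

	spin_lock(&bar_lock);
	hlist_add_head_rcu(&b->node, bucket);	/* readers may see it immediately */
	spin_unlock(&bar_lock);
}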
405 : :
406 : : /**
407 : : * hlist_add_before_rcu
408 : : * @n: the new element to add to the hash list.
409 : : * @next: the existing element to add the new element before.
410 : : *
411 : : * Description:
412 : : * Adds the specified element to the specified hlist
413 : : * before the specified node while permitting racing traversals.
414 : : *
415 : : * The caller must take whatever precautions are necessary
416 : : * (such as holding appropriate locks) to avoid racing
417 : : * with another list-mutation primitive, such as hlist_add_head_rcu()
418 : : * or hlist_del_rcu(), running on this same list.
419 : : * However, it is perfectly legal to run concurrently with
420 : : * the _rcu list-traversal primitives, such as
421 : : * hlist_for_each_entry_rcu(), used to prevent memory-consistency
422 : : * problems on Alpha CPUs.
423 : : */
424 : : static inline void hlist_add_before_rcu(struct hlist_node *n,
425 : : struct hlist_node *next)
426 : : {
427 : 5 : n->pprev = next->pprev;
428 : 5 : n->next = next;
429 : 5 : rcu_assign_pointer(hlist_pprev_rcu(n), n);
430 : 5 : next->pprev = &n->next;
431 : : }
432 : :
433 : : /**
434 : : * hlist_add_after_rcu
435 : : * @prev: the existing element to add the new element after.
436 : : * @n: the new element to add to the hash list.
437 : : *
438 : : * Description:
439 : : * Adds the specified element to the specified hlist
440 : : * after the specified node while permitting racing traversals.
441 : : *
442 : : * The caller must take whatever precautions are necessary
443 : : * (such as holding appropriate locks) to avoid racing
444 : : * with another list-mutation primitive, such as hlist_add_head_rcu()
445 : : * or hlist_del_rcu(), running on this same list.
446 : : * However, it is perfectly legal to run concurrently with
447 : : * the _rcu list-traversal primitives, such as
448 : : * hlist_for_each_entry_rcu(), used to prevent memory-consistency
449 : : * problems on Alpha CPUs.
450 : : */
451 : : static inline void hlist_add_after_rcu(struct hlist_node *prev,
452 : : struct hlist_node *n)
453 : : {
454 : 0 : n->next = prev->next;
455 : 0 : n->pprev = &prev->next;
456 : 0 : rcu_assign_pointer(hlist_next_rcu(prev), n);
457 [ # # ]: 0 : if (n->next)
458 : 0 : n->next->pprev = &n->next;
459 : : }
460 : :
461 : : #define __hlist_for_each_rcu(pos, head) \
462 : : for (pos = rcu_dereference(hlist_first_rcu(head)); \
463 : : pos; \
464 : : pos = rcu_dereference(hlist_next_rcu(pos)))
465 : :
466 : : /**
467 : : * hlist_for_each_entry_rcu - iterate over rcu list of given type
468 : : * @pos: the type * to use as a loop cursor.
469 : : * @head: the head for your list.
470 : : * @member: the name of the hlist_node within the struct.
471 : : *
472 : : * This list-traversal primitive may safely run concurrently with
473 : : * the _rcu list-mutation primitives such as hlist_add_head_rcu()
474 : : * as long as the traversal is guarded by rcu_read_lock().
475 : : */
476 : : #define hlist_for_each_entry_rcu(pos, head, member) \
477 : : for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\
478 : : typeof(*(pos)), member); \
479 : : pos; \
480 : : pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
481 : : &(pos)->member)), typeof(*(pos)), member))
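/*
 * Illustrative sketch, not part of rculist.h (and not instrumented):
 * a lockless lookup in the hypothetical hash table sketched above.
 * The caller must hold rcu_read_lock(), and the result is valid only
 * within that read-side critical section.
 */
static struct bar *bar_lookup(int key)
{
	struct hlist_head *bucket = &bar_table[key & (BAR_HASH_SIZE - 1)];
	struct bar *b;

	hlist_for_each_entry_rcu(b, bucket, node) {
		if (b->key == key)
			return b;
	}
	return NULL;
}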
482 : :
483 : : /**
484 : : * hlist_for_each_entry_rcu_notrace - iterate over rcu list of given type (for tracing)
485 : : * @pos: the type * to use as a loop cursor.
486 : : * @head: the head for your list.
487 : : * @member: the name of the hlist_node within the struct.
488 : : *
489 : : * This list-traversal primitive may safely run concurrently with
490 : : * the _rcu list-mutation primitives such as hlist_add_head_rcu()
491 : : * as long as the traversal is guarded by rcu_read_lock().
492 : : *
493 : : * This is the same as hlist_for_each_entry_rcu() except that it does
494 : : * not do any RCU debugging or tracing.
495 : : */
496 : : #define hlist_for_each_entry_rcu_notrace(pos, head, member) \
497 : : for (pos = hlist_entry_safe(rcu_dereference_raw_notrace(hlist_first_rcu(head)),\
498 : : typeof(*(pos)), member); \
499 : : pos; \
500 : : pos = hlist_entry_safe(rcu_dereference_raw_notrace(hlist_next_rcu(\
501 : : &(pos)->member)), typeof(*(pos)), member))
502 : :
503 : : /**
504 : : * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
505 : : * @pos: the type * to use as a loop cursor.
506 : : * @head: the head for your list.
507 : : * @member: the name of the hlist_node within the struct.
508 : : *
509 : : * This list-traversal primitive may safely run concurrently with
510 : : * the _rcu list-mutation primitives such as hlist_add_head_rcu()
511 : : * as long as the traversal is guarded by rcu_read_lock().
512 : : */
513 : : #define hlist_for_each_entry_rcu_bh(pos, head, member) \
514 : : for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_first_rcu(head)),\
515 : : typeof(*(pos)), member); \
516 : : pos; \
517 : : pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(\
518 : : &(pos)->member)), typeof(*(pos)), member))
519 : :
520 : : /**
521 : : * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point
522 : : * @pos: the type * to use as a loop cursor.
523 : : * @member: the name of the hlist_node within the struct.
524 : : */
525 : : #define hlist_for_each_entry_continue_rcu(pos, member) \
526 : : for (pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
527 : : typeof(*(pos)), member); \
528 : : pos; \
529 : : pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
530 : : typeof(*(pos)), member))
531 : :
532 : : /**
533 : : * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
534 : : * @pos: the type * to use as a loop cursor.
535 : : * @member: the name of the hlist_node within the struct.
536 : : */
537 : : #define hlist_for_each_entry_continue_rcu_bh(pos, member) \
538 : : for (pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
539 : : typeof(*(pos)), member); \
540 : : pos; \
541 : : pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
542 : : typeof(*(pos)), member))
543 : :
544 : :
545 : : #endif /* __KERNEL__ */
546 : : #endif