#include <linux/atomic.h>
#include <linux/rwsem.h>
#include <linux/percpu.h>
#include <linux/wait.h>
#include <linux/lockdep.h>
#include <linux/percpu-rwsem.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/errno.h>

int __percpu_init_rwsem(struct percpu_rw_semaphore *brw,
                        const char *name, struct lock_class_key *rwsem_key)
{
        brw->fast_read_ctr = alloc_percpu(int);
        if (unlikely(!brw->fast_read_ctr))
                return -ENOMEM;

        /* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
        __init_rwsem(&brw->rw_sem, name, rwsem_key);
        atomic_set(&brw->write_ctr, 0);
        atomic_set(&brw->slow_read_ctr, 0);
        init_waitqueue_head(&brw->write_waitq);
        return 0;
}

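/*
 * Illustration only (not part of the original file): a minimal sketch of
 * the init/destroy lifecycle, assuming a hypothetical object "foo" that
 * embeds a percpu_rw_semaphore. percpu_init_rwsem() is the wrapper in
 * <linux/percpu-rwsem.h> that passes a static lockdep key to
 * __percpu_init_rwsem().
 *
 *      struct foo {
 *              struct percpu_rw_semaphore rwsem;
 *      };
 *
 *      static int foo_create(struct foo *f)
 *      {
 *              return percpu_init_rwsem(&f->rwsem);   // -ENOMEM on failure
 *      }
 *
 *      static void foo_destroy(struct foo *f)
 *      {
 *              // callers must guarantee no readers or writers remain
 *              percpu_free_rwsem(&f->rwsem);
 *      }
 */
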
void percpu_free_rwsem(struct percpu_rw_semaphore *brw)
{
        free_percpu(brw->fast_read_ctr);
        brw->fast_read_ctr = NULL; /* catch use after free bugs */
}

/*
 * This is the fast-path for down_read/up_read. It only needs to ensure
 * there is no pending writer (atomic_read(write_ctr) == 0) and inc/dec the
 * fast per-cpu counter. The writer uses synchronize_sched_expedited() to
 * serialize with the preempt-disabled section below.
 *
 * The nontrivial part is that we must guarantee acquire/release semantics
 * in two cases:
 *
 *      R_W: down_write() comes after up_read(); the writer must see all
 *           changes done by the reader
 * or
 *      W_R: down_read() comes after up_write(); the reader must see all
 *           changes done by the writer
 *
 * If this helper fails, the callers rely on the normal rw_semaphore and
 * atomic_dec_and_test(), so in that case we have the necessary barriers.
 *
 * But if it succeeds we have no barriers at all: atomic_read(write_ctr) or
 * __this_cpu_add() below can be reordered with any LOAD/STORE done by the
 * reader inside the critical section. See the comments in
 * percpu_down_write() and percpu_up_write() below.
 */
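/*
 * Illustration only (hypothetical variable X) of the R_W case above:
 * a reader on the fast path might do
 *
 *      percpu_down_read(&sem);
 *      X = 1;
 *      percpu_up_read(&sem);           // fast path: no barrier here
 *
 * and a writer that later returns from percpu_down_write(&sem) must still
 * observe X == 1. The fast path itself provides no ordering; it is the
 * synchronize_sched_expedited() in percpu_down_write() that guarantees
 * this, see point 3 in the comment there.
 */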
static bool update_fast_ctr(struct percpu_rw_semaphore *brw, unsigned int val)
{
        bool success = false;

        preempt_disable();
        if (likely(!atomic_read(&brw->write_ctr))) {
                __this_cpu_add(*brw->fast_read_ctr, val);
                success = true;
        }
        preempt_enable();

        return success;
}

70 : : * Like the normal down_read() this is not recursive, the writer can
71 : : * come after the first percpu_down_read() and create the deadlock.
72 : : *
73 : : * Note: returns with lock_is_held(brw->rw_sem) == T for lockdep,
74 : : * percpu_up_read() does rwsem_release(). This pairs with the usage
75 : : * of ->rw_sem in percpu_down/up_write().
76 : : */
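/*
 * Illustration only (hypothetical tasks A and B) of that deadlock:
 *
 *      A: percpu_down_read(&sem);      // fast path succeeds
 *      B: percpu_down_write(&sem);     // takes ->rw_sem, waits for A
 *      A: percpu_down_read(&sem);      // blocks on ->rw_sem behind B
 *
 * B waits for ->slow_read_ctr to hit zero, which needs A's first
 * percpu_up_read(); A waits for B to release ->rw_sem. Neither can
 * make progress.
 */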
void percpu_down_read(struct percpu_rw_semaphore *brw)
{
        might_sleep();
        if (likely(update_fast_ctr(brw, +1))) {
                rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 0, _RET_IP_);
                return;
        }

        down_read(&brw->rw_sem);
        atomic_inc(&brw->slow_read_ctr);
        /* avoid up_read()->rwsem_release() */
        __up_read(&brw->rw_sem);
}

void percpu_up_read(struct percpu_rw_semaphore *brw)
{
        rwsem_release(&brw->rw_sem.dep_map, 1, _RET_IP_);

        if (likely(update_fast_ctr(brw, -1)))
                return;

        /* false-positive is possible but harmless */
        if (atomic_dec_and_test(&brw->slow_read_ctr))
                wake_up_all(&brw->write_waitq);
}

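/*
 * Illustration only (hypothetical "sem" and "state"): the reader side is
 * an ordinary bracketed critical section; in the common case both calls
 * take only the per-cpu fast path in update_fast_ctr().
 *
 *      percpu_down_read(&sem);
 *      v = state;                      // read-side critical section
 *      percpu_up_read(&sem);
 */
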
static int clear_fast_ctr(struct percpu_rw_semaphore *brw)
{
        unsigned int sum = 0;
        int cpu;

        for_each_possible_cpu(cpu) {
                sum += per_cpu(*brw->fast_read_ctr, cpu);
                per_cpu(*brw->fast_read_ctr, cpu) = 0;
        }

        return sum;
}

/*
 * A writer increments ->write_ctr to force the readers to switch to the
 * slow mode; note the atomic_read() check in update_fast_ctr().
 *
 * After that the readers can only inc/dec the slow ->slow_read_ctr counter;
 * ->fast_read_ctr is stable. Once the writer moves its sum into the slow
 * counter it represents the number of active readers. (An individual CPU's
 * ->fast_read_ctr can be negative, because a reader that migrates does the
 * +1 and the -1 on different CPUs; only the sum across all CPUs is
 * meaningful.)
 *
 * Finally the writer takes ->rw_sem for writing, which blocks new readers,
 * and then waits until the slow counter becomes zero.
 */
void percpu_down_write(struct percpu_rw_semaphore *brw)
{
        /* tell update_fast_ctr() there is a pending writer */
        atomic_inc(&brw->write_ctr);
        /*
         * 1. Ensures that write_ctr != 0 is visible to any down_read/up_read
         *    so that update_fast_ctr() can't succeed.
         *
         * 2. Ensures we see the result of every previous this_cpu_add() in
         *    update_fast_ctr().
         *
         * 3. Ensures that if any reader has exited its critical section via
         *    fast-path, it executes a full memory barrier before we return.
         *    See the R_W case in the comment above update_fast_ctr().
         */
        synchronize_sched_expedited();

        /* exclude other writers, and block the new readers completely */
        down_write(&brw->rw_sem);

        /* nobody can use fast_read_ctr, move its sum into slow_read_ctr */
        atomic_add(clear_fast_ctr(brw), &brw->slow_read_ctr);

        /* wait for all readers to complete their percpu_up_read() */
        wait_event(brw->write_waitq, !atomic_read(&brw->slow_read_ctr));
}

void percpu_up_write(struct percpu_rw_semaphore *brw)
{
        /* release the lock, but the readers can't use the fast-path */
        up_write(&brw->rw_sem);
        /*
         * Insert the barrier before the next fast-path in down_read,
         * see the W_R case in the comment above update_fast_ctr().
         */
        synchronize_sched_expedited();
        /* the last writer unblocks update_fast_ctr() */
        atomic_dec(&brw->write_ctr);
}
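
/*
 * Illustration only (hypothetical "sem" and "state"): the writer side.
 * Both percpu_down_write() and percpu_up_write() call
 * synchronize_sched_expedited(), so taking the lock for writing is
 * expensive; this primitive suits workloads where writers are rare.
 *
 *      percpu_down_write(&sem);
 *      state = new_value;              // all readers are excluded
 *      percpu_up_write(&sem);
 */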