/*
 * Functions related to interrupt-poll handling in the block layer. This
 * is similar to NAPI for network devices.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/blk-iopoll.h>
#include <linux/delay.h>

#include "blk.h"

int blk_iopoll_enabled = 1;
EXPORT_SYMBOL(blk_iopoll_enabled);

static unsigned int blk_iopoll_budget __read_mostly = 256;

static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll);

/**
 * blk_iopoll_sched - Schedule a run of the iopoll handler
 * @iop: The parent iopoll structure
 *
 * Description:
 *     Add this blk_iopoll structure to the pending poll list and trigger the
 *     raise of the blk iopoll softirq. The driver must already have gotten a
 *     successful return from blk_iopoll_sched_prep() before calling this.
 **/
void blk_iopoll_sched(struct blk_iopoll *iop)
{
	unsigned long flags;

	local_irq_save(flags);
	list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
	__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(blk_iopoll_sched);
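
/*
 * Illustrative sketch, not part of this file: a typical driver pairs
 * blk_iopoll_sched_prep() with blk_iopoll_sched() in its interrupt handler,
 * handing completion processing off to the softirq-driven poll loop
 * (assuming a non-zero return from blk_iopoll_sched_prep() means the
 * instance was successfully claimed). The "my_dev" structure and the
 * my_driver_isr()/my_dev_mask_irq() helpers are hypothetical names used
 * only for illustration.
 *
 *	static irqreturn_t my_driver_isr(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *
 *		if (blk_iopoll_sched_prep(&dev->iopoll)) {
 *			my_dev_mask_irq(dev);
 *			blk_iopoll_sched(&dev->iopoll);
 *		}
 *
 *		return IRQ_HANDLED;
 *	}
 */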

/**
 * __blk_iopoll_complete - Mark this @iop as un-polled again
 * @iop: The parent iopoll structure
 *
 * Description:
 *     See blk_iopoll_complete(). This function must be called with interrupts
 *     disabled.
 **/
void __blk_iopoll_complete(struct blk_iopoll *iop)
{
	list_del(&iop->list);
	smp_mb__before_clear_bit();
	clear_bit_unlock(IOPOLL_F_SCHED, &iop->state);
}
EXPORT_SYMBOL(__blk_iopoll_complete);

/**
 * blk_iopoll_complete - Mark this @iop as un-polled again
 * @iop: The parent iopoll structure
 *
 * Description:
 *     If a driver consumes less than the assigned budget in its run of the
 *     iopoll handler, it'll end the polled mode by calling this function. The
 *     iopoll handler will not be invoked again before blk_iopoll_sched_prep()
 *     is called.
 **/
void blk_iopoll_complete(struct blk_iopoll *iop)
{
	unsigned long flags;

	local_irq_save(flags);
	__blk_iopoll_complete(iop);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(blk_iopoll_complete);
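
/*
 * Illustrative sketch, not part of this file: a poll handler completes up
 * to @budget commands per invocation and calls blk_iopoll_complete() once
 * it runs out of work before the budget is exhausted. "my_dev",
 * my_dev_complete_one() and my_dev_unmask_irq() are hypothetical helpers.
 *
 *	static int my_driver_poll(struct blk_iopoll *iop, int budget)
 *	{
 *		struct my_dev *dev = container_of(iop, struct my_dev, iopoll);
 *		int done = 0;
 *
 *		while (done < budget && my_dev_complete_one(dev))
 *			done++;
 *
 *		if (done < budget) {
 *			blk_iopoll_complete(iop);
 *			my_dev_unmask_irq(dev);
 *		}
 *
 *		return done;
 *	}
 */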

static void blk_iopoll_softirq(struct softirq_action *h)
{
	struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
	int rearm = 0, budget = blk_iopoll_budget;
	unsigned long start_time = jiffies;

	local_irq_disable();

	while (!list_empty(list)) {
		struct blk_iopoll *iop;
		int work, weight;

		/*
		 * If the softirq window is exhausted then punt.
		 */
		if (budget <= 0 || time_after(jiffies, start_time)) {
			rearm = 1;
			break;
		}

		local_irq_enable();

		/* Even though interrupts have been re-enabled, this
		 * access is safe because interrupts can only add new
		 * entries to the tail of this list, and only ->poll()
		 * calls can remove this head entry from the list.
		 */
		iop = list_entry(list->next, struct blk_iopoll, list);

		weight = iop->weight;
		work = 0;
		if (test_bit(IOPOLL_F_SCHED, &iop->state))
			work = iop->poll(iop, weight);

		budget -= work;

		local_irq_disable();

		/*
		 * Drivers must not modify the iopoll state if they consume
		 * their assigned weight (or more; some drivers can't easily
		 * just stop processing, they have to complete an entire mask
		 * of commands). In such cases this code still "owns" the
		 * iopoll instance and therefore can move the instance around
		 * on the list at will.
		 */
		if (work >= weight) {
			if (blk_iopoll_disable_pending(iop))
				__blk_iopoll_complete(iop);
			else
				list_move_tail(&iop->list, list);
		}
	}

	if (rearm)
		__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);

	local_irq_enable();
}

/**
 * blk_iopoll_disable - Disable iopoll on this @iop
 * @iop: The parent iopoll structure
 *
 * Description:
 *     Disable io polling and wait for any pending callbacks to have completed.
 **/
void blk_iopoll_disable(struct blk_iopoll *iop)
{
	set_bit(IOPOLL_F_DISABLE, &iop->state);
	while (test_and_set_bit(IOPOLL_F_SCHED, &iop->state))
		msleep(1);
	clear_bit(IOPOLL_F_DISABLE, &iop->state);
}
EXPORT_SYMBOL(blk_iopoll_disable);
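
/*
 * Illustrative sketch, not part of this file: on teardown a driver calls
 * blk_iopoll_disable() before releasing its interrupt and freeing the
 * containing structure, so no poll callback can still be in flight.
 * "my_dev" and its fields are hypothetical.
 *
 *	static void my_driver_shutdown(struct my_dev *dev)
 *	{
 *		blk_iopoll_disable(&dev->iopoll);
 *		free_irq(dev->irq, dev);
 *	}
 */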

/**
 * blk_iopoll_enable - Enable iopoll on this @iop
 * @iop: The parent iopoll structure
 *
 * Description:
 *     Enable iopoll on this @iop. Note that the handler run will not be
 *     scheduled; this function only marks the instance as active.
 **/
void blk_iopoll_enable(struct blk_iopoll *iop)
{
	BUG_ON(!test_bit(IOPOLL_F_SCHED, &iop->state));
	smp_mb__before_clear_bit();
	clear_bit_unlock(IOPOLL_F_SCHED, &iop->state);
}
EXPORT_SYMBOL(blk_iopoll_enable);

/**
 * blk_iopoll_init - Initialize this @iop
 * @iop: The parent iopoll structure
 * @weight: The default weight (or command completion budget)
 * @poll_fn: The handler to invoke
 *
 * Description:
 *     Initialize this blk_iopoll structure. Before being actively used, the
 *     driver must call blk_iopoll_enable().
 **/
void blk_iopoll_init(struct blk_iopoll *iop, int weight, blk_iopoll_fn *poll_fn)
{
	memset(iop, 0, sizeof(*iop));
	INIT_LIST_HEAD(&iop->list);
	iop->weight = weight;
	iop->poll = poll_fn;
	set_bit(IOPOLL_F_SCHED, &iop->state);
}
EXPORT_SYMBOL(blk_iopoll_init);
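
/*
 * Illustrative sketch, not part of this file: setup typically happens at
 * probe time, with blk_iopoll_enable() arming the instance once the driver
 * is ready to take completions. "my_dev", MY_IOPOLL_WEIGHT and
 * my_driver_poll() are hypothetical.
 *
 *	static void my_driver_setup_iopoll(struct my_dev *dev)
 *	{
 *		blk_iopoll_init(&dev->iopoll, MY_IOPOLL_WEIGHT, my_driver_poll);
 *		blk_iopoll_enable(&dev->iopoll);
 *	}
 */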

static int blk_iopoll_cpu_notify(struct notifier_block *self,
				 unsigned long action, void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		int cpu = (unsigned long) hcpu;

		local_irq_disable();
		list_splice_init(&per_cpu(blk_cpu_iopoll, cpu),
				 this_cpu_ptr(&blk_cpu_iopoll));
		__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
		local_irq_enable();
	}

	return NOTIFY_OK;
}

static struct notifier_block blk_iopoll_cpu_notifier = {
	.notifier_call = blk_iopoll_cpu_notify,
};

static __init int blk_iopoll_setup(void)
{
	int i;

	for_each_possible_cpu(i)
		INIT_LIST_HEAD(&per_cpu(blk_cpu_iopoll, i));

	open_softirq(BLOCK_IOPOLL_SOFTIRQ, blk_iopoll_softirq);
	register_hotcpu_notifier(&blk_iopoll_cpu_notifier);
	return 0;
}
subsys_initcall(blk_iopoll_setup);