Branch data Line data Source code
1 : : /*
2 : : * Functions to sequence FLUSH and FUA writes.
3 : : *
4 : : * Copyright (C) 2011 Max Planck Institute for Gravitational Physics
5 : : * Copyright (C) 2011 Tejun Heo <tj@kernel.org>
6 : : *
7 : : * This file is released under the GPLv2.
8 : : *
9 : : * REQ_{FLUSH|FUA} requests are decomposed into sequences consisting of
10 : : * three optional steps - PREFLUSH, DATA and POSTFLUSH - according to the
11 : : * request properties and hardware capability.
12 : : *
13 : : * If a request doesn't have data, only REQ_FLUSH makes sense, which
14 : : * indicates a simple flush request. If there is data, REQ_FLUSH indicates
15 : : * that the device cache should be flushed before the data is executed, and
16 : : * REQ_FUA means that the data must be on non-volatile media on request
17 : : * completion.
18 : : *
19 : : * If the device doesn't have a writeback cache, FLUSH and FUA don't make
20 : : * any difference. The requests are either completed immediately if there's
21 : : * no data or executed as normal requests otherwise.
22 : : *
23 : : * If the device has a writeback cache and supports FUA, REQ_FLUSH is
24 : : * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
25 : : *
26 : : * If the device has a writeback cache and doesn't support FUA, REQ_FLUSH
27 : : * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
28 : : *
29 : : * The actual execution of flush is double buffered. Whenever a request
30 : : * needs to execute PRE or POSTFLUSH, it queues at
31 : : * q->flush_queue[q->flush_pending_idx]. Once certain criteria are met, a
32 : : * flush is issued and the pending_idx is toggled. When the flush
33 : : * completes, all the requests which were pending proceed to the next
34 : : * step. This allows arbitrary merging of different types of FLUSH/FUA
35 : : * requests (a worked example follows this comment block).
36 : : *
37 : : * Currently, the following conditions are used to determine when to issue
38 : : * flush.
39 : : *
40 : : * C1. At any given time, only one flush shall be in progress. This makes
41 : : * double buffering sufficient.
42 : : *
43 : : * C2. Flush is deferred if any request is executing DATA of its sequence.
44 : : * This avoids issuing separate POSTFLUSHes for requests which shared
45 : : * PREFLUSH.
46 : : *
47 : : * C3. The second condition is ignored if there is a request which has
48 : : * waited longer than FLUSH_PENDING_TIMEOUT. This is to avoid
49 : : * starvation in the unlikely case where there is a continuous stream of
50 : : * FUA (without FLUSH) requests.
51 : : *
52 : : * For devices which support FUA, it isn't clear whether C2 (and thus C3)
53 : : * is beneficial.
54 : : *
55 : : * Note that a sequenced FLUSH/FUA request with DATA is completed twice:
56 : : * once while executing DATA and again after the whole sequence is
57 : : * complete. The first completion updates the contained bio but doesn't
58 : : * finish it so that the bio submitter is notified only after the whole
59 : : * sequence is complete. This is implemented by testing REQ_FLUSH_SEQ in
60 : : * req_bio_endio().
61 : : *
62 : : * The above peculiarity requires that each FLUSH/FUA request has only one
63 : : * bio attached to it, which is guaranteed as they aren't allowed to be
64 : : * merged in the usual way.
65 : : */
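/*
 * For example: after blk_kick_flush() issues a flush on behalf of the
 * waiters on flush_queue[0], flush_pending_idx is toggled to 1 and new
 * PRE/POSTFLUSH waiters accumulate on flush_queue[1] while that flush is
 * in flight (flush_running_idx == 0).  When it completes, the running
 * index is toggled, every request the flush served proceeds to its next
 * step, and a single new flush can then cover everything that queued up
 * on flush_queue[1] in the meantime.
 */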
66 : :
67 : : #include <linux/kernel.h>
68 : : #include <linux/module.h>
69 : : #include <linux/bio.h>
70 : : #include <linux/blkdev.h>
71 : : #include <linux/gfp.h>
72 : : #include <linux/blk-mq.h>
73 : :
74 : : #include "blk.h"
75 : : #include "blk-mq.h"
76 : :
77 : : /* FLUSH/FUA sequences */
78 : : enum {
79 : : REQ_FSEQ_PREFLUSH = (1 << 0), /* pre-flushing in progress */
80 : : REQ_FSEQ_DATA = (1 << 1), /* data write in progress */
81 : : REQ_FSEQ_POSTFLUSH = (1 << 2), /* post-flushing in progress */
82 : : REQ_FSEQ_DONE = (1 << 3),
83 : :
84 : : REQ_FSEQ_ACTIONS = REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
85 : : REQ_FSEQ_POSTFLUSH,
86 : :
87 : : /*
88 : : * If flush has been pending longer than the following timeout,
89 : : * it's issued even if flush_data requests are still in flight.
90 : : */
91 : : FLUSH_PENDING_TIMEOUT = 5 * HZ,
92 : : };
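/*
 * Illustrative sketch (never compiled): the decomposition rules from the
 * top of this file expressed over plain booleans.  The name
 * example_flush_steps and the dev_wb_cache/dev_fua parameters are made up
 * for the example; the real decision is made by blk_flush_policy() below
 * from q->flush_flags and rq->cmd_flags.
 */
#if 0
static unsigned int example_flush_steps(bool dev_wb_cache, bool dev_fua,
					bool rq_has_data, bool rq_flush,
					bool rq_fua)
{
	unsigned int steps = 0;

	if (rq_has_data)
		steps |= REQ_FSEQ_DATA;
	if (!dev_wb_cache)
		return steps;			/* FLUSH/FUA are no-ops */
	if (rq_flush)
		steps |= REQ_FSEQ_PREFLUSH;	/* flush cache before DATA */
	if (rq_fua && !dev_fua)
		steps |= REQ_FSEQ_POSTFLUSH;	/* emulate FUA via POSTFLUSH */
	return steps;
}
#endif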
93 : :
94 : : static bool blk_kick_flush(struct request_queue *q);
95 : :
96 : 0 : static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
97 : : {
98 : : unsigned int policy = 0;
99 : :
100 [ # # ]: 0 : if (blk_rq_sectors(rq))
101 : : policy |= REQ_FSEQ_DATA;
102 : :
103 [ # # ]: 0 : if (fflags & REQ_FLUSH) {
104 [ # # ]: 0 : if (rq->cmd_flags & REQ_FLUSH)
105 : 0 : policy |= REQ_FSEQ_PREFLUSH;
106 [ # # ][ # # ]: 0 : if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
107 : 0 : policy |= REQ_FSEQ_POSTFLUSH;
108 : : }
109 : 0 : return policy;
110 : : }
111 : :
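/*
 * The next step of a flush sequence is the lowest REQ_FSEQ_* bit not yet
 * set in ->flush.seq, found via ffz().  E.g. once PREFLUSH and DATA are
 * recorded (seq == 0x3), ffz() returns 2 and the next step is
 * REQ_FSEQ_POSTFLUSH (1 << 2); once all three action bits are set the
 * result is REQ_FSEQ_DONE.
 */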
112 : 0 : static unsigned int blk_flush_cur_seq(struct request *rq)
113 : : {
114 : 0 : return 1 << ffz(rq->flush.seq);
115 : : }
116 : :
117 : 0 : static void blk_flush_restore_request(struct request *rq)
118 : : {
119 : : /*
120 : : * After flush data completion, @rq->bio is %NULL but we need to
121 : : * complete the bio again. @rq->biotail is guaranteed to equal the
122 : : * original @rq->bio. Restore it.
123 : : */
124 : 0 : rq->bio = rq->biotail;
125 : :
126 : : /* make @rq a normal request */
127 : 0 : rq->cmd_flags &= ~REQ_FLUSH_SEQ;
128 : 0 : rq->end_io = rq->flush.saved_end_io;
129 : :
130 : : blk_clear_rq_complete(rq);
131 : 0 : }
132 : :
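/*
 * For blk-mq the DATA step is not inserted directly from
 * blk_flush_complete_seq(), which runs under q->mq_flush_lock and may be
 * called from completion context.  Instead the request is handed to
 * kblockd and re-issued from the work item via blk_mq_run_request().
 */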
133 : 0 : static void mq_flush_data_run(struct work_struct *work)
134 : : {
135 : : struct request *rq;
136 : :
137 : 0 : rq = container_of(work, struct request, mq_flush_data);
138 : :
139 : 0 : memset(&rq->csd, 0, sizeof(rq->csd));
140 : 0 : blk_mq_run_request(rq, true, false);
141 : 0 : }
142 : :
143 : : static void blk_mq_flush_data_insert(struct request *rq)
144 : : {
145 : 0 : INIT_WORK(&rq->mq_flush_data, mq_flush_data_run);
146 : 0 : kblockd_schedule_work(rq->q, &rq->mq_flush_data);
147 : : }
148 : :
149 : : /**
150 : : * blk_flush_complete_seq - complete flush sequence
151 : : * @rq: FLUSH/FUA request being sequenced
152 : : * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
153 : : * @error: whether an error occurred
154 : : *
155 : : * @rq just completed @seq part of its flush sequence, record the
156 : : * completion and trigger the next step.
157 : : *
158 : : * CONTEXT:
159 : : * spin_lock_irq(q->queue_lock or q->mq_flush_lock)
160 : : *
161 : : * RETURNS:
162 : : * %true if requests were added to the dispatch queue, %false otherwise.
163 : : */
164 : 0 : static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
165 : : int error)
166 : : {
167 : 0 : struct request_queue *q = rq->q;
168 : 0 : struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
169 : : bool queued = false, kicked;
170 : :
171 [ # # ]: 0 : BUG_ON(rq->flush.seq & seq);
172 : 0 : rq->flush.seq |= seq;
173 : :
174 [ # # ]: 0 : if (likely(!error))
175 : 0 : seq = blk_flush_cur_seq(rq);
176 : : else
177 : : seq = REQ_FSEQ_DONE;
178 : :
179 [ # # # # ]: 0 : switch (seq) {
180 : : case REQ_FSEQ_PREFLUSH:
181 : : case REQ_FSEQ_POSTFLUSH:
182 : : /* queue for flush */
183 [ # # ]: 0 : if (list_empty(pending))
184 : 0 : q->flush_pending_since = jiffies;
185 : 0 : list_move_tail(&rq->flush.list, pending);
186 : : break;
187 : :
188 : : case REQ_FSEQ_DATA:
189 : 0 : list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
190 [ # # ]: 0 : if (q->mq_ops)
191 : : blk_mq_flush_data_insert(rq);
192 : : else {
193 : 0 : list_add(&rq->queuelist, &q->queue_head);
194 : : queued = true;
195 : : }
196 : : break;
197 : :
198 : : case REQ_FSEQ_DONE:
199 : : /*
200 : : * @rq was previously adjusted by blk_insert_flush() for
201 : : * flush sequencing and may already have gone through the
202 : : * flush data request completion path. Restore @rq for
203 : : * normal completion and end it.
204 : : */
205 [ # # ]: 0 : BUG_ON(!list_empty(&rq->queuelist));
206 : 0 : list_del_init(&rq->flush.list);
207 : 0 : blk_flush_restore_request(rq);
208 [ # # ]: 0 : if (q->mq_ops)
209 : 0 : blk_mq_end_io(rq, error);
210 : : else
211 : 0 : __blk_end_request_all(rq, error);
212 : : break;
213 : :
214 : : default:
215 : 0 : BUG();
216 : : }
217 : :
218 : 0 : kicked = blk_kick_flush(q);
219 : : /* for blk-mq, mq_run_flush() will run the queue */
220 [ # # ]: 0 : if (q->mq_ops)
221 : : return queued;
222 : 0 : return kicked | queued;
223 : : }
224 : :
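/*
 * Completion handler for the flush request issued by blk_kick_flush().
 * Toggles the running index and pushes every waiter that the completed
 * flush served to its next sequence step.
 */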
225 : 0 : static void flush_end_io(struct request *flush_rq, int error)
226 : : {
227 : 0 : struct request_queue *q = flush_rq->q;
228 : : struct list_head *running;
229 : : bool queued = false;
230 : 0 : struct request *rq, *n;
231 : : unsigned long flags = 0;
232 : :
233 [ # # ]: 0 : if (q->mq_ops) {
234 : 0 : blk_mq_free_request(flush_rq);
235 : 0 : spin_lock_irqsave(&q->mq_flush_lock, flags);
236 : : }
237 : 0 : running = &q->flush_queue[q->flush_running_idx];
238 [ # # ]: 0 : BUG_ON(q->flush_pending_idx == q->flush_running_idx);
239 : :
240 : : /* account completion of the flush request */
241 : 0 : q->flush_running_idx ^= 1;
242 : :
243 [ # # ]: 0 : if (!q->mq_ops)
244 : 0 : elv_completed_request(q, flush_rq);
245 : :
246 : : /* and push the waiting requests to the next stage */
247 [ # # ]: 0 : list_for_each_entry_safe(rq, n, running, flush.list) {
248 : 0 : unsigned int seq = blk_flush_cur_seq(rq);
249 : :
250 [ # # ]: 0 : BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
251 : 0 : queued |= blk_flush_complete_seq(rq, seq, error);
252 : : }
253 : :
254 : : /*
255 : : * Kick the queue to avoid a stall in two cases:
256 : : * 1. Moving a request silently to an empty queue_head may stall the
257 : : * queue.
258 : : * 2. When a flush request is running in a non-queueable queue, the
259 : : * queue is held. Restart the queue after the flush request has
260 : : * finished to avoid a stall.
261 : : * This function is called from the request completion path and calling
262 : : * directly into request_fn may confuse the driver. Always use
263 : : * kblockd.
264 : : */
265 [ # # ][ # # ]: 0 : if (queued || q->flush_queue_delayed) {
266 [ # # ]: 0 : if (!q->mq_ops)
267 : 0 : blk_run_queue_async(q);
268 : : else
269 : : /*
270 : : * This can be optimized to only run queues with requests
271 : : * queued if necessary.
272 : : */
273 : 0 : blk_mq_run_queues(q, true);
274 : : }
275 : 0 : q->flush_queue_delayed = 0;
276 [ # # ]: 0 : if (q->mq_ops)
277 : : spin_unlock_irqrestore(&q->mq_flush_lock, flags);
278 : 0 : }
279 : :
280 : 0 : static void mq_flush_work(struct work_struct *work)
281 : : {
282 : : struct request_queue *q;
283 : : struct request *rq;
284 : :
285 : 0 : q = container_of(work, struct request_queue, mq_flush_work);
286 : :
287 : : /* We don't need to set REQ_FLUSH_SEQ; it's only set for consistency */
288 : 0 : rq = blk_mq_alloc_request(q, WRITE_FLUSH|REQ_FLUSH_SEQ,
289 : : __GFP_WAIT|GFP_ATOMIC, true);
290 : 0 : rq->cmd_type = REQ_TYPE_FS;
291 : 0 : rq->end_io = flush_end_io;
292 : :
293 : 0 : blk_mq_run_request(rq, true, false);
294 : 0 : }
295 : :
296 : : /*
297 : : * We can't directly use q->flush_rq, because it doesn't have a tag and is not
298 : : * in hctx->rqs[], so we must allocate a new request. Since we can't sleep
299 : : * here, offload the work to a workqueue.
300 : : *
301 : : * Note: we assume that a flush request finished in any hardware queue will
302 : : * flush the whole disk cache.
303 : : */
304 : : static void mq_run_flush(struct request_queue *q)
305 : : {
306 : 0 : kblockd_schedule_work(q, &q->mq_flush_work);
307 : : }
308 : :
309 : : /**
310 : : * blk_kick_flush - consider issuing flush request
311 : : * @q: request_queue being kicked
312 : : *
313 : : * Flush related states of @q have changed, consider issuing flush request.
314 : : * Please read the comment at the top of this file for more info.
315 : : *
316 : : * CONTEXT:
317 : : * spin_lock_irq(q->queue_lock or q->mq_flush_lock)
318 : : *
319 : : * RETURNS:
320 : : * %true if flush was issued, %false otherwise.
321 : : */
322 : 0 : static bool blk_kick_flush(struct request_queue *q)
323 : : {
324 : 0 : struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
325 : : struct request *first_rq =
326 : 0 : list_first_entry(pending, struct request, flush.list);
327 : :
328 : : /* C1 described at the top of this file */
329 [ # # ][ # # ]: 0 : if (q->flush_pending_idx != q->flush_running_idx || list_empty(pending))
330 : : return false;
331 : :
332 : : /* C2 and C3 */
333 [ # # ]: 0 : if (!list_empty(&q->flush_data_in_flight) &&
334 [ # # ]: 0 : time_before(jiffies,
335 : : q->flush_pending_since + FLUSH_PENDING_TIMEOUT))
336 : : return false;
337 : :
338 : : /*
339 : : * Issue flush and toggle pending_idx. This makes pending_idx
340 : : * different from running_idx, which means flush is in flight.
341 : : */
342 : 0 : q->flush_pending_idx ^= 1;
343 [ # # ]: 0 : if (q->mq_ops) {
344 : : mq_run_flush(q);
345 : 0 : return true;
346 : : }
347 : :
348 : 0 : blk_rq_init(q, &q->flush_rq);
349 : 0 : q->flush_rq.cmd_type = REQ_TYPE_FS;
350 : 0 : q->flush_rq.cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
351 : 0 : q->flush_rq.rq_disk = first_rq->rq_disk;
352 : 0 : q->flush_rq.end_io = flush_end_io;
353 : :
354 : 0 : list_add_tail(&q->flush_rq.queuelist, &q->queue_head);
355 : 0 : return true;
356 : : }
357 : :
358 : 0 : static void flush_data_end_io(struct request *rq, int error)
359 : : {
360 : 0 : struct request_queue *q = rq->q;
361 : :
362 : : /*
363 : : * After populating an empty queue, kick it to avoid stall. Read
364 : : * the comment in flush_end_io().
365 : : */
366 [ # # ]: 0 : if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
367 : 0 : blk_run_queue_async(q);
368 : 0 : }
369 : :
370 : 0 : static void mq_flush_data_end_io(struct request *rq, int error)
371 : : {
372 : 0 : struct request_queue *q = rq->q;
373 : : struct blk_mq_hw_ctx *hctx;
374 : : struct blk_mq_ctx *ctx;
375 : : unsigned long flags;
376 : :
377 : 0 : ctx = rq->mq_ctx;
378 : 0 : hctx = q->mq_ops->map_queue(q, ctx->cpu);
379 : :
380 : : /*
381 : : * After populating an empty queue, kick it to avoid stall. Read
382 : : * the comment in flush_end_io().
383 : : */
384 : 0 : spin_lock_irqsave(&q->mq_flush_lock, flags);
385 [ # # ]: 0 : if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
386 : 0 : blk_mq_run_hw_queue(hctx, true);
387 : : spin_unlock_irqrestore(&q->mq_flush_lock, flags);
388 : 0 : }
389 : :
390 : : /**
391 : : * blk_insert_flush - insert a new FLUSH/FUA request
392 : : * @rq: request to insert
393 : : *
394 : : * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions
395 : : * or from __blk_mq_run_hw_queue() to dispatch a request.
396 : : * @rq is being submitted. Analyze what needs to be done and put it on the
397 : : * right queue.
398 : : *
399 : : * CONTEXT:
400 : : * spin_lock_irq(q->queue_lock) in !mq case
401 : : */
402 : 0 : void blk_insert_flush(struct request *rq)
403 : : {
404 : 0 : struct request_queue *q = rq->q;
405 : 0 : unsigned int fflags = q->flush_flags; /* may change, cache */
406 : 0 : unsigned int policy = blk_flush_policy(fflags, rq);
407 : :
408 : : /*
409 : : * @policy now records what operations need to be done. Adjust
410 : : * REQ_FLUSH and FUA for the driver.
411 : : */
412 : 0 : rq->cmd_flags &= ~REQ_FLUSH;
413 [ # # ]: 0 : if (!(fflags & REQ_FUA))
414 : 0 : rq->cmd_flags &= ~REQ_FUA;
415 : :
416 : : /*
417 : : * An empty flush handed down from a stacking driver may
418 : : * translate into nothing if the underlying device does not
419 : : * advertise a write-back cache. In this case, simply
420 : : * complete the request.
421 : : */
422 [ # # ]: 0 : if (!policy) {
423 [ # # ]: 0 : if (q->mq_ops)
424 : 0 : blk_mq_end_io(rq, 0);
425 : : else
426 : 0 : __blk_end_bidi_request(rq, 0, 0, 0);
427 : : return;
428 : : }
429 : :
430 [ # # ]: 0 : BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */
431 : :
432 : : /*
433 : : * If there's data but flush is not necessary, the request can be
434 : : * processed directly without going through flush machinery. Queue
435 : : * for normal execution.
436 : : */
437 [ # # ]: 0 : if ((policy & REQ_FSEQ_DATA) &&
438 : : !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
439 [ # # ]: 0 : if (q->mq_ops) {
440 : 0 : blk_mq_run_request(rq, false, true);
441 : : } else
442 : 0 : list_add_tail(&rq->queuelist, &q->queue_head);
443 : : return;
444 : : }
445 : :
446 : : /*
447 : : * @rq should go through flush machinery. Mark it part of flush
448 : : * sequence and submit for further processing.
449 : : */
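	/*
	 * Steps that @policy does not require are marked complete up front
	 * (REQ_FSEQ_ACTIONS & ~policy below), so blk_flush_complete_seq()
	 * immediately advances @rq to the first step it actually needs.
	 */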
450 : 0 : memset(&rq->flush, 0, sizeof(rq->flush));
451 : 0 : INIT_LIST_HEAD(&rq->flush.list);
452 : 0 : rq->cmd_flags |= REQ_FLUSH_SEQ;
453 : 0 : rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
454 [ # # ]: 0 : if (q->mq_ops) {
455 : 0 : rq->end_io = mq_flush_data_end_io;
456 : :
457 : : spin_lock_irq(&q->mq_flush_lock);
458 : 0 : blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
459 : : spin_unlock_irq(&q->mq_flush_lock);
460 : : return;
461 : : }
462 : 0 : rq->end_io = flush_data_end_io;
463 : :
464 : 0 : blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
465 : : }
466 : :
467 : : /**
468 : : * blk_abort_flushes - @q is being aborted, abort flush requests
469 : : * @q: request_queue being aborted
470 : : *
471 : : * To be called from elv_abort_queue(). @q is being aborted. Prepare all
472 : : * FLUSH/FUA requests for abortion.
473 : : *
474 : : * CONTEXT:
475 : : * spin_lock_irq(q->queue_lock)
476 : : */
477 : 0 : void blk_abort_flushes(struct request_queue *q)
478 : : {
479 : : struct request *rq, *n;
480 : : int i;
481 : :
482 : : /*
483 : : * Requests in flight for data are already owned by the dispatch
484 : : * queue or the device driver. Just restore for normal completion.
485 : : */
486 [ # # ]: 0 : list_for_each_entry_safe(rq, n, &q->flush_data_in_flight, flush.list) {
487 : : list_del_init(&rq->flush.list);
488 : 0 : blk_flush_restore_request(rq);
489 : : }
490 : :
491 : : /*
492 : : * We need to give away requests on flush queues. Restore for
493 : : * normal completion and put them on the dispatch queue.
494 : : */
495 [ # # ]: 0 : for (i = 0; i < ARRAY_SIZE(q->flush_queue); i++) {
496 [ # # ]: 0 : list_for_each_entry_safe(rq, n, &q->flush_queue[i],
497 : : flush.list) {
498 : : list_del_init(&rq->flush.list);
499 : 0 : blk_flush_restore_request(rq);
500 : 0 : list_add_tail(&rq->queuelist, &q->queue_head);
501 : : }
502 : : }
503 : 0 : }
504 : :
505 : : /**
506 : : * blkdev_issue_flush - queue a flush
507 : : * @bdev: blockdev to issue flush for
508 : : * @gfp_mask: memory allocation flags (for bio_alloc)
509 : : * @error_sector: error sector
510 : : *
511 : : * Description:
512 : : * Issue a flush for the block device in question. The caller can supply
513 : : * room for storing the error offset in case of a flush error, if they
514 : : * wish to. If the WAIT flag is not passed, the caller may only check
515 : : * that the request was pushed onto some internal queue for later handling.
516 : : */
517 : 0 : int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
518 : : sector_t *error_sector)
519 : : {
520 : : struct request_queue *q;
521 : : struct bio *bio;
522 : : int ret = 0;
523 : :
524 [ + + ]: 3194 : if (bdev->bd_disk == NULL)
525 : : return -ENXIO;
526 : :
527 : : q = bdev_get_queue(bdev);
528 [ + ]: 3193 : if (!q)
529 : : return -ENXIO;
530 : :
531 : : /*
532 : : * Some block devices may not have their queue correctly set up here
533 : : * (e.g. a loop device without a backing file) and so issuing a flush
534 : : * here will panic. Ensure there is a request function before issuing
535 : : * the flush.
536 : : */
537 [ + - ]: 3194 : if (!q->make_request_fn)
538 : : return -ENXIO;
539 : :
540 : : bio = bio_alloc(gfp_mask, 0);
541 : 3194 : bio->bi_bdev = bdev;
542 : :
543 : 3194 : ret = submit_bio_wait(WRITE_FLUSH, bio);
544 : :
545 : : /*
546 : : * The driver must store the error location in ->bi_sector, if
547 : : * it supports it. For non-stacked drivers, this should be
548 : : * copied from blk_rq_pos(rq).
549 : : */
550 [ - + ]: 3194 : if (error_sector)
551 : 0 : *error_sector = bio->bi_sector;
552 : :
553 : 3194 : bio_put(bio);
554 : 3192 : return ret;
555 : : }
556 : : EXPORT_SYMBOL(blkdev_issue_flush);
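/*
 * Illustrative caller sketch (not from the kernel tree): a filesystem or
 * driver that has just written metadata and wants it on stable storage
 * before proceeding might call blkdev_issue_flush() like this.  The name
 * example_sync_metadata and the my_bdev argument are made up for the
 * example.
 */
#if 0
static int example_sync_metadata(struct block_device *my_bdev)
{
	sector_t err_sector;
	int ret;

	/* Blocks until the device reports the cache flush as complete. */
	ret = blkdev_issue_flush(my_bdev, GFP_KERNEL, &err_sector);
	if (ret)
		pr_err("cache flush failed: %d\n", ret);
	return ret;
}
#endif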
557 : :
558 : 0 : void blk_mq_init_flush(struct request_queue *q)
559 : : {
560 : 0 : spin_lock_init(&q->mq_flush_lock);
561 : 0 : INIT_WORK(&q->mq_flush_work, mq_flush_work);
562 : 0 : }