Branch data Line data Source code
1 : : /*
2 : : * Copyright (C) 1991, 1992 Linus Torvalds
3 : : * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
4 : : * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
5 : : * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
6 : : * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
7 : :  *	- July 2000
8 : :  *	bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
9 : : */
10 : :
11 : : /*
12 : : * This handles all read/write requests to block devices
13 : : */
14 : : #include <linux/kernel.h>
15 : : #include <linux/module.h>
16 : : #include <linux/backing-dev.h>
17 : : #include <linux/bio.h>
18 : : #include <linux/blkdev.h>
19 : : #include <linux/blk-mq.h>
20 : : #include <linux/highmem.h>
21 : : #include <linux/mm.h>
22 : : #include <linux/kernel_stat.h>
23 : : #include <linux/string.h>
24 : : #include <linux/init.h>
25 : : #include <linux/completion.h>
26 : : #include <linux/slab.h>
27 : : #include <linux/swap.h>
28 : : #include <linux/writeback.h>
29 : : #include <linux/task_io_accounting_ops.h>
30 : : #include <linux/fault-inject.h>
31 : : #include <linux/list_sort.h>
32 : : #include <linux/delay.h>
33 : : #include <linux/ratelimit.h>
34 : : #include <linux/pm_runtime.h>
35 : :
36 : : #define CREATE_TRACE_POINTS
37 : : #include <trace/events/block.h>
38 : :
39 : : #include "blk.h"
40 : : #include "blk-cgroup.h"
41 : :
42 : : EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
43 : : EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
44 : : EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
45 : : EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
46 : :
47 : : DEFINE_IDA(blk_queue_ida);
48 : :
49 : : /*
50 : : * For the allocated request tables
51 : : */
52 : : struct kmem_cache *request_cachep = NULL;
53 : :
54 : : /*
55 : : * For queue allocation
56 : : */
57 : : struct kmem_cache *blk_requestq_cachep;
58 : :
59 : : /*
60 : : * Controlling structure to kblockd
61 : : */
62 : : static struct workqueue_struct *kblockd_workqueue;
63 : :
64 : 0 : void blk_queue_congestion_threshold(struct request_queue *q)
65 : : {
66 : : int nr;
67 : :
68 : 0 : nr = q->nr_requests - (q->nr_requests / 8) + 1;
69 [ # # ]: 0 : if (nr > q->nr_requests)
70 : 0 : nr = q->nr_requests;
71 : 0 : q->nr_congestion_on = nr;
72 : :
73 : 0 : nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
74 [ # # ]: 0 : if (nr < 1)
75 : : nr = 1;
76 : 0 : q->nr_congestion_off = nr;
77 : 0 : }
78 : :
79 : : /**
80 : : * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
81 : : * @bdev: device
82 : : *
83 : : * Locates the passed device's request queue and returns the address of its
84 : : * backing_dev_info
85 : : *
86 : : * Will return NULL if the request queue cannot be located.
87 : : */
88 : 0 : struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
89 : : {
90 : : struct backing_dev_info *ret = NULL;
91 : : struct request_queue *q = bdev_get_queue(bdev);
92 : :
93 [ # # ]: 0 : if (q)
94 : 0 : ret = &q->backing_dev_info;
95 : 0 : return ret;
96 : : }
97 : : EXPORT_SYMBOL(blk_get_backing_dev_info);
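/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a caller might use blk_get_backing_dev_info() to peek at the readahead
 * window of the queue backing a block device. "example_report_ra" is a
 * hypothetical helper; the NULL check matters because, as the kernel-doc
 * above says, the request queue may not be locatable.
 */
static void example_report_ra(struct block_device *bdev)
{
	struct backing_dev_info *bdi = blk_get_backing_dev_info(bdev);

	if (!bdi)
		return;
	pr_info("%s: readahead window is %lu pages\n", __func__, bdi->ra_pages);
}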
98 : :
99 : 0 : void blk_rq_init(struct request_queue *q, struct request *rq)
100 : : {
101 : 435066 : memset(rq, 0, sizeof(*rq));
102 : :
103 : 435066 : INIT_LIST_HEAD(&rq->queuelist);
104 : 435066 : INIT_LIST_HEAD(&rq->timeout_list);
105 : 435066 : rq->cpu = -1;
106 : 435066 : rq->q = q;
107 : 435066 : rq->__sector = (sector_t) -1;
108 : : INIT_HLIST_NODE(&rq->hash);
109 : 435066 : RB_CLEAR_NODE(&rq->rb_node);
110 : 435066 : rq->cmd = rq->__cmd;
111 : 435066 : rq->cmd_len = BLK_MAX_CDB;
112 : 435066 : rq->tag = -1;
113 : 435066 : rq->start_time = jiffies;
114 : : set_start_time_ns(rq);
115 : 435066 : rq->part = NULL;
116 : 435066 : }
117 : : EXPORT_SYMBOL(blk_rq_init);
118 : :
119 : 0 : static void req_bio_endio(struct request *rq, struct bio *bio,
120 : : unsigned int nbytes, int error)
121 : : {
122 [ - + ]: 494579 : if (error)
123 : 0 : clear_bit(BIO_UPTODATE, &bio->bi_flags);
124 [ - + ]: 494579 : else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
125 : : error = -EIO;
126 : :
127 [ - + ]: 494579 : if (unlikely(rq->cmd_flags & REQ_QUIET))
128 : 0 : set_bit(BIO_QUIET, &bio->bi_flags);
129 : :
130 : 494579 : bio_advance(bio, nbytes);
131 : :
132 : : /* don't actually finish bio if it's part of flush sequence */
133 [ + - ][ + - ]: 494579 : if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
134 : 494579 : bio_endio(bio, error);
135 : 0 : }
136 : :
137 : 0 : void blk_dump_rq_flags(struct request *rq, char *msg)
138 : : {
139 : : int bit;
140 : :
141 [ # # ]: 0 : printk(KERN_INFO "%s: dev %s: type=%x, flags=%llx\n", msg,
142 : 0 : rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
143 : : (unsigned long long) rq->cmd_flags);
144 : :
145 : 0 : printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n",
146 : : (unsigned long long)blk_rq_pos(rq),
147 : : blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
148 : 0 : printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n",
149 : : rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));
150 : :
151 [ # # ]: 0 : if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
152 : 0 : printk(KERN_INFO " cdb: ");
153 [ # # ]: 0 : for (bit = 0; bit < BLK_MAX_CDB; bit++)
154 : 0 : printk("%02x ", rq->cmd[bit]);
155 : 0 : printk("\n");
156 : : }
157 : 0 : }
158 : : EXPORT_SYMBOL(blk_dump_rq_flags);
159 : :
160 : 0 : static void blk_delay_work(struct work_struct *work)
161 : : {
162 : : struct request_queue *q;
163 : :
164 : 1174 : q = container_of(work, struct request_queue, delay_work.work);
165 : 1174 : spin_lock_irq(q->queue_lock);
166 : 1174 : __blk_run_queue(q);
167 : 1174 : spin_unlock_irq(q->queue_lock);
168 : 1174 : }
169 : :
170 : : /**
171 : : * blk_delay_queue - restart queueing after defined interval
172 : : * @q: The &struct request_queue in question
173 : : * @msecs: Delay in msecs
174 : : *
175 : : * Description:
176 : : * Sometimes queueing needs to be postponed for a little while, to allow
177 : : * resources to come back. This function will make sure that queueing is
178 : : * restarted around the specified time. Queue lock must be held.
179 : : */
180 : 0 : void blk_delay_queue(struct request_queue *q, unsigned long msecs)
181 : : {
182 [ # # ]: 0 : if (likely(!blk_queue_dead(q)))
183 : 0 : queue_delayed_work(kblockd_workqueue, &q->delay_work,
184 : : msecs_to_jiffies(msecs));
185 : 0 : }
186 : : EXPORT_SYMBOL(blk_delay_queue);
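/*
 * Illustrative sketch (editor's addition): how a request_fn might use
 * blk_delay_queue() to back off instead of busy-polling when the device
 * has no room. The queue lock is already held when ->request_fn runs,
 * which satisfies the locking rule in the kernel-doc above. All
 * "example_*" names are hypothetical.
 */
static bool example_hw_has_room(struct request_queue *q)
{
	return true;	/* placeholder for a real controller-capacity check */
}

static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_peek_request(q)) != NULL) {
		if (!example_hw_has_room(q)) {
			blk_delay_queue(q, 3);	/* retry in ~3 ms */
			return;
		}
		blk_start_request(rq);
		/* ... hand rq to the hardware here ... */
		__blk_end_request_all(rq, 0);	/* pretend it completed */
	}
}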
187 : :
188 : : /**
189 : : * blk_start_queue - restart a previously stopped queue
190 : : * @q: The &struct request_queue in question
191 : : *
192 : : * Description:
193 : : * blk_start_queue() will clear the stop flag on the queue, and call
194 : : * the request_fn for the queue if it was in a stopped state when
195 : : * entered. Also see blk_stop_queue(). Queue lock must be held.
196 : : **/
197 : 0 : void blk_start_queue(struct request_queue *q)
198 : : {
199 [ # # ]: 0 : WARN_ON(!irqs_disabled());
200 : :
201 : : queue_flag_clear(QUEUE_FLAG_STOPPED, q);
202 : 0 : __blk_run_queue(q);
203 : 0 : }
204 : : EXPORT_SYMBOL(blk_start_queue);
205 : :
206 : : /**
207 : : * blk_stop_queue - stop a queue
208 : : * @q: The &struct request_queue in question
209 : : *
210 : : * Description:
211 : : * The Linux block layer assumes that a block driver will consume all
212 : : * entries on the request queue when the request_fn strategy is called.
213 : : * Often this will not happen, because of hardware limitations (queue
214 : : * depth settings). If a device driver gets a 'queue full' response,
215 : : * or if it simply chooses not to queue more I/O at one point, it can
216 : : * call this function to prevent the request_fn from being called until
217 : : * the driver has signalled it's ready to go again. This happens by calling
218 : : * blk_start_queue() to restart queue operations. Queue lock must be held.
219 : : **/
220 : 0 : void blk_stop_queue(struct request_queue *q)
221 : : {
222 : 0 : cancel_delayed_work(&q->delay_work);
223 : : queue_flag_set(QUEUE_FLAG_STOPPED, q);
224 : 0 : }
225 : : EXPORT_SYMBOL(blk_stop_queue);
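/*
 * Illustrative sketch (editor's addition): the stop/start pattern the
 * kernel-doc above describes. On a 'queue full' condition the dispatch
 * path (queue lock held, inside ->request_fn) requeues the request and
 * stops the queue; the completion interrupt restarts it. The "example_*"
 * names are hypothetical.
 */
static void example_queue_full(struct request_queue *q, struct request *rq)
{
	blk_requeue_request(q, rq);	/* give the started request back */
	blk_stop_queue(q);		/* no more ->request_fn calls for now */
}

static void example_completion_irq(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queue(q);		/* device has room again */
	spin_unlock_irqrestore(q->queue_lock, flags);
}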
226 : :
227 : : /**
228 : : * blk_sync_queue - cancel any pending callbacks on a queue
229 : : * @q: the queue
230 : : *
231 : : * Description:
232 : : * The block layer may perform asynchronous callback activity
233 : : * on a queue, such as calling the unplug function after a timeout.
234 : : * A block device may call blk_sync_queue to ensure that any
235 : : * such activity is cancelled, thus allowing it to release resources
236 : : * that the callbacks might use. The caller must already have made sure
237 : : * that its ->make_request_fn will not re-add plugging prior to calling
238 : : * this function.
239 : : *
240 : : * This function does not cancel any asynchronous activity arising
241 : :  * out of elevator or throttling code. That would require elevator_exit()
242 : : * and blkcg_exit_queue() to be called with queue lock initialized.
243 : : *
244 : : */
245 : 0 : void blk_sync_queue(struct request_queue *q)
246 : : {
247 : 0 : del_timer_sync(&q->timeout);
248 : 0 : cancel_delayed_work_sync(&q->delay_work);
249 : 0 : }
250 : : EXPORT_SYMBOL(blk_sync_queue);
251 : :
252 : : /**
253 : : * __blk_run_queue_uncond - run a queue whether or not it has been stopped
254 : : * @q: The queue to run
255 : : *
256 : : * Description:
257 : : * Invoke request handling on a queue if there are any pending requests.
258 : : * May be used to restart request handling after a request has completed.
259 : : * This variant runs the queue whether or not the queue has been
260 : : * stopped. Must be called with the queue lock held and interrupts
261 : : * disabled. See also @blk_run_queue.
262 : : */
263 : 0 : inline void __blk_run_queue_uncond(struct request_queue *q)
264 : : {
265 [ + - ][ # # ]: 1017713 : if (unlikely(blk_queue_dead(q)))
266 : 0 : return;
267 : :
268 : : /*
269 : : * Some request_fn implementations, e.g. scsi_request_fn(), unlock
270 : : * the queue lock internally. As a result multiple threads may be
271 : : * running such a request function concurrently. Keep track of the
272 : : * number of active request_fn invocations such that blk_drain_queue()
273 : : * can wait until all these request_fn calls have finished.
274 : : */
275 : 1017713 : q->request_fn_active++;
276 : 1017713 : q->request_fn(q);
277 : 1017713 : q->request_fn_active--;
278 : : }
279 : :
280 : : /**
281 : : * __blk_run_queue - run a single device queue
282 : : * @q: The queue to run
283 : : *
284 : : * Description:
285 : : * See @blk_run_queue. This variant must be called with the queue lock
286 : : * held and interrupts disabled.
287 : : */
288 : 0 : void __blk_run_queue(struct request_queue *q)
289 : : {
290 [ + - ]: 1017713 : if (unlikely(blk_queue_stopped(q)))
291 : 0 : return;
292 : :
293 : : __blk_run_queue_uncond(q);
294 : : }
295 : : EXPORT_SYMBOL(__blk_run_queue);
296 : :
297 : : /**
298 : : * blk_run_queue_async - run a single device queue in workqueue context
299 : : * @q: The queue to run
300 : : *
301 : : * Description:
302 : : * Tells kblockd to perform the equivalent of @blk_run_queue on behalf
303 : :  * Tells kblockd to perform the equivalent of @blk_run_queue on our
304 : :  * behalf. The caller must hold the queue lock.
305 : 0 : void blk_run_queue_async(struct request_queue *q)
306 : : {
307 [ + - ][ + - ]: 1175 : if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
308 : 1175 : mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
309 : 0 : }
310 : : EXPORT_SYMBOL(blk_run_queue_async);
311 : :
312 : : /**
313 : : * blk_run_queue - run a single device queue
314 : : * @q: The queue to run
315 : : *
316 : : * Description:
317 : : * Invoke request handling on this queue, if it has pending work to do.
318 : : * May be used to restart queueing when a request has completed.
319 : : */
320 : 0 : void blk_run_queue(struct request_queue *q)
321 : : {
322 : : unsigned long flags;
323 : :
324 : 433718 : spin_lock_irqsave(q->queue_lock, flags);
325 : 433718 : __blk_run_queue(q);
326 : 433718 : spin_unlock_irqrestore(q->queue_lock, flags);
327 : 433718 : }
328 : : EXPORT_SYMBOL(blk_run_queue);
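/*
 * Illustrative sketch (editor's addition): restarting dispatch once a
 * driver-private resource is available again. blk_run_queue() takes the
 * queue lock itself, so it is called here without the lock; a caller
 * already holding the lock would use __blk_run_queue() or
 * blk_run_queue_async() instead.
 */
static void example_resource_returned(struct request_queue *q)
{
	blk_run_queue(q);
}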
329 : :
330 : 0 : void blk_put_queue(struct request_queue *q)
331 : : {
332 : 0 : kobject_put(&q->kobj);
333 : 0 : }
334 : : EXPORT_SYMBOL(blk_put_queue);
335 : :
336 : : /**
337 : : * __blk_drain_queue - drain requests from request_queue
338 : : * @q: queue to drain
339 : : * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
340 : : *
341 : : * Drain requests from @q. If @drain_all is set, all requests are drained.
342 : : * If not, only ELVPRIV requests are drained. The caller is responsible
343 : : * for ensuring that no new requests which need to be drained are queued.
344 : : */
345 : 0 : static void __blk_drain_queue(struct request_queue *q, bool drain_all)
346 : : __releases(q->queue_lock)
347 : : __acquires(q->queue_lock)
348 : : {
349 : : int i;
350 : :
351 : : lockdep_assert_held(q->queue_lock);
352 : :
353 : : while (true) {
354 : : bool drain = false;
355 : :
356 : : /*
357 : : * The caller might be trying to drain @q before its
358 : : * elevator is initialized.
359 : : */
360 [ # # ]: 0 : if (q->elevator)
361 : 0 : elv_drain_elevator(q);
362 : :
363 : : blkcg_drain_queue(q);
364 : :
365 : : /*
366 : : * This function might be called on a queue which failed
367 : : * driver init after queue creation or is not yet fully
368 : :                  * active. Some drivers (e.g. fd and loop) get unhappy
369 : : * in such cases. Kick queue iff dispatch queue has
370 : : * something on it and @q has request_fn set.
371 : : */
372 [ # # ][ # # ]: 0 : if (!list_empty(&q->queue_head) && q->request_fn)
373 : 0 : __blk_run_queue(q);
374 : :
375 : 0 : drain |= q->nr_rqs_elvpriv;
376 : 0 : drain |= q->request_fn_active;
377 : :
378 : : /*
379 : : * Unfortunately, requests are queued at and tracked from
380 : : * multiple places and there's no single counter which can
381 : : * be drained. Check all the queues and counters.
382 : : */
383 [ # # ]: 0 : if (drain_all) {
384 : 0 : drain |= !list_empty(&q->queue_head);
385 [ # # ]: 0 : for (i = 0; i < 2; i++) {
386 : 0 : drain |= q->nr_rqs[i];
387 : 0 : drain |= q->in_flight[i];
388 : 0 : drain |= !list_empty(&q->flush_queue[i]);
389 : : }
390 : : }
391 : :
392 [ # # ]: 0 : if (!drain)
393 : : break;
394 : :
395 : 0 : spin_unlock_irq(q->queue_lock);
396 : :
397 : 0 : msleep(10);
398 : :
399 : 0 : spin_lock_irq(q->queue_lock);
400 : : }
401 : :
402 : : /*
403 : : * With queue marked dead, any woken up waiter will fail the
404 : : * allocation path, so the wakeup chaining is lost and we're
405 : : * left with hung waiters. We need to wake up those waiters.
406 : : */
407 [ # # ]: 0 : if (q->request_fn) {
408 : : struct request_list *rl;
409 : :
410 [ # # ]: 0 : blk_queue_for_each_rl(rl, q)
411 [ # # ]: 0 : for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
412 : 0 : wake_up_all(&rl->wait[i]);
413 : : }
414 : 0 : }
415 : :
416 : : /**
417 : : * blk_queue_bypass_start - enter queue bypass mode
418 : : * @q: queue of interest
419 : : *
420 : : * In bypass mode, only the dispatch FIFO queue of @q is used. This
421 : : * function makes @q enter bypass mode and drains all requests which were
422 : : * throttled or issued before. On return, it's guaranteed that no request
423 : :  * is being throttled or has ELVPRIV set and blk_queue_bypass() is %true
424 : : * inside queue or RCU read lock.
425 : : */
426 : 0 : void blk_queue_bypass_start(struct request_queue *q)
427 : : {
428 : : bool drain;
429 : :
430 : 0 : spin_lock_irq(q->queue_lock);
431 : 0 : drain = !q->bypass_depth++;
432 : : queue_flag_set(QUEUE_FLAG_BYPASS, q);
433 : 0 : spin_unlock_irq(q->queue_lock);
434 : :
435 [ # # ]: 0 : if (drain) {
436 : 0 : spin_lock_irq(q->queue_lock);
437 : 0 : __blk_drain_queue(q, false);
438 : 0 : spin_unlock_irq(q->queue_lock);
439 : :
440 : : /* ensure blk_queue_bypass() is %true inside RCU read lock */
441 : : synchronize_rcu();
442 : : }
443 : 0 : }
444 : : EXPORT_SYMBOL_GPL(blk_queue_bypass_start);
445 : :
446 : : /**
447 : : * blk_queue_bypass_end - leave queue bypass mode
448 : : * @q: queue of interest
449 : : *
450 : : * Leave bypass mode and restore the normal queueing behavior.
451 : : */
452 : 0 : void blk_queue_bypass_end(struct request_queue *q)
453 : : {
454 : 0 : spin_lock_irq(q->queue_lock);
455 [ # # ]: 0 : if (!--q->bypass_depth)
456 : : queue_flag_clear(QUEUE_FLAG_BYPASS, q);
457 [ # # ][ # # ]: 0 : WARN_ON_ONCE(q->bypass_depth < 0);
[ # # ]
458 : 0 : spin_unlock_irq(q->queue_lock);
459 : 0 : }
460 : : EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
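/*
 * Illustrative sketch (editor's addition): bracketing an update that must
 * not observe in-flight ELVPRIV requests with the bypass helpers, the
 * same pattern elevator switching relies on. The body of the update is
 * left as a placeholder comment.
 */
static void example_quiesced_update(struct request_queue *q)
{
	blk_queue_bypass_start(q);	/* drains throttled/ELVPRIV requests */
	/* ... perform the update that relies on the queue being drained ... */
	blk_queue_bypass_end(q);
}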
461 : :
462 : : /**
463 : : * blk_cleanup_queue - shutdown a request queue
464 : : * @q: request queue to shutdown
465 : : *
466 : : * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
467 : : * put it. All future requests will be failed immediately with -ENODEV.
468 : : */
469 : 0 : void blk_cleanup_queue(struct request_queue *q)
470 : : {
471 : 0 : spinlock_t *lock = q->queue_lock;
472 : :
473 : : /* mark @q DYING, no new request or merges will be allowed afterwards */
474 : 0 : mutex_lock(&q->sysfs_lock);
475 : : queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
476 : : spin_lock_irq(lock);
477 : :
478 : : /*
479 : : * A dying queue is permanently in bypass mode till released. Note
480 : : * that, unlike blk_queue_bypass_start(), we aren't performing
481 : : * synchronize_rcu() after entering bypass mode to avoid the delay
482 : : * as some drivers create and destroy a lot of queues while
483 : : * probing. This is still safe because blk_release_queue() will be
484 : : * called only after the queue refcnt drops to zero and nothing,
485 : : * RCU or not, would be traversing the queue by then.
486 : : */
487 : 0 : q->bypass_depth++;
488 : : queue_flag_set(QUEUE_FLAG_BYPASS, q);
489 : :
490 : : queue_flag_set(QUEUE_FLAG_NOMERGES, q);
491 : : queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
492 : : queue_flag_set(QUEUE_FLAG_DYING, q);
493 : : spin_unlock_irq(lock);
494 : 0 : mutex_unlock(&q->sysfs_lock);
495 : :
496 : : /*
497 : : * Drain all requests queued before DYING marking. Set DEAD flag to
498 : : * prevent that q->request_fn() gets invoked after draining finished.
499 : : */
500 : : spin_lock_irq(lock);
501 : 0 : __blk_drain_queue(q, true);
502 : : queue_flag_set(QUEUE_FLAG_DEAD, q);
503 : : spin_unlock_irq(lock);
504 : :
505 : : /* @q won't process any more request, flush async actions */
506 : 0 : del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
507 : : blk_sync_queue(q);
508 : :
509 : : spin_lock_irq(lock);
510 [ # # ]: 0 : if (q->queue_lock != &q->__queue_lock)
511 : 0 : q->queue_lock = &q->__queue_lock;
512 : : spin_unlock_irq(lock);
513 : :
514 : : /* @q is and will stay empty, shutdown and put */
515 : : blk_put_queue(q);
516 : 0 : }
517 : : EXPORT_SYMBOL(blk_cleanup_queue);
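/*
 * Illustrative sketch (editor's addition): a hypothetical driver's remove
 * path pairing blk_cleanup_queue() with the earlier queue setup, once the
 * gendisk is gone, as required by the blk_init_queue() kernel-doc further
 * down. "struct example_dev" is assumed.
 */
struct example_dev {
	struct gendisk *disk;
	struct request_queue *queue;
};

static void example_remove(struct example_dev *dev)
{
	del_gendisk(dev->disk);
	blk_cleanup_queue(dev->queue);	/* drain, mark DEAD, drop the ref */
	put_disk(dev->disk);
}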
518 : :
519 : 0 : int blk_init_rl(struct request_list *rl, struct request_queue *q,
520 : : gfp_t gfp_mask)
521 : : {
522 [ # # ]: 0 : if (unlikely(rl->rq_pool))
523 : : return 0;
524 : :
525 : 0 : rl->q = q;
526 : 0 : rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
527 : 0 : rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
528 : 0 : init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
529 : 0 : init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
530 : :
531 : 0 : rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
532 : : mempool_free_slab, request_cachep,
533 : : gfp_mask, q->node);
534 [ # # ]: 0 : if (!rl->rq_pool)
535 : : return -ENOMEM;
536 : :
537 : 0 : return 0;
538 : : }
539 : :
540 : 0 : void blk_exit_rl(struct request_list *rl)
541 : : {
542 [ # # ]: 0 : if (rl->rq_pool)
543 : 0 : mempool_destroy(rl->rq_pool);
544 : 0 : }
545 : :
546 : 0 : struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
547 : : {
548 : 0 : return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
549 : : }
550 : : EXPORT_SYMBOL(blk_alloc_queue);
551 : :
552 : 0 : struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
553 : : {
554 : : struct request_queue *q;
555 : : int err;
556 : :
557 : 0 : q = kmem_cache_alloc_node(blk_requestq_cachep,
558 : : gfp_mask | __GFP_ZERO, node_id);
559 [ # # ]: 0 : if (!q)
560 : : return NULL;
561 : :
562 [ # # ]: 0 : if (percpu_counter_init(&q->mq_usage_counter, 0))
563 : : goto fail_q;
564 : :
565 : 0 : q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
566 [ # # ]: 0 : if (q->id < 0)
567 : : goto fail_c;
568 : :
569 : 0 : q->backing_dev_info.ra_pages =
570 : : (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
571 : 0 : q->backing_dev_info.state = 0;
572 : 0 : q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
573 : 0 : q->backing_dev_info.name = "block";
574 : 0 : q->node = node_id;
575 : :
576 : 0 : err = bdi_init(&q->backing_dev_info);
577 [ # # ]: 0 : if (err)
578 : : goto fail_id;
579 : :
580 : 0 : setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
581 : : laptop_mode_timer_fn, (unsigned long) q);
582 : 0 : setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
583 : 0 : INIT_LIST_HEAD(&q->queue_head);
584 : 0 : INIT_LIST_HEAD(&q->timeout_list);
585 : 0 : INIT_LIST_HEAD(&q->icq_list);
586 : : #ifdef CONFIG_BLK_CGROUP
587 : : INIT_LIST_HEAD(&q->blkg_list);
588 : : #endif
589 : 0 : INIT_LIST_HEAD(&q->flush_queue[0]);
590 : 0 : INIT_LIST_HEAD(&q->flush_queue[1]);
591 : 0 : INIT_LIST_HEAD(&q->flush_data_in_flight);
592 : 0 : INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);
593 : :
594 : 0 : kobject_init(&q->kobj, &blk_queue_ktype);
595 : :
596 : 0 : mutex_init(&q->sysfs_lock);
597 : 0 : spin_lock_init(&q->__queue_lock);
598 : :
599 : : /*
600 : : * By default initialize queue_lock to internal lock and driver can
601 : : * override it later if need be.
602 : : */
603 : 0 : q->queue_lock = &q->__queue_lock;
604 : :
605 : : /*
606 : : * A queue starts its life with bypass turned on to avoid
607 : : * unnecessary bypass on/off overhead and nasty surprises during
608 : : * init. The initial bypass will be finished when the queue is
609 : : * registered by blk_register_queue().
610 : : */
611 : 0 : q->bypass_depth = 1;
612 : : __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
613 : :
614 : 0 : init_waitqueue_head(&q->mq_freeze_wq);
615 : :
616 : : if (blkcg_init_queue(q))
617 : : goto fail_bdi;
618 : :
619 : 0 : return q;
620 : :
621 : : fail_bdi:
622 : : bdi_destroy(&q->backing_dev_info);
623 : : fail_id:
624 : 0 : ida_simple_remove(&blk_queue_ida, q->id);
625 : : fail_c:
626 : 0 : percpu_counter_destroy(&q->mq_usage_counter);
627 : : fail_q:
628 : 0 : kmem_cache_free(blk_requestq_cachep, q);
629 : 0 : return NULL;
630 : : }
631 : : EXPORT_SYMBOL(blk_alloc_queue_node);
632 : :
633 : : /**
634 : : * blk_init_queue - prepare a request queue for use with a block device
635 : : * @rfn: The function to be called to process requests that have been
636 : : * placed on the queue.
637 : : * @lock: Request queue spin lock
638 : : *
639 : : * Description:
640 : : * If a block device wishes to use the standard request handling procedures,
641 : : * which sorts requests and coalesces adjacent requests, then it must
642 : : * call blk_init_queue(). The function @rfn will be called when there
643 : : * are requests on the queue that need to be processed. If the device
644 : : * supports plugging, then @rfn may not be called immediately when requests
645 : : * are available on the queue, but may be called at some time later instead.
646 : : * Plugged queues are generally unplugged when a buffer belonging to one
647 : : * of the requests on the queue is needed, or due to memory pressure.
648 : : *
649 : : * @rfn is not required, or even expected, to remove all requests off the
650 : : * queue, but only as many as it can handle at a time. If it does leave
651 : : * requests on the queue, it is responsible for arranging that the requests
652 : : * get dealt with eventually.
653 : : *
654 : : * The queue spin lock must be held while manipulating the requests on the
655 : : * request queue; this lock will be taken also from interrupt context, so irq
656 : : * disabling is needed for it.
657 : : *
658 : : * Function returns a pointer to the initialized request queue, or %NULL if
659 : : * it didn't succeed.
660 : : *
661 : : * Note:
662 : : * blk_init_queue() must be paired with a blk_cleanup_queue() call
663 : : * when the block device is deactivated (such as at module unload).
664 : : **/
665 : :
666 : 0 : struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
667 : : {
668 : 0 : return blk_init_queue_node(rfn, lock, NUMA_NO_NODE);
669 : : }
670 : : EXPORT_SYMBOL(blk_init_queue);
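/*
 * Illustrative sketch (editor's addition): typical driver-side setup for
 * the request-based path described in the kernel-doc above. The spinlock
 * belongs to the driver and becomes q->queue_lock; the limits shown are
 * hypothetical device properties.
 */
static struct request_queue *example_setup_queue(request_fn_proc *rfn,
						 spinlock_t *lock)
{
	struct request_queue *q;

	q = blk_init_queue(rfn, lock);
	if (!q)
		return NULL;

	blk_queue_max_hw_sectors(q, 255);	/* hypothetical controller limit */
	blk_queue_logical_block_size(q, 512);
	return q;
}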
671 : :
672 : : struct request_queue *
673 : 0 : blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
674 : : {
675 : : struct request_queue *uninit_q, *q;
676 : :
677 : 0 : uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id);
678 [ # # ]: 0 : if (!uninit_q)
679 : : return NULL;
680 : :
681 : 0 : q = blk_init_allocated_queue(uninit_q, rfn, lock);
682 [ # # ]: 0 : if (!q)
683 : 0 : blk_cleanup_queue(uninit_q);
684 : :
685 : 0 : return q;
686 : : }
687 : : EXPORT_SYMBOL(blk_init_queue_node);
688 : :
689 : : struct request_queue *
690 : 0 : blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
691 : : spinlock_t *lock)
692 : : {
693 [ # # ]: 0 : if (!q)
694 : : return NULL;
695 : :
696 [ # # ]: 0 : if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
697 : : return NULL;
698 : :
699 : 0 : q->request_fn = rfn;
700 : 0 : q->prep_rq_fn = NULL;
701 : 0 : q->unprep_rq_fn = NULL;
702 : 0 : q->queue_flags |= QUEUE_FLAG_DEFAULT;
703 : :
704 : : /* Override internal queue lock with supplied lock pointer */
705 [ # # ]: 0 : if (lock)
706 : 0 : q->queue_lock = lock;
707 : :
708 : : /*
709 : : * This also sets hw/phys segments, boundary and size
710 : : */
711 : 0 : blk_queue_make_request(q, blk_queue_bio);
712 : :
713 : 0 : q->sg_reserved_size = INT_MAX;
714 : :
715 : : /* Protect q->elevator from elevator_change */
716 : 0 : mutex_lock(&q->sysfs_lock);
717 : :
718 : : /* init elevator */
719 [ # # ]: 0 : if (elevator_init(q, NULL)) {
720 : 0 : mutex_unlock(&q->sysfs_lock);
721 : 0 : return NULL;
722 : : }
723 : :
724 : 0 : mutex_unlock(&q->sysfs_lock);
725 : :
726 : 0 : return q;
727 : : }
728 : : EXPORT_SYMBOL(blk_init_allocated_queue);
729 : :
730 : 0 : bool blk_get_queue(struct request_queue *q)
731 : : {
732 [ # # ]: 0 : if (likely(!blk_queue_dying(q))) {
733 : : __blk_get_queue(q);
734 : 0 : return true;
735 : : }
736 : :
737 : : return false;
738 : : }
739 : : EXPORT_SYMBOL(blk_get_queue);
740 : :
741 : : static inline void blk_free_request(struct request_list *rl, struct request *rq)
742 : : {
743 [ + - ]: 435065 : if (rq->cmd_flags & REQ_ELVPRIV) {
744 : 435065 : elv_put_request(rl->q, rq);
745 [ + - ]: 435065 : if (rq->elv.icq)
746 : 435065 : put_io_context(rq->elv.icq->ioc);
747 : : }
748 : :
749 : 435065 : mempool_free(rq, rl->rq_pool);
750 : : }
751 : :
752 : : /*
753 : : * ioc_batching returns true if the ioc is a valid batching request and
754 : : * should be given priority access to a request.
755 : : */
756 : : static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
757 : : {
758 [ + - ][ + + ]: 884542 : if (!ioc)
[ + - ]
759 : : return 0;
760 : :
761 : : /*
762 : : * Make sure the process is able to allocate at least 1 request
763 : : * even if the batch times out, otherwise we could theoretically
764 : : * lose wakeups.
765 : : */
766 [ + + ][ + + ]: 448806 : return ioc->nr_batch_requests == q->nr_batching ||
[ + + ][ + + ]
[ + + ][ + + ]
767 : : (ioc->nr_batch_requests > 0
768 [ + + ][ + + ]: 88026 : && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
[ + + ]
769 : : }
770 : :
771 : : /*
772 : : * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
773 : : * will cause the process to be a "batcher" on all queues in the system. This
774 : : * is the behaviour we want though - once it gets a wakeup it should be given
775 : : * a nice run.
776 : : */
777 : 0 : static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
778 : : {
779 [ + - ][ + ]: 2896 : if (!ioc || ioc_batching(q, ioc))
780 : 0 : return;
781 : :
782 : 741 : ioc->nr_batch_requests = q->nr_batching;
783 : 741 : ioc->last_waited = jiffies;
784 : : }
785 : :
786 : 0 : static void __freed_request(struct request_list *rl, int sync)
787 : : {
788 : 870130 : struct request_queue *q = rl->q;
789 : :
790 : : /*
791 : : * bdi isn't aware of blkcg yet. As all async IOs end up root
792 : : * blkcg anyway, just use root blkcg state.
793 : : */
794 [ + - ][ + + ]: 435065 : if (rl == &q->root_rl &&
795 : 435065 : rl->count[sync] < queue_congestion_off_threshold(q))
796 : : blk_clear_queue_congested(q, sync);
797 : :
798 [ + + ]: 435065 : if (rl->count[sync] + 1 <= q->nr_requests) {
799 [ + + ]: 423455 : if (waitqueue_active(&rl->wait[sync]))
800 : 683 : wake_up(&rl->wait[sync]);
801 : :
802 : : blk_clear_rl_full(rl, sync);
803 : : }
804 : 435065 : }
805 : :
806 : : /*
807 : : * A request has just been released. Account for it, update the full and
808 : : * congestion status, wake up any waiters. Called under q->queue_lock.
809 : : */
810 : 0 : static void freed_request(struct request_list *rl, unsigned int flags)
811 : : {
812 : 435065 : struct request_queue *q = rl->q;
813 : 435065 : int sync = rw_is_sync(flags);
814 : :
815 : 435065 : q->nr_rqs[sync]--;
816 : 435065 : rl->count[sync]--;
817 [ + - ]: 435065 : if (flags & REQ_ELVPRIV)
818 : 435065 : q->nr_rqs_elvpriv--;
819 : :
820 : 435065 : __freed_request(rl, sync);
821 : :
822 [ - + ]: 435065 : if (unlikely(rl->starved[sync ^ 1]))
823 : 0 : __freed_request(rl, sync ^ 1);
824 : 435065 : }
825 : :
826 : : /*
827 : : * Determine if elevator data should be initialized when allocating the
828 : : * request associated with @bio.
829 : : */
830 : : static bool blk_rq_should_init_elevator(struct bio *bio)
831 : : {
832 [ + + ]: 435066 : if (!bio)
833 : : return true;
834 : :
835 : : /*
836 : : * Flush requests do not use the elevator so skip initialization.
837 : : * This allows a request to share the flush and elevator data.
838 : : */
839 [ + - ]: 396248 : if (bio->bi_rw & (REQ_FLUSH | REQ_FUA))
840 : : return false;
841 : :
842 : : return true;
843 : : }
844 : :
845 : : /**
846 : : * rq_ioc - determine io_context for request allocation
847 : : * @bio: request being allocated is for this bio (can be %NULL)
848 : : *
849 : : * Determine io_context to use for request allocation for @bio. May return
850 : : * %NULL if %current->io_context doesn't exist.
851 : : */
852 : : static struct io_context *rq_ioc(struct bio *bio)
853 : : {
854 : : #ifdef CONFIG_BLK_CGROUP
855 : : if (bio && bio->bi_ioc)
856 : : return bio->bi_ioc;
857 : : #endif
858 : 435751 : return current->io_context;
859 : : }
860 : :
861 : : /**
862 : : * __get_request - get a free request
863 : : * @rl: request list to allocate from
864 : : * @rw_flags: RW and SYNC flags
865 : : * @bio: bio to allocate request for (can be %NULL)
866 : : * @gfp_mask: allocation mask
867 : : *
868 : : * Get a free request from @q. This function may fail under memory
869 : : * pressure or if @q is dead.
870 : : *
871 : :  * Must be called with @q->queue_lock held and,
872 : : * Returns %NULL on failure, with @q->queue_lock held.
873 : : * Returns !%NULL on success, with @q->queue_lock *not held*.
874 : : */
875 : 0 : static struct request *__get_request(struct request_list *rl, int rw_flags,
876 : : struct bio *bio, gfp_t gfp_mask)
877 : : {
878 : 871502 : struct request_queue *q = rl->q;
879 : : struct request *rq;
880 : 435751 : struct elevator_type *et = q->elevator->type;
881 : : struct io_context *ioc = rq_ioc(bio);
882 : : struct io_cq *icq = NULL;
883 : 435751 : const bool is_sync = rw_is_sync(rw_flags) != 0;
884 : : int may_queue;
885 : :
886 [ + - ]: 435751 : if (unlikely(blk_queue_dying(q)))
887 : : return NULL;
888 : :
889 : 435751 : may_queue = elv_may_queue(q, rw_flags);
890 [ + - ]: 435751 : if (may_queue == ELV_MQUEUE_NO)
891 : : goto rq_starved;
892 : :
893 [ + + ]: 435751 : if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
894 [ + + ]: 13787 : if (rl->count[is_sync]+1 >= q->nr_requests) {
895 : : /*
896 : : * The queue will fill after this allocation, so set
897 : : * it as full, and mark this process as "batching".
898 : : * This process will be allowed to complete a batch of
899 : : * requests, others will be blocked.
900 : : */
901 [ + + ]: 13058 : if (!blk_rl_full(rl, is_sync)) {
902 : 763 : ioc_set_batching(q, ioc);
903 : : blk_set_rl_full(rl, is_sync);
904 : : } else {
905 [ + - ]: 12295 : if (may_queue != ELV_MQUEUE_MUST
906 [ + + ]: 12295 : && !ioc_batching(q, ioc)) {
907 : : /*
908 : : * The queue is full and the allocating
909 : : * process is not a "batcher", and not
910 : : * exempted by the IO scheduler
911 : : */
912 : : return NULL;
913 : : }
914 : : }
915 : : }
916 : : /*
917 : : * bdi isn't aware of blkcg yet. As all async IOs end up
918 : : * root blkcg anyway, just use root blkcg state.
919 : : */
920 [ + - ]: 13102 : if (rl == &q->root_rl)
921 : : blk_set_queue_congested(q, is_sync);
922 : : }
923 : :
924 : : /*
925 : : * Only allow batching queuers to allocate up to 50% over the defined
926 : : * limit of requests, otherwise we could have thousands of requests
927 : : * allocated with any setting of ->nr_requests
928 : : */
929 [ + - ]: 435066 : if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
930 : : return NULL;
931 : :
932 : 435066 : q->nr_rqs[is_sync]++;
933 : 435066 : rl->count[is_sync]++;
934 : 435066 : rl->starved[is_sync] = 0;
935 : :
936 : : /*
937 : : * Decide whether the new request will be managed by elevator. If
938 : : * so, mark @rw_flags and increment elvpriv. Non-zero elvpriv will
939 : : * prevent the current elevator from being destroyed until the new
940 : : * request is freed. This guarantees icq's won't be destroyed and
941 : : * makes creating new ones safe.
942 : : *
943 : : * Also, lookup icq while holding queue_lock. If it doesn't exist,
944 : : * it will be created after releasing queue_lock.
945 : : */
946 [ + - ][ + - ]: 435066 : if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
947 : 435066 : rw_flags |= REQ_ELVPRIV;
948 : 435066 : q->nr_rqs_elvpriv++;
949 [ + - ][ + - ]: 435066 : if (et->icq_cache && ioc)
950 : 435066 : icq = ioc_lookup_icq(ioc, q);
951 : : }
952 : :
953 [ + - ]: 435066 : if (blk_queue_io_stat(q))
954 : 435066 : rw_flags |= REQ_IO_STAT;
955 : 435066 : spin_unlock_irq(q->queue_lock);
956 : :
957 : : /* allocate and init request */
958 : 435066 : rq = mempool_alloc(rl->rq_pool, gfp_mask);
959 [ + - ]: 435066 : if (!rq)
960 : : goto fail_alloc;
961 : :
962 : 435066 : blk_rq_init(q, rq);
963 : : blk_rq_set_rl(rq, rl);
964 : 435066 : rq->cmd_flags = rw_flags | REQ_ALLOCED;
965 : :
966 : : /* init elvpriv */
967 [ + - ]: 435066 : if (rw_flags & REQ_ELVPRIV) {
968 [ + - ][ + + ]: 435066 : if (unlikely(et->icq_cache && !icq)) {
969 [ + - ]: 6724 : if (ioc)
970 : 6724 : icq = ioc_create_icq(ioc, q, gfp_mask);
971 [ + - ]: 6724 : if (!icq)
972 : : goto fail_elvpriv;
973 : : }
974 : :
975 : 435066 : rq->elv.icq = icq;
976 [ + - ]: 435066 : if (unlikely(elv_set_request(q, rq, bio, gfp_mask)))
977 : : goto fail_elvpriv;
978 : :
979 : : /* @rq->elv.icq holds io_context until @rq is freed */
980 [ + - ]: 435066 : if (icq)
981 : 435066 : get_io_context(icq->ioc);
982 : : }
983 : : out:
984 : : /*
985 : : * ioc may be NULL here, and ioc_batching will be false. That's
986 : : * OK, if the queue is under the request limit then requests need
987 : : * not count toward the nr_batch_requests limit. There will always
988 : : * be some limit enforced by BLK_BATCH_TIME.
989 : : */
990 [ + + ]: 870799 : if (ioc_batching(q, ioc))
991 : 12399 : ioc->nr_batch_requests--;
992 : :
993 : 435048 : trace_block_getrq(q, bio, rw_flags & 1);
994 : 435048 : return rq;
995 : :
996 : : fail_elvpriv:
997 : : /*
998 : : * elvpriv init failed. ioc, icq and elvpriv aren't mempool backed
999 : : * and may fail indefinitely under memory pressure and thus
1000 : : * shouldn't stall IO. Treat this request as !elvpriv. This will
1001 : :          * disturb iosched and blkcg but weird is better than dead.
1002 : : */
1003 [ # # ]: 0 : printk_ratelimited(KERN_WARNING "%s: request aux data allocation failed, iosched may be disturbed\n",
1004 : : dev_name(q->backing_dev_info.dev));
1005 : :
1006 : 0 : rq->cmd_flags &= ~REQ_ELVPRIV;
1007 : 0 : rq->elv.icq = NULL;
1008 : :
1009 : 0 : spin_lock_irq(q->queue_lock);
1010 : 0 : q->nr_rqs_elvpriv--;
1011 : 0 : spin_unlock_irq(q->queue_lock);
1012 : : goto out;
1013 : :
1014 : : fail_alloc:
1015 : : /*
1016 : : * Allocation failed presumably due to memory. Undo anything we
1017 : : * might have messed up.
1018 : : *
1019 : : * Allocating task should really be put onto the front of the wait
1020 : : * queue, but this is pretty rare.
1021 : : */
1022 : 0 : spin_lock_irq(q->queue_lock);
1023 : 0 : freed_request(rl, rw_flags);
1024 : :
1025 : : /*
1026 : : * in the very unlikely event that allocation failed and no
1027 : :          * requests for this direction were pending, mark us starved so that
1028 : :          * freeing of a request in the other direction will notice
1029 : :          * us. Another possible fix would be to split the rq mempool into
1030 : : * READ and WRITE
1031 : : */
1032 : : rq_starved:
1033 [ # # ]: 0 : if (unlikely(rl->count[is_sync] == 0))
1034 : 0 : rl->starved[is_sync] = 1;
1035 : : return NULL;
1036 : : }
1037 : :
1038 : : /**
1039 : : * get_request - get a free request
1040 : : * @q: request_queue to allocate request from
1041 : : * @rw_flags: RW and SYNC flags
1042 : : * @bio: bio to allocate request for (can be %NULL)
1043 : : * @gfp_mask: allocation mask
1044 : : *
1045 : : * Get a free request from @q. If %__GFP_WAIT is set in @gfp_mask, this
1046 : : * function keeps retrying under memory pressure and fails iff @q is dead.
1047 : : *
1048 : :  * Must be called with @q->queue_lock held and,
1049 : : * Returns %NULL on failure, with @q->queue_lock held.
1050 : : * Returns !%NULL on success, with @q->queue_lock *not held*.
1051 : : */
1052 : 0 : static struct request *get_request(struct request_queue *q, int rw_flags,
1053 : : struct bio *bio, gfp_t gfp_mask)
1054 : : {
1055 : 435066 : const bool is_sync = rw_is_sync(rw_flags) != 0;
1056 : 870132 : DEFINE_WAIT(wait);
1057 : : struct request_list *rl;
1058 : : struct request *rq;
1059 : :
1060 : 435066 : rl = blk_get_rl(q, bio); /* transferred to @rq on success */
1061 : : retry:
1062 : 435751 : rq = __get_request(rl, rw_flags, bio, gfp_mask);
1063 [ + + ]: 435726 : if (rq)
1064 : : return rq;
1065 : :
1066 [ + - ][ + - ]: 685 : if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dying(q))) {
1067 : : blk_put_rl(rl);
1068 : : return NULL;
1069 : : }
1070 : :
1071 : : /* wait on @rl and retry */
1072 : 685 : prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
1073 : : TASK_UNINTERRUPTIBLE);
1074 : :
1075 : 685 : trace_block_sleeprq(q, bio, rw_flags & 1);
1076 : :
1077 : 0 : spin_unlock_irq(q->queue_lock);
1078 : 685 : io_schedule();
1079 : :
1080 : : /*
1081 : : * After sleeping, we become a "batching" process and will be able
1082 : : * to allocate at least one request, and up to a big batch of them
1083 : : * for a small period time. See ioc_batching, ioc_set_batching
1084 : : */
1085 : 685 : ioc_set_batching(q, current->io_context);
1086 : :
1087 : 685 : spin_lock_irq(q->queue_lock);
1088 : 685 : finish_wait(&rl->wait[is_sync], &wait);
1089 : :
1090 : 685 : goto retry;
1091 : : }
1092 : :
1093 : 0 : static struct request *blk_old_get_request(struct request_queue *q, int rw,
1094 : : gfp_t gfp_mask)
1095 : : {
1096 : : struct request *rq;
1097 : :
1098 [ - + ]: 38818 : BUG_ON(rw != READ && rw != WRITE);
1099 : :
1100 : : /* create ioc upfront */
1101 : 38818 : create_io_context(gfp_mask, q->node);
1102 : :
1103 : 38818 : spin_lock_irq(q->queue_lock);
1104 : 38818 : rq = get_request(q, rw, NULL, gfp_mask);
1105 [ - + ]: 38818 : if (!rq)
1106 : 0 : spin_unlock_irq(q->queue_lock);
1107 : : /* q->queue_lock is unlocked at this point */
1108 : :
1109 : 38818 : return rq;
1110 : : }
1111 : :
1112 : 0 : struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
1113 : : {
1114 [ - + ]: 38818 : if (q->mq_ops)
1115 : 0 : return blk_mq_alloc_request(q, rw, gfp_mask, false);
1116 : : else
1117 : 38818 : return blk_old_get_request(q, rw, gfp_mask);
1118 : : }
1119 : : EXPORT_SYMBOL(blk_get_request);
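/*
 * Illustrative sketch (editor's addition): the usual pattern for a
 * driver-private command. With GFP_KERNEL (which includes __GFP_WAIT)
 * blk_get_request() may sleep but only fails if the queue is dying; the
 * request is always released with blk_put_request(). The BLOCK_PC setup
 * is abbreviated and only meant as an example.
 */
static int example_send_internal_cmd(struct request_queue *q,
				     struct gendisk *disk)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENODEV;

	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->timeout = 10 * HZ;
	/* rq->cmd[] and rq->cmd_len would be filled in here */

	blk_execute_rq(q, disk, rq, 0);		/* waits for completion */
	err = rq->errors ? -EIO : 0;

	blk_put_request(rq);
	return err;
}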
1120 : :
1121 : : /**
1122 : : * blk_make_request - given a bio, allocate a corresponding struct request.
1123 : : * @q: target request queue
1124 : : * @bio: The bio describing the memory mappings that will be submitted for IO.
1125 : : * It may be a chained-bio properly constructed by block/bio layer.
1126 : : * @gfp_mask: gfp flags to be used for memory allocation
1127 : : *
1128 : : * blk_make_request is the parallel of generic_make_request for BLOCK_PC
1129 : :  * type commands, where the struct request needs to be further initialized by
1130 : : * the caller. It is passed a &struct bio, which describes the memory info of
1131 : : * the I/O transfer.
1132 : : *
1133 : :  * The caller of blk_make_request must make sure that the bi_io_vec entries
1134 : :  * describe the memory buffers, and that bio_data_dir() returns the needed
1135 : :  * direction of the request (and that all bios in the passed bio chain are
1136 : :  * set up accordingly).
1137 : : *
1138 : :  * If called under non-sleepable conditions, mapped bio buffers must not
1139 : : * need bouncing, by calling the appropriate masked or flagged allocator,
1140 : : * suitable for the target device. Otherwise the call to blk_queue_bounce will
1141 : : * BUG.
1142 : : *
1143 : : * WARNING: When allocating/cloning a bio-chain, careful consideration should be
1144 : : * given to how you allocate bios. In particular, you cannot use __GFP_WAIT for
1145 : : * anything but the first bio in the chain. Otherwise you risk waiting for IO
1146 : : * completion of a bio that hasn't been submitted yet, thus resulting in a
1147 : : * deadlock. Alternatively bios should be allocated using bio_kmalloc() instead
1148 : : * of bio_alloc(), as that avoids the mempool deadlock.
1149 : : * If possible a big IO should be split into smaller parts when allocation
1150 : : * fails. Partial allocation should not be an error, or you risk a live-lock.
1151 : : */
1152 : 0 : struct request *blk_make_request(struct request_queue *q, struct bio *bio,
1153 : : gfp_t gfp_mask)
1154 : : {
1155 : 0 : struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);
1156 : :
1157 [ # # ]: 0 : if (unlikely(!rq))
1158 : : return ERR_PTR(-ENOMEM);
1159 : :
1160 [ # # ]: 0 : for_each_bio(bio) {
1161 : 0 : struct bio *bounce_bio = bio;
1162 : : int ret;
1163 : :
1164 : 0 : blk_queue_bounce(q, &bounce_bio);
1165 : 0 : ret = blk_rq_append_bio(q, rq, bounce_bio);
1166 [ # # ]: 0 : if (unlikely(ret)) {
1167 : 0 : blk_put_request(rq);
1168 : 0 : return ERR_PTR(ret);
1169 : : }
1170 : : }
1171 : :
1172 : : return rq;
1173 : : }
1174 : : EXPORT_SYMBOL(blk_make_request);
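/*
 * Illustrative sketch (editor's addition): blk_make_request() used as the
 * kernel-doc above describes, for a BLOCK_PC style transfer where the
 * caller already built a bio describing its buffers. Further CDB setup is
 * left to the caller.
 */
static struct request *example_rq_from_bio(struct request_queue *q,
					   struct bio *bio)
{
	struct request *rq;

	rq = blk_make_request(q, bio, GFP_KERNEL);
	if (IS_ERR(rq))
		return rq;

	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	/* rq->cmd[], rq->cmd_len and rq->timeout still to be filled in */
	return rq;
}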
1175 : :
1176 : : /**
1177 : : * blk_requeue_request - put a request back on queue
1178 : : * @q: request queue where request should be inserted
1179 : : * @rq: request to be inserted
1180 : : *
1181 : : * Description:
1182 : : * Drivers often keep queueing requests until the hardware cannot accept
1183 : :  *    more. When that condition happens, we need to put the request back
1184 : : * on the queue. Must be called with queue lock held.
1185 : : */
1186 : 0 : void blk_requeue_request(struct request_queue *q, struct request *rq)
1187 : : {
1188 : 0 : blk_delete_timer(rq);
1189 : : blk_clear_rq_complete(rq);
1190 : : trace_block_rq_requeue(q, rq);
1191 : :
1192 [ # # ]: 0 : if (blk_rq_tagged(rq))
1193 : 0 : blk_queue_end_tag(q, rq);
1194 : :
1195 [ # # ]: 0 : BUG_ON(blk_queued_rq(rq));
1196 : :
1197 : 0 : elv_requeue_request(q, rq);
1198 : 0 : }
1199 : : EXPORT_SYMBOL(blk_requeue_request);
1200 : :
1201 : : static void add_acct_request(struct request_queue *q, struct request *rq,
1202 : : int where)
1203 : : {
1204 : 21967 : blk_account_io_start(rq, true);
1205 : 21967 : __elv_add_request(q, rq, where);
1206 : : }
1207 : :
1208 : 0 : static void part_round_stats_single(int cpu, struct hd_struct *part,
1209 : : unsigned long now)
1210 : : {
1211 [ + ]: 1584858 : if (now == part->stamp)
1212 : 0 : return;
1213 : :
1214 [ + + ]: 1971082 : if (part_in_flight(part)) {
1215 : 629788 : __part_stat_add(cpu, part, time_in_queue,
1216 : : part_in_flight(part) * (now - part->stamp));
1217 : 314894 : __part_stat_add(cpu, part, io_ticks, (now - part->stamp));
1218 : : }
1219 : 386224 : part->stamp = now;
1220 : : }
1221 : :
1222 : : /**
1223 : : * part_round_stats() - Round off the performance stats on a struct disk_stats.
1224 : : * @cpu: cpu number for stats access
1225 : : * @part: target partition
1226 : : *
1227 : : * The average IO queue length and utilisation statistics are maintained
1228 : : * by observing the current state of the queue length and the amount of
1229 : : * time it has been in this state for.
1230 : : *
1231 : : * Normally, that accounting is done on IO completion, but that can result
1232 : : * in more than a second's worth of IO being accounted for within any one
1233 : : * second, leading to >100% utilisation. To deal with that, we call this
1234 : : * function to do a round-off before returning the results when reading
1235 : : * /proc/diskstats. This accounts immediately for all queue usage up to
1236 : : * the current jiffies and restarts the counters again.
1237 : : */
1238 : 0 : void part_round_stats(int cpu, struct hd_struct *part)
1239 : : {
1240 : 792476 : unsigned long now = jiffies;
1241 : :
1242 [ + ]: 792476 : if (part->partno)
1243 : 792480 : part_round_stats_single(cpu, &part_to_disk(part)->part0, now);
1244 : 792426 : part_round_stats_single(cpu, part, now);
1245 : 792465 : }
1246 : : EXPORT_SYMBOL_GPL(part_round_stats);
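/*
 * Illustrative sketch (editor's addition): how accounting code calls
 * part_round_stats() - inside part_stat_lock()/part_stat_unlock(), so the
 * queue-length/utilisation counters are rounded off on the local CPU
 * before further per-partition stats are updated.
 */
static void example_round_before_account(struct hd_struct *part)
{
	int cpu = part_stat_lock();

	part_round_stats(cpu, part);
	/* ... part_stat_inc()/part_stat_add() updates would follow ... */
	part_stat_unlock();
}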
1247 : :
1248 : : #ifdef CONFIG_PM_RUNTIME
1249 : : static void blk_pm_put_request(struct request *rq)
1250 : : {
1251 : : if (rq->q->dev && !(rq->cmd_flags & REQ_PM) && !--rq->q->nr_pending)
1252 : : pm_runtime_mark_last_busy(rq->q->dev);
1253 : : }
1254 : : #else
1255 : : static inline void blk_pm_put_request(struct request *rq) {}
1256 : : #endif
1257 : :
1258 : : /*
1259 : : * queue lock must be held
1260 : : */
1261 : 0 : void __blk_put_request(struct request_queue *q, struct request *req)
1262 : : {
1263 [ + - ]: 435065 : if (unlikely(!q))
1264 : 435065 : return;
1265 : :
1266 : : blk_pm_put_request(req);
1267 : :
1268 : 435065 : elv_completed_request(q, req);
1269 : :
1270 : : /* this is a bio leak */
1271 [ - + ]: 435065 : WARN_ON(req->bio != NULL);
1272 : :
1273 : : /*
1274 : : * Request may not have originated from ll_rw_blk. if not,
1275 : : * it didn't come out of our reserved rq pools
1276 : : */
1277 [ + - ]: 870130 : if (req->cmd_flags & REQ_ALLOCED) {
1278 : 435065 : unsigned int flags = req->cmd_flags;
1279 : 870130 : struct request_list *rl = blk_rq_rl(req);
1280 : :
1281 [ - + ]: 435065 : BUG_ON(!list_empty(&req->queuelist));
1282 [ - + ]: 435065 : BUG_ON(!hlist_unhashed(&req->hash));
1283 : :
1284 : : blk_free_request(rl, req);
1285 : 435065 : freed_request(rl, flags);
1286 : : blk_put_rl(rl);
1287 : : }
1288 : : }
1289 : : EXPORT_SYMBOL_GPL(__blk_put_request);
1290 : :
1291 : 0 : void blk_put_request(struct request *req)
1292 : : {
1293 : 38817 : struct request_queue *q = req->q;
1294 : :
1295 [ - + ]: 38817 : if (q->mq_ops)
1296 : 0 : blk_mq_free_request(req);
1297 : : else {
1298 : : unsigned long flags;
1299 : :
1300 : 38817 : spin_lock_irqsave(q->queue_lock, flags);
1301 : 38817 : __blk_put_request(q, req);
1302 : 38817 : spin_unlock_irqrestore(q->queue_lock, flags);
1303 : : }
1304 : 38817 : }
1305 : : EXPORT_SYMBOL(blk_put_request);
1306 : :
1307 : : /**
1308 : : * blk_add_request_payload - add a payload to a request
1309 : : * @rq: request to update
1310 : : * @page: page backing the payload
1311 : : * @len: length of the payload.
1312 : : *
1313 : :  * This allows a block driver to later add a payload to an already
1314 : :  * submitted request. The driver needs to take care of freeing the payload
1315 : : * itself.
1316 : : *
1317 : : * Note that this is a quite horrible hack and nothing but handling of
1318 : : * discard requests should ever use it.
1319 : : */
1320 : 0 : void blk_add_request_payload(struct request *rq, struct page *page,
1321 : : unsigned int len)
1322 : : {
1323 : 0 : struct bio *bio = rq->bio;
1324 : :
1325 : 0 : bio->bi_io_vec->bv_page = page;
1326 : 0 : bio->bi_io_vec->bv_offset = 0;
1327 : 0 : bio->bi_io_vec->bv_len = len;
1328 : :
1329 : 0 : bio->bi_size = len;
1330 : 0 : bio->bi_vcnt = 1;
1331 : 0 : bio->bi_phys_segments = 1;
1332 : :
1333 : 0 : rq->__data_len = rq->resid_len = len;
1334 : 0 : rq->nr_phys_segments = 1;
1335 : 0 : rq->buffer = bio_data(bio);
1336 : 0 : }
1337 : : EXPORT_SYMBOL_GPL(blk_add_request_payload);
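/*
 * Illustrative sketch (editor's addition), loosely modelled on how SCSI
 * disk drivers use this hook for discards: a driver-owned page is
 * attached as the single payload of an already prepared discard request.
 * Freeing the page later is the driver's responsibility, as the comment
 * above stresses; the 24-byte length is just an example descriptor size.
 */
static int example_setup_discard_payload(struct request *rq,
					 struct page *page)
{
	if (!page)
		return -ENOMEM;
	/* format the device's unmap/discard descriptor into the page here */
	blk_add_request_payload(rq, page, 24);
	return 0;
}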
1338 : :
1339 : 0 : bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
1340 : : struct bio *bio)
1341 : : {
1342 : 119835 : const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
1343 : :
1344 [ + + ]: 119835 : if (!ll_back_merge_fn(q, req, bio))
1345 : : return false;
1346 : :
1347 : : trace_block_bio_backmerge(q, req, bio);
1348 : :
1349 [ + + ]: 97624 : if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1350 : 3914 : blk_rq_set_mixed_merge(req);
1351 : :
1352 : 97624 : req->biotail->bi_next = bio;
1353 : 97624 : req->biotail = bio;
1354 : 97624 : req->__data_len += bio->bi_size;
1355 : 97624 : req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1356 : :
1357 : 97624 : blk_account_io_start(req, false);
1358 : 97624 : return true;
1359 : : }
1360 : :
1361 : 0 : bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
1362 : : struct bio *bio)
1363 : : {
1364 : 716 : const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
1365 : :
1366 [ + + ]: 716 : if (!ll_front_merge_fn(q, req, bio))
1367 : : return false;
1368 : :
1369 : : trace_block_bio_frontmerge(q, req, bio);
1370 : :
1371 [ + + ]: 1423 : if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1372 : 105 : blk_rq_set_mixed_merge(req);
1373 : :
1374 : 707 : bio->bi_next = req->bio;
1375 : 707 : req->bio = bio;
1376 : :
1377 : : /*
1378 : : * may not be valid. if the low level driver said
1379 : : * it didn't need a bounce buffer then it better
1380 : : * not touch req->buffer either...
1381 : : */
1382 : 707 : req->buffer = bio_data(bio);
1383 : 707 : req->__sector = bio->bi_sector;
1384 : 707 : req->__data_len += bio->bi_size;
1385 : 707 : req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1386 : :
1387 : 707 : blk_account_io_start(req, false);
1388 : 707 : return true;
1389 : : }
1390 : :
1391 : : /**
1392 : : * blk_attempt_plug_merge - try to merge with %current's plugged list
1393 : : * @q: request_queue new bio is being queued at
1394 : : * @bio: new bio being queued
1395 : : * @request_count: out parameter for number of traversed plugged requests
1396 : : *
1397 : : * Determine whether @bio being queued on @q can be merged with a request
1398 : : * on %current's plugged list. Returns %true if merge was successful,
1399 : : * otherwise %false.
1400 : : *
1401 : : * Plugging coalesces IOs from the same issuer for the same purpose without
1402 : : * going through @q->queue_lock. As such it's more of an issuing mechanism
1403 : :  * than scheduling, and the request, while it may have elvpriv data, is not
1404 : : * added on the elevator at this point. In addition, we don't have
1405 : : * reliable access to the elevator outside queue lock. Only check basic
1406 : : * merging parameters without querying the elevator.
1407 : : */
1408 : 0 : bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
1409 : : unsigned int *request_count)
1410 : : {
1411 : : struct blk_plug *plug;
1412 : : struct request *rq;
1413 : : bool ret = false;
1414 : : struct list_head *plug_list;
1415 : :
1416 [ + + ]: 494556 : if (blk_queue_nomerges(q))
1417 : : goto out;
1418 : :
1419 : 494554 : plug = current->plug;
1420 [ + + ]: 494554 : if (!plug)
1421 : : goto out;
1422 : 468565 : *request_count = 0;
1423 : :
1424 [ + ]: 468565 : if (q->mq_ops)
1425 : 0 : plug_list = &plug->mq_list;
1426 : : else
1427 : 468568 : plug_list = &plug->list;
1428 : :
1429 [ + + ]: 1309896 : list_for_each_entry_reverse(rq, plug_list, queuelist) {
1430 : : int el_ret;
1431 : :
1432 [ + + ]: 919122 : if (rq->q == q)
1433 : 919061 : (*request_count)++;
1434 : :
1435 [ + + ][ + + ]: 919122 : if (rq->q != q || !blk_rq_merge_ok(rq, bio))
1436 : 80474 : continue;
1437 : :
1438 : 838710 : el_ret = blk_try_merge(rq, bio);
1439 [ + + ]: 838726 : if (el_ret == ELEVATOR_BACK_MERGE) {
1440 : 96868 : ret = bio_attempt_back_merge(q, rq, bio);
1441 [ + + ]: 96868 : if (ret)
1442 : : break;
1443 [ + + ]: 741858 : } else if (el_ret == ELEVATOR_FRONT_MERGE) {
1444 : 34 : ret = bio_attempt_front_merge(q, rq, bio);
1445 [ - + ]: 34 : if (ret)
1446 : : break;
1447 : : }
1448 : : }
1449 : : out:
1450 : 78 : return ret;
1451 : : }
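/*
 * Illustrative sketch (editor's addition): the submitter side of the
 * plugging that blk_attempt_plug_merge() relies on. Batching bios under
 * one plug lets them be merged from current->plug without taking
 * q->queue_lock for every bio.
 */
static void example_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]->bi_rw, bios[i]);
	blk_finish_plug(&plug);	/* flushes the plugged list to the queue(s) */
}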
1452 : :
1453 : 0 : void init_request_from_bio(struct request *req, struct bio *bio)
1454 : : {
1455 : 396232 : req->cmd_type = REQ_TYPE_FS;
1456 : :
1457 : 396232 : req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
1458 [ + + ]: 396232 : if (bio->bi_rw & REQ_RAHEAD)
1459 : 557 : req->cmd_flags |= REQ_FAILFAST_MASK;
1460 : :
1461 : 0 : req->errors = 0;
1462 : 0 : req->__sector = bio->bi_sector;
1463 : 0 : req->ioprio = bio_prio(bio);
1464 : 396232 : blk_rq_bio_prep(req->q, req, bio);
1465 : 396239 : }
1466 : :
1467 : 0 : void blk_queue_bio(struct request_queue *q, struct bio *bio)
1468 : : {
1469 : 494553 : const bool sync = !!(bio->bi_rw & REQ_SYNC);
1470 : : struct blk_plug *plug;
1471 : : int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
1472 : : struct request *req;
1473 : 494553 : unsigned int request_count = 0;
1474 : :
1475 : : /*
1476 : : * low level driver can indicate that it wants pages above a
1477 : : * certain limit bounced to low memory (ie for highmem, or even
1478 : : * ISA dma in theory)
1479 : : */
1480 : 494553 : blk_queue_bounce(q, &bio);
1481 : :
1482 : : if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1483 : : bio_endio(bio, -EIO);
1484 : 77839 : return;
1485 : : }
1486 : :
1487 [ - + ]: 494556 : if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
1488 : 0 : spin_lock_irq(q->queue_lock);
1489 : : where = ELEVATOR_INSERT_FLUSH;
1490 : 0 : goto get_rq;
1491 : : }
1492 : :
1493 : : /*
1494 : : * Check if we can merge with the plugged list before grabbing
1495 : : * any locks.
1496 : : */
1497 [ + + ]: 494556 : if (blk_attempt_plug_merge(q, bio, &request_count))
1498 : : return;
1499 : :
1500 : 416704 : spin_lock_irq(q->queue_lock);
1501 : :
1502 : 416740 : el_ret = elv_merge(q, &req, bio);
1503 [ + + ]: 416740 : if (el_ret == ELEVATOR_BACK_MERGE) {
1504 [ + + ]: 22967 : if (bio_attempt_back_merge(q, req, bio)) {
1505 : 19819 : elv_bio_merged(q, req, bio);
1506 [ + + ]: 19819 : if (!attempt_back_merge(q, req))
1507 : 19318 : elv_merged_request(q, req, el_ret);
1508 : : goto out_unlock;
1509 : : }
1510 [ + + ]: 393773 : } else if (el_ret == ELEVATOR_FRONT_MERGE) {
1511 [ + + ]: 682 : if (bio_attempt_front_merge(q, req, bio)) {
1512 : 673 : elv_bio_merged(q, req, bio);
1513 [ + + ]: 673 : if (!attempt_front_merge(q, req))
1514 : 602 : elv_merged_request(q, req, el_ret);
1515 : : goto out_unlock;
1516 : : }
1517 : : }
1518 : :
1519 : : get_rq:
1520 : : /*
1521 : : * This sync check and mask will be re-done in init_request_from_bio(),
1522 : : * but we need to set it earlier to expose the sync flag to the
1523 : : * rq allocator and io schedulers.
1524 : : */
1525 : 396248 : rw_flags = bio_data_dir(bio);
1526 [ + + ]: 396248 : if (sync)
1527 : 260495 : rw_flags |= REQ_SYNC;
1528 : :
1529 : : /*
1530 : :          * Grab a free request. This might sleep but cannot fail.
1531 : : * Returns with the queue unlocked.
1532 : : */
1533 : 396248 : req = get_request(q, rw_flags, bio, GFP_NOIO);
1534 [ - + ]: 396233 : if (unlikely(!req)) {
1535 : 0 : bio_endio(bio, -ENODEV); /* @q is dead */
1536 : 0 : goto out_unlock;
1537 : : }
1538 : :
1539 : : /*
1540 : : * After dropping the lock and possibly sleeping here, our request
1541 : : * may now be mergeable after it had proven unmergeable (above).
1542 : : * We don't worry about that case for efficiency. It won't happen
1543 : : * often, and the elevators are able to handle it.
1544 : : */
1545 : 396233 : init_request_from_bio(req, bio);
1546 : :
1547 [ + ]: 396203 : if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
1548 : 396242 : req->cpu = raw_smp_processor_id();
1549 : :
1550 : 396203 : plug = current->plug;
1551 [ + + ]: 396203 : if (plug) {
1552 : : /*
1553 : : * If this is the first request added after a plug, fire
1554 : : * off a plug trace.
1555 : : */
1556 [ + + ]: 374236 : if (!request_count)
1557 : : trace_block_plug(q);
1558 : : else {
1559 [ + + ]: 135916 : if (request_count >= BLK_MAX_REQUEST_COUNT) {
1560 : 3299 : blk_flush_plug_list(plug, false);
1561 : : trace_block_plug(q);
1562 : : }
1563 : : }
1564 : 374237 : list_add_tail(&req->queuelist, &plug->list);
1565 : 374237 : blk_account_io_start(req, true);
1566 : : } else {
1567 : 21967 : spin_lock_irq(q->queue_lock);
1568 : 21967 : add_acct_request(q, req, where);
1569 : 21967 : __blk_run_queue(q);
1570 : : out_unlock:
1571 : 42459 : spin_unlock_irq(q->queue_lock);
1572 : : }
1573 : : }
1574 : : EXPORT_SYMBOL_GPL(blk_queue_bio); /* for device mapper only */
1575 : :
1576 : : /*
1577 : : * If bio->bi_bdev is a partition, remap the location
1578 : : */
1579 : : static inline void blk_partition_remap(struct bio *bio)
1580 : : {
1581 : : struct block_device *bdev = bio->bi_bdev;
1582 : :
1583 [ + + ][ + + ]: 497763 : if (bio_sectors(bio) && bdev != bdev->bd_contains) {
1584 : : struct hd_struct *p = bdev->bd_part;
1585 : :
1586 : 494530 : bio->bi_sector += p->start_sect;
1587 : 494530 : bio->bi_bdev = bdev->bd_contains;
1588 : :
1589 : 1483590 : trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
1590 : : bdev->bd_dev,
1591 : 494530 : bio->bi_sector - p->start_sect);
1592 : : }
1593 : : }
1594 : :
1595 : 0 : static void handle_bad_sector(struct bio *bio)
1596 : : {
1597 : : char b[BDEVNAME_SIZE];
1598 : :
1599 : 0 : printk(KERN_INFO "attempt to access beyond end of device\n");
1600 : 0 : printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
1601 : : bdevname(bio->bi_bdev, b),
1602 : : bio->bi_rw,
1603 : 0 : (unsigned long long)bio_end_sector(bio),
1604 : 0 : (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
1605 : :
1606 : 0 : set_bit(BIO_EOF, &bio->bi_flags);
1607 : 0 : }
1608 : :
1609 : : #ifdef CONFIG_FAIL_MAKE_REQUEST
1610 : :
1611 : : static DECLARE_FAULT_ATTR(fail_make_request);
1612 : :
1613 : : static int __init setup_fail_make_request(char *str)
1614 : : {
1615 : : return setup_fault_attr(&fail_make_request, str);
1616 : : }
1617 : : __setup("fail_make_request=", setup_fail_make_request);
1618 : :
1619 : : static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
1620 : : {
1621 : : return part->make_it_fail && should_fail(&fail_make_request, bytes);
1622 : : }
1623 : :
1624 : : static int __init fail_make_request_debugfs(void)
1625 : : {
1626 : : struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
1627 : : NULL, &fail_make_request);
1628 : :
1629 : : return IS_ERR(dir) ? PTR_ERR(dir) : 0;
1630 : : }
1631 : :
1632 : : late_initcall(fail_make_request_debugfs);
1633 : :
1634 : : #else /* CONFIG_FAIL_MAKE_REQUEST */
1635 : :
1636 : : static inline bool should_fail_request(struct hd_struct *part,
1637 : : unsigned int bytes)
1638 : : {
1639 : : return false;
1640 : : }
1641 : :
1642 : : #endif /* CONFIG_FAIL_MAKE_REQUEST */
1643 : :
1644 : : /*
1645 : : * Check whether this bio extends beyond the end of the device.
1646 : : */
1647 : : static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
1648 : : {
1649 : : sector_t maxsector;
1650 : :
1651 [ + + ][ + + ]: 995531 : if (!nr_sectors)
1652 : : return 0;
1653 : :
1654 : : /* Test device or partition size, when known. */
1655 : 989136 : maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
1656 [ + + ][ + - ]: 989143 : if (maxsector) {
1657 : 989125 : sector_t sector = bio->bi_sector;
1658 : :
1659 [ + + ][ + ]: 989125 : if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
[ + + ][ + ]
1660 : : /*
1661 : : * This may well happen - the kernel calls bread()
1662 : : * without checking the size of the device, e.g., when
1663 : : * mounting a device.
1664 : : */
1665 : 33 : handle_bad_sector(bio);
1666 : : return 1;
1667 : : }
1668 : : }
1669 : :
1670 : : return 0;
1671 : : }
1672 : :
1673 : : static noinline_for_stack bool
1674 : 0 : generic_make_request_checks(struct bio *bio)
1675 : : {
1676 : 494522 : struct request_queue *q;
1677 : 497762 : int nr_sectors = bio_sectors(bio);
1678 : : int err = -EIO;
1679 : : char b[BDEVNAME_SIZE];
1680 : : struct hd_struct *part;
1681 : :
1682 : : might_sleep();
1683 : :
1684 [ + - ]: 497763 : if (bio_check_eod(bio, nr_sectors))
1685 : : goto end_io;
1686 : :
1687 : 497763 : q = bdev_get_queue(bio->bi_bdev);
1688 [ - + ]: 497763 : if (unlikely(!q)) {
1689 : 0 : printk(KERN_ERR
1690 : : "generic_make_request: Trying to access "
1691 : : "nonexistent block-device %s (%Lu)\n",
1692 : : bdevname(bio->bi_bdev, b),
1693 : 0 : (long long) bio->bi_sector);
1694 : 0 : goto end_io;
1695 : : }
1696 : :
1697 [ + + ][ - + ]: 497763 : if (likely(bio_is_rw(bio) &&
1698 : : nr_sectors > queue_max_hw_sectors(q))) {
1699 : 0 : printk(KERN_ERR "bio too big device %s (%u > %u)\n",
1700 : : bdevname(bio->bi_bdev, b),
1701 : 0 : bio_sectors(bio),
1702 : : queue_max_hw_sectors(q));
1703 : 0 : goto end_io;
1704 : : }
1705 : :
1706 : 497763 : part = bio->bi_bdev->bd_part;
1707 : : if (should_fail_request(part, bio->bi_size) ||
1708 : : should_fail_request(&part_to_disk(part)->part0,
1709 : : bio->bi_size))
1710 : : goto end_io;
1711 : :
1712 : : /*
1713 : : * If this device has partitions, remap block n
1714 : : * of partition p to block n+start(p) of the disk.
1715 : : */
1716 : : blk_partition_remap(bio);
1717 : :
1718 [ + - ]: 497742 : if (bio_check_eod(bio, nr_sectors))
1719 : : goto end_io;
1720 : :
1721 : : /*
1722 : : * Filter flush bios early so that make_request-based
1723 : : * drivers without flush support don't have to worry
1724 : : * about them.
1725 : : */
1726 [ + + ][ + ]: 497742 : if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
1727 : 23862 : bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
1728 [ + + ]: 23862 : if (!nr_sectors) {
1729 : : err = 0;
1730 : : goto end_io;
1731 : : }
1732 : : }
1733 : :
1734 [ - + ][ # # ]: 494536 : if ((bio->bi_rw & REQ_DISCARD) &&
1735 [ # # ]: 0 : (!blk_queue_discard(q) ||
1736 [ # # ][ # # ]: 0 : ((bio->bi_rw & REQ_SECURE) && !blk_queue_secdiscard(q)))) {
1737 : : err = -EOPNOTSUPP;
1738 : : goto end_io;
1739 : : }
1740 : :
1741 [ - + ][ # # ]: 494536 : if (bio->bi_rw & REQ_WRITE_SAME && !bdev_write_same(bio->bi_bdev)) {
1742 : : err = -EOPNOTSUPP;
1743 : : goto end_io;
1744 : : }
1745 : :
1746 : : /*
1747 : : * Various block parts want %current->io_context and lazy ioc
1748 : : * allocation ends up trading a lot of pain for a small amount of
1749 : : * memory. Just allocate it upfront. This may fail and block
1750 : : * layer knows how to live with it.
1751 : : */
1752 : 494536 : create_io_context(GFP_ATOMIC, q->node);
1753 : :
1754 : : if (blk_throtl_bio(q, bio))
1755 : : return false; /* throttled, will be resubmitted later */
1756 : :
1757 : : trace_block_bio_queue(q, bio);
1758 : : return true;
1759 : :
1760 : : end_io:
1761 : 3206 : bio_endio(bio, err);
1762 : 3194 : return false;
1763 : : }
1764 : :
1765 : : /**
1766 : : * generic_make_request - hand a buffer to its device driver for I/O
1767 : : * @bio: The bio describing the location in memory and on the device.
1768 : : *
1769 : : * generic_make_request() is used to make I/O requests of block
1770 : : * devices. It is passed a &struct bio, which describes the I/O that needs
1771 : : * to be done.
1772 : : *
1773 : : * generic_make_request() does not return any status. The
1774 : : * success/failure status of the request, along with notification of
1775 : : * completion, is delivered asynchronously through the bio->bi_end_io
1776 : : * function described (one day) elsewhere.
1777 : : *
1778 : : * The caller of generic_make_request must make sure that bi_io_vec
1779 : : * are set to describe the memory buffer, and that bi_bdev and bi_sector are
1780 : : * set to describe the device address, and the
1781 : : * bi_end_io and optionally bi_private are set to describe how
1782 : : * completion notification should be signaled.
1783 : : *
1784 : : * generic_make_request and the drivers it calls may use bi_next if this
1785 : : * bio happens to be merged with someone else, and may resubmit the bio to
1786 : : * a lower device by calling into generic_make_request recursively, which
1787 : : * means the bio should NOT be touched after the call to ->make_request_fn.
1788 : : */
1789 : 0 : void generic_make_request(struct bio *bio)
1790 : : {
1791 : : struct bio_list bio_list_on_stack;
1792 : :
1793 [ + + ]: 497769 : if (!generic_make_request_checks(bio))
1794 : 3191 : return;
1795 : :
1796 : : /*
1797 : : * We only want one ->make_request_fn to be active at a time, else
1798 : : * stack usage with stacked devices could be a problem. So use
1799 : : * current->bio_list to keep a list of requests submitted by a
1800 : : * make_request_fn function. current->bio_list is also used as a
1801 : : * flag to say if generic_make_request is currently active in this
1802 : : * task or not. If it is NULL, then no make_request is active. If
1803 : : * it is non-NULL, then a make_request is active, and new requests
1804 : : * should be added at the tail
1805 : : */
1806 [ - + ]: 494510 : if (current->bio_list) {
1807 : 0 : bio_list_add(current->bio_list, bio);
1808 : : return;
1809 : : }
1810 : :
1811 : : /* following loop may be a bit non-obvious, and so deserves some
1812 : : * explanation.
1813 : : * Before entering the loop, bio->bi_next is NULL (as all callers
1814 : : * ensure that) so we have a list with a single bio.
1815 : : * We pretend that we have just taken it off a longer list, so
1816 : : * we assign bio_list to a pointer to the bio_list_on_stack,
1817 : : * thus initialising the bio_list of new bios to be
1818 : : * added. ->make_request() may indeed add some more bios
1819 : : * through a recursive call to generic_make_request. If it
1820 : : * did, we find a non-NULL value in bio_list and re-enter the loop
1821 : : * from the top. In this case we really did just take the bio
1822 : : * off the top of the list (no pretending) and so remove it from
1823 : : * bio_list, and call into ->make_request() again.
1824 : : */
1825 [ - + ]: 494510 : BUG_ON(bio->bi_next);
1826 : : bio_list_init(&bio_list_on_stack);
1827 : 494510 : current->bio_list = &bio_list_on_stack;
1828 : : do {
1829 : 494504 : struct request_queue *q = bdev_get_queue(bio->bi_bdev);
1830 : :
1831 : 494504 : q->make_request_fn(q, bio);
1832 : :
1833 : 494548 : bio = bio_list_pop(current->bio_list);
1834 [ + ]: 494548 : } while (bio);
1835 : 494554 : current->bio_list = NULL; /* deactivate */
1836 : : }
1837 : : EXPORT_SYMBOL(generic_make_request);
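
As a rough illustration of the contract described in the kernel-doc above, a submitter might build and hand off a bio like this. This is a hedged sketch, not part of blk-core.c or its coverage data: submit_one_page_read() and read_done() are hypothetical names, and the field layout follows the pre-3.14 bio used throughout this file (bi_sector/bi_size rather than bi_iter).

	#include <linux/bio.h>
	#include <linux/blkdev.h>
	#include <linux/completion.h>

	static void read_done(struct bio *bio, int error)
	{
		struct completion *done = bio->bi_private;

		/* runs asynchronously, possibly in interrupt context */
		complete(done);
		bio_put(bio);
	}

	static int submit_one_page_read(struct block_device *bdev, sector_t sector,
					struct page *page)
	{
		DECLARE_COMPLETION_ONSTACK(done);
		struct bio *bio = bio_alloc(GFP_NOIO, 1);

		if (!bio)
			return -ENOMEM;

		bio->bi_bdev	= bdev;		/* device (or partition) to read */
		bio->bi_sector	= sector;	/* 512-byte sector on that device */
		bio->bi_end_io	= read_done;	/* completion is signalled here */
		bio->bi_private	= &done;
		bio_add_page(bio, page, PAGE_SIZE, 0);
		/* bi_rw is left at 0, i.e. a plain READ */

		generic_make_request(bio);	/* returns no status */
		wait_for_completion(&done);	/* read_done() tells us it finished */
		return 0;
	}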
1838 : :
1839 : : /**
1840 : : * submit_bio - submit a bio to the block device layer for I/O
1841 : : * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
1842 : : * @bio: The &struct bio which describes the I/O
1843 : : *
1844 : : * submit_bio() is very similar in purpose to generic_make_request(), and
1845 : : * uses that function to do most of the work. Both are fairly rough
1846 : : * interfaces; @bio must be presetup and ready for I/O.
1847 : : *
1848 : : */
1849 : 0 : void submit_bio(int rw, struct bio *bio)
1850 : : {
1851 : 497770 : bio->bi_rw |= rw;
1852 : :
1853 : : /*
1854 : : * If it's a regular read/write or a barrier with data attached,
1855 : : * go through the normal accounting stuff before submission.
1856 : : */
1857 [ + ]: 497770 : if (bio_has_data(bio)) {
1858 : : unsigned int count;
1859 : :
1860 [ + + ]: 992349 : if (unlikely(rw & REQ_WRITE_SAME))
1861 : 3 : count = bdev_logical_block_size(bio->bi_bdev) >> 9;
1862 : : else
1863 : 494576 : count = bio_sectors(bio);
1864 : :
1865 [ + + ]: 494579 : if (rw & WRITE) {
1866 : : count_vm_events(PGPGOUT, count);
1867 : : } else {
1868 : : task_io_account_read(bio->bi_size);
1869 : : count_vm_events(PGPGIN, count);
1870 : : }
1871 : :
1872 [ - + ]: 494575 : if (unlikely(block_dump)) {
1873 : : char b[BDEVNAME_SIZE];
1874 [ # # ]: 0 : printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
1875 : 0 : current->comm, task_pid_nr(current),
1876 : : (rw & WRITE) ? "WRITE" : "READ",
1877 : : (unsigned long long)bio->bi_sector,
1878 : : bdevname(bio->bi_bdev, b),
1879 : : count);
1880 : : }
1881 : : }
1882 : :
1883 : 0 : generic_make_request(bio);
1884 : 497694 : }
1885 : : EXPORT_SYMBOL(submit_bio);
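
In practice most code reaches generic_make_request() through submit_bio(); a minimal sketch, reusing the hypothetical bio setup from the earlier example:

	/* bio prepared as before (bi_bdev, bi_sector, pages, bi_end_io) */
	submit_bio(WRITE | REQ_SYNC, bio);	/* ORs the flags into bio->bi_rw,
						 * does the PGPGOUT/PGPGIN and task
						 * accounting shown above, then calls
						 * generic_make_request() */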
1886 : :
1887 : : /**
1888 : : * blk_rq_check_limits - Helper function to check a request for the queue limit
1889 : : * @q: the queue
1890 : : * @rq: the request being checked
1891 : : *
1892 : : * Description:
1893 : : * @rq may have been made based on weaker limitations of upper-level queues
1894 : : * in request stacking drivers, and it may violate the limitation of @q.
1895 : : * Since the block layer and the underlying device driver trust @rq
1896 : : * after it is inserted to @q, it should be checked against @q before
1897 : : * the insertion using this generic function.
1898 : : *
1899 : : * This function should also be useful for request stacking drivers
1900 : : * in some cases below, so export this function.
1901 : : * Request stacking drivers like request-based dm may change the queue
1902 : : * limits while requests are in the queue (e.g. dm's table swapping).
1903 : : * Such request stacking drivers should check those requests against
1904 : : * the new queue limits again when they dispatch those requests,
1905 : : * although such checks are also done against the old queue limits
1906 : : * when submitting requests.
1907 : : */
1908 : 0 : int blk_rq_check_limits(struct request_queue *q, struct request *rq)
1909 : : {
1910 [ # # ]: 0 : if (!rq_mergeable(rq))
1911 : : return 0;
1912 : :
1913 [ # # ]: 0 : if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) {
1914 : 0 : printk(KERN_ERR "%s: over max size limit.\n", __func__);
1915 : 0 : return -EIO;
1916 : : }
1917 : :
1918 : : /*
1919 : : * queue's settings related to segment counting like q->bounce_pfn
1920 : : * may differ from that of other stacking queues.
1921 : : * Recalculate it to check the request correctly on this queue's
1922 : : * limitation.
1923 : : */
1924 : 0 : blk_recalc_rq_segments(rq);
1925 [ # # ]: 0 : if (rq->nr_phys_segments > queue_max_segments(q)) {
1926 : 0 : printk(KERN_ERR "%s: over max segments limit.\n", __func__);
1927 : 0 : return -EIO;
1928 : : }
1929 : :
1930 : : return 0;
1931 : : }
1932 : : EXPORT_SYMBOL_GPL(blk_rq_check_limits);
1933 : :
1934 : : /**
1935 : : * blk_insert_cloned_request - Helper for stacking drivers to submit a request
1936 : : * @q: the queue to submit the request
1937 : : * @rq: the request being queued
1938 : : */
1939 : 0 : int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
1940 : : {
1941 : : unsigned long flags;
1942 : : int where = ELEVATOR_INSERT_BACK;
1943 : :
1944 [ # # ]: 0 : if (blk_rq_check_limits(q, rq))
1945 : : return -EIO;
1946 : :
1947 : : if (rq->rq_disk &&
1948 : : should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
1949 : : return -EIO;
1950 : :
1951 : 0 : spin_lock_irqsave(q->queue_lock, flags);
1952 [ # # ]: 0 : if (unlikely(blk_queue_dying(q))) {
1953 : 0 : spin_unlock_irqrestore(q->queue_lock, flags);
1954 : 0 : return -ENODEV;
1955 : : }
1956 : :
1957 : : /*
1958 : : * Submitting request must be dequeued before calling this function
1959 : : * because it will be linked to another request_queue
1960 : : */
1961 [ # # ]: 0 : BUG_ON(blk_queued_rq(rq));
1962 : :
1963 [ # # ]: 0 : if (rq->cmd_flags & (REQ_FLUSH|REQ_FUA))
1964 : : where = ELEVATOR_INSERT_FLUSH;
1965 : :
1966 : : add_acct_request(q, rq, where);
1967 [ # # ]: 0 : if (where == ELEVATOR_INSERT_FLUSH)
1968 : 0 : __blk_run_queue(q);
1969 : 0 : spin_unlock_irqrestore(q->queue_lock, flags);
1970 : :
1971 : 0 : return 0;
1972 : : }
1973 : : EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
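
A request stacking driver would typically use the two helpers above together: clone a request for the lower device and push it in with blk_insert_cloned_request(), which re-runs blk_rq_check_limits() against the lower queue. A hedged sketch; dispatch_to_lower_device() and lower_q are hypothetical:

	static int dispatch_to_lower_device(struct request_queue *lower_q,
					    struct request *clone)
	{
		int ret = blk_insert_cloned_request(lower_q, clone);

		switch (ret) {
		case 0:		/* queued; the queue has been kicked if needed */
			return 0;
		case -EIO:	/* clone violates the lower queue's limits */
		case -ENODEV:	/* lower queue is dying */
		default:
			return ret;
		}
	}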
1974 : :
1975 : : /**
1976 : : * blk_rq_err_bytes - determine number of bytes till the next failure boundary
1977 : : * @rq: request to examine
1978 : : *
1979 : : * Description:
1980 : : * A request could be a merge of IOs which require different failure
1981 : : * handling. This function determines the number of bytes which
1982 : : * can be failed from the beginning of the request without
1983 : : * crossing into an area which needs to be retried further.
1984 : : *
1985 : : * Return:
1986 : : * The number of bytes to fail.
1987 : : *
1988 : : * Context:
1989 : : * queue_lock must be held.
1990 : : */
1991 : 0 : unsigned int blk_rq_err_bytes(const struct request *rq)
1992 : : {
1993 : 0 : unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
1994 : : unsigned int bytes = 0;
1995 : : struct bio *bio;
1996 : :
1997 [ # # ]: 0 : if (!(rq->cmd_flags & REQ_MIXED_MERGE))
1998 : 0 : return blk_rq_bytes(rq);
1999 : :
2000 : : /*
2001 : : * Currently the only 'mixing' which can happen is between
2002 : : * different failfast types. We can safely fail portions
2003 : : * which have all the failfast bits that the first one has -
2004 : : * the ones which are at least as eager to fail as the first
2005 : : * one.
2006 : : */
2007 [ # # ]: 0 : for (bio = rq->bio; bio; bio = bio->bi_next) {
2008 [ # # ]: 0 : if ((bio->bi_rw & ff) != ff)
2009 : : break;
2010 : 0 : bytes += bio->bi_size;
2011 : : }
2012 : :
2013 : : /* this could lead to infinite loop */
2014 [ # # ][ # # ]: 0 : BUG_ON(blk_rq_bytes(rq) && !bytes);
2015 : : return bytes;
2016 : : }
2017 : : EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
2018 : :
2019 : 0 : void blk_account_io_completion(struct request *req, unsigned int bytes)
2020 : : {
2021 [ + - ]: 394901 : if (blk_do_io_stat(req)) {
2022 : 394901 : const int rw = rq_data_dir(req);
2023 : : struct hd_struct *part;
2024 : : int cpu;
2025 : :
2026 : 394901 : cpu = part_stat_lock();
2027 : 394901 : part = req->part;
2028 [ + - ]: 789802 : part_stat_add(cpu, part, sectors[rw], bytes >> 9);
2029 : 394901 : part_stat_unlock();
2030 : : }
2031 : 394901 : }
2032 : :
2033 : 0 : void blk_account_io_done(struct request *req)
2034 : : {
2035 : : /*
2036 : : * Account IO completion. flush_rq isn't accounted as a
2037 : : * normal IO on queueing or completion. Accounting the
2038 : : * containing request is enough.
2039 : : */
2040 [ + + ][ + - ]: 433718 : if (blk_do_io_stat(req) && !(req->cmd_flags & REQ_FLUSH_SEQ)) {
2041 : 394901 : unsigned long duration = jiffies - req->start_time;
2042 : 394901 : const int rw = rq_data_dir(req);
2043 : : struct hd_struct *part;
2044 : : int cpu;
2045 : :
2046 : 394901 : cpu = part_stat_lock();
2047 : 394901 : part = req->part;
2048 : :
2049 [ + - ]: 789802 : part_stat_inc(cpu, part, ios[rw]);
2050 [ + - ]: 789802 : part_stat_add(cpu, part, ticks[rw], duration);
2051 : 394901 : part_round_stats(cpu, part);
2052 : : part_dec_in_flight(part, rw);
2053 : :
2054 : : hd_struct_put(part);
2055 : 394901 : part_stat_unlock();
2056 : : }
2057 : 433718 : }
2058 : :
2059 : : #ifdef CONFIG_PM_RUNTIME
2060 : : /*
2061 : : * Don't process normal requests when queue is suspended
2062 : : * or in the process of suspending/resuming
2063 : : */
2064 : : static struct request *blk_pm_peek_request(struct request_queue *q,
2065 : : struct request *rq)
2066 : : {
2067 : : if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
2068 : : (q->rpm_status != RPM_ACTIVE && !(rq->cmd_flags & REQ_PM))))
2069 : : return NULL;
2070 : : else
2071 : : return rq;
2072 : : }
2073 : : #else
2074 : : static inline struct request *blk_pm_peek_request(struct request_queue *q,
2075 : : struct request *rq)
2076 : : {
2077 : : return rq;
2078 : : }
2079 : : #endif
2080 : :
2081 : 0 : void blk_account_io_start(struct request *rq, bool new_io)
2082 : : {
2083 : : struct hd_struct *part;
2084 : 494555 : int rw = rq_data_dir(rq);
2085 : : int cpu;
2086 : :
2087 [ + + ]: 494555 : if (!blk_do_io_stat(rq))
2088 : 494584 : return;
2089 : :
2090 : 494534 : cpu = part_stat_lock();
2091 : :
2092 [ + + ]: 494536 : if (!new_io) {
2093 : 98331 : part = rq->part;
2094 [ + - ]: 196662 : part_stat_inc(cpu, part, merges[rw]);
2095 : : } else {
2096 : 396205 : part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
2097 [ - + ]: 396219 : if (!hd_struct_try_get(part)) {
2098 : : /*
2099 : : * The partition is already being removed,
2100 : : * the request will be accounted on the disk only
2101 : : *
2102 : : * We take a reference on disk->part0 although that
2103 : : * partition will never be deleted, so we can treat
2104 : : * it as any other partition.
2105 : : */
2106 : 0 : part = &rq->rq_disk->part0;
2107 : : hd_struct_get(part);
2108 : : }
2109 : 396219 : part_round_stats(cpu, part);
2110 : : part_inc_in_flight(part, rw);
2111 : 396246 : rq->part = part;
2112 : : }
2113 : :
2114 : 494577 : part_stat_unlock();
2115 : : }
2116 : :
2117 : : /**
2118 : : * blk_peek_request - peek at the top of a request queue
2119 : : * @q: request queue to peek at
2120 : : *
2121 : : * Description:
2122 : : * Return the request at the top of @q. The returned request
2123 : : * should be started using blk_start_request() before LLD starts
2124 : : * processing it.
2125 : : *
2126 : : * Return:
2127 : : * Pointer to the request at the top of @q if available. Null
2128 : : * otherwise.
2129 : : *
2130 : : * Context:
2131 : : * queue_lock must be held.
2132 : : */
2133 : 0 : struct request *blk_peek_request(struct request_queue *q)
2134 : : {
2135 : 612912 : struct request *rq;
2136 : : int ret;
2137 : :
2138 [ + + ]: 2902864 : while ((rq = __elv_next_request(q)) != NULL) {
2139 : :
2140 : : rq = blk_pm_peek_request(q, rq);
2141 [ + - ]: 612912 : if (!rq)
2142 : : break;
2143 : :
2144 [ + + ]: 612912 : if (!(rq->cmd_flags & REQ_STARTED)) {
2145 : : /*
2146 : : * This is the first time the device driver
2147 : : * sees this request (possibly after
2148 : : * requeueing). Notify IO scheduler.
2149 : : */
2150 [ + + ]: 433719 : if (rq->cmd_flags & REQ_SORTED)
2151 : : elv_activate_rq(q, rq);
2152 : :
2153 : : /*
2154 : : * Just mark it as started even if we don't start
2155 : : * it: a request that has been delayed should
2156 : : * not be passed by new incoming requests
2157 : : */
2158 : 433719 : rq->cmd_flags |= REQ_STARTED;
2159 : : trace_block_rq_issue(q, rq);
2160 : : }
2161 : :
2162 [ - + ][ # # ]: 612912 : if (!q->boundary_rq || q->boundary_rq == rq) {
2163 : 612912 : q->end_sector = rq_end_sector(rq);
2164 : 612912 : q->boundary_rq = NULL;
2165 : : }
2166 : :
2167 [ + + ]: 612912 : if (rq->cmd_flags & REQ_DONTPREP)
2168 : : break;
2169 : :
2170 [ - + ][ # # ]: 433719 : if (q->dma_drain_size && blk_rq_bytes(rq)) {
2171 : : /*
2172 : : * make sure space for the drain appears. We
2173 : : * know we can do this because max_hw_segments
2174 : : * has been adjusted to be one fewer than the
2175 : : * device can handle
2176 : : */
2177 : 0 : rq->nr_phys_segments++;
2178 : : }
2179 : :
2180 [ + - ]: 433719 : if (!q->prep_rq_fn)
2181 : : break;
2182 : :
2183 : 433719 : ret = q->prep_rq_fn(q, rq);
2184 [ - + ]: 433719 : if (ret == BLKPREP_OK) {
2185 : : break;
2186 [ # # ]: 0 : } else if (ret == BLKPREP_DEFER) {
2187 : : /*
2188 : : * the request may have been (partially) prepped.
2189 : : * we need to keep this request in the front to
2190 : : * avoid resource deadlock. REQ_STARTED will
2191 : : * prevent other fs requests from passing this one.
2192 : : */
2193 [ # # ][ # # ]: 0 : if (q->dma_drain_size && blk_rq_bytes(rq) &&
[ # # ]
2194 : 0 : !(rq->cmd_flags & REQ_DONTPREP)) {
2195 : : /*
2196 : : * remove the space for the drain we added
2197 : : * so that we don't add it again
2198 : : */
2199 : 0 : --rq->nr_phys_segments;
2200 : : }
2201 : :
2202 : : rq = NULL;
2203 : : break;
2204 [ # # ]: 0 : } else if (ret == BLKPREP_KILL) {
2205 : 0 : rq->cmd_flags |= REQ_QUIET;
2206 : : /*
2207 : : * Mark this request as started so we don't trigger
2208 : : * any debug logic in the end I/O path.
2209 : : */
2210 : 0 : blk_start_request(rq);
2211 : 0 : __blk_end_request_all(rq, -EIO);
2212 : : } else {
2213 : 0 : printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
2214 : 394901 : break;
2215 : : }
2216 : : }
2217 : :
2218 : 1451432 : return rq;
2219 : : }
2220 : : EXPORT_SYMBOL(blk_peek_request);
2221 : :
2222 : 0 : void blk_dequeue_request(struct request *rq)
2223 : : {
2224 : 433719 : struct request_queue *q = rq->q;
2225 : :
2226 [ - + ]: 433719 : BUG_ON(list_empty(&rq->queuelist));
2227 [ - + ]: 433719 : BUG_ON(ELV_ON_HASH(rq));
2228 : :
2229 : : list_del_init(&rq->queuelist);
2230 : :
2231 : : /*
2232 : : * The time frame between a request being removed from the lists
2233 : : * and when it is freed is accounted as I/O that is in progress on
2234 : : * the driver side.
2235 : : */
2236 [ + - ][ + ]: 433719 : if (blk_account_rq(rq)) {
2237 : 394901 : q->in_flight[rq_is_sync(rq)]++;
2238 : : set_io_start_time_ns(rq);
2239 : : }
2240 : 0 : }
2241 : :
2242 : : /**
2243 : : * blk_start_request - start request processing on the driver
2244 : : * @req: request to dequeue
2245 : : *
2246 : : * Description:
2247 : : * Dequeue @req and start timeout timer on it. This hands off the
2248 : : * request to the driver.
2249 : : *
2250 : : * Block internal functions which don't want to start timer should
2251 : : * call blk_dequeue_request().
2252 : : *
2253 : : * Context:
2254 : : * queue_lock must be held.
2255 : : */
2256 : 0 : void blk_start_request(struct request *req)
2257 : : {
2258 : 433719 : blk_dequeue_request(req);
2259 : :
2260 : : /*
2261 : : * We are now handing the request to the hardware, initialize
2262 : : * resid_len to full count and add the timeout handler.
2263 : : */
2264 : 433719 : req->resid_len = blk_rq_bytes(req);
2265 [ - + ]: 867438 : if (unlikely(blk_bidi_rq(req)))
2266 : 0 : req->next_rq->resid_len = blk_rq_bytes(req->next_rq);
2267 : :
2268 [ - + ]: 433719 : BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
2269 : 433719 : blk_add_timer(req);
2270 : 433719 : }
2271 : : EXPORT_SYMBOL(blk_start_request);
2272 : :
2273 : : /**
2274 : : * blk_fetch_request - fetch a request from a request queue
2275 : : * @q: request queue to fetch a request from
2276 : : *
2277 : : * Description:
2278 : : * Return the request at the top of @q. The request is started on
2279 : : * return and LLD can start processing it immediately.
2280 : : *
2281 : : * Return:
2282 : : * Pointer to the request at the top of @q if available. Null
2283 : : * otherwise.
2284 : : *
2285 : : * Context:
2286 : : * queue_lock must be held.
2287 : : */
2288 : 0 : struct request *blk_fetch_request(struct request_queue *q)
2289 : : {
2290 : : struct request *rq;
2291 : :
2292 : 0 : rq = blk_peek_request(q);
2293 [ # # ]: 0 : if (rq)
2294 : 0 : blk_start_request(rq);
2295 : 0 : return rq;
2296 : : }
2297 : : EXPORT_SYMBOL(blk_fetch_request);
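
Taken together, blk_peek_request()/blk_start_request()/blk_fetch_request() are what a single-queue driver's request_fn is built from. An illustrative sketch, called with the queue lock held; my_request_fn() and my_handle_rq() are hypothetical:

	static void my_handle_rq(struct request *rq);	/* hypothetical hardware path */

	static void my_request_fn(struct request_queue *q)
	{
		struct request *rq;

		while ((rq = blk_fetch_request(q)) != NULL) {
			if (rq->cmd_type != REQ_TYPE_FS) {
				__blk_end_request_all(rq, -EIO);
				continue;
			}
			/* started and dequeued; completed later via blk_end_request() */
			my_handle_rq(rq);
		}
	}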
2298 : :
2299 : : /**
2300 : : * blk_update_request - Special helper function for request stacking drivers
2301 : : * @req: the request being processed
2302 : : * @error: %0 for success, < %0 for error
2303 : : * @nr_bytes: number of bytes to complete @req
2304 : : *
2305 : : * Description:
2306 : : * Ends I/O on a number of bytes attached to @req, but doesn't complete
2307 : : * the request structure even if @req doesn't have leftover.
2308 : : * If @req has leftover, sets it up for the next range of segments.
2309 : : *
2310 : : * This special helper function is only for request stacking drivers
2311 : : * (e.g. request-based dm) so that they can handle partial completion.
2312 : : * Actual device drivers should use blk_end_request instead.
2313 : : *
2314 : : * Passing the result of blk_rq_bytes() as @nr_bytes guarantees
2315 : : * %false return from this function.
2316 : : *
2317 : : * Return:
2318 : : * %false - this request doesn't have any more data
2319 : : * %true - this request has more data
2320 : : **/
2321 : 0 : bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
2322 : : {
2323 : : int total_bytes;
2324 : :
2325 [ + + ]: 433718 : if (!req->bio)
2326 : : return false;
2327 : :
2328 : 394901 : trace_block_rq_complete(req->q, req);
2329 : :
2330 : : /*
2331 : : * For fs requests, rq is just a carrier of independent bios
2332 : : * and each partial completion should be handled separately.
2333 : : * Reset per-request error on each partial completion.
2334 : : *
2335 : : * TODO: tj: This is too subtle. It would be better to let
2336 : : * low level drivers do what they see fit.
2337 : : */
2338 [ + - ]: 394901 : if (req->cmd_type == REQ_TYPE_FS)
2339 : 394901 : req->errors = 0;
2340 : :
2341 [ - + ][ # # ]: 394901 : if (error && req->cmd_type == REQ_TYPE_FS &&
[ # # ]
2342 : 0 : !(req->cmd_flags & REQ_QUIET)) {
2343 : : char *error_type;
2344 : :
2345 [ # # # # : 0 : switch (error) {
# # # ]
2346 : : case -ENOLINK:
2347 : : error_type = "recoverable transport";
2348 : : break;
2349 : : case -EREMOTEIO:
2350 : : error_type = "critical target";
2351 : 0 : break;
2352 : : case -EBADE:
2353 : : error_type = "critical nexus";
2354 : 0 : break;
2355 : : case -ETIMEDOUT:
2356 : : error_type = "timeout";
2357 : 0 : break;
2358 : : case -ENOSPC:
2359 : : error_type = "critical space allocation";
2360 : 0 : break;
2361 : : case -ENODATA:
2362 : : error_type = "critical medium";
2363 : 0 : break;
2364 : : case -EIO:
2365 : : default:
2366 : : error_type = "I/O";
2367 : 0 : break;
2368 : : }
2369 [ # # ][ # # ]: 0 : printk_ratelimited(KERN_ERR "end_request: %s error, dev %s, sector %llu\n",
2370 : : error_type, req->rq_disk ?
2371 : : req->rq_disk->disk_name : "?",
2372 : : (unsigned long long)blk_rq_pos(req));
2373 : :
2374 : : }
2375 : :
2376 : 394901 : blk_account_io_completion(req, nr_bytes);
2377 : :
2378 : : total_bytes = 0;
2379 [ + - ]: 494579 : while (req->bio) {
2380 : : struct bio *bio = req->bio;
2381 : 494579 : unsigned bio_bytes = min(bio->bi_size, nr_bytes);
2382 : :
2383 [ + - ]: 494579 : if (bio_bytes == bio->bi_size)
2384 : 494579 : req->bio = bio->bi_next;
2385 : :
2386 : 494579 : req_bio_endio(req, bio, bio_bytes, error);
2387 : :
2388 : 494579 : total_bytes += bio_bytes;
2389 : 494579 : nr_bytes -= bio_bytes;
2390 : :
2391 [ + + ]: 494579 : if (!nr_bytes)
2392 : : break;
2393 : : }
2394 : :
2395 : : /*
2396 : : * completely done
2397 : : */
2398 [ + - ]: 394901 : if (!req->bio) {
2399 : : /*
2400 : : * Reset counters so that the request stacking driver
2401 : : * can find how many bytes remain in the request
2402 : : * later.
2403 : : */
2404 : 394901 : req->__data_len = 0;
2405 : 394901 : return false;
2406 : : }
2407 : :
2408 : 0 : req->__data_len -= total_bytes;
2409 : 0 : req->buffer = bio_data(req->bio);
2410 : :
2411 : : /* update sector only for requests with clear definition of sector */
2412 [ # # ]: 0 : if (req->cmd_type == REQ_TYPE_FS)
2413 : 0 : req->__sector += total_bytes >> 9;
2414 : :
2415 : : /* mixed attributes always follow the first bio */
2416 [ # # ]: 0 : if (req->cmd_flags & REQ_MIXED_MERGE) {
2417 : 0 : req->cmd_flags &= ~REQ_FAILFAST_MASK;
2418 : 0 : req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK;
2419 : : }
2420 : :
2421 : : /*
2422 : : * If total number of sectors is less than the first segment
2423 : : * size, something has gone terribly wrong.
2424 : : */
2425 [ # # ]: 0 : if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
2426 : 0 : blk_dump_rq_flags(req, "request botched");
2427 : 0 : req->__data_len = blk_rq_cur_bytes(req);
2428 : : }
2429 : :
2430 : : /* recalculate the number of segments */
2431 : 0 : blk_recalc_rq_segments(req);
2432 : :
2433 : 0 : return true;
2434 : : }
2435 : : EXPORT_SYMBOL_GPL(blk_update_request);
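
As a rough sketch of the partial-completion use described above (for request stacking drivers only), where 'done' is however many bytes the lower layer reported complete; my_partial_complete() is a hypothetical name:

	static void my_partial_complete(struct request *rq, unsigned int done)
	{
		if (blk_update_request(rq, 0, done)) {
			/* leftover: rq->bio/__sector now describe the next range */
			return;
		}
		/*
		 * No bytes left, but the request itself is NOT completed here;
		 * the caller still has to end it through the blk_end_request()
		 * family (or its own equivalent).
		 */
	}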
2436 : :
2437 : 0 : static bool blk_update_bidi_request(struct request *rq, int error,
2438 : : unsigned int nr_bytes,
2439 : : unsigned int bidi_bytes)
2440 : : {
2441 [ + - ]: 433718 : if (blk_update_request(rq, error, nr_bytes))
2442 : : return true;
2443 : :
2444 : : /* Bidi request must be completed as a whole */
2445 [ - + # # ]: 433718 : if (unlikely(blk_bidi_rq(rq)) &&
2446 : 0 : blk_update_request(rq->next_rq, error, bidi_bytes))
2447 : : return true;
2448 : :
2449 [ + - ]: 433718 : if (blk_queue_add_random(rq->q))
2450 : 433718 : add_disk_randomness(rq->rq_disk);
2451 : :
2452 : : return false;
2453 : : }
2454 : :
2455 : : /**
2456 : : * blk_unprep_request - unprepare a request
2457 : : * @req: the request
2458 : : *
2459 : : * This function makes a request ready for complete resubmission (or
2460 : : * completion). It happens only after all error handling is complete,
2461 : : * so represents the appropriate moment to deallocate any resources
2462 : : * that were allocated to the request in the prep_rq_fn. The queue
2463 : : * lock is held when calling this.
2464 : : */
2465 : 0 : void blk_unprep_request(struct request *req)
2466 : : {
2467 : 433718 : struct request_queue *q = req->q;
2468 : :
2469 : 433718 : req->cmd_flags &= ~REQ_DONTPREP;
2470 [ + - ][ # # ]: 433718 : if (q->unprep_rq_fn)
2471 : 433718 : q->unprep_rq_fn(q, req);
2472 : 0 : }
2473 : : EXPORT_SYMBOL_GPL(blk_unprep_request);
2474 : :
2475 : : /*
2476 : : * queue lock must be held
2477 : : */
2478 : 0 : static void blk_finish_request(struct request *req, int error)
2479 : : {
2480 [ - + ]: 433718 : if (blk_rq_tagged(req))
2481 : 0 : blk_queue_end_tag(req->q, req);
2482 : :
2483 [ - + ]: 433718 : BUG_ON(blk_queued_rq(req));
2484 : :
2485 [ - + ][ # # ]: 433718 : if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS)
2486 : 0 : laptop_io_completion(&req->q->backing_dev_info);
2487 : :
2488 : 433718 : blk_delete_timer(req);
2489 : :
2490 [ + - ]: 433718 : if (req->cmd_flags & REQ_DONTPREP)
2491 : : blk_unprep_request(req);
2492 : :
2493 : 433718 : blk_account_io_done(req);
2494 : :
2495 [ + + ]: 433718 : if (req->end_io)
2496 : 38817 : req->end_io(req, error);
2497 : : else {
2498 [ - + ]: 394901 : if (blk_bidi_rq(req))
2499 : 0 : __blk_put_request(req->next_rq->q, req->next_rq);
2500 : :
2501 : 394901 : __blk_put_request(req->q, req);
2502 : : }
2503 : 433718 : }
2504 : :
2505 : : /**
2506 : : * blk_end_bidi_request - Complete a bidi request
2507 : : * @rq: the request to complete
2508 : : * @error: %0 for success, < %0 for error
2509 : : * @nr_bytes: number of bytes to complete @rq
2510 : : * @bidi_bytes: number of bytes to complete @rq->next_rq
2511 : : *
2512 : : * Description:
2513 : : * Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
2514 : : * Drivers that support bidi can safely call this member for any
2515 : : * type of request, bidi or uni. In the latter case @bidi_bytes is
2516 : : * just ignored.
2517 : : *
2518 : : * Return:
2519 : : * %false - we are done with this request
2520 : : * %true - still buffers pending for this request
2521 : : **/
2522 : 0 : static bool blk_end_bidi_request(struct request *rq, int error,
2523 : : unsigned int nr_bytes, unsigned int bidi_bytes)
2524 : : {
2525 : 433718 : struct request_queue *q = rq->q;
2526 : : unsigned long flags;
2527 : :
2528 [ + - ]: 433718 : if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
2529 : : return true;
2530 : :
2531 : 433718 : spin_lock_irqsave(q->queue_lock, flags);
2532 : 433718 : blk_finish_request(rq, error);
2533 : 433718 : spin_unlock_irqrestore(q->queue_lock, flags);
2534 : :
2535 : 433718 : return false;
2536 : : }
2537 : :
2538 : : /**
2539 : : * __blk_end_bidi_request - Complete a bidi request with queue lock held
2540 : : * @rq: the request to complete
2541 : : * @error: %0 for success, < %0 for error
2542 : : * @nr_bytes: number of bytes to complete @rq
2543 : : * @bidi_bytes: number of bytes to complete @rq->next_rq
2544 : : *
2545 : : * Description:
2546 : : * Identical to blk_end_bidi_request() except that queue lock is
2547 : : * assumed to be locked on entry and remains so on return.
2548 : : *
2549 : : * Return:
2550 : : * %false - we are done with this request
2551 : : * %true - still buffers pending for this request
2552 : : **/
2553 : 0 : bool __blk_end_bidi_request(struct request *rq, int error,
2554 : : unsigned int nr_bytes, unsigned int bidi_bytes)
2555 : : {
2556 [ # # ]: 0 : if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
2557 : : return true;
2558 : :
2559 : 0 : blk_finish_request(rq, error);
2560 : :
2561 : 0 : return false;
2562 : : }
2563 : :
2564 : : /**
2565 : : * blk_end_request - Helper function for drivers to complete the request.
2566 : : * @rq: the request being processed
2567 : : * @error: %0 for success, < %0 for error
2568 : : * @nr_bytes: number of bytes to complete
2569 : : *
2570 : : * Description:
2571 : : * Ends I/O on a number of bytes attached to @rq.
2572 : : * If @rq has leftover, sets it up for the next range of segments.
2573 : : *
2574 : : * Return:
2575 : : * %false - we are done with this request
2576 : : * %true - still buffers pending for this request
2577 : : **/
2578 : 0 : bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
2579 : : {
2580 : 433718 : return blk_end_bidi_request(rq, error, nr_bytes, 0);
2581 : : }
2582 : : EXPORT_SYMBOL(blk_end_request);
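
A driver's completion path, e.g. its interrupt handler, would use it roughly like the following sketch; my_complete_rq() and bytes_done are hypothetical:

	static void my_complete_rq(struct request *rq, unsigned int bytes_done,
				   int error)
	{
		if (blk_end_request(rq, error, bytes_done))
			return;		/* partially done, more segments pending */
		/* request fully finished and released by the block layer */
	}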
2583 : :
2584 : : /**
2585 : : * blk_end_request_all - Helper function for drivers to finish the request.
2586 : : * @rq: the request to finish
2587 : : * @error: %0 for success, < %0 for error
2588 : : *
2589 : : * Description:
2590 : : * Completely finish @rq.
2591 : : */
2592 : 0 : void blk_end_request_all(struct request *rq, int error)
2593 : : {
2594 : : bool pending;
2595 : : unsigned int bidi_bytes = 0;
2596 : :
2597 [ # # ]: 0 : if (unlikely(blk_bidi_rq(rq)))
2598 : : bidi_bytes = blk_rq_bytes(rq->next_rq);
2599 : :
2600 : 0 : pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
2601 [ # # ]: 0 : BUG_ON(pending);
2602 : 0 : }
2603 : : EXPORT_SYMBOL(blk_end_request_all);
2604 : :
2605 : : /**
2606 : : * blk_end_request_cur - Helper function to finish the current request chunk.
2607 : : * @rq: the request to finish the current chunk for
2608 : : * @error: %0 for success, < %0 for error
2609 : : *
2610 : : * Description:
2611 : : * Complete the current consecutively mapped chunk from @rq.
2612 : : *
2613 : : * Return:
2614 : : * %false - we are done with this request
2615 : : * %true - still buffers pending for this request
2616 : : */
2617 : 0 : bool blk_end_request_cur(struct request *rq, int error)
2618 : : {
2619 : 0 : return blk_end_request(rq, error, blk_rq_cur_bytes(rq));
2620 : : }
2621 : : EXPORT_SYMBOL(blk_end_request_cur);
2622 : :
2623 : : /**
2624 : : * blk_end_request_err - Finish a request till the next failure boundary.
2625 : : * @rq: the request to finish till the next failure boundary for
2626 : : * @error: must be negative errno
2627 : : *
2628 : : * Description:
2629 : : * Complete @rq till the next failure boundary.
2630 : : *
2631 : : * Return:
2632 : : * %false - we are done with this request
2633 : : * %true - still buffers pending for this request
2634 : : */
2635 : 0 : bool blk_end_request_err(struct request *rq, int error)
2636 : : {
2637 [ # # ]: 0 : WARN_ON(error >= 0);
2638 : 0 : return blk_end_request(rq, error, blk_rq_err_bytes(rq));
2639 : : }
2640 : : EXPORT_SYMBOL_GPL(blk_end_request_err);
2641 : :
2642 : : /**
2643 : : * __blk_end_request - Helper function for drivers to complete the request.
2644 : : * @rq: the request being processed
2645 : : * @error: %0 for success, < %0 for error
2646 : : * @nr_bytes: number of bytes to complete
2647 : : *
2648 : : * Description:
2649 : : * Must be called with queue lock held unlike blk_end_request().
2650 : : *
2651 : : * Return:
2652 : : * %false - we are done with this request
2653 : : * %true - still buffers pending for this request
2654 : : **/
2655 : 0 : bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
2656 : : {
2657 : 0 : return __blk_end_bidi_request(rq, error, nr_bytes, 0);
2658 : : }
2659 : : EXPORT_SYMBOL(__blk_end_request);
2660 : :
2661 : : /**
2662 : : * __blk_end_request_all - Helper function for drivers to finish the request.
2663 : : * @rq: the request to finish
2664 : : * @error: %0 for success, < %0 for error
2665 : : *
2666 : : * Description:
2667 : : * Completely finish @rq. Must be called with queue lock held.
2668 : : */
2669 : 0 : void __blk_end_request_all(struct request *rq, int error)
2670 : : {
2671 : : bool pending;
2672 : : unsigned int bidi_bytes = 0;
2673 : :
2674 [ # # ]: 0 : if (unlikely(blk_bidi_rq(rq)))
2675 : : bidi_bytes = blk_rq_bytes(rq->next_rq);
2676 : :
2677 : 0 : pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
2678 [ # # ]: 0 : BUG_ON(pending);
2679 : 0 : }
2680 : : EXPORT_SYMBOL(__blk_end_request_all);
2681 : :
2682 : : /**
2683 : : * __blk_end_request_cur - Helper function to finish the current request chunk.
2684 : : * @rq: the request to finish the current chunk for
2685 : : * @error: %0 for success, < %0 for error
2686 : : *
2687 : : * Description:
2688 : : * Complete the current consecutively mapped chunk from @rq. Must
2689 : : * be called with queue lock held.
2690 : : *
2691 : : * Return:
2692 : : * %false - we are done with this request
2693 : : * %true - still buffers pending for this request
2694 : : */
2695 : 0 : bool __blk_end_request_cur(struct request *rq, int error)
2696 : : {
2697 : 0 : return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
2698 : : }
2699 : : EXPORT_SYMBOL(__blk_end_request_cur);
2700 : :
2701 : : /**
2702 : : * __blk_end_request_err - Finish a request till the next failure boundary.
2703 : : * @rq: the request to finish till the next failure boundary for
2704 : : * @error: must be negative errno
2705 : : *
2706 : : * Description:
2707 : : * Complete @rq till the next failure boundary. Must be called
2708 : : * with queue lock held.
2709 : : *
2710 : : * Return:
2711 : : * %false - we are done with this request
2712 : : * %true - still buffers pending for this request
2713 : : */
2714 : 0 : bool __blk_end_request_err(struct request *rq, int error)
2715 : : {
2716 [ # # ]: 0 : WARN_ON(error >= 0);
2717 : 0 : return __blk_end_request(rq, error, blk_rq_err_bytes(rq));
2718 : : }
2719 : : EXPORT_SYMBOL_GPL(__blk_end_request_err);
2720 : :
2721 : 0 : void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
2722 : : struct bio *bio)
2723 : : {
2724 : : /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */
2725 : 396232 : rq->cmd_flags |= bio->bi_rw & REQ_WRITE;
2726 : :
2727 [ + + ]: 396232 : if (bio_has_data(bio)) {
2728 : 396227 : rq->nr_phys_segments = bio_phys_segments(q, bio);
2729 : 396244 : rq->buffer = bio_data(bio);
2730 : : }
2731 : 396249 : rq->__data_len = bio->bi_size;
2732 : 396249 : rq->bio = rq->biotail = bio;
2733 : :
2734 [ + + ]: 396249 : if (bio->bi_bdev)
2735 : 396231 : rq->rq_disk = bio->bi_bdev->bd_disk;
2736 : 17 : }
2737 : :
2738 : : #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
2739 : : /**
2740 : : * rq_flush_dcache_pages - Helper function to flush all pages in a request
2741 : : * @rq: the request to be flushed
2742 : : *
2743 : : * Description:
2744 : : * Flush all pages in @rq.
2745 : : */
2746 : 0 : void rq_flush_dcache_pages(struct request *rq)
2747 : : {
2748 : : struct req_iterator iter;
2749 : : struct bio_vec *bvec;
2750 : :
2751 [ # # ][ # # ]: 0 : rq_for_each_segment(bvec, rq, iter)
[ # # ]
2752 : 0 : flush_dcache_page(bvec->bv_page);
2753 : 0 : }
2754 : : EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
2755 : : #endif
2756 : :
2757 : : /**
2758 : : * blk_lld_busy - Check if underlying low-level drivers of a device are busy
2759 : : * @q : the queue of the device being checked
2760 : : *
2761 : : * Description:
2762 : : * Check if underlying low-level drivers of a device are busy.
2763 : : * If the drivers want to export their busy state, they must set own
2764 : : * exporting function using blk_queue_lld_busy() first.
2765 : : *
2766 : : * Basically, this function is used only by request stacking drivers
2767 : : * to stop dispatching requests to underlying devices when underlying
2768 : : * devices are busy. This behavior helps more I/O merging on the queue
2769 : : * of the request stacking driver and prevents I/O throughput regression
2770 : : * on burst I/O load.
2771 : : *
2772 : : * Return:
2773 : : * 0 - Not busy (The request stacking driver should dispatch request)
2774 : : * 1 - Busy (The request stacking driver should stop dispatching request)
2775 : : */
2776 : 0 : int blk_lld_busy(struct request_queue *q)
2777 : : {
2778 [ # # ]: 0 : if (q->lld_busy_fn)
2779 : 0 : return q->lld_busy_fn(q);
2780 : :
2781 : : return 0;
2782 : : }
2783 : : EXPORT_SYMBOL_GPL(blk_lld_busy);
2784 : :
2785 : : /**
2786 : : * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
2787 : : * @rq: the clone request to be cleaned up
2788 : : *
2789 : : * Description:
2790 : : * Free all bios in @rq for a cloned request.
2791 : : */
2792 : 0 : void blk_rq_unprep_clone(struct request *rq)
2793 : : {
2794 : : struct bio *bio;
2795 : :
2796 [ # # ][ # # ]: 0 : while ((bio = rq->bio) != NULL) {
2797 : 0 : rq->bio = bio->bi_next;
2798 : :
2799 : 0 : bio_put(bio);
2800 : : }
2801 : 0 : }
2802 : : EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
2803 : :
2804 : : /*
2805 : : * Copy attributes of the original request to the clone request.
2806 : : * The actual data parts (e.g. ->cmd, ->buffer, ->sense) are not copied.
2807 : : */
2808 : 0 : static void __blk_rq_prep_clone(struct request *dst, struct request *src)
2809 : : {
2810 : 0 : dst->cpu = src->cpu;
2811 : 0 : dst->cmd_flags = (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE;
2812 : 0 : dst->cmd_type = src->cmd_type;
2813 : 0 : dst->__sector = blk_rq_pos(src);
2814 : 0 : dst->__data_len = blk_rq_bytes(src);
2815 : 0 : dst->nr_phys_segments = src->nr_phys_segments;
2816 : 0 : dst->ioprio = src->ioprio;
2817 : 0 : dst->extra_len = src->extra_len;
2818 : : }
2819 : :
2820 : : /**
2821 : : * blk_rq_prep_clone - Helper function to setup clone request
2822 : : * @rq: the request to be setup
2823 : : * @rq_src: original request to be cloned
2824 : : * @bs: bio_set that bios for clone are allocated from
2825 : : * @gfp_mask: memory allocation mask for bio
2826 : : * @bio_ctr: setup function to be called for each clone bio.
2827 : : * Returns %0 for success, non %0 for failure.
2828 : : * @data: private data to be passed to @bio_ctr
2829 : : *
2830 : : * Description:
2831 : : * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
2832 : : * The actual data parts of @rq_src (e.g. ->cmd, ->buffer, ->sense)
2833 : : * are not copied, and copying such parts is the caller's responsibility.
2834 : : * Also, pages which the original bios are pointing to are not copied
2835 : : * and the cloned bios just point to the same pages.
2836 : : * So cloned bios must be completed before original bios, which means
2837 : : * the caller must complete @rq before @rq_src.
2838 : : */
2839 : 0 : int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
2840 : : struct bio_set *bs, gfp_t gfp_mask,
2841 : : int (*bio_ctr)(struct bio *, struct bio *, void *),
2842 : : void *data)
2843 : : {
2844 : : struct bio *bio, *bio_src;
2845 : :
2846 [ # # ]: 0 : if (!bs)
2847 : 0 : bs = fs_bio_set;
2848 : :
2849 : 0 : blk_rq_init(NULL, rq);
2850 : :
2851 [ # # ][ # # ]: 0 : __rq_for_each_bio(bio_src, rq_src) {
2852 : 0 : bio = bio_clone_bioset(bio_src, gfp_mask, bs);
2853 [ # # ]: 0 : if (!bio)
2854 : : goto free_and_out;
2855 : :
2856 [ # # ][ # # ]: 0 : if (bio_ctr && bio_ctr(bio, bio_src, data))
2857 : : goto free_and_out;
2858 : :
2859 [ # # ]: 0 : if (rq->bio) {
2860 : 0 : rq->biotail->bi_next = bio;
2861 : 0 : rq->biotail = bio;
2862 : : } else
2863 : 0 : rq->bio = rq->biotail = bio;
2864 : : }
2865 : :
2866 : : __blk_rq_prep_clone(rq, rq_src);
2867 : :
2868 : 0 : return 0;
2869 : :
2870 : : free_and_out:
2871 [ # # ]: 0 : if (bio)
2872 : 0 : bio_put(bio);
2873 : : blk_rq_unprep_clone(rq);
2874 : :
2875 : : return -ENOMEM;
2876 : : }
2877 : : EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
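
An illustrative use by a request stacking driver, with no private bio_set and no per-bio constructor; clone_and_dispatch() and lower_q are hypothetical:

	static int clone_and_dispatch(struct request_queue *lower_q,
				      struct request *orig, struct request *clone)
	{
		int ret;

		ret = blk_rq_prep_clone(clone, orig, NULL, GFP_ATOMIC, NULL, NULL);
		if (ret)
			return ret;	/* -ENOMEM: bios could not be cloned */

		return blk_insert_cloned_request(lower_q, clone);
	}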
2878 : :
2879 : 0 : int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
2880 : : {
2881 : 452918 : return queue_work(kblockd_workqueue, work);
2882 : : }
2883 : : EXPORT_SYMBOL(kblockd_schedule_work);
2884 : :
2885 : 0 : int kblockd_schedule_delayed_work(struct request_queue *q,
2886 : : struct delayed_work *dwork, unsigned long delay)
2887 : : {
2888 : 0 : return queue_delayed_work(kblockd_workqueue, dwork, delay);
2889 : : }
2890 : : EXPORT_SYMBOL(kblockd_schedule_delayed_work);
2891 : :
2892 : : #define PLUG_MAGIC 0x91827364
2893 : :
2894 : : /**
2895 : : * blk_start_plug - initialize blk_plug and track it inside the task_struct
2896 : : * @plug: The &struct blk_plug that needs to be initialized
2897 : : *
2898 : : * Description:
2899 : : * Tracking blk_plug inside the task_struct will help with auto-flushing the
2900 : : * pending I/O should the task end up blocking between blk_start_plug() and
2901 : : * blk_finish_plug(). This is important from a performance perspective, but
2902 : : * also ensures that we don't deadlock. For instance, if the task is blocking
2903 : : * for a memory allocation, memory reclaim could end up wanting to free a
2904 : : * page belonging to that request that is currently residing in our private
2905 : : * plug. By flushing the pending I/O when the process goes to sleep, we avoid
2906 : : * this kind of deadlock.
2907 : : */
2908 : 0 : void blk_start_plug(struct blk_plug *plug)
2909 : : {
2910 : 1449824 : struct task_struct *tsk = current;
2911 : :
2912 : 1449824 : plug->magic = PLUG_MAGIC;
2913 : 1449824 : INIT_LIST_HEAD(&plug->list);
2914 : 1449824 : INIT_LIST_HEAD(&plug->mq_list);
2915 : 1449824 : INIT_LIST_HEAD(&plug->cb_list);
2916 : :
2917 : : /*
2918 : : * If this is a nested plug, don't actually assign it. It will be
2919 : : * flushed on its own.
2920 : : */
2921 [ + + ]: 1449824 : if (!tsk->plug) {
2922 : : /*
2923 : : * Store ordering should not be needed here, since a potential
2924 : : * preempt will imply a full memory barrier
2925 : : */
2926 : 1350812 : tsk->plug = plug;
2927 : : }
2928 : 0 : }
2929 : : EXPORT_SYMBOL(blk_start_plug);
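
From the submitter's side, the plugging pattern described above looks roughly like this sketch; read_pages_plugged() is hypothetical and reuses the earlier submit_one_page_read() example:

	static void read_pages_plugged(struct block_device *bdev, sector_t sector,
				       struct page **pages, int nr_pages)
	{
		struct blk_plug plug;
		int i;

		blk_start_plug(&plug);
		for (i = 0; i < nr_pages; i++)
			submit_one_page_read(bdev, sector + i * (PAGE_SIZE >> 9),
					     pages[i]);
		blk_finish_plug(&plug);		/* flush the batched requests */
	}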
2930 : :
2931 : 0 : static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
2932 : : {
2933 : 624338 : struct request *rqa = container_of(a, struct request, queuelist);
2934 : 624338 : struct request *rqb = container_of(b, struct request, queuelist);
2935 : :
2936 [ + + ][ + + ]: 624394 : return !(rqa->q < rqb->q ||
2937 [ + + ]: 624360 : (rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb)));
2938 : : }
2939 : :
2940 : : /*
2941 : : * If 'from_schedule' is true, then postpone the dispatch of requests
2942 : : * until a safe kblockd context. We do this to avoid accidental big
2943 : : * additional stack usage in driver dispatch, in places where the original
2944 : : * plugger did not intend it.
2945 : : */
2946 : 0 : static void queue_unplugged(struct request_queue *q, unsigned int depth,
2947 : : bool from_schedule)
2948 : : __releases(q->queue_lock)
2949 : : {
2950 : 242071 : trace_block_unplug(q, depth, !from_schedule);
2951 : :
2952 [ + + ]: 242071 : if (from_schedule)
2953 : 1175 : blk_run_queue_async(q);
2954 : : else
2955 : 240896 : __blk_run_queue(q);
2956 : 242071 : spin_unlock(q->queue_lock);
2957 : 242071 : }
2958 : :
2959 : 0 : static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
2960 : : {
2961 : 1568709 : LIST_HEAD(callbacks);
2962 : :
2963 [ - + ]: 1568709 : while (!list_empty(&plug->cb_list)) {
2964 : : list_splice_init(&plug->cb_list, &callbacks);
2965 : :
2966 [ # # ]: 1568709 : while (!list_empty(&callbacks)) {
2967 : : struct blk_plug_cb *cb = list_first_entry(&callbacks,
2968 : : struct blk_plug_cb,
2969 : : list);
2970 : : list_del(&cb->list);
2971 : 0 : cb->callback(cb, from_schedule);
2972 : : }
2973 : : }
2974 : 1568709 : }
2975 : :
2976 : 0 : struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
2977 : : int size)
2978 : : {
2979 : 0 : struct blk_plug *plug = current->plug;
2980 : : struct blk_plug_cb *cb;
2981 : :
2982 [ # # ]: 0 : if (!plug)
2983 : : return NULL;
2984 : :
2985 [ # # ]: 0 : list_for_each_entry(cb, &plug->cb_list, list)
2986 [ # # ][ # # ]: 0 : if (cb->callback == unplug && cb->data == data)
2987 : : return cb;
2988 : :
2989 : : /* Not currently on the callback list */
2990 [ # # ]: 0 : BUG_ON(size < sizeof(*cb));
2991 : : cb = kzalloc(size, GFP_ATOMIC);
2992 [ # # ]: 0 : if (cb) {
2993 : 0 : cb->data = data;
2994 : 0 : cb->callback = unplug;
2995 : 0 : list_add(&cb->list, &plug->cb_list);
2996 : : }
2997 : 0 : return cb;
2998 : : }
2999 : : EXPORT_SYMBOL(blk_check_plugged);
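
Stacking drivers that submit bios rather than requests (md does something similar) can hook their own flush callback into the current plug with blk_check_plugged(). A hedged sketch where struct my_plug_cb, my_unplug() and try_plug() are hypothetical; struct blk_plug_cb must be the first member because the helper only allocates and initialises that part:

	struct my_plug_cb {
		struct blk_plug_cb cb;		/* must be first */
		void *driver_data;
	};

	static void my_unplug(struct blk_plug_cb *cb, bool from_schedule)
	{
		struct my_plug_cb *mcb = container_of(cb, struct my_plug_cb, cb);

		/* kick the I/O that was batched while the plug was held */
		(void)mcb;
	}

	static bool try_plug(void *dev)
	{
		struct blk_plug_cb *cb;

		cb = blk_check_plugged(my_unplug, dev, sizeof(struct my_plug_cb));
		return cb != NULL;	/* false: no plug active, submit directly */
	}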
3000 : :
3001 : 0 : void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
3002 : : {
3003 : : struct request_queue *q;
3004 : : unsigned long flags;
3005 : : struct request *rq;
3006 : 1568799 : LIST_HEAD(list);
3007 : : unsigned int depth;
3008 : :
3009 [ - + ]: 1568799 : BUG_ON(plug->magic != PLUG_MAGIC);
3010 : :
3011 : 1568799 : flush_plug_callbacks(plug, from_schedule);
3012 : :
3013 [ - + ]: 1568783 : if (!list_empty(&plug->mq_list))
3014 : 0 : blk_mq_flush_plug_list(plug, from_schedule);
3015 : :
3016 [ + + ]: 3137516 : if (list_empty(&plug->list))
3017 : 1326712 : return;
3018 : :
3019 : : list_splice_init(&plug->list, &list);
3020 : :
3021 : 242005 : list_sort(NULL, &list, plug_rq_cmp);
3022 : :
3023 : : q = NULL;
3024 : : depth = 0;
3025 : :
3026 : : /*
3027 : : * Save and disable interrupts here, to avoid doing it for every
3028 : : * queue lock we have to take.
3029 : : */
3030 : : local_irq_save(flags);
3031 [ + + ]: 616341 : while (!list_empty(&list)) {
3032 : : rq = list_entry_rq(list.next);
3033 : 374270 : list_del_init(&rq->queuelist);
3034 [ + + ]: 374270 : BUG_ON(!rq->q);
3035 [ + + ]: 374213 : if (rq->q != q) {
3036 : : /*
3037 : : * This drops the queue lock
3038 : : */
3039 [ - + ]: 242056 : if (q)
3040 : 0 : queue_unplugged(q, depth, from_schedule);
3041 : 242056 : q = rq->q;
3042 : : depth = 0;
3043 : 242056 : spin_lock(q->queue_lock);
3044 : : }
3045 : :
3046 : : /*
3047 : : * Short-circuit if @q is dead
3048 : : */
3049 [ - + ]: 374281 : if (unlikely(blk_queue_dying(q))) {
3050 : 0 : __blk_end_request_all(rq, -ENODEV);
3051 : 0 : continue;
3052 : : }
3053 : :
3054 : : /*
3055 : : * rq is already accounted, so use raw insert
3056 : : */
3057 [ - + ]: 374281 : if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA))
3058 : 0 : __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
3059 : : else
3060 : 374281 : __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
3061 : :
3062 : 374281 : depth++;
3063 : : }
3064 : :
3065 : : /*
3066 : : * This drops the queue lock
3067 : : */
3068 [ + - ]: 242071 : if (q)
3069 : 242071 : queue_unplugged(q, depth, from_schedule);
3070 : :
3071 [ - + ]: 242070 : local_irq_restore(flags);
3072 : : }
3073 : :
3074 : 0 : void blk_finish_plug(struct blk_plug *plug)
3075 : : {
3076 : 1449960 : blk_flush_plug_list(plug, false);
3077 : :
3078 [ + + ]: 1449944 : if (plug == current->plug)
3079 : 1350930 : current->plug = NULL;
3080 : 0 : }
3081 : : EXPORT_SYMBOL(blk_finish_plug);
3082 : :
3083 : : #ifdef CONFIG_PM_RUNTIME
3084 : : /**
3085 : : * blk_pm_runtime_init - Block layer runtime PM initialization routine
3086 : : * @q: the queue of the device
3087 : : * @dev: the device the queue belongs to
3088 : : *
3089 : : * Description:
3090 : : * Initialize runtime-PM-related fields for @q and start auto suspend for
3091 : : * @dev. Drivers that want to take advantage of request-based runtime PM
3092 : : * should call this function after @dev has been initialized, and its
3093 : : * request queue @q has been allocated, and runtime PM for it cannot happen
3094 : : * yet (either because it is disabled/forbidden or its usage_count > 0). In most
3095 : : * cases, driver should call this function before any I/O has taken place.
3096 : : *
3097 : : * This function takes care of setting up using auto suspend for the device,
3098 : : * the autosuspend delay is set to -1 to make runtime suspend impossible
3099 : : * until an updated value is either set by user or by driver. Drivers do
3100 : : * not need to touch other autosuspend settings.
3101 : : *
3102 : : * The block layer runtime PM is request based, so it only works for drivers
3103 : : * that use requests as their IO unit instead of those that directly use bios.
3104 : : */
3105 : : void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
3106 : : {
3107 : : q->dev = dev;
3108 : : q->rpm_status = RPM_ACTIVE;
3109 : : pm_runtime_set_autosuspend_delay(q->dev, -1);
3110 : : pm_runtime_use_autosuspend(q->dev);
3111 : : }
3112 : : EXPORT_SYMBOL(blk_pm_runtime_init);
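
A driver would normally call this once from its probe path; a sketch (the 5 second autosuspend delay is only an example value, and my_setup_runtime_pm() is hypothetical):

	#include <linux/pm_runtime.h>

	static void my_setup_runtime_pm(struct request_queue *q, struct device *dev)
	{
		blk_pm_runtime_init(q, dev);
		pm_runtime_set_autosuspend_delay(dev, 5000);	/* replace the -1 default */
		pm_runtime_allow(dev);
	}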
3113 : :
3114 : : /**
3115 : : * blk_pre_runtime_suspend - Pre runtime suspend check
3116 : : * @q: the queue of the device
3117 : : *
3118 : : * Description:
3119 : : * This function will check if runtime suspend is allowed for the device
3120 : : * by examining if there are any requests pending in the queue. If there
3121 : : * are requests pending, the device cannot be runtime suspended; otherwise,
3122 : : * the queue's status will be updated to SUSPENDING and the driver can
3123 : : * proceed to suspend the device.
3124 : : *
 3125                 :            :  *    If suspend is not allowed, the device is marked as last busy so that
 3126                 :            :  *    the runtime PM core will try to autosuspend it again later.
3127 : : *
3128 : : * This function should be called near the start of the device's
3129 : : * runtime_suspend callback.
3130 : : *
3131 : : * Return:
3132 : : * 0 - OK to runtime suspend the device
3133 : : * -EBUSY - Device should not be runtime suspended
3134 : : */
3135 : : int blk_pre_runtime_suspend(struct request_queue *q)
3136 : : {
3137 : : int ret = 0;
3138 : :
3139 : : spin_lock_irq(q->queue_lock);
3140 : : if (q->nr_pending) {
3141 : : ret = -EBUSY;
3142 : : pm_runtime_mark_last_busy(q->dev);
3143 : : } else {
3144 : : q->rpm_status = RPM_SUSPENDING;
3145 : : }
3146 : : spin_unlock_irq(q->queue_lock);
3147 : : return ret;
3148 : : }
3149 : : EXPORT_SYMBOL(blk_pre_runtime_suspend);
3150 : :
3151 : : /**
3152 : : * blk_post_runtime_suspend - Post runtime suspend processing
3153 : : * @q: the queue of the device
3154 : : * @err: return value of the device's runtime_suspend function
3155 : : *
3156 : : * Description:
 3157                 :            :  *    Update the queue's runtime status according to the return value of the
 3158                 :            :  *    device's runtime_suspend function and, if the suspend failed, mark last
 3159                 :            :  *    busy for the device so that the PM core will try to autosuspend it later.
3160 : : *
3161 : : * This function should be called near the end of the device's
3162 : : * runtime_suspend callback.
3163 : : */
3164 : : void blk_post_runtime_suspend(struct request_queue *q, int err)
3165 : : {
3166 : : spin_lock_irq(q->queue_lock);
3167 : : if (!err) {
3168 : : q->rpm_status = RPM_SUSPENDED;
3169 : : } else {
3170 : : q->rpm_status = RPM_ACTIVE;
3171 : : pm_runtime_mark_last_busy(q->dev);
3172 : : }
3173 : : spin_unlock_irq(q->queue_lock);
3174 : : }
3175 : : EXPORT_SYMBOL(blk_post_runtime_suspend);
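Taken together, the two suspend helpers above are meant to bracket the driver's own power-down work inside its runtime_suspend callback. A hedged sketch follows; mydrv_queue_of() and mydrv_hw_suspend() are hypothetical driver internals.

#include <linux/blkdev.h>

/* hypothetical driver internals */
extern struct request_queue *mydrv_queue_of(struct device *dev);
extern int mydrv_hw_suspend(struct device *dev);

static int mydrv_runtime_suspend(struct device *dev)
{
	struct request_queue *q = mydrv_queue_of(dev);
	int err;

	err = blk_pre_runtime_suspend(q);
	if (err)			/* -EBUSY: requests pending, stay active */
		return err;

	err = mydrv_hw_suspend(dev);	/* driver-specific power-down */
	blk_post_runtime_suspend(q, err);
	return err;
}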
3176 : :
3177 : : /**
3178 : : * blk_pre_runtime_resume - Pre runtime resume processing
3179 : : * @q: the queue of the device
3180 : : *
3181 : : * Description:
3182 : : * Update the queue's runtime status to RESUMING in preparation for the
3183 : : * runtime resume of the device.
3184 : : *
3185 : : * This function should be called near the start of the device's
3186 : : * runtime_resume callback.
3187 : : */
3188 : : void blk_pre_runtime_resume(struct request_queue *q)
3189 : : {
3190 : : spin_lock_irq(q->queue_lock);
3191 : : q->rpm_status = RPM_RESUMING;
3192 : : spin_unlock_irq(q->queue_lock);
3193 : : }
3194 : : EXPORT_SYMBOL(blk_pre_runtime_resume);
3195 : :
3196 : : /**
3197 : : * blk_post_runtime_resume - Post runtime resume processing
3198 : : * @q: the queue of the device
3199 : : * @err: return value of the device's runtime_resume function
3200 : : *
3201 : : * Description:
3202 : : * Update the queue's runtime status according to the return value of the
 3203                 :            :  *    device's runtime_resume function. If the device resumed successfully,
 3204                 :            :  *    process the requests that were queued while it was suspended or resuming,
 3205                 :            :  *    then mark it as last busy and initiate autosuspend for it.
3206 : : *
3207 : : * This function should be called near the end of the device's
3208 : : * runtime_resume callback.
3209 : : */
3210 : : void blk_post_runtime_resume(struct request_queue *q, int err)
3211 : : {
3212 : : spin_lock_irq(q->queue_lock);
3213 : : if (!err) {
3214 : : q->rpm_status = RPM_ACTIVE;
3215 : : __blk_run_queue(q);
3216 : : pm_runtime_mark_last_busy(q->dev);
3217 : : pm_request_autosuspend(q->dev);
3218 : : } else {
3219 : : q->rpm_status = RPM_SUSPENDED;
3220 : : }
3221 : : spin_unlock_irq(q->queue_lock);
3222 : : }
3223 : : EXPORT_SYMBOL(blk_post_runtime_resume);
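The matching resume path is the mirror image: mark the queue RESUMING, power the hardware back up, then let blk_post_runtime_resume() either restart the queue or record the device as suspended again. A hedged sketch with the same hypothetical driver internals as the suspend example:

#include <linux/blkdev.h>

/* hypothetical driver internals, as in the suspend sketch above */
extern struct request_queue *mydrv_queue_of(struct device *dev);
extern int mydrv_hw_resume(struct device *dev);

static int mydrv_runtime_resume(struct device *dev)
{
	struct request_queue *q = mydrv_queue_of(dev);
	int err;

	blk_pre_runtime_resume(q);
	err = mydrv_hw_resume(dev);	/* driver-specific power-up */
	blk_post_runtime_resume(q, err);
	return err;
}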
3224 : : #endif
3225 : :
3226 : 0 : int __init blk_dev_init(void)
3227 : : {
3228 : : BUILD_BUG_ON(__REQ_NR_BITS > 8 *
3229 : : sizeof(((struct request *)0)->cmd_flags));
3230 : :
3231 : : /* used for unplugging and affects IO latency/throughput - HIGHPRI */
3232 : 0 : kblockd_workqueue = alloc_workqueue("kblockd",
3233 : : WQ_MEM_RECLAIM | WQ_HIGHPRI |
3234 : : WQ_POWER_EFFICIENT, 0);
3235 [ # # ]: 0 : if (!kblockd_workqueue)
3236 : 0 : panic("Failed to create kblockd\n");
3237 : :
3238 : 0 : request_cachep = kmem_cache_create("blkdev_requests",
3239 : : sizeof(struct request), 0, SLAB_PANIC, NULL);
3240 : :
3241 : 0 : blk_requestq_cachep = kmem_cache_create("blkdev_queue",
3242 : : sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
3243 : :
3244 : 0 : return 0;
3245 : : }