/*
 * block/blk-settings.c
 *
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>

#include "blk.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;

/**
 * blk_queue_prep_rq - set a prepare_request function for queue
 * @q:          queue
 * @pfn:        prepare_request function
 *
 * It's possible for a queue to register a prepare_request callback which
 * is invoked before the request is handed to the request_fn. The goal of
 * the function is to prepare a request for I/O; it can be used, for
 * instance, to build a CDB from the request data.
 */
void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
{
        q->prep_rq_fn = pfn;
}
EXPORT_SYMBOL(blk_queue_prep_rq);

/**
 * blk_queue_unprep_rq - set an unprepare_request function for queue
 * @q:          queue
 * @ufn:        unprepare_request function
 *
 * It's possible for a queue to register an unprepare_request callback
 * which is invoked before the request is finally completed. The goal
 * of the function is to deallocate any data that was allocated in the
 * prepare_request callback.
 */
void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn)
{
        q->unprep_rq_fn = ufn;
}
EXPORT_SYMBOL(blk_queue_unprep_rq);
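
/*
 * Example (sketch, the my_* helpers are hypothetical): a SCSI-like
 * driver can pair the two callbacks above, building a command block in
 * the prepare hook and releasing it in the unprepare hook:
 *
 *	static int my_prep_rq(struct request_queue *q, struct request *rq)
 *	{
 *		rq->special = my_build_cdb(rq);
 *		if (!rq->special)
 *			return BLKPREP_DEFER;
 *		return BLKPREP_OK;
 *	}
 *
 *	static void my_unprep_rq(struct request_queue *q, struct request *rq)
 *	{
 *		my_free_cdb(rq->special);
 *		rq->special = NULL;
 *	}
 *
 *	blk_queue_prep_rq(q, my_prep_rq);
 *	blk_queue_unprep_rq(q, my_unprep_rq);
 */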

/**
 * blk_queue_merge_bvec - set a merge_bvec function for queue
 * @q:          queue
 * @mbfn:       merge_bvec_fn
 *
 * Usually queues have static limitations on the max sectors or segments that
 * we can put in a request. Stacking drivers may have some settings that
 * are dynamic, and thus we have to query the queue whether it is ok to
 * add a new bio_vec to a bio at a given offset or not. If the block device
 * has such limitations, it needs to register a merge_bvec_fn to control
 * the size of bios sent to it. Note that a block device *must* allow a
 * single page to be added to an empty bio. The block device driver may want
 * to use the bio_split() function to deal with these bios. By default
 * no merge_bvec_fn is defined for a queue, and only the fixed limits are
 * honored.
 */
void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
{
        q->merge_bvec_fn = mbfn;
}
EXPORT_SYMBOL(blk_queue_merge_bvec);
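
/*
 * Example (sketch, my_bytes_left_in_stripe is hypothetical): a striped
 * device can refuse bytes that would cross a stripe boundary.  The
 * callback returns how many bytes of @biovec may be added at the offset
 * described by @bvm; an empty bio must always be allowed one page:
 *
 *	static int my_merge_bvec(struct request_queue *q,
 *				 struct bvec_merge_data *bvm,
 *				 struct bio_vec *biovec)
 *	{
 *		if (!bvm->bi_size)
 *			return biovec->bv_len;
 *		return min_t(int, my_bytes_left_in_stripe(bvm), biovec->bv_len);
 *	}
 *
 *	blk_queue_merge_bvec(q, my_merge_bvec);
 */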

void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
{
        q->softirq_done_fn = fn;
}
EXPORT_SYMBOL(blk_queue_softirq_done);

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
        q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
{
        q->rq_timed_out_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);

void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
{
        q->lld_busy_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
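
/*
 * Example (sketch, the my_* names are hypothetical): a request-based
 * driver typically wires up its completion and timeout hooks together
 * at init time.  The timeout handler returns one of the standard
 * enum blk_eh_timer_return codes:
 *
 *	static enum blk_eh_timer_return my_timed_out(struct request *rq)
 *	{
 *		if (my_hw_still_busy(rq))
 *			return BLK_EH_RESET_TIMER;
 *		return BLK_EH_NOT_HANDLED;
 *	}
 *
 *	blk_queue_softirq_done(q, my_softirq_done);
 *	blk_queue_rq_timeout(q, 30 * HZ);
 *	blk_queue_rq_timed_out(q, my_timed_out);
 */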

/**
 * blk_set_default_limits - reset limits to default values
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limits struct to its default state.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
        lim->max_segments = BLK_MAX_SEGMENTS;
        lim->max_integrity_segments = 0;
        lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
        lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
        lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
        lim->max_write_same_sectors = 0;
        lim->max_discard_sectors = 0;
        lim->discard_granularity = 0;
        lim->discard_alignment = 0;
        lim->discard_misaligned = 0;
        lim->discard_zeroes_data = 0;
        lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
        lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
        lim->alignment_offset = 0;
        lim->io_opt = 0;
        lim->misaligned = 0;
        lim->cluster = 1;
}
EXPORT_SYMBOL(blk_set_default_limits);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limits struct to its default state. Should be used
 *   by stacking drivers like DM that have no internal limits.
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
        blk_set_default_limits(lim);

        /* Inherit limits from component devices */
        lim->discard_zeroes_data = 1;
        lim->max_segments = USHRT_MAX;
        lim->max_hw_sectors = UINT_MAX;
        lim->max_segment_size = UINT_MAX;
        lim->max_sectors = UINT_MAX;
        lim->max_write_same_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);

/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q:  the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Description:
 *    The normal way for &struct bios to be passed to a device
 *    driver is for them to be collected into requests on a request
 *    queue, and then to allow the device driver to select requests
 *    off that queue when it is ready.  This works well for many block
 *    devices. However some block devices (typically virtual devices
 *    such as md or lvm) do not benefit from the processing on the
 *    request queue, and are served best by having the requests passed
 *    directly to them.  This can be achieved by providing a function
 *    to blk_queue_make_request().
 *
 * Caveat:
 *    The driver that does this *must* be able to deal appropriately
 *    with buffers in "high memory". This can be accomplished by either calling
 *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
 *    blk_queue_bounce() to create a buffer in normal memory.
 **/
void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
        /*
         * set defaults
         */
        q->nr_requests = BLKDEV_MAX_RQ;

        q->make_request_fn = mfn;
        blk_queue_dma_alignment(q, 511);
        blk_queue_congestion_threshold(q);
        q->nr_batching = BLK_BATCH_REQ;

        blk_set_default_limits(&q->limits);

        /*
         * by default assume old behaviour and bounce for any highmem page
         */
        blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}
EXPORT_SYMBOL(blk_queue_make_request);
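
/*
 * Example (sketch, my_do_transfer is hypothetical): a bio-based virtual
 * driver (brd and loop are the in-tree models) allocates a bare queue
 * and installs its own make_request function instead of a request_fn:
 *
 *	static void my_make_request(struct request_queue *q, struct bio *bio)
 *	{
 *		my_do_transfer(bio);
 *		bio_endio(bio, 0);
 *	}
 *
 *	q = blk_alloc_queue(GFP_KERNEL);
 *	blk_queue_make_request(q, my_make_request);
 */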

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @max_addr: the maximum address the device can handle
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to. A low level driver can call
 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @max_addr.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
{
        unsigned long b_pfn = max_addr >> PAGE_SHIFT;
        int dma = 0;

        q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
        /*
         * Assume anything <= 4GB can be handled by IOMMU.  Actually
         * some IOMMUs can handle everything, but I don't know of a
         * way to test this here.
         */
        if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
                dma = 1;
        q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
#else
        if (b_pfn < blk_max_low_pfn)
                dma = 1;
        q->limits.bounce_pfn = b_pfn;
#endif
        if (dma) {
                init_emergency_isa_pool();
                q->bounce_gfp = GFP_NOIO | GFP_DMA;
                q->limits.bounce_pfn = b_pfn;
        }
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
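
/*
 * Example (sketch): a device that can only DMA below 16MB asks for ISA
 * bouncing, while a fully 64-bit capable device can effectively disable
 * bouncing:
 *
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
 */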

/**
 * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request
 * @limits: the queue limits
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set a hard upper limit,
 *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
 *    the device driver based upon the combined capabilities of I/O
 *    controller and storage device.
 *
 *    max_sectors is a soft limit imposed by the block layer for
 *    filesystem type requests.  This value can be overridden on a
 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *    The soft limit cannot exceed max_hw_sectors.
 **/
void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors)
{
        if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
                max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_hw_sectors);
        }

        limits->max_hw_sectors = max_hw_sectors;
        limits->max_sectors = min_t(unsigned int, max_hw_sectors,
                                    BLK_DEF_MAX_SECTORS);
}
EXPORT_SYMBOL(blk_limits_max_hw_sectors);

/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    See description for blk_limits_max_hw_sectors().
 **/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
        blk_limits_max_hw_sectors(&q->limits, max_hw_sectors);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
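
/*
 * Example (sketch, the 1MB figure is hypothetical): a controller that
 * can move at most 1MB per request advertises that limit in 512-byte
 * sectors; max_sectors is then capped at BLK_DEF_MAX_SECTORS or less:
 *
 *	blk_queue_max_hw_sectors(q, (1024 * 1024) >> 9);
 */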

/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q:  the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
                unsigned int max_discard_sectors)
{
        q->limits.max_discard_sectors = max_discard_sectors;
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_write_same_sectors - set max sectors for a single write same
 * @q:  the request queue for the device
 * @max_write_same_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_same_sectors(struct request_queue *q,
                                      unsigned int max_write_same_sectors)
{
        q->limits.max_write_same_sectors = max_write_same_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_same_sectors);

/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.
 **/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
        if (!max_segments) {
                max_segments = 1;
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_segments);
        }

        q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
        if (max_size < PAGE_CACHE_SIZE) {
                max_size = PAGE_CACHE_SIZE;
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_size);
        }

        q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);
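
/*
 * Example (sketch, figures hypothetical): hardware with a 128-entry
 * scatter/gather table and 64KB per element would set both segment
 * limits at probe time:
 *
 *	blk_queue_max_segments(q, 128);
 *	blk_queue_max_segment_size(q, 64 * 1024);
 */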

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q:  the request queue for the device
 * @size:  the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
{
        q->limits.logical_block_size = size;

        if (q->limits.physical_block_size < size)
                q->limits.physical_block_size = size;

        if (q->limits.io_min < q->limits.physical_block_size)
                q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_logical_block_size);

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q:  the request queue for the device
 * @size:  the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
        q->limits.physical_block_size = size;

        if (q->limits.physical_block_size < q->limits.logical_block_size)
                q->limits.physical_block_size = q->limits.logical_block_size;

        if (q->limits.io_min < q->limits.physical_block_size)
                q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q:  the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
        q->limits.alignment_offset =
                offset & (q->limits.physical_block_size - 1);
        q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);
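
/*
 * Example (sketch, figures hypothetical): a 512-byte-emulation drive
 * with 4KB physical sectors whose LBA 0 does not fall on a physical
 * block boundary might report:
 *
 *	blk_queue_logical_block_size(q, 512);
 *	blk_queue_physical_block_size(q, 4096);
 *	blk_queue_alignment_offset(q, 3584);
 */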

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
        limits->io_min = min;

        if (limits->io_min < limits->logical_block_size)
                limits->io_min = limits->logical_block_size;

        if (limits->io_min < limits->physical_block_size)
                limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q:  the request queue for the device
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty.  For disk drives this is often the
 *   physical block size.  For RAID arrays it is often the stripe chunk
 *   size.  A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
        blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
        limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q:  the request queue for the device
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
        blk_limits_io_opt(&q->limits, opt);
}
EXPORT_SYMBOL(blk_queue_io_opt);
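
/*
 * Example (sketch, geometry hypothetical): a RAID5 array with a 64KB
 * chunk and four data disks advertises the chunk as the minimum and
 * the full stripe as the optimal I/O size:
 *
 *	blk_queue_io_min(q, 64 * 1024);
 *	blk_queue_io_opt(q, 4 * 64 * 1024);
 */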

/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t:  the stacking driver (top)
 * @b:  the underlying device (bottom)
 **/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
        blk_stack_limits(&t->limits, &b->limits, 0);
}
EXPORT_SYMBOL(blk_queue_stack_limits);

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:  the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                     sector_t start)
{
        unsigned int top, bottom, alignment, ret = 0;

        t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
        t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
        t->max_write_same_sectors = min(t->max_write_same_sectors,
                                        b->max_write_same_sectors);
        t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

        t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
                                            b->seg_boundary_mask);

        t->max_segments = min_not_zero(t->max_segments, b->max_segments);
        t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
                                                 b->max_integrity_segments);

        t->max_segment_size = min_not_zero(t->max_segment_size,
                                           b->max_segment_size);

        t->misaligned |= b->misaligned;

        alignment = queue_limit_alignment_offset(b, start);

        /* Bottom device has different alignment.  Check that it is
         * compatible with the current top alignment.
         */
        if (t->alignment_offset != alignment) {

                top = max(t->physical_block_size, t->io_min)
                        + t->alignment_offset;
                bottom = max(b->physical_block_size, b->io_min) + alignment;

                /* Verify that top and bottom intervals line up */
                if (max(top, bottom) & (min(top, bottom) - 1)) {
                        t->misaligned = 1;
                        ret = -1;
                }
        }

        t->logical_block_size = max(t->logical_block_size,
                                    b->logical_block_size);

        t->physical_block_size = max(t->physical_block_size,
                                     b->physical_block_size);

        t->io_min = max(t->io_min, b->io_min);
        t->io_opt = lcm(t->io_opt, b->io_opt);

        t->cluster &= b->cluster;
        t->discard_zeroes_data &= b->discard_zeroes_data;

        /* Physical block size a multiple of the logical block size? */
        if (t->physical_block_size & (t->logical_block_size - 1)) {
                t->physical_block_size = t->logical_block_size;
                t->misaligned = 1;
                ret = -1;
        }

        /* Minimum I/O a multiple of the physical block size? */
        if (t->io_min & (t->physical_block_size - 1)) {
                t->io_min = t->physical_block_size;
                t->misaligned = 1;
                ret = -1;
        }

        /* Optimal I/O a multiple of the physical block size? */
        if (t->io_opt & (t->physical_block_size - 1)) {
                t->io_opt = 0;
                t->misaligned = 1;
                ret = -1;
        }

        /* Find lowest common alignment_offset */
        t->alignment_offset = lcm(t->alignment_offset, alignment)
                & (max(t->physical_block_size, t->io_min) - 1);

        /* Verify that new alignment_offset is on a logical block boundary */
        if (t->alignment_offset & (t->logical_block_size - 1)) {
                t->misaligned = 1;
                ret = -1;
        }

        /* Discard alignment and granularity */
        if (b->discard_granularity) {
                alignment = queue_limit_discard_alignment(b, start);

                if (t->discard_granularity != 0 &&
                    t->discard_alignment != alignment) {
                        top = t->discard_granularity + t->discard_alignment;
                        bottom = b->discard_granularity + alignment;

                        /* Verify that top and bottom intervals line up */
                        if ((max(top, bottom) % min(top, bottom)) != 0)
                                t->discard_misaligned = 1;
                }

                t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
                                                      b->max_discard_sectors);
                t->discard_granularity = max(t->discard_granularity,
                                             b->discard_granularity);
                t->discard_alignment = lcm(t->discard_alignment, alignment) %
                        t->discard_granularity;
        }

        return ret;
}
EXPORT_SYMBOL(blk_stack_limits);

/**
 * bdev_stack_limits - adjust queue limits for stacked drivers
 * @t:  the stacking driver limits (top device)
 * @bdev:  the component block_device (bottom)
 * @start:  first data sector within component device
 *
 * Description:
 *    Merges queue limits for a top device and a block_device.  Returns
 *    0 if alignment didn't change.  Returns -1 if adding the bottom
 *    device caused misalignment.
 */
int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
                      sector_t start)
{
        struct request_queue *bq = bdev_get_queue(bdev);

        start += get_start_sect(bdev);

        return blk_stack_limits(t, &bq->limits, start);
}
EXPORT_SYMBOL(bdev_stack_limits);
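
/*
 * Example (sketch, the my_dev fields are hypothetical): a stacking
 * driver starts from the permissive stacking defaults and folds in
 * each component device in turn:
 *
 *	struct queue_limits limits;
 *	int i;
 *
 *	blk_set_stacking_limits(&limits);
 *	for (i = 0; i < my_dev->nr_components; i++)
 *		if (bdev_stack_limits(&limits, my_dev->component[i],
 *				      my_dev->start[i]) < 0)
 *			pr_warn("component %d is misaligned\n", i);
 */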

/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk:  MD/DM gendisk (top)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 *
 * Description:
 *    Merges the limits for a top level gendisk and a bottom level
 *    block_device.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
                       sector_t offset)
{
        struct request_queue *t = disk->queue;

        if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
                char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

                disk_name(disk, 0, top);
                bdevname(bdev, bottom);

                printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
                       top, bottom);
        }
}
EXPORT_SYMBOL(disk_stack_limits);

/**
 * blk_queue_dma_pad - set pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Set dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
{
        q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_dma_pad);

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
        if (mask > q->dma_pad_mask)
                q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
 * @q:  the request queue for the device
 * @dma_drain_needed: fn which returns non-zero if drain is necessary
 * @buf:        physically contiguous buffer
 * @size:       size of the buffer in bytes
 *
 * Some devices have excess DMA problems and can't simply discard (or
 * zero fill) the unwanted piece of the transfer.  They have to have a
 * real area of memory to transfer it into.  The use case for this is
 * ATAPI devices in DMA mode.  If the packet command causes a transfer
 * bigger than the transfer size some HBAs will lock up if there
 * aren't DMA elements to contain the excess transfer.  What this API
 * does is adjust the queue so that the buf is always appended
 * silently to the scatterlist.
 *
 * Note: This routine adjusts max_hw_segments to make room for appending
 * the drain buffer.  If you call blk_queue_max_segments() after calling
 * this routine, you must set the limit to one fewer than your device
 * can support otherwise there won't be room for the drain buffer.
 */
int blk_queue_dma_drain(struct request_queue *q,
                               dma_drain_needed_fn *dma_drain_needed,
                               void *buf, unsigned int size)
{
        if (queue_max_segments(q) < 2)
                return -EINVAL;
        /* make room for appending the drain */
        blk_queue_max_segments(q, queue_max_segments(q) - 1);
        q->dma_drain_needed = dma_drain_needed;
        q->dma_drain_buffer = buf;
        q->dma_drain_size = size;

        return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
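
/*
 * Example (sketch, MY_DRAIN_SIZE and my_drain_needed are hypothetical;
 * libata's ATAPI handling is the classic in-tree user of this
 * interface):
 *
 *	buf = kmalloc(MY_DRAIN_SIZE, GFP_KERNEL);
 *	if (buf)
 *		blk_queue_dma_drain(q, my_drain_needed, buf, MY_DRAIN_SIZE);
 */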

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
        if (mask < PAGE_CACHE_SIZE - 1) {
                mask = PAGE_CACHE_SIZE - 1;
                printk(KERN_INFO "%s: set to minimum %lx\n",
                       __func__, mask);
        }

        q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Set required memory and length alignment for direct DMA transactions.
 *    This is used when building direct I/O requests for the queue.
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
        q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Update required memory and length alignment for direct DMA transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
        BUG_ON(mask > PAGE_SIZE);

        if (mask > q->dma_alignment)
                q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);

/**
 * blk_queue_flush - configure queue's cache flush capability
 * @q:          the request queue for the device
 * @flush:      0, REQ_FLUSH or REQ_FLUSH | REQ_FUA
 *
 * Tell block layer cache flush capability of @q.  If it supports
 * flushing, REQ_FLUSH should be set.  If it supports bypassing
 * write cache for individual writes, REQ_FUA should be set.
 */
void blk_queue_flush(struct request_queue *q, unsigned int flush)
{
        WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA));

        if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA)))
                flush &= ~REQ_FUA;

        q->flush_flags = flush & (REQ_FLUSH | REQ_FUA);
}
EXPORT_SYMBOL_GPL(blk_queue_flush);
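
/*
 * Example (sketch): a device with a volatile write cache that also
 * honors forced-unit-access writes declares both capabilities; a
 * write-through device passes 0 and the block layer handles flush
 * requests on its behalf:
 *
 *	blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
 */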

void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
{
        q->flush_not_queueable = !queueable;
}
EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);

static int __init blk_settings_init(void)
{
        blk_max_low_pfn = max_low_pfn - 1;
        blk_max_pfn = max_pfn - 1;
        return 0;
}
subsys_initcall(blk_settings_init);
