LCOV - code coverage report
Current view: top level - arch/arm/include/asm - dma-mapping.h
Test: coverage.info
Date: 2014-02-18

                   Hit    Total    Coverage
Lines:               0       20       0.0 %
Functions:           0        0          -
Branches:            0       70       0.0 %

           Branch data     Line data    Source code
       1                 :            : #ifndef ASMARM_DMA_MAPPING_H
       2                 :            : #define ASMARM_DMA_MAPPING_H
       3                 :            : 
       4                 :            : #ifdef __KERNEL__
       5                 :            : 
       6                 :            : #include <linux/mm_types.h>
       7                 :            : #include <linux/scatterlist.h>
       8                 :            : #include <linux/dma-attrs.h>
       9                 :            : #include <linux/dma-debug.h>
      10                 :            : 
      11                 :            : #include <asm-generic/dma-coherent.h>
      12                 :            : #include <asm/memory.h>
      13                 :            : 
      14                 :            : #include <xen/xen.h>
      15                 :            : #include <asm/xen/hypervisor.h>
      16                 :            : 
      17                 :            : #define DMA_ERROR_CODE  (~0)
      18                 :            : extern struct dma_map_ops arm_dma_ops;
      19                 :            : extern struct dma_map_ops arm_coherent_dma_ops;
      20                 :            : 
      21                 :            : static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
      22                 :            : {
      23 [ #  # ][ #  # ]:          0 :         if (dev && dev->archdata.dma_ops)
      24                 :            :                 return dev->archdata.dma_ops;
      25                 :            :         return &arm_dma_ops;
      26                 :            : }
      27                 :            : 
      28                 :            : static inline struct dma_map_ops *get_dma_ops(struct device *dev)
      29                 :            : {
      30                 :            :         if (xen_initial_domain())
      31                 :            :                 return xen_dma_ops;
      32                 :            :         else
      33                 :            :                 return __generic_dma_ops(dev);
      34                 :            : }
      35                 :            : 
      36                 :            : static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
      37                 :            : {
      38                 :            :         BUG_ON(!dev);
      39                 :            :         dev->archdata.dma_ops = ops;
      40                 :            : }
      41                 :            : 
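The three helpers above implement per-device dispatch: __generic_dma_ops() falls back to the global arm_dma_ops unless platform code has installed something else via set_dma_ops(), and get_dma_ops() additionally diverts every call to xen_dma_ops when running as the Xen initial domain. A minimal sketch of how platform code might mark a cache-coherent bus master; the setup hook name is hypothetical, only set_dma_ops() and arm_coherent_dma_ops come from this header:

    static void my_coherent_device_setup(struct device *dev)
    {
            /* skip cache maintenance for this master from now on;
             * arm_coherent_dma_ops is declared earlier in this header */
            set_dma_ops(dev, &arm_coherent_dma_ops);
    }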
      42                 :            : #include <asm-generic/dma-mapping-common.h>
      43                 :            : 
      44                 :            : static inline int dma_set_mask(struct device *dev, u64 mask)
      45                 :            : {
      46                 :          0 :         return get_dma_ops(dev)->set_dma_mask(dev, mask);
      47                 :            : }
      48                 :            : 
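dma_set_mask() simply forwards to the set_dma_mask method of whichever ops table get_dma_ops() selects (arm_dma_set_mask for the ARM tables, declared further down). A hedged sketch of the usual driver-side call, assuming a hypothetical probe routine; DMA_BIT_MASK() is the standard helper from <linux/dma-mapping.h>:

    static int my_probe(struct device *dev)
    {
            /* refuse to bind if the bus cannot address 32-bit DMA */
            if (dma_set_mask(dev, DMA_BIT_MASK(32)))
                    return -EIO;
            /* ... buffers can now be allocated and mapped ... */
            return 0;
    }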
      49                 :            : #ifdef __arch_page_to_dma
      50                 :            : #error Please update to __arch_pfn_to_dma
      51                 :            : #endif
      52                 :            : 
      53                 :            : /*
      54                 :            :  * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
      55                 :            :  * functions used internally by the DMA-mapping API to provide DMA
      56                 :            :  * addresses. They must not be used by drivers.
      57                 :            :  */
      58                 :            : #ifndef __arch_pfn_to_dma
      59                 :            : static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
      60                 :            : {
      61                 :          0 :         return (dma_addr_t)__pfn_to_bus(pfn);
      62                 :            : }
      63                 :            : 
      64                 :            : static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
      65                 :            : {
      66                 :          0 :         return __bus_to_pfn(addr);
      67                 :            : }
      68                 :            : 
      69                 :            : static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
      70                 :            : {
      71                 :            :         return (void *)__bus_to_virt((unsigned long)addr);
      72                 :            : }
      73                 :            : 
      74                 :            : static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
      75                 :            : {
      76                 :            :         return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
      77                 :            : }
      78                 :            : 
      79                 :            : #else
      80                 :            : static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
      81                 :            : {
      82                 :            :         return __arch_pfn_to_dma(dev, pfn);
      83                 :            : }
      84                 :            : 
      85                 :            : static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
      86                 :            : {
      87                 :            :         return __arch_dma_to_pfn(dev, addr);
      88                 :            : }
      89                 :            : 
      90                 :            : static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
      91                 :            : {
      92                 :            :         return __arch_dma_to_virt(dev, addr);
      93                 :            : }
      94                 :            : 
      95                 :            : static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
      96                 :            : {
      97                 :            :         return __arch_virt_to_dma(dev, addr);
      98                 :            : }
      99                 :            : #endif
     100                 :            : 
     101                 :            : /* The ARM override for dma_max_pfn() */
     102                 :            : static inline unsigned long dma_max_pfn(struct device *dev)
     103                 :            : {
     104                 :          0 :         return PHYS_PFN_OFFSET + dma_to_pfn(dev, *dev->dma_mask);
     105                 :            : }
     106                 :            : #define dma_max_pfn(dev) dma_max_pfn(dev)
     107                 :            : 
     108                 :            : static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
     109                 :            : {
     110                 :          0 :         unsigned int offset = paddr & ~PAGE_MASK;
     111                 :          0 :         return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
     112                 :            : }
     113                 :            : 
     114                 :            : static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
     115                 :            : {
     116                 :          0 :         unsigned int offset = dev_addr & ~PAGE_MASK;
     117                 :          0 :         return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
     118                 :            : }
     119                 :            : 
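Both converters split the address at a page boundary and translate only the page frame. As a worked example, assuming 4 KiB pages and the default identity bus translation (__pfn_to_bus/__bus_to_pfn): paddr = 0x80001234 gives offset = 0x234 and pfn = 0x80001, so phys_to_dma() returns 0x80001000 + 0x234 = 0x80001234. A platform's __arch_pfn_to_dma override would remap only the 0x80001 page-frame part, leaving the sub-page offset intact.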
     120                 :            : static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
     121                 :            : {
     122                 :            :         u64 limit, mask;
     123                 :            : 
      124 [ #  # ][ #  # ]:          0 :         if (!dev->dma_mask)
     125                 :            :                 return 0;
     126                 :            : 
     127                 :          0 :         mask = *dev->dma_mask;
     128                 :            : 
     129                 :          0 :         limit = (mask + 1) & ~mask;
      130 [ #  # ][ #  # ]:          0 :         if (limit && size > limit)
     131                 :            :                 return 0;
     132                 :            : 
      133 [ #  # ][ #  # ]:          0 :         if ((addr | (addr + size - 1)) & ~mask)
     134                 :            :                 return 0;
     135                 :            : 
     136                 :            :         return 1;
     137                 :            : }
     138                 :            : 
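The only non-obvious step in dma_capable() is limit = (mask + 1) & ~mask: for a contiguous low-bit mask this yields the size of the addressable window, e.g. mask = 0xffffffff gives limit = 0x100000000 (4 GiB), while an all-ones 64-bit mask wraps mask + 1 around to 0, so limit = 0 and the size check is skipped. The final test then rejects any buffer whose first or last byte falls outside the mask.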
     139                 :            : static inline void dma_mark_clean(void *addr, size_t size) { }
     140                 :            : 
     141                 :            : /*
     142                 :            :  * DMA errors are defined by all-bits-set in the DMA address.
     143                 :            :  */
     144                 :            : static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
     145                 :            : {
     146                 :            :         debug_dma_mapping_error(dev, dma_addr);
     147                 :            :         return dma_addr == DMA_ERROR_CODE;
     148                 :            : }
     149                 :            : 
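Because failure is signalled in-band as DMA_ERROR_CODE rather than by a separate return value, every streaming mapping has to be screened with dma_mapping_error() before the address is used. A sketch of the intended pattern, with buf and len as illustrative variables; dma_map_single() is supplied by <asm-generic/dma-mapping-common.h>, included above:

    dma_addr_t handle;

    handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
    if (dma_mapping_error(dev, handle))
            return -ENOMEM;     /* handle was DMA_ERROR_CODE, i.e. ~0 */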
     150                 :            : /*
     151                 :            :  * Dummy noncoherent implementation.  We don't provide a dma_cache_sync
     152                 :            :  * function so drivers using this API are highlighted with build warnings.
     153                 :            :  */
     154                 :            : static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
     155                 :            :                 dma_addr_t *handle, gfp_t gfp)
     156                 :            : {
     157                 :            :         return NULL;
     158                 :            : }
     159                 :            : 
     160                 :            : static inline void dma_free_noncoherent(struct device *dev, size_t size,
     161                 :            :                 void *cpu_addr, dma_addr_t handle)
     162                 :            : {
     163                 :            : }
     164                 :            : 
     165                 :            : extern int dma_supported(struct device *dev, u64 mask);
     166                 :            : 
     167                 :            : extern int arm_dma_set_mask(struct device *dev, u64 dma_mask);
     168                 :            : 
     169                 :            : /**
     170                 :            :  * arm_dma_alloc - allocate consistent memory for DMA
     171                 :            :  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
     172                 :            :  * @size: required memory size
     173                 :            :  * @handle: bus-specific DMA address
      174                 :            :  * @attrs: optional attributes that specify mapping properties
     175                 :            :  *
     176                 :            :  * Allocate some memory for a device for performing DMA.  This function
     177                 :            :  * allocates pages, and will return the CPU-viewed address, and sets @handle
     178                 :            :  * to be the device-viewed address.
     179                 :            :  */
     180                 :            : extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
     181                 :            :                            gfp_t gfp, struct dma_attrs *attrs);
     182                 :            : 
     183                 :            : #define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
     184                 :            : 
     185                 :            : static inline void *dma_alloc_attrs(struct device *dev, size_t size,
     186                 :            :                                        dma_addr_t *dma_handle, gfp_t flag,
     187                 :            :                                        struct dma_attrs *attrs)
     188                 :            : {
     189                 :            :         struct dma_map_ops *ops = get_dma_ops(dev);
     190                 :            :         void *cpu_addr;
     191         [ #  # ]:          0 :         BUG_ON(!ops);
     192                 :            : 
     193                 :          0 :         cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
     194                 :            :         debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
     195                 :            :         return cpu_addr;
     196                 :            : }
     197                 :            : 
     198                 :            : /**
     199                 :            :  * arm_dma_free - free memory allocated by arm_dma_alloc
     200                 :            :  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
     201                 :            :  * @size: size of memory originally requested in dma_alloc_coherent
     202                 :            :  * @cpu_addr: CPU-view address returned from dma_alloc_coherent
     203                 :            :  * @handle: device-view address returned from dma_alloc_coherent
      204                 :            :  * @attrs: optional attributes that specify mapping properties
     205                 :            :  *
     206                 :            :  * Free (and unmap) a DMA buffer previously allocated by
     207                 :            :  * arm_dma_alloc().
     208                 :            :  *
      209                 :            :  * References to memory and mappings associated with cpu_addr/handle
      210                 :            :  * are illegal during and after this call.
     211                 :            :  */
     212                 :            : extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
     213                 :            :                          dma_addr_t handle, struct dma_attrs *attrs);
     214                 :            : 
     215                 :            : #define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)
     216                 :            : 
     217                 :            : static inline void dma_free_attrs(struct device *dev, size_t size,
     218                 :            :                                      void *cpu_addr, dma_addr_t dma_handle,
     219                 :            :                                      struct dma_attrs *attrs)
     220                 :            : {
     221                 :            :         struct dma_map_ops *ops = get_dma_ops(dev);
     222 [ #  # ][ #  # ]:          0 :         BUG_ON(!ops);
     223                 :            : 
     224                 :            :         debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
     225                 :          0 :         ops->free(dev, size, cpu_addr, dma_handle, attrs);
     226                 :            : }
     227                 :            : 
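A hedged sketch of the matching driver-side pattern for the two wrappers above, with all names illustrative; dma_alloc_coherent()/dma_free_coherent() are the NULL-attrs macros defined in this header:

    void *cpu;
    dma_addr_t bus;

    cpu = dma_alloc_coherent(dev, PAGE_SIZE, &bus, GFP_KERNEL);
    if (!cpu)
            return -ENOMEM;
    /* ... program the device with "bus", access the buffer via "cpu" ... */
    dma_free_coherent(dev, PAGE_SIZE, cpu, bus);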
     228                 :            : /**
     229                 :            :  * arm_dma_mmap - map a coherent DMA allocation into user space
     230                 :            :  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
     231                 :            :  * @vma: vm_area_struct describing requested user mapping
     232                 :            :  * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
     233                 :            :  * @handle: device-view address returned from dma_alloc_coherent
     234                 :            :  * @size: size of memory originally requested in dma_alloc_coherent
      235                 :            :  * @attrs: optional attributes that specify mapping properties
     236                 :            :  *
     237                 :            :  * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
     238                 :            :  * into user space.  The coherent DMA buffer must not be freed by the
     239                 :            :  * driver until the user space mapping has been released.
     240                 :            :  */
     241                 :            : extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
     242                 :            :                         void *cpu_addr, dma_addr_t dma_addr, size_t size,
     243                 :            :                         struct dma_attrs *attrs);
     244                 :            : 
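Drivers normally reach arm_dma_mmap() indirectly, through dma_mmap_coherent() from the common header included above (assuming this kernel version provides it there). A sketch of a file-operations mmap handler, with my_ctx and its fields purely hypothetical:

    static int my_mmap(struct file *file, struct vm_area_struct *vma)
    {
            struct my_ctx *ctx = file->private_data;    /* hypothetical */

            /* the buffer must stay allocated until userspace unmaps it */
            return dma_mmap_coherent(ctx->dev, vma, ctx->cpu_addr,
                                     ctx->dma_handle, ctx->size);
    }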
     245                 :            : static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
     246                 :            :                                        dma_addr_t *dma_handle, gfp_t flag)
     247                 :            : {
     248                 :          0 :         DEFINE_DMA_ATTRS(attrs);
     249                 :            :         dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
     250                 :            :         return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
     251                 :            : }
     252                 :            : 
     253                 :            : static inline void dma_free_writecombine(struct device *dev, size_t size,
     254                 :            :                                      void *cpu_addr, dma_addr_t dma_handle)
     255                 :            : {
     256                 :          0 :         DEFINE_DMA_ATTRS(attrs);
     257                 :            :         dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
     258                 :            :         return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
     259                 :            : }
     260                 :            : 
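Write-combined allocations are typically used for framebuffer-style memory that the CPU mostly streams writes into. A sketch under that assumption, with the fb_* names illustrative:

    void *fb_virt;
    dma_addr_t fb_bus;

    fb_virt = dma_alloc_writecombine(dev, fb_size, &fb_bus, GFP_KERNEL);
    if (!fb_virt)
            return -ENOMEM;
    /* ... point the display controller at fb_bus ... */
    dma_free_writecombine(dev, fb_size, fb_virt, fb_bus);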
     261                 :            : /*
     262                 :            :  * This can be called during early boot to increase the size of the atomic
     263                 :            :  * coherent DMA pool above the default value of 256KiB. It must be called
     264                 :            :  * before postcore_initcall.
     265                 :            :  */
     266                 :            : extern void __init init_dma_coherent_pool_size(unsigned long size);
     267                 :            : 
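A sketch of the call site this comment implies: a board file enlarging the pool from an early machine hook, before postcore_initcall runs. The hook name and the 1 MiB figure are illustrative; SZ_1M is the standard constant from <linux/sizes.h>:

    static void __init my_board_init_early(void)
    {
            /* grow the atomic coherent pool from the 256 KiB default */
            init_dma_coherent_pool_size(SZ_1M);
    }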
     268                 :            : /*
      269                 :            :  * For SA-1111, IXP425, and ADI systems, the dma-mapping functions are "magic"
     270                 :            :  * and utilize bounce buffers as needed to work around limited DMA windows.
     271                 :            :  *
     272                 :            :  * On the SA-1111, a bug limits DMA to only certain regions of RAM.
      273                 :            :  * On the IXP425, the PCI inbound window is 64MB (256MB total RAM).
      274                 :            :  * On some ADI engineering systems, the PCI inbound window is 32MB (12MB total RAM).
     275                 :            :  *
      276                 :            :  * The following are helper functions used by the dmabounce subsystem.
     277                 :            :  *
     278                 :            :  */
     279                 :            : 
     280                 :            : /**
     281                 :            :  * dmabounce_register_dev
     282                 :            :  *
     283                 :            :  * @dev: valid struct device pointer
     284                 :            :  * @small_buf_size: size of buffers to use with small buffer pool
     285                 :            :  * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
     286                 :            :  * @needs_bounce_fn: called to determine whether buffer needs bouncing
     287                 :            :  *
     288                 :            :  * This function should be called by low-level platform code to register
      289                 :            :  * a device as requiring DMA buffer bouncing. The function will allocate
     290                 :            :  * appropriate DMA pools for the device.
     291                 :            :  */
     292                 :            : extern int dmabounce_register_dev(struct device *, unsigned long,
     293                 :            :                 unsigned long, int (*)(struct device *, dma_addr_t, size_t));
     294                 :            : 
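A hedged sketch of a registration, loosely modelled on the limited-window cases described above: bounce anything the (illustrative) 64 MiB inbound window cannot reach, using a 2 KiB small-buffer pool and no large pool; SZ_64M comes from <linux/sizes.h> and all my_* names are hypothetical:

    static int my_needs_bounce(struct device *dev, dma_addr_t addr,
                               size_t size)
    {
            return (addr + size) > SZ_64M;      /* outside the DMA window? */
    }

    static int __init my_platform_init(struct device *dev)
    {
            return dmabounce_register_dev(dev, 2048, 0, my_needs_bounce);
    }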
     295                 :            : /**
     296                 :            :  * dmabounce_unregister_dev
     297                 :            :  *
     298                 :            :  * @dev: valid struct device pointer
     299                 :            :  *
     300                 :            :  * This function should be called by low-level platform code when device
     301                 :            :  * that was previously registered with dmabounce_register_dev is removed
     302                 :            :  * from the system.
     303                 :            :  *
     304                 :            :  */
     305                 :            : extern void dmabounce_unregister_dev(struct device *);
     306                 :            : 
     307                 :            : 
     308                 :            : 
     309                 :            : /*
     310                 :            :  * The scatter list versions of the above methods.
     311                 :            :  */
     312                 :            : extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
     313                 :            :                 enum dma_data_direction, struct dma_attrs *attrs);
     314                 :            : extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
     315                 :            :                 enum dma_data_direction, struct dma_attrs *attrs);
     316                 :            : extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
     317                 :            :                 enum dma_data_direction);
     318                 :            : extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
     319                 :            :                 enum dma_data_direction);
     320                 :            : extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
     321                 :            :                 void *cpu_addr, dma_addr_t dma_addr, size_t size,
     322                 :            :                 struct dma_attrs *attrs);
     323                 :            : 
     324                 :            : #endif /* __KERNEL__ */
     325                 :            : #endif

Generated by: LCOV version 1.9