LCOV - code coverage report
Current view: top level - arch/arm/include/asm - cacheflush.h
Test: coverage.info        Date: 2014-02-18

                 Hit   Total   Coverage
  Lines:          10      14     71.4 %
  Functions:       0       0        -
  Branches:        8      20     40.0 %

           Branch data     Line data    Source code
       1                 :            : /*
       2                 :            :  *  arch/arm/include/asm/cacheflush.h
       3                 :            :  *
       4                 :            :  *  Copyright (C) 1999-2002 Russell King
       5                 :            :  *
       6                 :            :  * This program is free software; you can redistribute it and/or modify
       7                 :            :  * it under the terms of the GNU General Public License version 2 as
       8                 :            :  * published by the Free Software Foundation.
       9                 :            :  */
      10                 :            : #ifndef _ASMARM_CACHEFLUSH_H
      11                 :            : #define _ASMARM_CACHEFLUSH_H
      12                 :            : 
      13                 :            : #include <linux/mm.h>
      14                 :            : 
      15                 :            : #include <asm/glue-cache.h>
      16                 :            : #include <asm/shmparam.h>
      17                 :            : #include <asm/cachetype.h>
      18                 :            : #include <asm/outercache.h>
      19                 :            : #include <asm/rodata.h>
      20                 :            : 
      21                 :            : #define CACHE_COLOUR(vaddr)     ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
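
                                           /*
                                            * Illustrative worked example (not part of the original header), assuming
                                            * 4 KiB pages (PAGE_SHIFT == 12) and the usual ARM SHMLBA of 4 * PAGE_SIZE
                                            * == 0x4000: the colour is simply virtual address bits [13:12], e.g.
                                            *
                                            *      CACHE_COLOUR(0x3000) == (0x3000 & 0x3fff) >> 12 == 3
                                            */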
      22                 :            : 
      23                 :            : /*
      24                 :            :  * This flag is used to indicate that the page pointed to by a pte is clean
      25                 :            :  * and does not require cleaning before returning it to the user.
      26                 :            :  */
      27                 :            : #define PG_dcache_clean PG_arch_1
      28                 :            : 
      29                 :            : /*
      30                 :            :  *      MM Cache Management
      31                 :            :  *      ===================
      32                 :            :  *
      33                 :            :  *      The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
      34                 :            :  *      implement these methods.
      35                 :            :  *
      36                 :            :  *      Start addresses are inclusive and end addresses are exclusive;
      37                 :            :  *      start addresses should be rounded down, end addresses up.
      38                 :            :  *
      39                 :            :  *      See Documentation/cachetlb.txt for more information.
      40                 :            :  *      Please note that the implementation of these, and the required
      41                 :            :  *      effects are cache-type (VIVT/VIPT/PIPT) specific.
      42                 :            :  *
      43                 :            :  *      flush_icache_all()
      44                 :            :  *
      45                 :            :  *              Unconditionally clean and invalidate the entire icache.
      46                 :            :  *              Currently only needed for cache-v6.S and cache-v7.S, see
      47                 :            :  *              __flush_icache_all for the generic implementation.
      48                 :            :  *
      49                 :            :  *      flush_kern_all()
      50                 :            :  *
      51                 :            :  *              Unconditionally clean and invalidate the entire cache.
      52                 :            :  *
       53                 :            :  *      flush_kern_louis()
       54                 :            :  *
       55                 :            :  *              Flush data cache levels up to the Level of Unification
       56                 :            :  *              Inner Shareable (LoUIS) and invalidate the I-cache.
       57                 :            :  *              Only needed from v7 onwards, falls back to flush_cache_all()
       58                 :            :  *              for all other processor versions.
      59                 :            :  *
      60                 :            :  *      flush_user_all()
      61                 :            :  *
      62                 :            :  *              Clean and invalidate all user space cache entries
      63                 :            :  *              before a change of page tables.
      64                 :            :  *
      65                 :            :  *      flush_user_range(start, end, flags)
      66                 :            :  *
      67                 :            :  *              Clean and invalidate a range of cache entries in the
      68                 :            :  *              specified address space before a change of page tables.
      69                 :            :  *              - start - user start address (inclusive, page aligned)
      70                 :            :  *              - end   - user end address   (exclusive, page aligned)
      71                 :            :  *              - flags - vma->vm_flags field
      72                 :            :  *
      73                 :            :  *      coherent_kern_range(start, end)
      74                 :            :  *
      75                 :            :  *              Ensure coherency between the Icache and the Dcache in the
      76                 :            :  *              region described by start, end.  If you have non-snooping
      77                 :            :  *              Harvard caches, you need to implement this function.
      78                 :            :  *              - start  - virtual start address
      79                 :            :  *              - end    - virtual end address
      80                 :            :  *
      81                 :            :  *      coherent_user_range(start, end)
      82                 :            :  *
      83                 :            :  *              Ensure coherency between the Icache and the Dcache in the
      84                 :            :  *              region described by start, end.  If you have non-snooping
      85                 :            :  *              Harvard caches, you need to implement this function.
      86                 :            :  *              - start  - virtual start address
      87                 :            :  *              - end    - virtual end address
      88                 :            :  *
      89                 :            :  *      flush_kern_dcache_area(kaddr, size)
      90                 :            :  *
      91                 :            :  *              Ensure that the data held in page is written back.
      92                 :            :  *              - kaddr  - page address
      93                 :            :  *              - size   - region size
      94                 :            :  *
      95                 :            :  *      DMA Cache Coherency
      96                 :            :  *      ===================
      97                 :            :  *
      98                 :            :  *      dma_flush_range(start, end)
      99                 :            :  *
     100                 :            :  *              Clean and invalidate the specified virtual address range.
     101                 :            :  *              - start  - virtual start address
     102                 :            :  *              - end    - virtual end address
     103                 :            :  */
     104                 :            : 
     105                 :            : struct cpu_cache_fns {
     106                 :            :         void (*flush_icache_all)(void);
     107                 :            :         void (*flush_kern_all)(void);
     108                 :            :         void (*flush_kern_louis)(void);
     109                 :            :         void (*flush_user_all)(void);
     110                 :            :         void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
     111                 :            : 
     112                 :            :         void (*coherent_kern_range)(unsigned long, unsigned long);
     113                 :            :         int  (*coherent_user_range)(unsigned long, unsigned long);
     114                 :            :         void (*flush_kern_dcache_area)(void *, size_t);
     115                 :            : 
     116                 :            :         void (*dma_map_area)(const void *, size_t, int);
     117                 :            :         void (*dma_unmap_area)(const void *, size_t, int);
     118                 :            : 
     119                 :            :         void (*dma_flush_range)(const void *, const void *);
     120                 :            : };
     121                 :            : 
     122                 :            : /*
     123                 :            :  * Select the calling method
     124                 :            :  */
     125                 :            : #ifdef MULTI_CACHE
     126                 :            : 
     127                 :            : extern struct cpu_cache_fns cpu_cache;
     128                 :            : 
     129                 :            : #define __cpuc_flush_icache_all         cpu_cache.flush_icache_all
     130                 :            : #define __cpuc_flush_kern_all           cpu_cache.flush_kern_all
     131                 :            : #define __cpuc_flush_kern_louis         cpu_cache.flush_kern_louis
     132                 :            : #define __cpuc_flush_user_all           cpu_cache.flush_user_all
     133                 :            : #define __cpuc_flush_user_range         cpu_cache.flush_user_range
     134                 :            : #define __cpuc_coherent_kern_range      cpu_cache.coherent_kern_range
     135                 :            : #define __cpuc_coherent_user_range      cpu_cache.coherent_user_range
     136                 :            : #define __cpuc_flush_dcache_area        cpu_cache.flush_kern_dcache_area
     137                 :            : 
     138                 :            : /*
     139                 :            :  * These are private to the dma-mapping API.  Do not use directly.
     140                 :            :  * Their sole purpose is to ensure that data held in the cache
     141                 :            :  * is visible to DMA, or data written by DMA to system memory is
     142                 :            :  * visible to the CPU.
     143                 :            :  */
     144                 :            : #define dmac_map_area                   cpu_cache.dma_map_area
     145                 :            : #define dmac_unmap_area                 cpu_cache.dma_unmap_area
     146                 :            : #define dmac_flush_range                cpu_cache.dma_flush_range
     147                 :            : 
     148                 :            : #else
     149                 :            : 
     150                 :            : extern void __cpuc_flush_icache_all(void);
     151                 :            : extern void __cpuc_flush_kern_all(void);
     152                 :            : extern void __cpuc_flush_kern_louis(void);
     153                 :            : extern void __cpuc_flush_user_all(void);
     154                 :            : extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
     155                 :            : extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
     156                 :            : extern int  __cpuc_coherent_user_range(unsigned long, unsigned long);
     157                 :            : extern void __cpuc_flush_dcache_area(void *, size_t);
     158                 :            : 
     159                 :            : /*
     160                 :            :  * These are private to the dma-mapping API.  Do not use directly.
     161                 :            :  * Their sole purpose is to ensure that data held in the cache
     162                 :            :  * is visible to DMA, or data written by DMA to system memory is
     163                 :            :  * visible to the CPU.
     164                 :            :  */
     165                 :            : extern void dmac_map_area(const void *, size_t, int);
     166                 :            : extern void dmac_unmap_area(const void *, size_t, int);
     167                 :            : extern void dmac_flush_range(const void *, const void *);
     168                 :            : 
     169                 :            : #endif
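
/*
 * Hedged illustration (not from the original header; example_clean_buffer is a
 * made-up name): whichever branch above is taken, callers are dispatch-agnostic
 * and always use the __cpuc_* names, never testing MULTI_CACHE themselves.
 */
static inline void example_clean_buffer(void *buf, size_t len)
{
        /* clean + invalidate the kernel-mapped buffer from the D-cache,
         * whatever backend sits behind __cpuc_flush_dcache_area here */
        __cpuc_flush_dcache_area(buf, len);
}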
     170                 :            : 
     171                 :            : /*
     172                 :            :  * Copy user data from/to a page which is mapped into a different
      173                 :            :  * process's address space.  Really, we want to allow our "user
     174                 :            :  * space" model to handle this.
     175                 :            :  */
     176                 :            : extern void copy_to_user_page(struct vm_area_struct *, struct page *,
     177                 :            :         unsigned long, void *, const void *, unsigned long);
     178                 :            : #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
     179                 :            :         do {                                                    \
     180                 :            :                 memcpy(dst, src, len);                          \
     181                 :            :         } while (0)
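
/*
 * Hedged usage sketch (not part of this header; the helper name is invented,
 * and kmap()/kunmap() are assumed from linux/highmem.h): this follows the
 * generic access_process_vm()-style pattern in mm/memory.c, where the kernel
 * writes through its own mapping of another task's page and relies on
 * copy_to_user_page() to do both the copy and the cache maintenance needed
 * for the user mapping at @vaddr.
 */
static inline void example_poke_remote_page(struct vm_area_struct *vma,
                                            struct page *page,
                                            unsigned long vaddr,
                                            unsigned long offset,
                                            const void *buf,
                                            unsigned long len)
{
        void *maddr = kmap(page);

        /* copy_to_user_page() performs the memcpy plus the required
         * D/I-cache maintenance for the target's user-space mapping */
        copy_to_user_page(vma, page, vaddr, maddr + offset, buf, len);
        kunmap(page);
}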
     182                 :            : 
     183                 :            : /*
     184                 :            :  * Convert calls to our calling convention.
     185                 :            :  */
     186                 :            : 
     187                 :            : /* Invalidate I-cache */
     188                 :            : #define __flush_icache_all_generic()                                    \
     189                 :            :         asm("mcr   p15, 0, %0, c7, c5, 0"                             \
     190                 :            :             : : "r" (0));
     191                 :            : 
     192                 :            : /* Invalidate I-cache inner shareable */
     193                 :            : #define __flush_icache_all_v7_smp()                                     \
     194                 :            :         asm("mcr   p15, 0, %0, c7, c1, 0"                             \
     195                 :            :             : : "r" (0));
     196                 :            : 
     197                 :            : /*
     198                 :            :  * Optimized __flush_icache_all for the common cases. Note that UP ARMv7
     199                 :            :  * will fall through to use __flush_icache_all_generic.
     200                 :            :  */
     201                 :            : #if (defined(CONFIG_CPU_V7) && \
     202                 :            :      (defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) || \
     203                 :            :         defined(CONFIG_SMP_ON_UP)
     204                 :            : #define __flush_icache_preferred        __cpuc_flush_icache_all
     205                 :            : #elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
     206                 :            : #define __flush_icache_preferred        __flush_icache_all_v7_smp
     207                 :            : #elif __LINUX_ARM_ARCH__ == 6 && defined(CONFIG_ARM_ERRATA_411920)
     208                 :            : #define __flush_icache_preferred        __cpuc_flush_icache_all
     209                 :            : #else
     210                 :            : #define __flush_icache_preferred        __flush_icache_all_generic
     211                 :            : #endif
     212                 :            : 
     213                 :            : static inline void __flush_icache_all(void)
     214                 :            : {
     215                 :            :         __flush_icache_preferred();
     216                 :            : }
     217                 :            : 
     218                 :            : /*
     219                 :            :  * Flush caches up to Level of Unification Inner Shareable
     220                 :            :  */
     221                 :            : #define flush_cache_louis()             __cpuc_flush_kern_louis()
     222                 :            : 
     223                 :            : #define flush_cache_all()               __cpuc_flush_kern_all()
     224                 :            : 
     225                 :            : static inline void vivt_flush_cache_mm(struct mm_struct *mm)
     226                 :            : {
     227                 :            :         if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
     228                 :            :                 __cpuc_flush_user_all();
     229                 :            : }
     230                 :            : 
     231                 :            : static inline void
     232                 :            : vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
     233                 :            : {
     234                 :            :         struct mm_struct *mm = vma->vm_mm;
     235                 :            : 
     236                 :            :         if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
     237                 :            :                 __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
     238                 :            :                                         vma->vm_flags);
     239                 :            : }
     240                 :            : 
     241                 :            : static inline void
     242                 :            : vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
     243                 :            : {
     244                 :            :         struct mm_struct *mm = vma->vm_mm;
     245                 :            : 
     246                 :            :         if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
     247                 :            :                 unsigned long addr = user_addr & PAGE_MASK;
     248                 :            :                 __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
     249                 :            :         }
     250                 :            : }
     251                 :            : 
     252                 :            : #ifndef CONFIG_CPU_CACHE_VIPT
     253                 :            : #define flush_cache_mm(mm) \
     254                 :            :                 vivt_flush_cache_mm(mm)
     255                 :            : #define flush_cache_range(vma,start,end) \
     256                 :            :                 vivt_flush_cache_range(vma,start,end)
     257                 :            : #define flush_cache_page(vma,addr,pfn) \
     258                 :            :                 vivt_flush_cache_page(vma,addr,pfn)
     259                 :            : #else
     260                 :            : extern void flush_cache_mm(struct mm_struct *mm);
     261                 :            : extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
     262                 :            : extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
     263                 :            : #endif
     264                 :            : 
     265                 :            : #define flush_cache_dup_mm(mm) flush_cache_mm(mm)
     266                 :            : 
     267                 :            : /*
     268                 :            :  * flush_cache_user_range is used when we want to ensure that the
     269                 :            :  * Harvard caches are synchronised for the user space address range.
     270                 :            :  * This is used for the ARM private sys_cacheflush system call.
     271                 :            :  */
     272                 :            : #define flush_cache_user_range(s,e)     __cpuc_coherent_user_range(s,e)
     273                 :            : 
     274                 :            : /*
     275                 :            :  * Perform necessary cache operations to ensure that data previously
     276                 :            :  * stored within this range of addresses can be executed by the CPU.
     277                 :            :  */
     278                 :            : #define flush_icache_range(s,e)         __cpuc_coherent_kern_range(s,e)
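
/*
 * Hedged usage sketch (not part of this header; names are made up): any kernel
 * code that stores instructions it is about to execute, e.g. a module loader
 * or instruction-patching code, must follow the store with
 * flush_icache_range() so the new instructions become visible to the I-cache.
 */
static inline void example_patch_insn(u32 *addr, u32 insn)
{
        *addr = insn;                           /* write via the D-cache */
        flush_icache_range((unsigned long)addr,
                           (unsigned long)addr + sizeof(insn));
}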
     279                 :            : 
     280                 :            : /*
     281                 :            :  * Perform necessary cache operations to ensure that the TLB will
     282                 :            :  * see data written in the specified area.
     283                 :            :  */
     284                 :            : #define clean_dcache_area(start,size)   cpu_dcache_clean_area(start, size)
     285                 :            : 
     286                 :            : /*
     287                 :            :  * flush_dcache_page is used when the kernel has written to the page
     288                 :            :  * cache page at virtual address page->virtual.
     289                 :            :  *
     290                 :            :  * If this page isn't mapped (ie, page_mapping == NULL), or it might
     291                 :            :  * have userspace mappings, then we _must_ always clean + invalidate
     292                 :            :  * the dcache entries associated with the kernel mapping.
     293                 :            :  *
     294                 :            :  * Otherwise we can defer the operation, and clean the cache when we are
     295                 :            :  * about to change to user space.  This is the same method as used on SPARC64.
     296                 :            :  * See update_mmu_cache for the user space part.
     297                 :            :  */
     298                 :            : #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
     299                 :            : extern void flush_dcache_page(struct page *);
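
/*
 * Simplified sketch of the deferral policy described above, not the real
 * implementation (which lives in arch/arm/mm/flush.c and also handles highmem
 * and the VIPT non-aliasing fast path); a lowmem page is assumed so that
 * page_address() is valid, and the helper name is invented.
 */
static inline void example_flush_dcache_page_policy(struct page *page)
{
        struct address_space *mapping = page_mapping(page);

        if (mapping && !page_mapped(page)) {
                /* page-cache page with no user mappings yet: defer by
                 * clearing PG_dcache_clean; update_mmu_cache() cleans later */
                clear_bit(PG_dcache_clean, &page->flags);
        } else {
                /* flush the kernel alias now and mark the page clean */
                __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
                set_bit(PG_dcache_clean, &page->flags);
        }
}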
     300                 :            : 
     301                 :            : static inline void flush_kernel_vmap_range(void *addr, int size)
     302                 :            : {
     303                 :            :         if ((cache_is_vivt() || cache_is_vipt_aliasing()))
     304                 :            :           __cpuc_flush_dcache_area(addr, (size_t)size);
     305                 :            : }
     306                 :            : static inline void invalidate_kernel_vmap_range(void *addr, int size)
     307                 :            : {
     308                 :            :         if ((cache_is_vivt() || cache_is_vipt_aliasing()))
     309                 :            :           __cpuc_flush_dcache_area(addr, (size_t)size);
     310                 :            : }
     311                 :            : 
     312                 :            : #define ARCH_HAS_FLUSH_ANON_PAGE
     313                 :            : static inline void flush_anon_page(struct vm_area_struct *vma,
     314                 :    2555761 :                          struct page *page, unsigned long vmaddr)
     315                 :            : {
     316                 :            :         extern void __flush_anon_page(struct vm_area_struct *vma,
     317                 :            :                                 struct page *, unsigned long);
     318         [ +  + ]:    2555761 :         if (PageAnon(page))
     319                 :    2555647 :                 __flush_anon_page(vma, page, vmaddr);
     320                 :            : }
     321                 :            : 
     322                 :            : #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
     323                 :            : extern void flush_kernel_dcache_page(struct page *);
     324                 :            : 
     325                 :            : #define flush_dcache_mmap_lock(mapping) \
     326                 :            :         spin_lock_irq(&(mapping)->tree_lock)
     327                 :            : #define flush_dcache_mmap_unlock(mapping) \
     328                 :            :         spin_unlock_irq(&(mapping)->tree_lock)
     329                 :            : 
     330                 :            : #define flush_icache_user_range(vma,page,addr,len) \
     331                 :            :         flush_dcache_page(page)
     332                 :            : 
     333                 :            : /*
     334                 :            :  * We don't appear to need to do anything here.  In fact, if we did, we'd
     335                 :            :  * duplicate cache flushing elsewhere performed by flush_dcache_page().
     336                 :            :  */
     337                 :            : #define flush_icache_page(vma,page)     do { } while (0)
     338                 :            : 
     339                 :            : /*
     340                 :            :  * flush_cache_vmap() is used when creating mappings (eg, via vmap,
     341                 :            :  * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
     342                 :            :  * caches, since the direct-mappings of these pages may contain cached
     343                 :            :  * data, we need to do a full cache flush to ensure that writebacks
     344                 :            :  * don't corrupt data placed into these pages via the new mappings.
     345                 :            :  */
     346                 :            : static inline void flush_cache_vmap(unsigned long start, unsigned long end)
     347                 :            : {
     348 [ #  # ][ -  + ]:     137696 :         if (!cache_is_vipt_nonaliasing())
     349                 :          0 :                 flush_cache_all();
     350                 :            :         else
     351                 :            :                 /*
     352                 :            :                  * set_pte_at() called from vmap_pte_range() does not
     353                 :            :                  * have a DSB after cleaning the cache line.
     354                 :            :                  */
     355                 :     137696 :                 dsb(ishst);
     356                 :            : }
     357                 :            : 
     358                 :            : static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
     359                 :            : {
     360 [ #  # ][ #  # ]:     137616 :         if (!cache_is_vipt_nonaliasing())
                 [ -  + ]
     361                 :          0 :                 flush_cache_all();
     362                 :            : }
     363                 :            : 
     364                 :            : /*
     365                 :            :  * Memory synchronization helpers for mixed cached vs non cached accesses.
     366                 :            :  *
     367                 :            :  * Some synchronization algorithms have to set states in memory with the
     368                 :            :  * cache enabled or disabled depending on the code path.  It is crucial
     369                 :            :  * to always ensure proper cache maintenance to update main memory right
     370                 :            :  * away in that case.
     371                 :            :  *
     372                 :            :  * Any cached write must be followed by a cache clean operation.
     373                 :            :  * Any cached read must be preceded by a cache invalidate operation.
     374                 :            :  * Yet, in the read case, a cache flush i.e. atomic clean+invalidate
     375                 :            :  * operation is needed to avoid discarding possible concurrent writes to the
     376                 :            :  * accessed memory.
     377                 :            :  *
     378                 :            :  * Also, in order to prevent a cached writer from interfering with an
      379                 :            :  * adjacent non-cached writer, each state variable must be located in
     380                 :            :  * a separate cache line.
     381                 :            :  */
     382                 :            : 
     383                 :            : /*
     384                 :            :  * This needs to be >= the max cache writeback size of all
     385                 :            :  * supported platforms included in the current kernel configuration.
     386                 :            :  * This is used to align state variables to their own cache lines.
     387                 :            :  */
     388                 :            : #define __CACHE_WRITEBACK_ORDER 6  /* guessed from existing platforms */
     389                 :            : #define __CACHE_WRITEBACK_GRANULE (1 << __CACHE_WRITEBACK_ORDER)
     390                 :            : 
     391                 :            : /*
     392                 :            :  * There is no __cpuc_clean_dcache_area but we use it anyway for
     393                 :            :  * code intent clarity, and alias it to __cpuc_flush_dcache_area.
     394                 :            :  */
     395                 :            : #define __cpuc_clean_dcache_area __cpuc_flush_dcache_area
     396                 :            : 
     397                 :            : /*
     398                 :            :  * Ensure preceding writes to *p by this CPU are visible to
     399                 :            :  * subsequent reads by other CPUs:
     400                 :            :  */
     401                 :            : static inline void __sync_cache_range_w(volatile void *p, size_t size)
     402                 :            : {
     403                 :            :         char *_p = (char *)p;
     404                 :            : 
     405                 :    5145203 :         __cpuc_clean_dcache_area(_p, size);
     406                 :   10290368 :         outer_clean_range(__pa(_p), __pa(_p + size));
     407                 :            : }
     408                 :            : 
     409                 :            : /*
     410                 :            :  * Ensure preceding writes to *p by other CPUs are visible to
     411                 :            :  * subsequent reads by this CPU.  We must be careful not to
     412                 :            :  * discard data simultaneously written by another CPU, hence the
     413                 :            :  * usage of flush rather than invalidate operations.
     414                 :            :  */
     415                 :            : static inline void __sync_cache_range_r(volatile void *p, size_t size)
     416                 :            : {
     417                 :            :         char *_p = (char *)p;
     418                 :            : 
     419                 :            : #ifdef CONFIG_OUTER_CACHE
     420 [ -  + ][ -  + ]:    2735262 :         if (outer_cache.flush_range) {
           [ -  +  -  + ]
     421                 :            :                 /*
     422                 :            :                  * Ensure dirty data migrated from other CPUs into our cache
     423                 :            :                  * are cleaned out safely before the outer cache is cleaned:
     424                 :            :                  */
     425                 :          0 :                 __cpuc_clean_dcache_area(_p, size);
     426                 :            : 
     427                 :            :                 /* Clean and invalidate stale data for *p from outer ... */
     428                 :          0 :                 outer_flush_range(__pa(_p), __pa(_p + size));
     429                 :            :         }
     430                 :            : #endif
     431                 :            : 
     432                 :            :         /* ... and inner cache: */
     433                 :    2735262 :         __cpuc_flush_dcache_area(_p, size);
     434                 :            : }
     435                 :            : 
     436                 :            : #define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
     437                 :            : #define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
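
/*
 * Hedged illustration of the rules in the "Memory synchronization helpers"
 * comment above (all names below are made up): a state word shared with a
 * non-cached observer gets its own writeback granule, cached writes are
 * published with sync_cache_w(), and cached reads are refreshed with
 * sync_cache_r() so that concurrent external writes are not discarded.
 */
struct example_shared_state {
        unsigned long value;
} __attribute__((__aligned__(__CACHE_WRITEBACK_GRANULE)));

static inline void example_publish(struct example_shared_state *s,
                                   unsigned long v)
{
        s->value = v;
        sync_cache_w(&s->value);        /* clean: push the write to memory */
}

static inline unsigned long example_observe(struct example_shared_state *s)
{
        sync_cache_r(&s->value);        /* flush: pick up external updates */
        return s->value;
}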
     438                 :            : 
     439                 :            : /*
     440                 :            :  * Disabling cache access for one CPU in an ARMv7 SMP system is tricky.
     441                 :            :  * To do so we must:
     442                 :            :  *
     443                 :            :  * - Clear the SCTLR.C bit to prevent further cache allocations
     444                 :            :  * - Flush the desired level of cache
     445                 :            :  * - Clear the ACTLR "SMP" bit to disable local coherency
     446                 :            :  *
      447                 :            :  * ... and do so without any intervening memory access between those steps,
     448                 :            :  * not even to the stack.
     449                 :            :  *
     450                 :            :  * WARNING -- After this has been called:
     451                 :            :  *
     452                 :            :  * - No ldrex/strex (and similar) instructions must be used.
     453                 :            :  * - The CPU is obviously no longer coherent with the other CPUs.
     454                 :            :  * - This is unlikely to work as expected if Linux is running non-secure.
     455                 :            :  *
     456                 :            :  * Note:
     457                 :            :  *
     458                 :            :  * - This is known to apply to several ARMv7 processor implementations,
     459                 :            :  *   however some exceptions may exist.  Caveat emptor.
     460                 :            :  *
     461                 :            :  * - The clobber list is dictated by the call to v7_flush_dcache_*.
      462                 :            :  *   fp is preserved on the stack explicitly prior to disabling the cache
      463                 :            :  *   since adding it to the clobber list is incompatible with having
      464                 :            :  *   CONFIG_FRAME_POINTER=y.  ip is saved as well in case r12-clobbering
      465                 :            :  *   trampolines are inserted by the linker, and to keep sp 64-bit aligned.
     466                 :            :  */
     467                 :            : #define v7_exit_coherency_flush(level) \
     468                 :            :         asm volatile( \
     469                 :            :         "stmfd     sp!, {fp, ip} \n\t" \
     470                 :            :         "mrc       p15, 0, r0, c1, c0, 0   @ get SCTLR \n\t" \
     471                 :            :         "bic       r0, r0, #"__stringify(CR_C)" \n\t" \
     472                 :            :         "mcr       p15, 0, r0, c1, c0, 0   @ set SCTLR \n\t" \
     473                 :            :         "isb       \n\t" \
     474                 :            :         "bl        v7_flush_dcache_"__stringify(level)" \n\t" \
     475                 :            :         "clrex     \n\t" \
     476                 :            :         "mrc       p15, 0, r0, c1, c0, 1   @ get ACTLR \n\t" \
     477                 :            :         "bic       r0, r0, #(1 << 6) @ disable local coherency \n\t" \
     478                 :            :         "mcr       p15, 0, r0, c1, c0, 1   @ set ACTLR \n\t" \
     479                 :            :         "isb       \n\t" \
     480                 :            :         "dsb       \n\t" \
     481                 :            :         "ldmfd     sp!, {fp, ip}" \
     482                 :            :         : : : "r0","r1","r2","r3","r4","r5","r6","r7", \
     483                 :            :               "r9","r10","lr","memory" )
     484                 :            : 
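/*
 * Hypothetical usage sketch (not part of this header; the function name is
 * invented, and wfi() is assumed from asm/barrier.h on v6K/v7): a platform
 * CPU power-down path would typically exit coherency right before asking the
 * power controller to cut power to this CPU, roughly as follows.
 */
static void example_cpu_powerdown(void)
{
        /* flush to the Level of Unification Inner Shareable and drop this
         * CPU out of SMP coherency; the macro's asm sequence avoids any
         * intervening memory access */
        v7_exit_coherency_flush(louis);

        /* ... signal the platform power controller, then wait to be killed */
        while (1)
                wfi();
}
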
     485                 :            : #endif

Generated by: LCOV version 1.9