LCOV - code coverage report
Current view: top level - kernel/locking - mutex.c (source / functions)
Test: coverage.info
Date: 2014-02-18

             Hit     Total   Coverage
Lines:        85       147     57.8 %
Functions:    13        19     68.4 %
Branches:     81       238     34.0 %

           Branch data     Line data    Source code
       1                 :            : /*
       2                 :            :  * kernel/locking/mutex.c
       3                 :            :  *
       4                 :            :  * Mutexes: blocking mutual exclusion locks
       5                 :            :  *
       6                 :            :  * Started by Ingo Molnar:
       7                 :            :  *
       8                 :            :  *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
       9                 :            :  *
      10                 :            :  * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
      11                 :            :  * David Howells for suggestions and improvements.
      12                 :            :  *
      13                 :            :  *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
      14                 :            :  *    from the -rt tree, where it was originally implemented for rtmutexes
      15                 :            :  *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
      16                 :            :  *    and Sven Dietrich.
      17                 :            :  *
      18                 :            :  * Also see Documentation/mutex-design.txt.
      19                 :            :  */
      20                 :            : #include <linux/mutex.h>
      21                 :            : #include <linux/ww_mutex.h>
      22                 :            : #include <linux/sched.h>
      23                 :            : #include <linux/sched/rt.h>
      24                 :            : #include <linux/export.h>
      25                 :            : #include <linux/spinlock.h>
      26                 :            : #include <linux/interrupt.h>
      27                 :            : #include <linux/debug_locks.h>
      28                 :            : 
      29                 :            : /*
      30                 :            :  * In the DEBUG case we are using the "NULL fastpath" for mutexes,
      31                 :            :  * which forces all calls into the slowpath:
      32                 :            :  */
      33                 :            : #ifdef CONFIG_DEBUG_MUTEXES
      34                 :            : # include "mutex-debug.h"
      35                 :            : # include <asm-generic/mutex-null.h>
      36                 :            : #else
      37                 :            : # include "mutex.h"
      38                 :            : # include <asm/mutex.h>
      39                 :            : #endif
      40                 :            : 
      41                 :            : /*
      42                 :            :  * A negative mutex count indicates that waiters are sleeping waiting for the
      43                 :            :  * mutex.
      44                 :            :  */
      45                 :            : #define MUTEX_SHOW_NO_WAITER(mutex)     (atomic_read(&(mutex)->count) >= 0)
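
For orientation, this is the counter encoding that the macro above and the
fastpaths rely on (a restatement of the semantics described in this file, not
additional source):

        count ==  1   ->  unlocked
        count ==  0   ->  locked, no waiters
        count  <  0   ->  locked, tasks sleeping on wait_list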
      46                 :            : 
      47                 :            : void
      48                 :          0 : __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
      49                 :            : {
      50                 :    6469033 :         atomic_set(&lock->count, 1);
      51                 :    6469033 :         spin_lock_init(&lock->wait_lock);
      52                 :    6469033 :         INIT_LIST_HEAD(&lock->wait_list);
      53                 :            :         mutex_clear_owner(lock);
      54                 :            : #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
      55                 :    6469033 :         lock->spin_mlock = NULL;
      56                 :            : #endif
      57                 :            : 
      58                 :            :         debug_mutex_init(lock, name, key);
      59                 :    6469033 : }
      60                 :            : 
      61                 :            : EXPORT_SYMBOL(__mutex_init);
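
__mutex_init() is normally reached through the mutex_init() wrapper, which
supplies the per-site lockdep class key. A minimal caller sketch (my_dev and
my_dev_setup are hypothetical, not from this file):

        struct my_dev {
                struct mutex io_lock;   /* protects the device's I/O state */
        };

        static int my_dev_setup(struct my_dev *dev)
        {
                mutex_init(&dev->io_lock);  /* expands to __mutex_init() with a static key */
                return 0;
        }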
      62                 :            : 
      63                 :            : #ifndef CONFIG_DEBUG_LOCK_ALLOC
      64                 :            : /*
      65                 :            :  * We split the mutex lock/unlock logic into separate fastpath and
      66                 :            :  * slowpath functions, to reduce the register pressure on the fastpath.
      67                 :            :  * We also put the fastpath first in the kernel image, to make sure the
      68                 :            :  * branch is predicted by the CPU as default-untaken.
      69                 :            :  */
      70                 :            : static __used noinline void __sched
      71                 :            : __mutex_lock_slowpath(atomic_t *lock_count);
      72                 :            : 
      73                 :            : /**
      74                 :            :  * mutex_lock - acquire the mutex
      75                 :            :  * @lock: the mutex to be acquired
      76                 :            :  *
      77                 :            :  * Lock the mutex exclusively for this task. If the mutex is not
      78                 :            :  * available right now, it will sleep until it can get it.
      79                 :            :  *
      80                 :            :  * The mutex must later on be released by the same task that
      81                 :            :  * acquired it. Recursive locking is not allowed. The task
      82                 :            :  * may not exit without first unlocking the mutex. Also, kernel
       83                 :            :  * memory where the mutex resides must not be freed with
      84                 :            :  * the mutex still locked. The mutex must first be initialized
      85                 :            :  * (or statically defined) before it can be locked. memset()-ing
      86                 :            :  * the mutex to 0 is not allowed.
      87                 :            :  *
      88                 :            :  * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
      89                 :            :  *   checks that will enforce the restrictions and will also do
      90                 :            :  *   deadlock debugging. )
      91                 :            :  *
      92                 :            :  * This function is similar to (but not equivalent to) down().
      93                 :            :  */
      94                 :          0 : void __sched mutex_lock(struct mutex *lock)
      95                 :            : {
      96                 :            :         might_sleep();
      97                 :            :         /*
      98                 :            :          * The locking fastpath is the 1->0 transition from
      99                 :            :          * 'unlocked' into 'locked' state.
     100                 :            :          */
     101                 :   48577439 :         __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
     102                 :            :         mutex_set_owner(lock);
     103                 :         64 : }
     104                 :            : 
     105                 :            : EXPORT_SYMBOL(mutex_lock);
     106                 :            : #endif
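
A minimal usage sketch for the lock/unlock pair documented above (my_lock and
the critical section are illustrative):

        static DEFINE_MUTEX(my_lock);   /* statically defined, as required above */

        static void my_update(void)
        {
                mutex_lock(&my_lock);   /* may sleep; never call from interrupt context */
                /* ... critical section, released by the same task ... */
                mutex_unlock(&my_lock);
        }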
     107                 :            : 
     108                 :            : #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
     109                 :            : /*
      110                 :            :  * In order to avoid a stampede of mutex spinners all trying to acquire the
      111                 :            :  * mutex more or less simultaneously, the spinners need to acquire an MCS
      112                 :            :  * lock first before spinning on the owner field.
     113                 :            :  *
     114                 :            :  * We don't inline mspin_lock() so that perf can correctly account for the
     115                 :            :  * time spent in this lock function.
     116                 :            :  */
     117                 :            : struct mspin_node {
      118                 :            :         struct mspin_node *next;
     119                 :            :         int               locked;       /* 1 if lock acquired */
     120                 :            : };
     121                 :            : #define MLOCK(mutex)    ((struct mspin_node **)&((mutex)->spin_mlock))
     122                 :            : 
     123                 :            : static noinline
     124                 :          0 : void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
     125                 :            : {
     126                 :            :         struct mspin_node *prev;
     127                 :            : 
     128                 :            :         /* Init node */
     129                 :    2121026 :         node->locked = 0;
     130                 :    2121026 :         node->next   = NULL;
     131                 :            : 
     132                 :    4242052 :         prev = xchg(lock, node);
     133         [ +  + ]:    4242052 :         if (likely(prev == NULL)) {
     134                 :            :                 /* Lock acquired */
     135                 :    2121009 :                 node->locked = 1;
     136                 :    2121009 :                 return;
     137                 :            :         }
     138                 :         17 :         ACCESS_ONCE(prev->next) = node;
     139                 :         17 :         smp_wmb();
     140                 :            :         /* Wait until the lock holder passes the lock down */
     141         [ +  + ]:       1469 :         while (!ACCESS_ONCE(node->locked))
     142                 :       1452 :                 arch_mutex_cpu_relax();
     143                 :            : }
     144                 :            : 
     145                 :          0 : static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
     146                 :            : {
     147                 :    2121026 :         struct mspin_node *next = ACCESS_ONCE(node->next);
     148                 :            : 
     149         [ +  + ]:    2121026 :         if (likely(!next)) {
     150                 :            :                 /*
     151                 :            :                  * Release the lock by setting it to NULL
     152                 :            :                  */
     153         [ +  + ]:    2121025 :                 if (cmpxchg(lock, node, NULL) == node)
     154                 :    2121026 :                         return;
     155                 :            :                 /* Wait until the next pointer is set */
     156         [ -  + ]:         16 :                 while (!(next = ACCESS_ONCE(node->next)))
     157                 :          0 :                         arch_mutex_cpu_relax();
     158                 :            :         }
     159                 :         17 :         ACCESS_ONCE(next->locked) = 1;
     160                 :         17 :         smp_wmb();
     161                 :            : }
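
To make the hand-off above concrete, an illustrative two-CPU trace, assuming
CPU0 currently holds the MCS lock when CPU1 calls mspin_lock():

        CPU1: prev = xchg(lock, &node1)         /* returns &node0: queue not empty */
        CPU1: ACCESS_ONCE(node0.next) = &node1  /* links itself behind CPU0 */
        CPU1: spins on node1.locked             /* its own node, so no cache-line bouncing */
        CPU0: mspin_unlock() sees node0.next != NULL and skips the cmpxchg() path
        CPU0: ACCESS_ONCE(node1.locked) = 1     /* lock handed directly to CPU1 */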
     162                 :            : 
     163                 :            : /*
     164                 :            :  * Mutex spinning code migrated from kernel/sched/core.c
     165                 :            :  */
     166                 :            : 
     167                 :            : static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
     168                 :            : {
     169         [ +  + ]: 3629535091 :         if (lock->owner != owner)
     170                 :            :                 return false;
     171                 :            : 
     172                 :            :         /*
      173                 :            :          * Ensure we emit the owner->on_cpu dereference _after_ checking
      174                 :            :          * that lock->owner still matches owner. If that fails, owner might
      175                 :            :          * point to free()d memory; if it still matches, the rcu_read_lock()
      176                 :            :          * ensures the memory stays valid.
     177                 :            :          */
     178                 : 3628077341 :         barrier();
     179                 :            : 
     180                 : 3628077341 :         return owner->on_cpu;
     181                 :            : }
     182                 :            : 
     183                 :            : /*
     184                 :            :  * Look out! "owner" is an entirely speculative pointer
     185                 :            :  * access and not reliable.
     186                 :            :  */
     187                 :            : static noinline
     188                 :          0 : int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
     189                 :            : {
     190                 :            :         rcu_read_lock();
     191         [ +  + ]: 3628804375 :         while (owner_running(lock, owner)) {
     192         [ +  + ]: 3628076774 :                 if (need_resched())
     193                 :            :                         break;
     194                 :            : 
     195                 : 3628073659 :                 arch_mutex_cpu_relax();
     196                 :            :         }
     197                 :            :         rcu_read_unlock();
     198                 :            : 
     199                 :            :         /*
      200                 :            :          * We break out of the loop above on need_resched() and when the
      201                 :            :          * owner changes, which is a sign of heavy contention. Return
     202                 :            :          * success only when lock->owner is NULL.
     203                 :            :          */
     204                 :     730716 :         return lock->owner == NULL;
     205                 :            : }
     206                 :            : 
     207                 :            : /*
     208                 :            :  * Initial check for entering the mutex spinning loop
     209                 :            :  */
     210                 :            : static inline int mutex_can_spin_on_owner(struct mutex *lock)
     211                 :            : {
     212                 :            :         struct task_struct *owner;
     213                 :            :         int retval = 1;
     214                 :            : 
     215                 :            :         rcu_read_lock();
     216                 :    1077011 :         owner = ACCESS_ONCE(lock->owner);
      217   [ #  #  #  #  +  +  +  -  +  + ]:    1077011 :         if (owner)
     218                 :     850458 :                 retval = owner->on_cpu;
     219                 :            :         rcu_read_unlock();
     220                 :            :         /*
      221                 :            :          * If lock->owner is not set, the mutex owner may have just acquired
      222                 :            :          * it and not yet set the owner field, or the mutex may have been released.
     223                 :            :          */
     224                 :            :         return retval;
     225                 :            : }
     226                 :            : #endif
     227                 :            : 
     228                 :            : static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
     229                 :            : 
     230                 :            : /**
     231                 :            :  * mutex_unlock - release the mutex
     232                 :            :  * @lock: the mutex to be released
     233                 :            :  *
     234                 :            :  * Unlock a mutex that has been locked by this task previously.
     235                 :            :  *
     236                 :            :  * This function must not be used in interrupt context. Unlocking
      237                 :            :  * a mutex that is not locked is not allowed.
     238                 :            :  *
     239                 :            :  * This function is similar to (but not equivalent to) up().
     240                 :            :  */
     241                 :          0 : void __sched mutex_unlock(struct mutex *lock)
     242                 :            : {
     243                 :            :         /*
     244                 :            :          * The unlocking fastpath is the 0->1 transition from 'locked'
     245                 :            :          * into 'unlocked' state:
     246                 :            :          */
     247                 :            : #ifndef CONFIG_DEBUG_MUTEXES
     248                 :            :         /*
     249                 :            :          * When debugging is enabled we must not clear the owner before time,
     250                 :            :          * the slow path will always be taken, and that clears the owner field
     251                 :            :          * after verifying that it was indeed current.
     252                 :            :          */
     253                 :            :         mutex_clear_owner(lock);
     254                 :            : #endif
     255                 :   50083963 :         __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
     256                 :       2033 : }
     257                 :            : 
     258                 :            : EXPORT_SYMBOL(mutex_unlock);
     259                 :            : 
     260                 :            : /**
     261                 :            :  * ww_mutex_unlock - release the w/w mutex
     262                 :            :  * @lock: the mutex to be released
     263                 :            :  *
     264                 :            :  * Unlock a mutex that has been locked by this task previously with any of the
     265                 :            :  * ww_mutex_lock* functions (with or without an acquire context). It is
     266                 :            :  * forbidden to release the locks after releasing the acquire context.
     267                 :            :  *
     268                 :            :  * This function must not be used in interrupt context. Unlocking
      269                 :            :  * of an unlocked mutex is not allowed.
     270                 :            :  */
     271                 :          0 : void __sched ww_mutex_unlock(struct ww_mutex *lock)
     272                 :            : {
     273                 :            :         /*
     274                 :            :          * The unlocking fastpath is the 0->1 transition from 'locked'
     275                 :            :          * into 'unlocked' state:
     276                 :            :          */
     277         [ #  # ]:          0 :         if (lock->ctx) {
     278                 :            : #ifdef CONFIG_DEBUG_MUTEXES
     279                 :            :                 DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
     280                 :            : #endif
     281         [ #  # ]:          0 :                 if (lock->ctx->acquired > 0)
     282                 :          0 :                         lock->ctx->acquired--;
     283                 :          0 :                 lock->ctx = NULL;
     284                 :            :         }
     285                 :            : 
     286                 :            : #ifndef CONFIG_DEBUG_MUTEXES
     287                 :            :         /*
     288                 :            :          * When debugging is enabled we must not clear the owner before time,
     289                 :            :          * the slow path will always be taken, and that clears the owner field
     290                 :            :          * after verifying that it was indeed current.
     291                 :            :          */
     292                 :            :         mutex_clear_owner(&lock->base);
     293                 :            : #endif
     294                 :          0 :         __mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
     295                 :          0 : }
     296                 :            : EXPORT_SYMBOL(ww_mutex_unlock);
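
A sketch of the documented acquire-context protocol that pairs with
ww_mutex_unlock() (lock_both, the a/b objects and my_ww_class are hypothetical;
a full -EDEADLK backoff loop per Documentation/ww-mutex-design.txt is omitted):

        static DEFINE_WW_CLASS(my_ww_class);

        static int lock_both(struct ww_mutex *a, struct ww_mutex *b)
        {
                struct ww_acquire_ctx ctx;
                int ret;

                ww_acquire_init(&ctx, &my_ww_class);
                ret = ww_mutex_lock(a, &ctx);
                if (ret)
                        goto out;
                ret = ww_mutex_lock(b, &ctx);   /* may return -EDEADLK: back off */
                if (ret) {
                        ww_mutex_unlock(a);
                        goto out;
                }
                /* ... use both objects; unlock before releasing the context ... */
                ww_mutex_unlock(b);
                ww_mutex_unlock(a);
        out:
                ww_acquire_fini(&ctx);
                return ret;
        }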
     297                 :            : 
     298                 :            : static inline int __sched
     299                 :            : __mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
     300                 :            : {
     301                 :            :         struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
     302                 :          0 :         struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);
     303                 :            : 
     304 [ #  # ][ #  # ]:          0 :         if (!hold_ctx)
     305                 :            :                 return 0;
     306                 :            : 
     307 [ #  # ][ #  # ]:          0 :         if (unlikely(ctx == hold_ctx))
     308                 :            :                 return -EALREADY;
     309                 :            : 
      310 [ #  # ][ #  # ][ #  # ][ #  # ]:          0 :         if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
     311 [ #  # ][ #  # ]:          0 :             (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
     312                 :            : #ifdef CONFIG_DEBUG_MUTEXES
     313                 :            :                 DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
     314                 :            :                 ctx->contending_lock = ww;
     315                 :            : #endif
     316                 :            :                 return -EDEADLK;
     317                 :            :         }
     318                 :            : 
     319                 :            :         return 0;
     320                 :            : }
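
The unsigned subtraction above is a wrap-safe age test: it asks whether ctx is
the younger of the two contexts. A worked case, with stamps being the unsigned
long sequence numbers handed out by the ww_class:

        hold_ctx->stamp == ULONG_MAX    /* taken just before the counter wraps */
        ctx->stamp      == 1            /* taken just after, i.e. younger */
        ctx->stamp - hold_ctx->stamp == 2, and 2 <= LONG_MAX,
        so ctx is the younger context and backs off with -EDEADLK
        (wait/wound: the older context wins the lock).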
     321                 :            : 
     322                 :            : static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
     323                 :            :                                                    struct ww_acquire_ctx *ww_ctx)
     324                 :            : {
     325                 :            : #ifdef CONFIG_DEBUG_MUTEXES
     326                 :            :         /*
     327                 :            :          * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
     328                 :            :          * but released with a normal mutex_unlock in this call.
     329                 :            :          *
     330                 :            :          * This should never happen, always use ww_mutex_unlock.
     331                 :            :          */
     332                 :            :         DEBUG_LOCKS_WARN_ON(ww->ctx);
     333                 :            : 
     334                 :            :         /*
     335                 :            :          * Not quite done after calling ww_acquire_done() ?
     336                 :            :          */
     337                 :            :         DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
     338                 :            : 
     339                 :            :         if (ww_ctx->contending_lock) {
     340                 :            :                 /*
     341                 :            :                  * After -EDEADLK you tried to
     342                 :            :                  * acquire a different ww_mutex? Bad!
     343                 :            :                  */
     344                 :            :                 DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
     345                 :            : 
     346                 :            :                 /*
     347                 :            :                  * You called ww_mutex_lock after receiving -EDEADLK,
     348                 :            :                  * but 'forgot' to unlock everything else first?
     349                 :            :                  */
     350                 :            :                 DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
     351                 :            :                 ww_ctx->contending_lock = NULL;
     352                 :            :         }
     353                 :            : 
     354                 :            :         /*
     355                 :            :          * Naughty, using a different class will lead to undefined behavior!
     356                 :            :          */
     357                 :            :         DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
     358                 :            : #endif
     359                 :          0 :         ww_ctx->acquired++;
     360                 :            : }
     361                 :            : 
     362                 :            : /*
      363                 :            :  * After acquiring the lock with the fastpath, or when we lost out in the
      364                 :            :  * contested slowpath, set ctx and wake up any waiters so they can recheck.
     365                 :            :  *
     366                 :            :  * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
     367                 :            :  * as the fastpath and opportunistic spinning are disabled in that case.
     368                 :            :  */
     369                 :            : static __always_inline void
     370                 :            : ww_mutex_set_context_fastpath(struct ww_mutex *lock,
     371                 :            :                                struct ww_acquire_ctx *ctx)
     372                 :            : {
     373                 :            :         unsigned long flags;
     374                 :            :         struct mutex_waiter *cur;
     375                 :            : 
     376                 :            :         ww_mutex_lock_acquired(lock, ctx);
     377                 :            : 
     378                 :          0 :         lock->ctx = ctx;
     379                 :            : 
     380                 :            :         /*
     381                 :            :          * The lock->ctx update should be visible on all cores before
     382                 :            :          * the atomic read is done, otherwise contended waiters might be
     383                 :            :          * missed. The contended waiters will either see ww_ctx == NULL
      384                 :            :  * and keep spinning, or they will acquire wait_lock, add themselves
      385                 :            :  * to the waiter list and sleep.
     386                 :            :          */
     387                 :          0 :         smp_mb(); /* ^^^ */
     388                 :            : 
     389                 :            :         /*
     390                 :            :          * Check if lock is contended, if not there is nobody to wake up
     391                 :            :          */
      392   [ #  #  #  #  #  #  #  # ]:          0 :         if (likely(atomic_read(&lock->base.count) == 0))
     393                 :            :                 return;
     394                 :            : 
     395                 :            :         /*
     396                 :            :          * Uh oh, we raced in fastpath, wake up everyone in this case,
     397                 :            :          * so they can see the new lock->ctx.
     398                 :            :          */
     399                 :            :         spin_lock_mutex(&lock->base.wait_lock, flags);
      400 [ #  # ][ #  # ][ #  # ][ #  # ]:          0 :         list_for_each_entry(cur, &lock->base.wait_list, list) {
     401                 :            :                 debug_mutex_wake_waiter(&lock->base, cur);
     402                 :          0 :                 wake_up_process(cur->task);
     403                 :            :         }
     404                 :            :         spin_unlock_mutex(&lock->base.wait_lock, flags);
     405                 :            : }
     406                 :            : 
     407                 :            : /*
     408                 :            :  * Lock a mutex (possibly interruptible), slowpath:
     409                 :            :  */
     410                 :            : static __always_inline int __sched
     411                 :            : __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
     412                 :            :                     struct lockdep_map *nest_lock, unsigned long ip,
     413                 :            :                     struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
     414                 :            : {
     415                 :    1077011 :         struct task_struct *task = current;
     416                 :            :         struct mutex_waiter waiter;
     417                 :            :         unsigned long flags;
     418                 :            :         int ret;
     419                 :            : 
     420                 :    1077011 :         preempt_disable();
     421                 :            :         mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
     422                 :            : 
     423                 :            : #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
     424                 :            :         /*
     425                 :            :          * Optimistic spinning.
     426                 :            :          *
     427                 :            :          * We try to spin for acquisition when we find that there are no
     428                 :            :          * pending waiters and the lock owner is currently running on a
     429                 :            :          * (different) CPU.
     430                 :            :          *
     431                 :            :          * The rationale is that if the lock owner is running, it is likely to
     432                 :            :          * release the lock soon.
     433                 :            :          *
     434                 :            :          * Since this needs the lock owner, and this mutex implementation
     435                 :            :          * doesn't track the owner atomically in the lock field, we need to
     436                 :            :          * track it non-atomically.
     437                 :            :          *
     438                 :            :          * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
     439                 :            :          * to serialize everything.
     440                 :            :          *
      441                 :            :          * The mutex spinners are queued up using an MCS lock so that only one
     442                 :            :          * spinner can compete for the mutex. However, if mutex spinning isn't
     443                 :            :          * going to happen, there is no point in going through the lock/unlock
     444                 :            :          * overhead.
     445                 :            :          */
      446 [ #  # ][ #  # ][ +  - ][ +  + ][ +  + ]:    1077010 :         if (!mutex_can_spin_on_owner(lock))
     447                 :            :                 goto slowpath;
     448                 :            : 
     449                 :            :         for (;;) {
     450                 :            :                 struct task_struct *owner;
     451                 :            :                 struct mspin_node  node;
     452                 :            : 
     453 [ #  # ][ #  # ]:          0 :                 if (use_ww_ctx && ww_ctx->acquired > 0) {
     454                 :            :                         struct ww_mutex *ww;
     455                 :            : 
     456                 :            :                         ww = container_of(lock, struct ww_mutex, base);
     457                 :            :                         /*
      458                 :            :                          * If ww->ctx is set, its contents are undefined; only
      459                 :            :                          * by acquiring wait_lock is there a guarantee that
      460                 :            :                          * they are valid when read.
     461                 :            :                          *
     462                 :            :                          * As such, when deadlock detection needs to be
     463                 :            :                          * performed the optimistic spinning cannot be done.
     464                 :            :                          */
     465 [ #  # ][ #  # ]:          0 :                         if (ACCESS_ONCE(ww->ctx))
     466                 :            :                                 goto slowpath;
     467                 :            :                 }
     468                 :            : 
     469                 :            :                 /*
     470                 :            :                  * If there's an owner, wait for it to either
     471                 :            :                  * release the lock or go to sleep.
     472                 :            :                  */
     473                 :    2121026 :                 mspin_lock(MLOCK(lock), &node);
     474                 :    2121026 :                 owner = ACCESS_ONCE(lock->owner);
      475 [ #  # ][ #  #  #  # ][ #  #  +  + ][ -  +  +  + ][ -  +  +  + ][ +  + ]:    2121026 :         if (owner && !mutex_spin_on_owner(lock, owner)) {
     476                 :       3597 :                         mspin_unlock(MLOCK(lock), &node);
     477                 :            :                         goto slowpath;
     478                 :            :                 }
     479                 :            : 
      480   [ #  #  #  # ][ #  #  #  # ][ +  +  +  - ][ +  +  +  - ][ +  +  +  + ]:    3189061 :                 if ((atomic_read(&lock->count) == 1) &&
     481                 :    1071632 :                     (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
     482                 :            :                         lock_acquired(&lock->dep_map, ip);
     483                 :            :                         if (use_ww_ctx) {
     484                 :            :                                 struct ww_mutex *ww;
     485                 :            :                                 ww = container_of(lock, struct ww_mutex, base);
     486                 :            : 
     487                 :            :                                 ww_mutex_set_context_fastpath(ww, ww_ctx);
     488                 :            :                         }
     489                 :            : 
     490                 :            :                         mutex_set_owner(lock);
     491                 :    1071476 :                         mspin_unlock(MLOCK(lock), &node);
     492                 :    1071476 :                         preempt_enable();
     493                 :    1071476 :                         return 0;
     494                 :            :                 }
     495                 :    1045953 :                 mspin_unlock(MLOCK(lock), &node);
     496                 :            : 
     497                 :            :                 /*
      498                 :            :                  * When there's no owner, we might have been preempted between
      499                 :            :                  * the owner acquiring the lock and setting the owner field. If
      500                 :            :                  * we're an RT task, that will live-lock because we won't let
     501                 :            :                  * the owner complete.
     502                 :            :                  */
      503 [ #  # ][ #  # ][ #  #  #  # ][ #  # ][ #  #  +  + ][ +  - ][ +  -  -  + ][ #  # ][ #  #  +  + ][ +  + ][ +  - ]:    1364990 :                 if (!owner && (need_resched() || rt_task(task)))
     504                 :            :                         goto slowpath;
     505                 :            : 
     506                 :            :                 /*
     507                 :            :                  * The cpu_relax() call is a compiler barrier which forces
     508                 :            :                  * everything in this loop to be re-loaded. We don't need
     509                 :            :                  * memory barriers as we'll eventually observe the right
     510                 :            :                  * values at the cost of a few extra spins.
     511                 :            :                  */
     512                 :    1049550 :                 arch_mutex_cpu_relax();
     513                 :            :         }
     514                 :            : slowpath:
     515                 :            : #endif
     516                 :            :         spin_lock_mutex(&lock->wait_lock, flags);
     517                 :            : 
     518                 :            :         /* once more, can we acquire the lock? */
      519   [ #  #  #  #  #  #  #  #  #  #  #  #  -  +  #  #  +  +  +  + ]:       5656 :         if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, 0) == 1))
     520                 :            :                 goto skip_wait;
     521                 :            : 
     522                 :            :         debug_mutex_lock_common(lock, &waiter);
     523                 :            :         debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
     524                 :            : 
     525                 :            :         /* add waiting tasks to the end of the waitqueue (FIFO): */
     526                 :       5489 :         list_add_tail(&waiter.list, &lock->wait_list);
     527                 :       5489 :         waiter.task = task;
     528                 :            : 
     529                 :            :         lock_contended(&lock->dep_map, ip);
     530                 :            : 
     531                 :            :         for (;;) {
     532                 :            :                 /*
      533                 :            :                  * Let's try to take the lock again - this is needed even if
     534                 :            :                  * we get here for the first time (shortly after failing to
     535                 :            :                  * acquire the lock), to make sure that we get a wakeup once
     536                 :            :                  * it's unlocked. Later on, if we sleep, this is the
     537                 :            :                  * operation that gives us the lock. We xchg it to -1, so
     538                 :            :                  * that when we release the lock, we properly wake up the
     539                 :            :                  * other waiters:
     540                 :            :                  */
      541   [ #  #  #  # ][ #  #  #  # ][ #  #  #  # ][ +  +  -  + ][ +  +  +  + ]:      35364 :                 if (MUTEX_SHOW_NO_WAITER(lock) &&
     542                 :      14570 :                     (atomic_xchg(&lock->count, -1) == 1))
     543                 :            :                         break;
     544                 :            : 
     545                 :            :                 /*
     546                 :            :                  * got a signal? (This code gets eliminated in the
     547                 :            :                  * TASK_UNINTERRUPTIBLE case.)
     548                 :            :                  */
      549 [ #  # ][ #  # ][ +  + ]:         45 :                 if (unlikely(signal_pending_state(state, task))) {
     550                 :            :                         ret = -EINTR;
     551                 :            :                         goto err;
     552                 :            :                 }
     553                 :            : 
     554 [ #  # ][ #  # ]:          0 :                 if (use_ww_ctx && ww_ctx->acquired > 0) {
     555                 :            :                         ret = __mutex_lock_check_stamp(lock, ww_ctx);
     556 [ #  # ][ #  # ]:          0 :                         if (ret)
     557                 :            :                                 goto err;
     558                 :            :                 }
     559                 :            : 
     560                 :      15199 :                 __set_task_state(task, state);
     561                 :            : 
     562                 :            :                 /* didn't get the lock, go to sleep: */
     563                 :            :                 spin_unlock_mutex(&lock->wait_lock, flags);
     564                 :      15199 :                 schedule_preempt_disabled();
     565                 :            :                 spin_lock_mutex(&lock->wait_lock, flags);
     566                 :            :         }
     567                 :       5477 :         mutex_remove_waiter(lock, &waiter, current_thread_info());
     568                 :            :         /* set it to 0 if there are no waiters left: */
      569 [ #  # ][ #  # ][ #  # ][ +  + ][ +  + ]:       5477 :                 if (likely(list_empty(&lock->wait_list)))
     570                 :       4185 :                 atomic_set(&lock->count, 0);
     571                 :            :         debug_mutex_free_waiter(&waiter);
     572                 :            : 
     573                 :            : skip_wait:
     574                 :            :         /* got the lock - cleanup and rejoice! */
     575                 :            :         lock_acquired(&lock->dep_map, ip);
     576                 :            :         mutex_set_owner(lock);
     577                 :            : 
     578                 :            :         if (use_ww_ctx) {
     579                 :            :                 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
     580                 :            :                 struct mutex_waiter *cur;
     581                 :            : 
     582                 :            :                 /*
     583                 :            :                  * This branch gets optimized out for the common case,
     584                 :            :                  * and is only important for ww_mutex_lock.
     585                 :            :                  */
     586                 :            :                 ww_mutex_lock_acquired(ww, ww_ctx);
     587                 :          0 :                 ww->ctx = ww_ctx;
     588                 :            : 
     589                 :            :                 /*
     590                 :            :                  * Give any possible sleeping processes the chance to wake up,
     591                 :            :                  * so they can recheck if they have to back off.
     592                 :            :                  */
     593 [ #  # ][ #  # ]:          0 :                 list_for_each_entry(cur, &lock->wait_list, list) {
     594                 :            :                         debug_mutex_wake_waiter(lock, cur);
     595                 :          0 :                         wake_up_process(cur->task);
     596                 :            :                 }
     597                 :            :         }
     598                 :            : 
     599                 :            :         spin_unlock_mutex(&lock->wait_lock, flags);
     600                 :       5524 :         preempt_enable();
     601                 :            :         return 0;
     602                 :            : 
     603                 :            : err:
     604                 :         11 :         mutex_remove_waiter(lock, &waiter, task_thread_info(task));
     605                 :            :         spin_unlock_mutex(&lock->wait_lock, flags);
     606                 :            :         debug_mutex_free_waiter(&waiter);
     607                 :            :         mutex_release(&lock->dep_map, 1, ip);
     608                 :         11 :         preempt_enable();
     609                 :            :         return ret;
     610                 :            : }
     611                 :            : 
     612                 :            : #ifdef CONFIG_DEBUG_LOCK_ALLOC
     613                 :            : void __sched
     614                 :            : mutex_lock_nested(struct mutex *lock, unsigned int subclass)
     615                 :            : {
     616                 :            :         might_sleep();
     617                 :            :         __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
     618                 :            :                             subclass, NULL, _RET_IP_, NULL, 0);
     619                 :            : }
     620                 :            : 
     621                 :            : EXPORT_SYMBOL_GPL(mutex_lock_nested);
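
A sketch of when the subclass argument matters: two mutexes of the same lockdep
class nested intentionally (parent/child are hypothetical objects;
SINGLE_DEPTH_NESTING is the conventional subclass for one level of nesting):

        mutex_lock(&parent->lock);
        /* same class taken twice: tell lockdep this nesting is deliberate */
        mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
        /* ... */
        mutex_unlock(&child->lock);
        mutex_unlock(&parent->lock);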
     622                 :            : 
     623                 :            : void __sched
     624                 :            : _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
     625                 :            : {
     626                 :            :         might_sleep();
     627                 :            :         __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
     628                 :            :                             0, nest, _RET_IP_, NULL, 0);
     629                 :            : }
     630                 :            : 
     631                 :            : EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
     632                 :            : 
     633                 :            : int __sched
     634                 :            : mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
     635                 :            : {
     636                 :            :         might_sleep();
     637                 :            :         return __mutex_lock_common(lock, TASK_KILLABLE,
     638                 :            :                                    subclass, NULL, _RET_IP_, NULL, 0);
     639                 :            : }
     640                 :            : EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
     641                 :            : 
     642                 :            : int __sched
     643                 :            : mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
     644                 :            : {
     645                 :            :         might_sleep();
     646                 :            :         return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
     647                 :            :                                    subclass, NULL, _RET_IP_, NULL, 0);
     648                 :            : }
     649                 :            : 
     650                 :            : EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
     651                 :            : 
     652                 :            : static inline int
     653                 :            : ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
     654                 :            : {
     655                 :            : #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
     656                 :            :         unsigned tmp;
     657                 :            : 
     658                 :            :         if (ctx->deadlock_inject_countdown-- == 0) {
     659                 :            :                 tmp = ctx->deadlock_inject_interval;
     660                 :            :                 if (tmp > UINT_MAX/4)
     661                 :            :                         tmp = UINT_MAX;
     662                 :            :                 else
     663                 :            :                         tmp = tmp*2 + tmp + tmp/2;
     664                 :            : 
     665                 :            :                 ctx->deadlock_inject_interval = tmp;
     666                 :            :                 ctx->deadlock_inject_countdown = tmp;
     667                 :            :                 ctx->contending_lock = lock;
     668                 :            : 
     669                 :            :                 ww_mutex_unlock(lock);
     670                 :            : 
     671                 :            :                 return -EDEADLK;
     672                 :            :         }
     673                 :            : #endif
     674                 :            : 
     675                 :            :         return 0;
     676                 :            : }
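
The update above scales the injection interval by 3.5 using integer arithmetic
(tmp*2 + tmp + tmp/2). Starting from an interval of 1, successive -EDEADLK
injections are spaced:

        1  -> 2 + 1 + 0    = 3
        3  -> 6 + 3 + 1    = 10
        10 -> 20 + 10 + 5  = 35
        35 -> 70 + 35 + 17 = 122, and so on.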
     677                 :            : 
     678                 :            : int __sched
     679                 :            : __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
     680                 :            : {
     681                 :            :         int ret;
     682                 :            : 
     683                 :            :         might_sleep();
     684                 :            :         ret =  __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
     685                 :            :                                    0, &ctx->dep_map, _RET_IP_, ctx, 1);
     686                 :            :         if (!ret && ctx->acquired > 1)
     687                 :            :                 return ww_mutex_deadlock_injection(lock, ctx);
     688                 :            : 
     689                 :            :         return ret;
     690                 :            : }
     691                 :            : EXPORT_SYMBOL_GPL(__ww_mutex_lock);
     692                 :            : 
     693                 :            : int __sched
     694                 :            : __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
     695                 :            : {
     696                 :            :         int ret;
     697                 :            : 
     698                 :            :         might_sleep();
     699                 :            :         ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
     700                 :            :                                   0, &ctx->dep_map, _RET_IP_, ctx, 1);
     701                 :            : 
     702                 :            :         if (!ret && ctx->acquired > 1)
     703                 :            :                 return ww_mutex_deadlock_injection(lock, ctx);
     704                 :            : 
     705                 :            :         return ret;
     706                 :            : }
     707                 :            : EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
     708                 :            : 
     709                 :            : #endif
     710                 :            : 
     711                 :            : /*
     712                 :            :  * Release the lock, slowpath:
     713                 :            :  */
     714                 :            : static inline void
     715                 :            : __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
     716                 :            : {
     717                 :            :         struct mutex *lock = container_of(lock_count, struct mutex, count);
     718                 :            :         unsigned long flags;
     719                 :            : 
     720                 :            :         spin_lock_mutex(&lock->wait_lock, flags);
     721                 :            :         mutex_release(&lock->dep_map, nested, _RET_IP_);
     722                 :            :         debug_mutex_unlock(lock);
     723                 :            : 
     724                 :            :         /*
      725                 :            :          * Some architectures leave the lock unlocked in the fastpath failure
      726                 :            :          * case; others need to leave it locked. In the latter case we have to
      727                 :            :          * unlock it here.
     728                 :            :          */
     729                 :            :         if (__mutex_slowpath_needs_to_unlock())
     730                 :    1084930 :                 atomic_set(&lock->count, 1);
     731                 :            : 
     732         [ +  + ]:    1084930 :         if (!list_empty(&lock->wait_list)) {
     733                 :            :                 /* get the first entry from the wait-list: */
     734                 :            :                 struct mutex_waiter *waiter =
     735                 :            :                                 list_entry(lock->wait_list.next,
     736                 :            :                                            struct mutex_waiter, list);
     737                 :            : 
     738                 :            :                 debug_mutex_wake_waiter(lock, waiter);
     739                 :            : 
     740                 :     183630 :                 wake_up_process(waiter->task);
     741                 :            :         }
     742                 :            : 
     743                 :            :         spin_unlock_mutex(&lock->wait_lock, flags);
     744                 :            : }
     745                 :            : 
     746                 :            : /*
     747                 :            :  * Release the lock, slowpath:
     748                 :            :  */
     749                 :            : static __used noinline void
     750                 :          0 : __mutex_unlock_slowpath(atomic_t *lock_count)
     751                 :            : {
     752                 :            :         __mutex_unlock_common_slowpath(lock_count, 1);
     753                 :    1084930 : }
     754                 :            : 
     755                 :            : #ifndef CONFIG_DEBUG_LOCK_ALLOC
     756                 :            : /*
     757                 :            :  * Here come the less common (and hence less performance-critical) APIs:
     758                 :            :  * mutex_lock_interruptible() and mutex_trylock().
     759                 :            :  */
     760                 :            : static noinline int __sched
     761                 :            : __mutex_lock_killable_slowpath(struct mutex *lock);
     762                 :            : 
     763                 :            : static noinline int __sched
     764                 :            : __mutex_lock_interruptible_slowpath(struct mutex *lock);
     765                 :            : 
     766                 :            : /**
     767                 :            :  * mutex_lock_interruptible - acquire the mutex, interruptible
     768                 :            :  * @lock: the mutex to be acquired
     769                 :            :  *
      770                 :            :  * Lock the mutex like mutex_lock(): sleep until the mutex becomes
      771                 :            :  * available and return 0 once it has been acquired. If a signal
      772                 :            :  * arrives while waiting for the lock then this function returns
      773                 :            :  * -EINTR.
     774                 :            :  *
     775                 :            :  * This function is similar to (but not equivalent to) down_interruptible().
     776                 :            :  */
     777                 :          0 : int __sched mutex_lock_interruptible(struct mutex *lock)
     778                 :            : {
     779                 :            :         int ret;
     780                 :            : 
     781                 :            :         might_sleep();
      782                 :    1111745 :         ret = __mutex_fastpath_lock_retval(&lock->count);
     783         [ +  + ]:    1111804 :         if (likely(!ret)) {
     784                 :            :                 mutex_set_owner(lock);
     785                 :    1111770 :                 return 0;
     786                 :            :         } else
     787                 :         34 :                 return __mutex_lock_interruptible_slowpath(lock);
     788                 :            : }
     789                 :            : 
     790                 :            : EXPORT_SYMBOL(mutex_lock_interruptible);
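
A hedged usage sketch for the API documented above; the device type and
its ->lock field are hypothetical. The point is that -EINTR must be
propagated so user space sees the interrupted system call:

    /* Hypothetical ioctl-path helper. */
    static int example_get_state(struct example_dev *dev, u32 *out)
    {
            int ret;

            ret = mutex_lock_interruptible(&dev->lock);
            if (ret)
                    return ret;     /* -EINTR: a signal arrived while sleeping */

            *out = dev->state;      /* critical section */
            mutex_unlock(&dev->lock);
            return 0;
    }
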
     791                 :            : 
     792                 :          0 : int __sched mutex_lock_killable(struct mutex *lock)
     793                 :            : {
     794                 :            :         int ret;
     795                 :            : 
     796                 :            :         might_sleep();
     797                 :     120598 :         ret = __mutex_fastpath_lock_retval(&lock->count);
     798         [ +  + ]:     120600 :         if (likely(!ret)) {
     799                 :            :                 mutex_set_owner(lock);
     800                 :     120527 :                 return 0;
     801                 :            :         } else
     802                 :         73 :                 return __mutex_lock_killable_slowpath(lock);
     803                 :            : }
     804                 :            : EXPORT_SYMBOL(mutex_lock_killable);
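
mutex_lock_killable() has no kernel-doc block here, but its contract is
the killable analogue of the above: the sleep is interrupted only by
fatal signals, in which case -EINTR is returned. A hedged sketch with
hypothetical device helpers:

    /* Hypothetical teardown path: must not block a SIGKILL'd task forever. */
    static int example_drain(struct example_dev *dev)
    {
            if (mutex_lock_killable(&dev->lock))
                    return -EINTR;          /* only a fatal signal gets us here */

            example_flush_queue(dev);       /* hypothetical */
            mutex_unlock(&dev->lock);
            return 0;
    }
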
     805                 :            : 
     806                 :            : static __used noinline void __sched
     807                 :          0 : __mutex_lock_slowpath(atomic_t *lock_count)
     808                 :            : {
     809                 :            :         struct mutex *lock = container_of(lock_count, struct mutex, count);
     810                 :            : 
     811                 :            :         __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
     812                 :    1076904 :                             NULL, _RET_IP_, NULL, 0);
     813                 :    1076904 : }
     814                 :            : 
     815                 :            : static noinline int __sched
     816                 :          0 : __mutex_lock_killable_slowpath(struct mutex *lock)
     817                 :            : {
     818                 :         73 :         return __mutex_lock_common(lock, TASK_KILLABLE, 0,
     819                 :         73 :                                    NULL, _RET_IP_, NULL, 0);
     820                 :            : }
     821                 :            : 
     822                 :            : static noinline int __sched
     823                 :          0 : __mutex_lock_interruptible_slowpath(struct mutex *lock)
     824                 :            : {
     825                 :         34 :         return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
     826                 :         34 :                                    NULL, _RET_IP_, NULL, 0);
     827                 :            : }
     828                 :            : 
     829                 :            : static noinline int __sched
     830                 :          0 : __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
     831                 :            : {
     832                 :          0 :         return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
     833                 :          0 :                                    NULL, _RET_IP_, ctx, 1);
     834                 :            : }
     835                 :            : 
     836                 :            : static noinline int __sched
     837                 :          0 : __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
     838                 :            :                                             struct ww_acquire_ctx *ctx)
     839                 :            : {
     840                 :          0 :         return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
     841                 :          0 :                                    NULL, _RET_IP_, ctx, 1);
     842                 :            : }
     843                 :            : 
     844                 :            : #endif
     845                 :            : 
     846                 :            : /*
      847                 :            :  * Spinlock-based trylock: we take the spinlock and check whether we
     848                 :            :  * can get the lock:
     849                 :            :  */
     850                 :            : static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
     851                 :            : {
     852                 :            :         struct mutex *lock = container_of(lock_count, struct mutex, count);
     853                 :            :         unsigned long flags;
     854                 :            :         int prev;
     855                 :            : 
     856                 :            :         spin_lock_mutex(&lock->wait_lock, flags);
     857                 :            : 
     858                 :            :         prev = atomic_xchg(&lock->count, -1);
     859                 :            :         if (likely(prev == 1)) {
     860                 :            :                 mutex_set_owner(lock);
     861                 :            :                 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
     862                 :            :         }
     863                 :            : 
     864                 :            :         /* Set it back to 0 if there are no waiters: */
     865                 :            :         if (likely(list_empty(&lock->wait_list)))
     866                 :            :                 atomic_set(&lock->count, 0);
     867                 :            : 
     868                 :            :         spin_unlock_mutex(&lock->wait_lock, flags);
     869                 :            : 
     870                 :            :         return prev == 1;
     871                 :            : }
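
The atomic_xchg() trick above can be isolated: unconditionally swap in -1
(locked, possible waiters) and succeed only if the old value was 1
(unlocked); the count is then repaired to 0 when nobody is queued. A
user-space analogue with C11 atomics (illustrative only; the kernel runs
this under wait_lock, and wait_list_empty() is a hypothetical stand-in):

    #include <stdatomic.h>
    #include <stdbool.h>

    extern bool wait_list_empty(void);      /* hypothetical */

    /* count: 1 = unlocked, 0 = locked/no waiters, <0 = locked/waiters */
    static int trylock_sketch(atomic_int *count)
    {
            int prev = atomic_exchange(count, -1);

            /* A failed attempt may clobber 0 with -1; restore 0 when no
             * one is queued, exactly as the slowpath does above. */
            if (wait_list_empty())
                    atomic_store(count, 0);

            return prev == 1;
    }
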
     872                 :            : 
     873                 :            : /**
     874                 :            :  * mutex_trylock - try to acquire the mutex, without waiting
     875                 :            :  * @lock: the mutex to be acquired
     876                 :            :  *
     877                 :            :  * Try to acquire the mutex atomically. Returns 1 if the mutex
     878                 :            :  * has been acquired successfully, and 0 on contention.
     879                 :            :  *
      880                 :            :  * NOTE: this function follows the spin_trylock() convention, so its
      881                 :            :  * return value is the inverse of down_trylock()'s! Be careful about
      882                 :            :  * this when converting semaphore users to mutexes.
     883                 :            :  *
     884                 :            :  * This function must not be used in interrupt context. The
     885                 :            :  * mutex must be released by the same task that acquired it.
     886                 :            :  */
     887                 :          0 : int __sched mutex_trylock(struct mutex *lock)
     888                 :            : {
     889                 :            :         int ret;
     890                 :            : 
     891                 :     288434 :         ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
     892         [ +  + ]:     288419 :         if (ret)
     893                 :            :                 mutex_set_owner(lock);
     894                 :            : 
     895                 :          0 :         return ret;
     896                 :            : }
     897                 :            : EXPORT_SYMBOL(mutex_trylock);
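
A hedged sketch of the inverted convention just noted (device type and
helper are hypothetical): 1 means acquired, so failure is the falsy
branch, the opposite of down_trylock():

    /* Hypothetical opportunistic path that must never sleep. */
    static void example_kick(struct example_dev *dev)
    {
            if (!mutex_trylock(&dev->lock))
                    return;                 /* 0 == contended, NOT acquired */

            example_do_fast_work(dev);      /* hypothetical */
            mutex_unlock(&dev->lock);
    }
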
     898                 :            : 
     899                 :            : #ifndef CONFIG_DEBUG_LOCK_ALLOC
     900                 :            : int __sched
     901                 :          0 : __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
     902                 :            : {
     903                 :            :         int ret;
     904                 :            : 
     905                 :            :         might_sleep();
     906                 :            : 
     907                 :          0 :         ret = __mutex_fastpath_lock_retval(&lock->base.count);
     908                 :            : 
     909         [ #  # ]:          0 :         if (likely(!ret)) {
     910                 :            :                 ww_mutex_set_context_fastpath(lock, ctx);
     911                 :            :                 mutex_set_owner(&lock->base);
     912                 :            :         } else
     913                 :          0 :                 ret = __ww_mutex_lock_slowpath(lock, ctx);
     914                 :          0 :         return ret;
     915                 :            : }
     916                 :            : EXPORT_SYMBOL(__ww_mutex_lock);
     917                 :            : 
     918                 :            : int __sched
     919                 :          0 : __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
     920                 :            : {
     921                 :            :         int ret;
     922                 :            : 
     923                 :            :         might_sleep();
     924                 :            : 
     925                 :          0 :         ret = __mutex_fastpath_lock_retval(&lock->base.count);
     926                 :            : 
     927         [ #  # ]:          0 :         if (likely(!ret)) {
     928                 :            :                 ww_mutex_set_context_fastpath(lock, ctx);
     929                 :            :                 mutex_set_owner(&lock->base);
     930                 :            :         } else
     931                 :          0 :                 ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx);
     932                 :          0 :         return ret;
     933                 :            : }
     934                 :            : EXPORT_SYMBOL(__ww_mutex_lock_interruptible);
     935                 :            : 
     936                 :            : #endif
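
These two wrappers back the public ww_mutex_lock() and
ww_mutex_lock_interruptible() API. A hedged sketch of the acquire/backoff
protocol, with a hypothetical lock class; error handling is trimmed to
the common case and a single back-off, where real callers loop:

    static DEFINE_WW_CLASS(example_ww_class);      /* hypothetical */

    static int example_lock_pair(struct ww_mutex *a, struct ww_mutex *b)
    {
            struct ww_acquire_ctx ctx;
            int ret;

            ww_acquire_init(&ctx, &example_ww_class);

            ww_mutex_lock(a, &ctx);         /* assume uncontended, for brevity */
            ret = ww_mutex_lock(b, &ctx);
            if (ret == -EDEADLK) {
                    /* An older context owns b: drop a, sleep on b with the
                     * _slow variant, then take a again. */
                    ww_mutex_unlock(a);
                    ww_mutex_lock_slow(b, &ctx);
                    ret = ww_mutex_lock(a, &ctx);
                    if (ret) {
                            /* Can fail again in principle; a real caller
                             * retries. This sketch just gives up. */
                            ww_mutex_unlock(b);
                            ww_acquire_fini(&ctx);
                            return ret;
                    }
            }
            ww_acquire_done(&ctx);

            /* ... both held; later ww_mutex_unlock() each, then
             * ww_acquire_fini(&ctx). */
            return 0;
    }
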
     937                 :            : 
     938                 :            : /**
     939                 :            :  * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
      940                 :            :  * @cnt: the atomic counter to decrement
      941                 :            :  * @lock: the mutex to return holding if the count drops to 0
      942                 :            :  *
      943                 :            :  * Return 1 and hold @lock if the decrement hits 0; return 0 otherwise.
     944                 :            :  */
     945                 :          0 : int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
     946                 :            : {
     947                 :            :         /* dec if we can't possibly hit 0 */
     948         [ #  # ]:          0 :         if (atomic_add_unless(cnt, -1, 1))
     949                 :            :                 return 0;
     950                 :            :         /* we might hit 0, so take the lock */
     951                 :          0 :         mutex_lock(lock);
     952         [ #  # ]:          0 :         if (!atomic_dec_and_test(cnt)) {
     953                 :            :                 /* when we actually did the dec, we didn't hit 0 */
     954                 :          0 :                 mutex_unlock(lock);
     955                 :          0 :                 return 0;
     956                 :            :         }
     957                 :            :         /* we hit 0, and we hold the lock */
     958                 :            :         return 1;
     959                 :            : }
     960                 :            : EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
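
A hedged usage sketch (the object type, list membership and registry_lock
are hypothetical): the classic pattern is dropping a reference where only
the final put must tear down shared state under a lock, so non-final puts
avoid taking the mutex at all:

    /* Hypothetical release path. */
    static void example_put(struct example_obj *obj)
    {
            if (!atomic_dec_and_mutex_lock(&obj->refcnt, &registry_lock))
                    return;                 /* not the last reference */

            /* We hit 0 and hold registry_lock. */
            list_del(&obj->node);
            mutex_unlock(&registry_lock);
            kfree(obj);
    }
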

Generated by: LCOV version 1.9