#define pr_fmt(fmt) "%s: " fmt "\n", __func__

#include <linux/kernel.h>
#include <linux/percpu-refcount.h>

/*
 * Initially, a percpu refcount is just a set of percpu counters. In this mode
 * we don't try to detect the ref hitting 0 - which means that get/put can just
 * increment or decrement the local counter. Note that the counter on a
 * particular cpu can (and will) wrap - this is fine, when we go to shutdown the
 * percpu counters will all sum to the correct value
 *
 * (More precisely: because modular arithmetic is commutative the sum of all the
 * pcpu_count vars will be equal to what it would have been if all the gets and
 * puts were done to a single integer, even if some of the percpu integers
 * overflow or underflow; see the worked example below).
 *
 * The real trick to implementing percpu refcounts is shutdown. We can't detect
 * the ref hitting 0 on every put - this would require global synchronization
 * and defeat the whole purpose of using percpu refs.
 *
 * What we do is require the user to keep track of the initial refcount; we know
 * the ref can't hit 0 before the user drops the initial ref, so as long as we
 * convert to non percpu mode before the initial ref is dropped everything
 * works.
 *
 * Converting to non percpu mode is done with some RCUish stuff in
 * percpu_ref_kill. Additionally, we need a bias value so that the atomic_t
 * can't hit 0 before we've added up all the percpu refs.
 */
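
/*
 * Worked example of the wraparound claim above (illustrative numbers only,
 * not code in this file): pretend the percpu counters were just 4 bits
 * wide, i.e. all arithmetic is mod 16. If cpu0 sees 3 gets (counter:
 * 0 + 3 = 3) and cpu1 sees 5 puts (counter: 0 - 5 = 11 mod 16), the
 * counters sum to 3 + 11 = 14 = -2 mod 16 - exactly what a single integer
 * would read after 3 gets and 5 puts. Wraparound on any one cpu therefore
 * never corrupts the total.
 */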

#define PCPU_COUNT_BIAS         (1U << 31)

/**
 * percpu_ref_init - initialize a percpu refcount
 * @ref: percpu_ref to initialize
 * @release: function which will be called when refcount hits 0
 *
 * Initializes the refcount in single atomic counter mode with a refcount of 1;
 * analogous to atomic_set(ref, 1).
 *
 * Note that @release must not sleep - it may potentially be called from RCU
 * callback context by percpu_ref_kill().
 */
int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release)
{
        atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);

        ref->pcpu_count = alloc_percpu(unsigned);
        if (!ref->pcpu_count)
                return -ENOMEM;

        ref->release = release;
        return 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_init);
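
/*
 * Illustrative usage sketch (hypothetical names, not part of this file's
 * API): a typical user embeds the percpu_ref in its own object and frees
 * that object from the release callback, which runs once the count hits 0:
 *
 *	struct my_obj {
 *		struct percpu_ref ref;
 *	};
 *
 *	static void my_obj_release(struct percpu_ref *ref)
 *	{
 *		kfree(container_of(ref, struct my_obj, ref));
 *	}
 *
 *	...
 *	if (percpu_ref_init(&obj->ref, my_obj_release))
 *		goto err_free_obj;
 *
 * Hot paths then use percpu_ref_get()/percpu_ref_put(), and shutdown goes
 * through percpu_ref_kill(), which drops the initial ref taken here. Note
 * kfree() is safe in @release since it never sleeps.
 */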

/**
 * percpu_ref_cancel_init - cancel percpu_ref_init()
 * @ref: percpu_ref to cancel init for
 *
 * Once a percpu_ref is initialized, its destruction is initiated by
 * percpu_ref_kill() and completes asynchronously, which can be painful to
 * do when destroying a half-constructed object in an init failure path.
 *
 * This function destroys @ref without invoking @ref->release, and the
 * memory area containing it can be freed immediately on return.  To
 * prevent accidental misuse, it's required that @ref has finished
 * percpu_ref_init(), whether it succeeded or not, but has never been used.
 *
 * The weird name and usage restriction are to prevent people from using
 * this function by mistake for normal shutdown instead of
 * percpu_ref_kill().
 */
void percpu_ref_cancel_init(struct percpu_ref *ref)
{
        unsigned __percpu *pcpu_count = ref->pcpu_count;
        int cpu;

        WARN_ON_ONCE(atomic_read(&ref->count) != 1 + PCPU_COUNT_BIAS);

        if (pcpu_count) {
                for_each_possible_cpu(cpu)
                        WARN_ON_ONCE(*per_cpu_ptr(pcpu_count, cpu));
                free_percpu(ref->pcpu_count);
        }
}
EXPORT_SYMBOL_GPL(percpu_ref_cancel_init);
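
/*
 * Illustrative failure-path sketch (hypothetical names): if a later step of
 * object construction fails after percpu_ref_init() has succeeded but
 * before the ref has been used, percpu_ref_cancel_init() lets the caller
 * tear down synchronously instead of waiting out percpu_ref_kill():
 *
 *	if (percpu_ref_init(&obj->ref, my_obj_release))
 *		goto err_free_obj;
 *	if (some_other_init(obj)) {
 *		percpu_ref_cancel_init(&obj->ref);
 *		goto err_free_obj;
 *	}
 */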

static void percpu_ref_kill_rcu(struct rcu_head *rcu)
{
        struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
        unsigned __percpu *pcpu_count = ref->pcpu_count;
        unsigned count = 0;
        int cpu;

        /*
         * Mask out PCPU_REF_DEAD: percpu_ref_kill_and_confirm() stashed the
         * flag in the low bits of the pcpu_count pointer, so strip the
         * status bits to recover the real percpu address.
         */
        pcpu_count = (unsigned __percpu *)
                (((unsigned long) pcpu_count) & ~PCPU_STATUS_MASK);

        for_each_possible_cpu(cpu)
                count += *per_cpu_ptr(pcpu_count, cpu);

        free_percpu(pcpu_count);

        pr_debug("global %i pcpu %i", atomic_read(&ref->count), (int) count);

        /*
         * It's crucial that we sum the percpu counters _before_ adding the sum
         * to &ref->count; since gets could be happening on one cpu while puts
         * happen on another, adding a single cpu's count could cause
         * @ref->count to hit 0 before we've got a consistent value - but the
         * sum of all the counts will be consistent and correct.
         *
         * Subtracting the bias value then has to happen _after_ adding count to
         * &ref->count; we need the bias value to prevent &ref->count from
         * reaching 0 before we add the percpu counts. But doing it at the same
         * time is equivalent and saves us atomic operations:
         */
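        /*
         * Worked example of the above (illustrative numbers): suppose the
         * percpu counters saw 4 gets and 2 puts, so count sums to 2. The
         * atomic_t was initialized to 1 + PCPU_COUNT_BIAS, and the
         * atomic_add(2 - PCPU_COUNT_BIAS) below leaves it at 3: the initial
         * ref plus the 2 outstanding gets. The bias never let it touch 0
         * while the percpu counts were still unaccounted for.
         */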

        atomic_add((int) count - PCPU_COUNT_BIAS, &ref->count);

        /* @ref is viewed as dead on all CPUs, send out kill confirmation */
        if (ref->confirm_kill)
                ref->confirm_kill(ref);

        /*
         * Now we're in single atomic_t mode with a consistent refcount, so it's
         * safe to drop our initial ref:
         */
        percpu_ref_put(ref);
}

/**
 * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
 * @ref: percpu_ref to kill
 * @confirm_kill: optional confirmation callback
 *
 * Equivalent to percpu_ref_kill() but also schedules kill confirmation if
 * @confirm_kill is not NULL.  @confirm_kill, which may not block, will be
 * called after @ref is seen as dead from all CPUs - all further
 * invocations of percpu_ref_tryget() will fail.  See percpu_ref_tryget()
 * for more details.
 *
 * Due to the way percpu_ref is implemented, @confirm_kill will be called
 * after at least one full RCU grace period has passed but this is an
 * implementation detail and callers must not depend on it.
 */
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
                                 percpu_ref_func_t *confirm_kill)
{
        WARN_ONCE(REF_STATUS(ref->pcpu_count) == PCPU_REF_DEAD,
                  "percpu_ref_kill() called more than once!\n");

        ref->pcpu_count = (unsigned __percpu *)
                (((unsigned long) ref->pcpu_count)|PCPU_REF_DEAD);
        ref->confirm_kill = confirm_kill;

        call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
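
/*
 * Illustrative confirmation sketch (hypothetical names): a caller that must
 * wait until no CPU can take new percpu references - e.g. before draining
 * in-flight work - can pair @confirm_kill with a completion; complete()
 * never blocks, so it is safe in the callback:
 *
 *	static void my_obj_confirm_kill(struct percpu_ref *ref)
 *	{
 *		struct my_obj *obj = container_of(ref, struct my_obj, ref);
 *
 *		complete(&obj->kill_done);
 *	}
 *
 *	init_completion(&obj->kill_done);
 *	percpu_ref_kill_and_confirm(&obj->ref, my_obj_confirm_kill);
 *	wait_for_completion(&obj->kill_done);
 *
 * After the wait returns, percpu_ref_tryget() is guaranteed to fail on
 * every CPU.
 */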